Idea:Yugandhar_Master
If the array contains more than $$$2$$$ elements, then it is always possible to perform the given operation: the array must contain at least $$$2$$$ even numbers or at least $$$2$$$ odd numbers, and in both cases their sum is even ($$$even+even=even$$$, $$$odd+odd=even$$$).
Hence, we can keep applying the operation until the array size becomes less than or equal to $$$2$$$.
If the array size is $$$2$$$, we can perform the operation only if both elements have the same parity. If the array size is $$$1$$$, no operation is possible.
The proof below assumes the array has size at least $$$2$$$.
The minimum size of the array we can obtain is determined by the number of odd elements. If the number of odd elements is odd, then the minimum size is $$$2$$$: the parity of the total sum never changes, and the sum of an odd number of odd elements is odd ($$$\underbrace{odd+odd+\dots+odd}_{odd \ times} = odd$$$, and adding the even elements keeps it odd), so the array always contains an odd element, and in particular the last two remaining elements have different parities and cannot be merged. So the answer is $$$\max(n-k,\ 1+(cntodd \bmod 2))$$$, where $$$cntodd$$$ is the number of odd elements in the array.
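For example (values chosen only for illustration): with $$$n=5$$$, $$$k=4$$$ and $$$cntodd=3$$$ the answer is $$$\max(5-4,\ 1+1)=2$$$, while with $$$cntodd=2$$$ it is $$$\max(1,\ 1)=1$$$.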
Time complexity : $$$O(n)$$$.
Note that you can also solve it by brute-force simulation, with time complexity $$$O(\sum n^2)$$$.
#include <bits/stdc++.h>
using namespace std;
#define ll long long
#define fast \
    ios_base::sync_with_stdio(0); \
    cin.tie(0); \
    cout.tie(0);
int main(){
    fast;
    ll t;
    cin>>t;
    while(t--){
        int n,k;
        cin>>n>>k;
        int cnt=0; // number of odd elements
        for(int i=0;i<n;i++){
            int x;
            cin>>x;
            if(x%2==1) cnt++;
        }
        int val=1;
        if(cnt%2) val=min(n,2); // with an odd count of odd elements the size cannot drop below 2
        cout<<max(n-k,val)<<"\n";
    }
}
Idea:Yugandhar_Master
The maximum $$$MEX$$$ possible over all subarrays is the $$$MEX$$$ of the whole array, because the $$$MEX$$$ can only increase when extra elements are added. Hence, first calculate the $$$MEX$$$ of the whole array, and then use the $$$\underline{two \ pointer \ technique}$$$ to find the $$$\bf{minimum}$$$ length of a subsegment whose $$$MEX$$$ equals this maximum $$$MEX$$$.
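For example (array chosen only for illustration): for $$$a=[0,2,1,0,3]$$$ the $$$MEX$$$ of the whole array is $$$4$$$, and the shortest subsegment containing all of $$$0,1,2,3$$$ is $$$[2,1,0,3]$$$ (positions $$$2$$$ to $$$5$$$), of length $$$4$$$.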
Time Complexity : $$$O(n \log n)$$$, if you use maps or sets to find the $$$MEX$$$.
Time Complexity : $$$O(n)$$$, if you use vectors to find the $$$MEX$$$.
#include <bits/stdc++.h>
using namespace std;
#define ll long long
#define fast \
    ios_base::sync_with_stdio(0); \
    cin.tie(0); \
    cout.tie(0);
int main(){
    fast;
    ll t;
    cin>>t;
    while(t--){
        ll n;
        cin>>n;
        vector<ll> a(n);
        for(ll i=0;i<n;i++) cin>>a[i];
        // count occurrences of values up to n to find the MEX of the whole array
        vector<ll> b(n+1,0);
        for(ll i=0;i<n;i++){
            if(a[i]<=n) b[a[i]]++;
        }
        ll mex=0;
        for(ll i=0;i<=n;i++){
            if(b[i]==0){
                mex=i;
                break;
            }
        }
        // two pointers: shortest window containing every value 0..mex-1
        ll ans=n,l=0;
        vector<ll> v(mex,0);
        ll cnt=0;
        for(ll i=0;i<n;i++){
            if(a[i]<mex){
                if(v[a[i]]==0) cnt++;
                v[a[i]]++;
            }
            while(l<=i && cnt==mex){
                ans=min(ans,(i-l+1));
                if(a[l]<mex){
                    if(v[a[l]]==1) cnt--;
                    v[a[l]]--;
                }
                l++;
            }
        }
        cout<<ans<<"\n";
    }
}
Idea:Yugandhar_Master
The given operation is nothing but the ability to $$$rearrange$$$ the array however you wish.
So now the problem is: rearrange the array to minimize the value of $$$f(b)$$$.
Let $$$maxi$$$ be the maximum element of $$$a$$$ and $$$mini$$$ be the minimum element of $$$a$$$. It is easy to see that either $$$maxi$$$ and $$$mini$$$ both end up in the same set ($$$S_1(b)$$$ or $$$S_2(b)$$$), or one of them is in $$$S_1(b)$$$ and the other in $$$S_2(b)$$$.
Let's look at what actually happens with $$$f(b)$$$.
To minimize the value of $$$f(b)$$$ we have to minimize $$$R(S_1(b)) \cdot R(S_2(b))$$$ (directly from the definition), i.e., we have to make at least one of $$$R(S_1(b))$$$ and $$$R(S_2(b))$$$ small.
Let's sort the array and traverse it to find the minimum value of $$$f(b)$$$.
Which elements should go together so that one of the ranges becomes small? For a given $$$a_{i}$$$ $$$(1 \le i \le \frac{n}{2})$$$, taking the contiguous block $$$(a_i,a_{i+1},\dots,a_{i+\frac{n}{2}-1})$$$ of the sorted array as either $$$S_1$$$ or $$$S_2$$$ and the remaining elements as the other set gives the smallest possible $$$f(b)$$$ among the splits starting at $$$a_i$$$. Hence, for every $$$i$$$ $$$(1 \le i \le \frac{n}{2})$$$ compute the value of $$$f(b)$$$ this split gives, choose the split with the minimum value, and construct the resulting array accordingly.
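For example (values chosen only for illustration): for the sorted array $$$[1,2,5,9]$$$, the split $$$\{1,2\}$$$ vs $$$\{5,9\}$$$ gives $$$R(S_1)\cdot R(S_2)=(2-1)\cdot(9-5)=4$$$, while the split $$$\{2,5\}$$$ vs $$$\{1,9\}$$$ gives $$$(5-2)\cdot(9-1)=24$$$, so the first split is the one to keep.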
See the implementation below for better clarity.
Time complexity : $$$O(n \log n)$$$.
#include <bits/stdc++.h>
using namespace std;
#define ll long long
#define fast \
    ios_base::sync_with_stdio(0); \
    cin.tie(0); \
    cout.tie(0);
int main(){
    fast;
    ll t;
    cin>>t;
    while(t--){
        ll n;
        cin>>n;
        vector<ll> a(n);
        for(ll i=0;i<n;i++) cin>>a[i];
        if(n==2) cout<<a[0]<<" "<<a[1]<<"\n";
        else{
            sort(a.begin(),a.end());
            // candidate ind=0: prefix block vs suffix block
            ll val1=(a[n-1]-a[0]),ind=0,ans=(a[(n/2)-1]-a[0])*(a[n-1]-a[(n/2)]);
            // candidates ind>=1: a middle block; its complement spans the full range val1
            for(ll i=1;i<(n/2);i++){
                ll res=val1*(a[(n/2)+i-1]-a[i]);
                if(res<ans){
                    ans=res;
                    ind=i;
                }
            }
            // the chosen block goes to positions 0,2,4,..., the remaining elements to positions 1,3,5,...
            vector<ll> b(n);
            ll i1=0;
            for(ll i=ind;i<ind+(n/2);i++){
                b[i1]=a[i];
                i1+=2;
            }
            i1=1;
            for(ll i=0;i<ind;i++){
                b[i1]=a[i];
                i1+=2;
            }
            for(ll i=ind+(n/2);i<n;i++){
                b[i1]=a[i];
                i1+=2;
            }
            for(auto i:b) cout<<i<<" ";
            cout<<"\n";
        }
    }
}
Idea:Yugandhar_Master
Let's first try to solve the problem for a single $$$m$$$.
The given $$$XOR$$$ operation is nothing but flipping characters, so we can rephrase the statement as: flip $$$m$$$ characters of $$$S$$$ to get the maximum number of substrings equal to $$$"11"$$$.
To increase the number of $$$"11"$$$ substrings it is optimal to get more $$$1$$$'s in $$$T$$$, so we should flip characters of $$$S$$$ that are equal to $$$0$$$.
First, let $$$m \le cnt0$$$, where $$$cnt0$$$ is the number of $$$0$$$'s in $$$S$$$. Consider two possible patterns in $$$S$$$:
$$$1.$$$ $$$....1001....$$$
$$$2.$$$ $$$....101....$$$
If you change a single $$$0$$$ to $$$1$$$ in the $$$1^{st}$$$ case, the answer will increase by $$$1$$$, but in the second case the answer will increase by $$$2$$$ (easy to see, right?).
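Concretely: flipping one $$$0$$$ in $$$1001 \rightarrow 1101$$$ creates one new $$$"11"$$$ substring, while flipping the $$$0$$$ in $$$101 \rightarrow 111$$$ creates two.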
This implies that it is always optimal to change $$$0$$$'s to $$$1$$$'s in the block where the number of contiguous $$$0$$$'s is minimum.
The cases $$$....11000....$$$ and $$$....00011....$$$ are similar: there, changing a single $$$0$$$ to $$$1$$$ increases the answer by $$$1$$$.
Note the case $$$....0000....$$$, i.e., all characters are $$$0$$$: here we initially have to change at least two $$$0$$$'s before the answer starts to increase.
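For example, for $$$S=0000$$$ the first flip gives $$$1000$$$, which still has no $$$"11"$$$ substring; only a second, adjacent flip ($$$1100$$$) creates the first one.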
What if $$$m > cnt0$$$ ?
Yes, now we also have to turn some of the initial $$$1$$$'s of $$$S$$$ into $$$0$$$'s, and we want this to decrease the answer as little as possible (initial $$$1$$$'s of $$$S$$$ means $$$1$$$'s that were not obtained by flipping).
Again, let's consider two possible patterns in $$$S$$$.
$$$1.$$$ $$$....1(11)1....$$$
$$$2.$$$ $$$....(11)11....$$$
The $$$1$$$'s inside the brackets are the initial $$$1$$$'s of $$$S$$$; changing one of them to $$$0$$$ in the first case decreases the answer by $$$2$$$, but in the second case the answer decreases only by $$$1$$$ (easy to see, right?).
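Concretely: $$$1111$$$ contains three $$$"11"$$$ substrings; turning an inner $$$1$$$ into $$$0$$$ ($$$1011$$$ or $$$1101$$$) leaves only one, a loss of $$$2$$$, while turning the first $$$1$$$ into $$$0$$$ ($$$0111$$$) leaves two, a loss of $$$1$$$.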
This implies that it is optimal to change initial $$$1$$$'s to $$$0$$$ in the segment where $$$S$$$ contains the most $$$1$$$'s.
The cases $$$....1000....$$$ and $$$....00011....$$$ are similar: there, changing a single $$$1$$$ to $$$0$$$ decreases the answer by $$$1$$$.
We can precompute the answers for all $$$m$$$ from the above observations. (Yeah, I know the implementation is clumsy, but smarter implementations are welcome.)
So, Time Complexity : $$$O(n)$$$.
#include <bits/stdc++.h>
using namespace std;
#define ll long long
#define fast \
    ios_base::sync_with_stdio(0); \
    cin.tie(0); \
    cout.tie(0);
int main(){
    fast;
    ll t;
    cin>>t;
    while(t--){
        ll n;
        cin>>n;
        string s;
        cin>>s;
        vector<ll> v1,v0;
        ll ex1=0,ex0=0;
        ll ind1=-1,ind2=-1;
        // ind1/ind2: positions of the first and last '1'
        for(ll i=0;i<n;i++){
            if(s[i]=='1') ind2=i;
        }
        for(ll i=n-1;i>=0;i--){
            if(s[i]=='1') ind1=i;
        }
        if(ind1!=-1){
            // lengths of maximal '0' blocks strictly between the first and last '1'
            ll cnt=0;
            for(ll i=ind1;i<=ind2;i++){
                if(s[i]=='0') cnt++;
                else{
                    if(cnt>0) v0.push_back(cnt);
                    cnt=0;
                }
            }
            sort(v0.begin(),v0.end());
            for(ll i=1;i<(ll)v0.size();i++) v0[i]+=v0[i-1];
            ex0=(ind1+(n-1-ind2)); // '0's outside the first/last '1'
        }
        else ex0=n;
        ind1=-1;
        ind2=-1;
        // the same, with the roles of '0' and '1' swapped
        for(ll i=0;i<n;i++){
            if(s[i]=='0') ind2=i;
        }
        for(ll i=n-1;i>=0;i--){
            if(s[i]=='0') ind1=i;
        }
        if(ind1!=-1){
            ll cnt=0;
            for(ll i=ind1;i<=ind2;i++){
                if(s[i]=='1') cnt++;
                else{
                    if(cnt>0) v1.push_back(cnt);
                    cnt=0;
                }
            }
            sort(v1.begin(),v1.end());
            for(ll i=1;i<(ll)v1.size();i++) v1[i]+=v1[i-1];
            ex1=(ind1+(n-1-ind2));
        }
        else ex1=n;
        ll n0=ex0,n1=ex1; // n0, n1: total number of '0's and '1's in s
        if((ll)v0.size()>0) n0+=v0.back();
        if((ll)v1.size()>0) n1+=v1.back();
        // res0[i]: pairs gained by optimally flipping i zeros (+1 per flip, +1 extra when a whole 0-block between 1's is completed); res1 is symmetric
        vector<ll> res0(n0+1),res1(n1+1);
        ll j=0;
        for(ll i=1;i<=n0;i++){
            res0[i]=res0[i-1]+1;
            if(j<(ll)v0.size() && i==v0[j]){
                res0[i]++;
                j++;
            }
        }
        j=0;
        for(ll i=1;i<=n1;i++){
            res1[i]=res1[i-1]+1;
            if(j<(ll)v1.size() && i==v1[j]){
                res1[i]++;
                j++;
            }
        }
        ll cnt0=0,cnt1=0; // cnt0: "11" pairs in s, cnt1: "00" pairs in s
        for(ll i=0;i<n-1;i++){
            if(s[i]=='1' && s[i+1]=='1') cnt0++;
            if(s[i]=='0' && s[i+1]=='0') cnt1++;
        }
        for(ll i=0;i<=n;i++){
            ll k=i;
            if(n0==n) cout<<max((ll)0,k-1)<<" ";        // the string is all zeros
            else if(n1==n) cout<<max((ll)0,n-k-1)<<" "; // the string is all ones
            else if(k<=n0) cout<<cnt0+res0[k]<<" ";     // all k flips can be 0 -> 1
            else{
                k=n-k; // k > n0: every zero is flipped; handle the forced 1 -> 0 flips symmetrically
                cout<<cnt1+res1[k]<<" ";
            }
        }
        cout<<"\n";
    }
}
Idea:Yugandhar_Master
It is quite difficult to find the optimal operations for changing $$$a$$$ into $$$b$$$, but it is easier to change $$$b$$$ into $$$a$$$ (i.e., to reverse the operations).
Now the problem becomes: choose any $$$i$$$ $$$(0 \le i \le n-1)$$$, take the $$$k$$$ elements to the left of $$$b_i$$$ and the $$$k$$$ elements to the right of $$$b_i$$$, and subtract their sum from $$$b_i$$$.
It is easy to see that every element contributes to the operations of the $$$k$$$ elements on its left and the $$$k$$$ elements on its right, so it is optimal to operate on the largest element of $$$b$$$ first; otherwise this maximum element would later be subtracted from some smaller element and push it below $$$0$$$ ($$$(less \ than \ the \ maximum) - maximum < 0$$$).
In a single step we can decrease the chosen element as much as possible (applying the operation to it repeatedly), so every element needs to be processed at most $$$O(\log b_i)$$$ times.
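Here is a short argument for the logarithmic bound (the symbol $$$s$$$ is introduced only for this explanation): let $$$s$$$ be the current sum of the $$$2k$$$ elements around $$$b_i$$$; one maximal step leaves roughly $$$b_i \bmod s$$$, and whenever $$$b_i \ge s$$$ we have $$$b_i \bmod s \le \min(s-1,\ b_i-s) \le \frac{b_i-1}{2}$$$, so the value of $$$b_i$$$ at least halves each time it is processed.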
We can use a $$$BIT$$$ ($$$Fenwick \ tree$$$) to handle the operations on segments (i.e., range sum query, point update).
Time complexity : $$$O(n \log n \log(M/k))$$$, where $$$M$$$ is the maximum element of the array $$$b$$$.
#include <bits/stdc++.h>
using namespace std;
#define ll long long
#define fast \
    ios_base::sync_with_stdio(0); \
    cin.tie(0); \
    cout.tie(0);
ll N,K;
// Fenwick tree (BIT): point update, prefix and range sum queries
template <typename T>
struct BIT {
    ll N;
    vector<T> tree;
    BIT(ll n_) : N(n_+1) {
        tree.resize(N);
    }
    void update(ll ind,T val) {
        for(++ind;ind<N;ind+=ind&-ind) tree[ind]+=val;
    }
    T query(ll in) {
        T sum=0;
        for(++in;in>0;in-=in&-in) sum+=tree[in];
        return sum;
    }
    T query(ll l, ll r) {
        return query(r)-query(l-1);
    }
};
int main(){
    fast;
    ll t;
    cin>>t;
    while(t--){
        cin>>N>>K;
        vector<ll> A(N),B(N);
        for(ll i=0;i<N;i++) cin>>A[i];
        for(ll i=0;i<N;i++) cin>>B[i];
        BIT<ll> Bit(N);
        for(ll i=0;i<N;++i)
            Bit.update(i,B[i]);
        ll sum = Bit.query(0,N-1);
        // circular range sum of B over positions l..r
        auto get = [&](ll l, ll r) {
            if(l>r) {
                if(r+1>l-1)
                    return sum;
                return sum-Bit.query(r+1,l-1);
            }
            return Bit.query(l,r);
        };
        if(K==0){
            cout<<(A!=B?-1:0)<<"\n";
        }
        else{
            auto index = [&](ll x) {
                x%=N;
                if(x<0)
                    x+=N;
                if(x>=N)
                    x-=N;
                return x;
            };
            vector<ll> ps(N+1);
            priority_queue<pair<ll, ll>> pq; // process positions with the largest current B value first
            for(ll i=0;i<N;++i) if(A[i]!=B[i]){
                pq.emplace(B[i],i);
            }
            ll ops=0;
            while(!pq.empty()){
                ll y=pq.top().first;
                ll p=pq.top().second;
                pq.pop();
                auto F = get(index(p-K),index(p+K)); // F - y is the sum of the other elements in the window of length 2K+1 around p
                if(A[p]>y%(F-y) && A[p]%(F-y)!=y%(F-y)) {
                    break;
                }
                B[p]=max((ll)A[p],y%(F-y)); // decrease B[p] as much as possible in one batch of operations
                ops+=(y-B[p])/(F-y);
                Bit.update(p,B[p]-y);
                sum+=B[p]-y;
                if(B[p]==y) {
                    assert(A[p]!=B[p]);
                    break;
                }
                if(B[p]<A[p])
                    break;
                if(A[p]!=B[p])
                    pq.emplace(B[p],p);
            }
            if(A==B) cout<<ops<<"\n";
            else cout<<"-1\n";
        }
    }
}
F2: Award from Wuhudsm(Hard Version)
Idea:Yugandhar_Master
The number of queries is not the real difficulty of the problem: it is always optimal to add at most a single edge between two nodes, and the remaining unnecessary edges can be placed however you wish. So we can find the optimal answer for every number $$$k$$$ $$$(1 \le k \le n-1)$$$ of edges added to the graph, and then answer every query in $$$O(1)$$$ time.
Let's solve the problem using $$$Dynamic \ Programming$$$.
Let $$$dp1[i][j]$$$ be the maximum number of coins obtained when $$$j$$$ edges are chosen among the first $$$i+1$$$ nodes, given that the $$$i^{th}$$$ edge is chosen.
Let $$$dp2[i][j]$$$ be the maximum number of coins obtained when $$$j$$$ edges are chosen among the first $$$i+1$$$ nodes, given that the $$$i^{th}$$$ edge is not chosen.
$$$1.$$$ $$$dp1[i][j] = \max_{k=1}^i(dp2[k][j-i+k]+d[k][i])$$$, where $$$d[k][i]$$$ denotes the maximum coins obtained by adding every edge from the $$$k^{th}$$$ node to the $$$i^{th}$$$ node.
$$$2.$$$ $$$dp2[i][j] = \max(dp2[i-1][j],dp1[i-1][j])$$$.
This brute-force $$$DP$$$ solves the problem in $$$O(n^3)$$$, which is too slow for both versions. Let's optimize it for the easy version first.
Note that $$$dp1[i][j]$$$ depends only on the values $$$dp2[k][j-i+k]$$$, i.e., on states with the same difference $$$i-j = k-(j-i+k)$$$. So precompute the values of $$$d[k][i]$$$ and maintain the $$$dp2$$$ values for a fixed difference on a segment tree, so that the maximum of $$$dp2[k][j-i+k]+d[k][i]$$$ can be obtained in $$$O(\log n)$$$ time. With this optimization we can solve the Easy Version of the problem with Time Complexity : $$$O(n^{2}\log n)$$$.
For solving the Hard Version we need another classical data structure trick that performs the above operations faster.
Since we are only adding positive integers to the segment tree, there will only be merging of segments and not splitting of segments. Using a disjoint set data structure, we can maintain which segment each number belongs to in a reasonable amount of time.
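As a minimal sketch of this classical trick (the struct name and the tiny interface below are illustrative only, not taken from the solution): keep a parent array where $$$par[i]$$$ points to the first position at or after $$$i$$$ that is still alive, so merging a position into the segment on its right is a single parent redirection, and repeated "jump to the next alive position" queries are almost constant amortized thanks to path compression.
#include <bits/stdc++.h>
using namespace std;
// Illustrative only: positions 0..n form segments that can only merge, never split.
// par[i] = smallest alive position >= i; killing position i merges it into the
// segment on its right.
struct NextAlive {
    vector<int> par;
    NextAlive(int n) : par(n + 1) { iota(par.begin(), par.end(), 0); }
    int find(int u) {                    // first alive position >= u, with path compression
        return par[u] == u ? u : par[u] = find(par[u]);
    }
    void kill(int u) { par[u] = u + 1; } // merge position u into the next segment
};
int main() {
    NextAlive d(10);
    d.kill(3); d.kill(4);
    cout << d.find(3) << "\n";           // prints 5: positions 3 and 4 have been merged away
}
The find function and the parent redirections in push/update of the solution below follow this same pattern.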
With this optimization we can solve the problem in Time Complexity : $$$O(nm \cdot invack(n))$$$, where $$$invack(n)$$$ stands for the inverse Ackermann function from the union-find complexity.
#include <bits/stdc++.h>
using namespace std;
#define ll long long
#define fast \
    ios_base::sync_with_stdio(0); \
    cin.tie(0); \
    cout.tie(0);
ll n,m,maxi;
vector<vector<pair<ll,ll>>> adj(10010);
vector<ll> dp(10010),ans(10010),par(10010);
ll dp1[10010];
// DSU with path compression: par[u] points towards the next alive position
ll find(ll u){
    if(par[u]==u) return u;
    return par[u]=find(par[u]);
}
void push(ll cur,ll val){
    if(val<=maxi){
        par[cur]=cur+1;
        return;
    }
    dp1[cur]=val-maxi;
    maxi=val;
}
void update(ll cur,ll p,ll val){
    if(find(1)>p) return;
    ll u=find(p+1),v;
    if(u>cur){
        maxi+=val;
        return;
    }
    dp1[u]-=val;
    while(dp1[u]<=0){
        v=find(u+1);
        if(v>cur){
            maxi-=dp1[u];
            par[u]=v;
            return;
        }
        dp1[v]+=dp1[u];
        par[u]=v;
        u=v;
    }
}
int main(){
    fast;
    ll t;
    cin>>t;
    while(t--){
        ll q;
        cin>>n>>m>>q;
        n--;
        for(ll i=1;i<=n;i++){
            adj[i].clear();
            dp[i]=0;
            ans[i]=0;
        }
        for(ll i=0;i<m;i++){
            ll u,v,w;
            cin>>u>>v>>w;
            if(u>v) swap(u,v);
            v--;
            adj[v].push_back({u,w});
        }
        dp[0]=0;
        for(ll i=1;i<=n;i++){
            dp[i]=dp[i-1];
            for(auto j:adj[i]) dp[i]+=j.second;
        }
        for(ll i=0;i<n;i++){
            ans[n-i]=dp[n];
            for(ll j=n;j>0;j--){
                dp[j]=dp[j-1];
            }
            dp[0]=-1e18;
            for(ll i1=1;i1<=i+1;i1++) par[i1]=i+2;
            for(ll i1=i+2;i1<=n+1;i1++) par[i1]=i1;
            fill(dp1,dp1+n+1,0);
            maxi=-1e9;
            for(ll i1=i+2;i1<=n;i1++){
                push(i1,dp[i1-1]);
                for(auto j:adj[i1]) if(j.first>=i+2) update(i1,j.first,j.second);
                dp[i1]=max(dp[i1],maxi);
            }
        }
        // all answers were precomputed in ans[]; each query is O(1)
        while(q--){
            ll kk;
            cin>>kk;
            kk=min(kk,n);
            cout<<ans[kk]<<"\n";
        }
    }
}