All ideas belong to MikeMirzayanov.

**Tutorial**

Tutorial is loading...

**Solution**

```
#include <bits/stdc++.h>
using namespace std;
int main() {
#ifdef _DEBUG
    freopen("input.txt", "r", stdin);
// freopen("output.txt", "w", stdout);
#endif
    int tests;
    cin >> tests;
    while (tests--) {
        int n;
        cin >> n;
        vector<int> a(n);
        for (auto &x : a) cin >> x;
        sort(a.begin(), a.end());
        // After sorting, the multiset is feasible iff no two neighbouring
        // values differ by more than 1.
        bool gapFound = adjacent_find(a.begin(), a.end(),
                                      [](int lo, int hi) { return hi - lo > 1; }) != a.end();
        cout << (gapFound ? "NO" : "YES") << endl;
    }
    return 0;
}
```

**Tutorial**

Tutorial is loading...

**Solution**

```
#include <bits/stdc++.h>
using namespace std;
int main() {
#ifdef _DEBUG
    freopen("input.txt", "r", stdin);
// freopen("output.txt", "w", stdout);
#endif
    int tests;
    cin >> tests;
    while (tests--) {
        int n;
        cin >> n;
        vector<int> a(n), b(n);
        for (auto &x : a) cin >> x;
        for (auto &x : b) cin >> x;
        // Targets are the per-array minima; each position needs the larger
        // of its two gaps, since one step can shrink both gaps at once.
        const int baseA = *min_element(a.begin(), a.end());
        const int baseB = *min_element(b.begin(), b.end());
        long long moves = 0;
        for (int i = 0; i < n; ++i)
            moves += max(a[i] - baseA, b[i] - baseB);
        cout << moves << endl;
    }
    return 0;
}
```

**Tutorial**

Tutorial is loading...

**Solution**

```
#include <bits/stdc++.h>
using namespace std;
int main() {
#ifdef _DEBUG
    freopen("input.txt", "r", stdin);
// freopen("output.txt", "w", stdout);
#endif
    int tests;
    cin >> tests;
    while (tests--) {
        int n;
        cin >> n;
        // cnt[v] = number of people with weight v (weights lie in [1, n]).
        vector<int> cnt(n + 1);
        for (int i = 0; i < n; ++i) {
            int x;
            cin >> x;
            ++cnt[x];
        }
        int best = 0;
        // Try every possible team weight s and count how many disjoint
        // pairs summing to s can be formed.
        for (int s = 2; s <= 2 * n; ++s) {
            int teams = 0;
            // Pair distinct weights lo < hi with lo + hi == s.
            for (int lo = 1, hi = s - 1; lo < hi; ++lo, --hi) {
                if (hi <= n) teams += min(cnt[lo], cnt[hi]);
            }
            // Equal-weight pairs are possible only when s is even.
            if (s % 2 == 0) teams += cnt[s / 2] / 2;
            best = max(best, teams);
        }
        cout << best << endl;
    }
    return 0;
}
```

1399D - Binary String To Subsequences

**Tutorial**

Tutorial is loading...

**Solution**

```
#include <bits/stdc++.h>
using namespace std;
int main() {
#ifdef _DEBUG
    freopen("input.txt", "r", stdin);
// freopen("output.txt", "w", stdout);
#endif
    int tests;
    cin >> tests;
    while (tests--) {
        int n;
        string s;
        cin >> n >> s;
        // free0 / free1: ids of subsequences whose last character is '0' / '1'.
        vector<int> free0, free1, answer(n);
        for (int i = 0; i < n; ++i) {
            // A '0' can extend a subsequence ending in '1' and vice versa.
            vector<int> &donors = (s[i] == '0') ? free1 : free0;
            vector<int> &receivers = (s[i] == '0') ? free0 : free1;
            int id;
            if (donors.empty()) {
                // Nothing can absorb this character: open a new subsequence.
                id = int(free0.size() + free1.size());
            } else {
                id = donors.back();
                donors.pop_back();
            }
            receivers.push_back(id);
            answer[i] = id;
        }
        cout << free0.size() + free1.size() << endl;
        for (auto it : answer) cout << it + 1 << " ";
        cout << endl;
    }
    return 0;
}
```

1399E1 - Weights Division (easy version)

**Tutorial**

Tutorial is loading...

**Solution**

```
#include <bits/stdc++.h>
using namespace std;
// w[i]   - current weight of edge i (halved in place by the greedy loop in main);
// cnt[i] - number of root-to-leaf paths that pass through edge i (filled by dfs).
vector<int> w, cnt;
// g[v] - adjacency list of vertex v as {neighbour, edge index} pairs.
vector<vector<pair<int, int>>> g;
// Reduction of the total path-weight sum achieved by halving edge i once:
// every one of the cnt[i] paths through it loses (w - w/2).
long long getdiff(int i) {
    long long gainPerPath = w[i] - w[i] / 2;
    return gainPerPath * cnt[i];
}
// Fills cnt[id] with the number of leaves below edge `id`, i.e. the number of
// root-to-leaf paths that use that edge. `p` is the index of the edge that
// connects v to its parent, or -1 at the root.
void dfs(int v, int p = -1) {
    // A degree-1 vertex is a leaf contributing one path to its parent edge.
    // The p != -1 guard is essential: if the root itself has degree 1
    // (e.g. n == 2), the original code wrote cnt[-1] - an out-of-bounds
    // access and undefined behaviour.
    if (p != -1 && g[v].size() == 1) cnt[p] = 1;
    for (auto [to, id] : g[v]) {
        if (id == p) continue;
        dfs(to, id);
        if (p != -1) cnt[p] += cnt[id];
    }
}
int main() {
#ifdef _DEBUG
    freopen("input.txt", "r", stdin);
// freopen("output.txt", "w", stdout);
#endif
    int t;
    cin >> t;
    while (t--) {
        int n;
        long long s;
        cin >> n >> s;
        w = cnt = vector<int>(n - 1);
        g = vector<vector<pair<int, int>>>(n);
        for (int i = 0; i < n - 1; ++i) {
            int x, y;
            cin >> x >> y >> w[i];
            --x, --y;
            g[x].push_back({y, i});
            g[y].push_back({x, i});
        }
        dfs(0);
        // st keeps every edge keyed by the gain of halving it once;
        // cur is the current total sum over all root-to-leaf paths.
        set<pair<long long, int>> st;
        long long cur = 0;
        for (int i = 0; i < n - 1; ++i) {
            st.insert({getdiff(i), i});
            cur += w[i] * 1ll * cnt[i];
        }
        // NOTE: removed leftover debug output `cerr << cur << endl;`.
        int ans = 0;
        // Greedily halve the edge with the largest reduction until cur <= s.
        while (cur > s) {
            int id = st.rbegin()->second;
            st.erase(prev(st.end()));
            cur -= getdiff(id);
            w[id] /= 2;
            st.insert({getdiff(id), id});
            ++ans;
        }
        cout << ans << endl;
    }
    return 0;
}
```

1399E2 - Weights Division (hard version)

**Tutorial**

Tutorial is loading...

**Solution**

```
#include <bits/stdc++.h>
using namespace std;
// Sentinel meaning "no feasible answer found yet".
const int INF = 1e9;
// Number of vertices in the current test case.
int n;
// w[i]   - current weight of edge i (halved in place by get());
// c[i]   - cost of one halving operation on edge i (1 or 2);
// cnt[i] - number of root-to-leaf paths that pass through edge i (filled by dfs).
vector<int> w, c, cnt;
// g[v] - adjacency list of vertex v as {neighbour, edge index} pairs.
vector<vector<pair<int, int>>> g;
// Reduction of the total path-weight sum achieved by halving edge i once:
// every one of the cnt[i] paths through it loses (w - w/2).
long long getdiff(int i) {
    long long gainPerPath = w[i] - w[i] / 2;
    return gainPerPath * cnt[i];
}
// Fills cnt[id] with the number of leaves below edge `id`, i.e. the number of
// root-to-leaf paths that use that edge. `p` is the index of the edge that
// connects v to its parent, or -1 at the root.
void dfs(int v, int p = -1) {
    // A degree-1 vertex is a leaf contributing one path to its parent edge.
    // The p != -1 guard is essential: if the root itself has degree 1
    // (e.g. n == 2), the original code wrote cnt[-1] - an out-of-bounds
    // access and undefined behaviour.
    if (p != -1 && g[v].size() == 1) cnt[p] = 1;
    for (auto [to, id] : g[v]) {
        if (id == p) continue;
        dfs(to, id);
        if (p != -1) cnt[p] += cnt[id];
    }
}
// Builds the schedule of achievable sums using only edges of cost `clr`:
// res[k] = total contribution of those edges after k greedy halvings,
// always halving the edge with the currently largest reduction.
// Mutates the global w[] for the edges of this cost class.
vector<long long> get(int clr) {
    set<pair<long long, int>> candidates;
    long long total = 0;
    for (int i = 0; i < n - 1; ++i) {
        if (c[i] != clr) continue;
        candidates.insert({getdiff(i), i});
        total += w[i] * 1ll * cnt[i];
    }
    vector<long long> res{total};
    while (total > 0 && !candidates.empty()) {
        // The stored key equals getdiff(id): w[id] has not changed since
        // this entry was inserted.
        auto best = prev(candidates.end());
        auto [gain, id] = *best;
        candidates.erase(best);
        total -= gain;
        res.push_back(total);
        w[id] /= 2;
        candidates.insert({getdiff(id), id});
    }
    return res;
}
int main() {
#ifdef _DEBUG
    freopen("input.txt", "r", stdin);
// freopen("output.txt", "w", stdout);
#endif
    int tests;
    cin >> tests;
    while (tests--) {
        long long s;
        cin >> n >> s;
        w = c = cnt = vector<int>(n - 1);
        g = vector<vector<pair<int, int>>>(n);
        for (int i = 0; i < n - 1; ++i) {
            int u, v;
            cin >> u >> v >> w[i] >> c[i];
            --u, --v;
            g[u].emplace_back(v, i);
            g[v].emplace_back(u, i);
        }
        dfs(0);
        // v1[i]: remaining cost-1 sum after i halvings (total cost i);
        // v2[j]: remaining cost-2 sum after j halvings (total cost 2j).
        vector<long long> v1 = get(1), v2 = get(2);
        // Two pointers: both sequences are non-increasing, so as i grows
        // the number of cost-2 halvings needed can only shrink.
        int pos = int(v2.size()) - 1;
        int ans = INF;
        for (int i = 0; i < int(v1.size()); ++i) {
            while (pos > 0 && v1[i] + v2[pos - 1] <= s) --pos;
            if (v1[i] + v2[pos] <= s) ans = min(ans, i + pos * 2);
        }
        cout << ans << endl;
    }
    return 0;
}
```

1399F - Yet Another Segments Subset

**Tutorial**

Tutorial is loading...

**Solution**

```
#include <bits/stdc++.h>
using namespace std;
// rg[l] - compressed right endpoints of all given segments whose left endpoint is l.
vector<vector<int>> rg;
// dp[l][r] - memoised result of calc(l, r); -1 means "not computed yet".
vector<vector<int>> dp;
// Returns the maximum number of given segments that can be selected inside the
// window [l, r] (compressed coordinates) subject to the problem's constraints.
// Memoised in dp[l][r].
int calc(int l, int r) {
if (dp[l][r] != -1) return dp[l][r];
dp[l][r] = 0;
if (l > r) return dp[l][r];
// Does the exact segment [l, r] itself exist? If so it can be taken on top
// of any decomposition of the interior.
bool add = count(rg[l].begin(), rg[l].end(), r); // can't be greater than 1
// Option 1: take no segment starting at l - shrink the window from the left.
dp[l][r] = max(dp[l][r], add + (l + 1 > r ? 0 : calc(l + 1, r)));
// Option 2: take some segment [l, nr] with nr < r and solve the two
// independent sub-windows [l, nr] and [nr + 1, r].
for (auto nr : rg[l]) {
if (nr >= r) continue;
dp[l][r] = max(dp[l][r], add + calc(l, nr) + (nr + 1 > r ? 0 : calc(nr + 1, r)));
}
return dp[l][r];
}
int main() {
#ifdef _DEBUG
    freopen("input.txt", "r", stdin);
// freopen("output.txt", "w", stdout);
#endif
    int tests;
    cin >> tests;
    while (tests--) {
        int n;
        cin >> n;
        vector<int> l(n), r(n), coords;
        for (int i = 0; i < n; ++i) {
            cin >> l[i] >> r[i];
            coords.push_back(l[i]);
            coords.push_back(r[i]);
        }
        // Coordinate-compress all endpoints.
        sort(coords.begin(), coords.end());
        coords.erase(unique(coords.begin(), coords.end()), coords.end());
        auto compress = [&](int x) {
            return int(lower_bound(coords.begin(), coords.end(), x) - coords.begin());
        };
        for (int i = 0; i < n; ++i) {
            l[i] = compress(l[i]);
            r[i] = compress(r[i]);
        }
        int siz = int(coords.size());
        dp = vector<vector<int>>(siz, vector<int>(siz, -1));
        rg = vector<vector<int>>(siz);
        for (int i = 0; i < n; ++i) rg[l[i]].push_back(r[i]);
        cout << calc(0, siz - 1) << endl;
    }
    return 0;
}
```

Why dsu didn't work in problem F.

89050221

i am finding all invalid pair and add a edge between them ...then i am finding the maximum independent set;

Actually, maximum independent set is pretty hard problem to solve and can be done in $$$O(n^3)$$$ using Kuhn's algorithm on

bipartite graphs. The graph you build isn't even bipartite (you can easily obtain an odd-length cycle with segments $$$[1; 5]$$$, $$$[4; 8]$$$ and $$$[3; 6]$$$ — they all have pairwise intersections). I don't know a good solution to this problem for non-bipartite graphs, just some randomized approaches which don't work well. Thanks.

Would this work for F?.

First, co-ordinate compress the segments. After that, sort these by ending indices. Maintain a

dparray wheredp[i] = size of optimal subset where all segments have ending indices <= i. Consider theKthsegment to be the last segment included in our optimal subset and it's starting and ending indices to beLandRrespectively.Loop through the points from R->0 on the co-ordinate axis, then, dp[R] = max(dp[R],

number of segments inside range [L,R]+ dp[L-1]);The maximum of dp[i] would be our answer. I think this is the same idea as the editorial, couldn't understand that one exactly, so came up with this.

Note: The kth segment is considered to be inside [L,R].

Yes it works. Also, you don't need to coordinate compress. Here's the submission: 89075517

Isn't the solution you wrote O(n) complexity? If I am mistaken please correct me

In the solve function I'm also doing binary search, which adds a log factor.

$$$O(n^3)$$$?

A set is independent iff its complement is a vertex cover, so this is the same as computing minimum vertex cover. And by Konig theorem in bipartite graph minimum vertex cover equals maximum matching which can be computed faster than $$$O(n^3)$$$. You can of course construct a weighted bipartite graph with cost 1 if there was an edge in original graph or 0 otherwise and run maximum weighted matching (Kuhn a.k.a. Hungarian algorithm) but that's just insane.

I don't get your point.

Yes, thanks, I know about Konig's theorem and know that vertex cover and independent set (and maximum matching) can be converted to each other.

I didn't talk about the Hungarian algorithm, I talked about the dfs-like Kuhn algorithm for finding an unweighted maximum matching. Its time complexity is also $$$O(n^3)$$$. In practice it's way faster, yes, but the complexity is still $$$O(n^3)$$$ (or, to be precise, $$$O(nm)$$$).

And you can construct clique in this problem pretty easy, so Dinic or Hopkroft-Karp would work with the same time complexity as this unweighted Kuhn matching.

Edit: Sorry, Dinic and Hopkroft-Karp will work a bit better ($$$O(m \sqrt{n})$$$ instead of $$$O(nm)$$$).Google-search phrase "Khun algorithm" and you'll find out why I got confused by your previous comment

Yes, I googled and this is why I tried to explain to you what am I talking about. I'm really surprised I didn't find any explanation of this algorithm in English (didn't go deep into search but anyway). I always thought this is very famous approach but now I realized all articles and lessons I heard about it were from Russian teachers.

Of course this approach is well known. It's simply that the algorithm you are talking about is not called "Khun algorithm", but "Hopcroft-Karp" or "turbo matching", Moreover simce the actual "Khun algorithm" aka Hungarian does run in $$$O(n^3)$$$ whilst Hopcroft Karp doesn't I did assume you actually mean Khun algorithm and got really confused on rest of your comment

Video Editorial of Problem D: Binary String to SubsequencesI had the same approach. At least i'm glad to know that i wasn't the only one. Thanks vovuh for the explanation :)

I tried the same and then I learned that finding maximum independent set is not as easy as I thought

Maybe because it doesn't have transitivity. For example, take segments A=[2,3], B=[3,4], C=[4,5]: the pairs A-B and B-C are invalid, but A-C is valid. So you can't map it in this way.

Video Explanations(hindi) - Question D- https://youtu.be/BRrPfZQ3ScU - Question C- https://youtu.be/txEX3EwWaSw - Question B- https://youtu.be/NxfA91zSNlI - Question A- https://youtu.be/tSboQu6Vu_k

Video explanations to everything else. (And also A-C lol)

Your videos are really cool, have been following & learning from you

But you didn't learn the main thing, he started YT when he was already very good at cp.

I made a YT while i am becoming good at CP :-P

I agree i am still learning, but i only make videos on the questions i am able to solve and 100% sure about. Its not like i am pretending that i have full knowledge and am god at cp. 1 month ago i was not able to solve 1 question.But after this YT, there is a source of motivation for me to learn, and now am able to solve more so i am happy.:-)

Video Editorial of Problem D: Binary String to SubsequencesI always see that this guy gets a downvote whenever he posts the link for the tutorial. why so ??

i was thinking the same, his explanation for D was nice, and for the first time i also posted today, got -14 votes, but its balanced now to 0. IDK why.

.

I completely understand this, but i make videos on what i am able to solve on my own, and those which i cant i don't even try to make as i dont want to share incorrect knowledge.

But why this dude you talked about, i was curious about him

.

TBH, I really like your idea of uploading videos irrespective of your rating. You want to make videos and hence you are making them. You aren't forcing anyone to watch your videos. If I had been in your shoes, I would never have the courage to start a channel and hence I'm really proud of the fact that you are uploading videos.

As per why my comment is getting so many downvotes, I myself have no idea. I'm getting good enough response on YouTube. Maybe someone who downvoted my comment can tell me why they did it and I can keep that in mind from next time?

I don't like the impression of making YT videos and commenting here twice (or maybe even once). I hate monetization of everything (specially educational) and I do believe better way to learn is try yourselves and if you have exhausted all thoughts read editorial (because its not spoon fed here).

On top of that commenting it in comments section of an official editorial irritates me.

I know I don't have completely sound arguments so I'd normally just down vote and move.

brother if you have guts then please can you explain me solution's like sKSama and namanbansal013 does. After every contest which I give I wait for their videos. If You have guts than please we'll welcome you!

I didn't mean to offend, Its perfectly fine to do so. Its just the way I see things.

To be specific I have issues with this kind of promotion (which again is fine but I don't like it).

it's fine if you don't like it but don't demotivate them at least they are doing something if you don't like then ignore but don't downvote them their effort's are on another level!

If I had disliked his content on youtube then what you say would be appropriate. But he had promoted his content here (twice) which I didn't like and only thing I can do to show that is downvote.

I completely dont agree with the claim that a division 2 user shouldn't post explanations. Before you downvote, let me explain why.

Firstly, some people are doing other websites than codeforces and have good ratings there. There are different systems, some speed based like cf, some more OI style.

Secondly, better rating does not imply better explanation hust as worse rating does not imply worse explanation. Some people are very good at explaining solutions to things they solve and even if they are not high rated it's a good source.

Third, I believe that a good editorial explains how one would approach a problem. Some things that are considered trivial by div1 users are not considered trivial by a div2 user, and perhaps an explanation from a div2 user would be more practical for a div2 user.

Fourth, some people use alts here or simply were high rating then dropped.

Fifth, about the "waste of time", as Feymann once said, the best way to check you understand something is to make someone else understand it aswell, and it also helps so why not?

Sixth, some people are just slow but are able to solve high rated problems, you shouldnt be ratist in general, not just in this case.

Seventh, as a specialist you dont have the right to make ratist arguments :)

If you have balls, I offer you a 2300 codeforces duel, now stop demotivating the community and start solving more problems :)

If you downvote, the least you can do is reply with a valid counter-argument, otherwise you are just ratist :)

Edit: So apparently, atleast 7 people saw rating and downvoted without a counter-argument. Either write one or duel me on codeforces. Some IOI medalists dont even do codeforces. You gave zero thought, saw blue (was CM before btw) and immediately downvoted. Very mature.

https://mirror.codeforces.com/contest/1399/submission/89052941

Q-3 Can someone tell me what's wrong with this solution ?

In the first for loop you are doing sum[a[i]+a[j]]++ which is wrong because lets say there are 3 people with weights {1,3,3} In the first iteration , you are using the zeroeth person thrice (when (i,j) is (0,0) (0,1) (0,2)). In the first iteration itself sum[4] will be 2 which is not possible since if the zeroeth person made a team with the first person for sum=4, he cannot make another team with the second person. As a person cannot be in 2 teams, this approach fails.

I did that in the last attempt sir, previously i used for(j=i+1;j<n;j++) but this also resulted in WA. The logic behind using that is in the first test case example : 1 2 3 4 5 6 -- The maximum weight can be 5 or 6. I am just increasing the frequency of the weight that can be achieved by maximum number of pairs. So that in the next loop i can select only those pairs which have weight equal to the weight with maximum frequency.

Yes, you have to select the weight with the maximum frequency. But the way you are creating the sum array is wrong because that loop is running n^2 times and creating almost n^2 pairs. The problem is that when you hash them to the sum variable, you are using one person more than once (in different pars), which leads to a wrong sum array.

what if I change the second array to for (j=i+1;j<n;j++) ? Will that do it?

Nope. AFAIK you cannot create a hash sum array for this question. You have to create a freq array of numbers and then run a loop from (2 to 2*n) and then search on the freq array for the sum, as given in the editorial.

I want to know the approach of E1 if it is slightly changed so that we have to find the minimum number of operations so that distance between the root and every leaf is less than equal to S.I have an approach in mind to this problem but I'm not sure if it would work. Can anyone please help me out. UPD : vovuh can u please provide your crucial insights to this.

Please tell me what's wrong in my E1 solution

I also did like this only. The only mistake I spotted in your code is that in your priority queue, you're arranging the edges by (weight * nx) [nx = no of leaves for this edge contributes to the sum].

Instead, it should be the reduction we can get if we make our operation in it, i.e. (weight * nx — (weight / 2) * nx).

Example to prove the contradiction, let's say weight1 = 7 and nx1 = 3, and weight2 = 3 and nx2 = 7. If we do the operation on 1st one, 7*3 = 21 will reduce to 3*3 = 9, hence the deduction is 12. On the other hand if we do on 2nd one, 3*7 = 21 will reduce to 1*7 = 7, hence the deduction is 14.

But in your code, 1st one will be operated. Hope you understood.

My submission — 89053607

Thanks so much!!!

Wait, if I arrange like this {weight * nx, nx,weight} why it's not correct again? then {21, 7, 3} has a higher priority than {21, 3, 7}. tbh, i didnt really consider this case, but i think my code does not fail here.

I would like to know, why ordering by weight*nx does not work? Thanks in advance.

See I've explained a contradictory case [{21, 7, 3} and {21, 3, 7}] in my comment above.

but if the second element in this triple is nx, 3, 7 and 7, 3 is not a contradiction. See my comment above.

No, it won't work like that. You've to keep the parameter which actually matters after doing the operation, i.e. the deduction it is making.

I can give contradictory cases to fail this too.

For example, weight1 = 8 and nx1 = 9, & weight2 = 9 and nx2 = 8. You say order by nx, then 1st one will be chosen, hence 4*9 = 36, the deduction will be 36, whereas if you take 2nd one, 4*8 = 32, the deduction will be 40.

Finally got it, Thanks.

For F, we also have a more memory efficient solution with the same time complexity.

First, let's sort all the segments in order of increasing segment length. Now, let $$$dp[i]$$$ represent the maximum number of segments we can take (including the current one), such that all of them are completely covered by the $$$i$$$th segment and they abide by the given constraints. To compute each $$$dp[i]$$$, we only need to consider segments that are completely covered by it (by definition). Notice that these segments must have length strictly less that the length of the $$$i$$$th segment, so we already know all of their $$$dp$$$ values. Therefore, to get the value of $$$dp[i]$$$, we will run normal segment dp on the set of covered intervals (finding the largest set of intervals such that no two overlap, where the size of an interval is its $$$dp$$$ value instead of just $$$1$$$). For each segment, computing its $$$dp$$$ value takes $$$\mathcal{O}(N)$$$, so our final time complexity is $$$\mathcal{O}(N^2)$$$. To compute the answer more easily, we can insert an imaginary $$$N+1$$$th segment with endpoints $$$[-10^9, 10^9]$$$, with the final answer being $$$dp[N+1] - 1$$$. The advantage of this solution is that we only need to consider the $$$\mathcal{O}(N)$$$ segments that are covered by segment $$$i$$$ when computing $$$dp[i]$$$, so our memory complexity is only $$$\mathcal{O}(N)$$$, which is easily within $$$256\textrm{MB}$$$.

Tried implementing a similar idea to achieve $$$O(N)$$$ memory complexity but my solution is TLEing at the last test case.

Here's the idea: Sort according to the end points of the segments and let $$$dp[i]$$$ = the maximum number of segments you can pick considering only the first $$$i$$$ (in the sorted list).

The transition involves trying to pick the $$$i^{th}$$$ segment, which requires running exactly the same dp on all subsets of the $$$i^{th}$$$ segment. Then compare this to not picking the $$$i^{th}$$$ segment, which corresponds to just considering the first $$$i-1$$$ segments.

I'm not sure of the time complexity, but I believe it should be $$$O(N^2)$$$. Would appreciate if somebody double checks this/gives ideas for optimization.

Here's the submission: 89072684

Commented Code:You're copying vector everytime you call it, pass it by a reference.

That fixed it, thanks!

Can you explain how to find this

finding the largest set of intervals such that no two overlap

Please see here.

We can sort the segments by their

endpointsand run a dp, wheredp[i]= Maximum value such that ith segment is included or not included in the optimal answer.For the transitions, we can use

binary searchto find the last segment which doesn't overlap with ith segment, let's call it prev.So,

dp[i]=max(dp[i-1],value[i]+dp[prev]).My code in Java: 89147950

How do you calculate value[i]? Edit:- Never mind, got it. Using a different sort for finding value[i].

My simple solution for C:

Let's create a function to get the maximum number of teams with weight m. In the function we iterate over all pairs of numbers and if their sum is equal to m and the numbers were not used before, mark the numbers and update the answer. Next, let's go over all possible weights. Due to small restrictions, such values are few. The answer to the problem will be the maximum of all such values

88999789

Initially I misread problem E. I thought of a different version for the problem and wasted 30 min of my time. The problem I thought of was to print the minimum number of operations so that max(w(root,v))<=S where v belongs to the set of leaves of the tree. Is this problem solvable? If yes, then can somebody suggest a solution for this version of the problem?

Haha I actually made the exact same mistake (then they came out with the clarification and I felt like such a fool... not the first time I've misread a problem). Didn't make much progress on that though.

I attempted problem C while taking the input in a vector (https://mirror.codeforces.com/contest/1399/submission/89066257) and in an array (https://mirror.codeforces.com/contest/1399/submission/89066399). The vector approach gives me a TLE but the array approach passes all test cases within time limit, I am allocating the memory at the start in a vector and not even using the push_back() function, is there any particular reason for this?'

You don't handle n=1 correctly and read outside of array bounds leading to strange values of s1 and s2. Because the array and vector have different sizes on the stack the memory read by a[1] and a[n-2] is different between your solutions so you end up with different behaviour.

My answer in the checker is correct, but

I am getting the following comment from the checker: wrong output format Expected int32, but "7882141163342456" found (test case 1)

https://mirror.codeforces.com/contest/1399/submission/89012361

your ans is not correct

See the jury's answer and contestant's answer, they both are same. And in the correct submission after that, I did not change my logic, just changed long long int to int

huh, 1 2 3 1 1 4 5 6 7 8 1 1 9 10 11 1 1 1 1 1 1 12 1 1 13 14 15 16 1 2 1 2 17 7882141163342456 7882141163342456... and 1 1 2 3 3 3 4 5 6 7 8 8 8 9 10 11 11 11 11 11 11 11 12 12 12 13 14 15 16 16 15 15 16 17 17 17 18 19 19... are exactly the same.

Oh sorry mybad, I misread something. I was checking only the 100002 part. Sorry for the hassle

What a fast editorial! Really helpful; Thank you!

Why is the complexity of a heap query in E1 log(nlogw) and not just log(n)? The number of items in the heap is constantly n so why is it not log(n)?

Does anyone have any other approach for E2

Before all, we calculate the number of leaves in every subtree using DFS. The contribute of the lowest edge between the subtree's root and the root is the number of leaves in the subtree multiplied by the edge's weight. Recall the contribute of one move is the diff of its edge's contribute after move. For example, for an edge of weight 5 whose subtree has 3 leaves, its contribute is $$$5*3=15$$$, and the contribute of its first move is $$$5/2*3-5*3=-4$$$.

We use a multi-set/priority_queue or other data-structure containing all available moves sorted by its contribute in non-decreasing order. When we do a move, we push the next move in data-structure (weight divided by 2). If two moves' contribute equal, which first? We first take the one of which next move has lower contribute. If the next move equal, we check the next of next, and so on. But this take too long time($$$\log W$$$), we have better method. If two moves' contribute equal, either their's

`weight - weight/2`

equal and the leaves number of their's edge equal or any one's`weight-weight/2`

equals the other's leaves number. First case, one has large weight will always win. Second case, one has little weight will always win. You can prove it easily.There're exactly 2 cases of the answer, odd or even. If the answer is even, we can think two one-cost-move as one two-cost-move, and do this process greedily. If the answer is odd, we can take a one-cost-move that has lowest contribute. This is my submission: 89086737.

I'm not sure whether it correct, although it get Accept before system test.

Nice approach I really liked it thanks.

And I think there's another approach. Do the process in all one-cost-move, and in all two-cost-move separately. We can get the minimal contribute (contribute is non-positive) of every possible cost using only one-cost-move, and the minimal contribute of every possible cost using only two-cost-move. And enumerate the number of one-cost-move, find minimal number of two-cost-move and update answer.

So as in E1 we calculate cost 1 edges and 2 edges using same approach and then find the answers separately and then try minimising 1 cost answer by keep on replacing 3 of the 1 cost moves with 2 cost moves?

sorry, I haven't tried E1...

E1 is just E2 with cost of every edge is 1

Iam sorry if someone felt offended by my comment. But I didn't have such intentions. I don't get why people down vote others comments. I am not spreading rumours or criticizing someone or putting something which hurts someone. I am really trying my best to grow and help others to grow along with me. Do you all know how ugly it would be if anyone including my recruiters who open my profile and see a negative contribution. Guys we are members of the same community please try to help others not critisize them. If anyone felt offended by this message iam sorry again.

Can you please share your approach for E1

At first we have to infer the following from the question.

So once he have the above intuition we can decide to calculate no of paths a particular edge is in using simple dfs traversal. Then we have to choose a data structure which has the following properties

So from the above properties we use Max-heap (priority queue) in c++.

We then insert every edge into priority queue

First parameter being the cost to operate on edge

i.e,(E/2) *N where E is the edge weight and N is the number of paths in which the edge is present

Second parameter being cost

Third parameter no of paths.

We keep on doing the following operations until effective cost is less than s(given in the question).

remove maximum in the heap

perform Operation on it

insert updated edge

here is the link to my submission

hey thank you for replying : https://mirror.codeforces.com/contest/1399/submission/89208650 (I am getting an TLE while using set() to accesing the diff and AC when using getdiff(id) ) Can you please explain Why AC:https://mirror.codeforces.com/contest/1399/submission/89208760

Don't know sorry

My video solutions to all problems + screencast (where I placed 17th? place). Sorry for uploading it that late (currently slow internet connection)

Enjoy watching.

https://youtu.be/yWD-Hcl_0Sk

UPD:After a system testing, I'm 15th

thank you so much

Why editorial before closing hacking phase?

hacking doesn't effect rating in div 3. so people can learn to hack here.

Well not exactly. If you successfully hack submissions of those better in the ranking, then you get a lower rank and subsequent increase in rating. But yes, there is little difference (unless there is massive hacking for a particular question), because unlike Div 2, we don't get points for successful hacking...

thanks for the info

Doesn't matter if someone with a faulty solution is not hacked because it will eventually fail the system testing.

Can someone help me with Problem D — my solution

Where do you want help here, it's completely incorrect.

why this hack is giving invalid input for problem c?

You are probably using this code on some online compiler, copying the output from there and then pasting it as manual input. But online compilers doesn't really give you complete output for such big codes, they probably stop at around 500th test case among your 1000 cases (you can check that by writing

cout<<i<<" "<<50<<endl;).So instead, compile it on your pc (eg: codeblocks) and then use files, get output onto a file, submit that file.

Also notice that the code you want to hack which gave TLE on some online compiler(due to incorrect input), may not really give TLE.

i am using codeforces generator.

https://mirror.codeforces.com/contest/1399/submission/89071282 Problem E1 Giving TLE in test case 2 approach same as that of editorial can anyone explain .why?

Submission : 89071344

Image : (https://ibb.co/x1sGJ18)

The submission, I have made for 1399E1 - Weights Division (easy version), gets AC. However, as I run it on my Sublime IDE, I get the following (error? maybe, I'm not sure exactly) :

Error(You can also see the image link).

I had a similar error, during the contest and after I decided to upsolve. I feared a WA, so I didn't try submitting, but could not debug. Now I have made my solution similar to the author's, but the issue still persists. Could someone take a look? I have not cleared any

`vectors`

, neither is my pointer invalid for the`set`

, so I'm not sure, what`free(): invalid pointer`

means.P.S.It was a nice problem-set, really enjoyed it!

Try compiling with the flags

`-g -D_GLIBCXX_DEBUG`

. It will catch most common errors in STL containers and provide some helpful information about them.In this case, you are trying to dereference an iterator after you just erased its contents from a set. The error produced by the debugger is "Error: attempt to dereference a singular iterator.".

Thanks a lot for the reply. I'll definitely keep this flag in mind. However, that was not what was causing the error for me.

Why?I simply commented out the while loop part and the printing of the answer part. The error was still there.

But, after doing some work, I finally found it. It was in the DFS function I had written.

With ErrorWithout ErrorSo

`cnt[-1]`

was the issue. I'm not sure if this should cause an issue in some testcase, because the`editorial`

has same code.Please help,why is it getting WA (Problem D)? https://mirror.codeforces.com/contest/1399/submission/89007537

Can anyone please help me in explaining,what is the thought process behind D like how are you able to arrive at the solution?

Well, I haven't read the editorial yet, because I got an AC during the contest itself, but here is how I approached the problem.

So, we got a string of

`1s`

and`0s`

and the length is n. The task is to make partitions, i.e. divide it into subsequences which are either of the form`10101..`

or`01010..`

.So let me assign group

`1`

to my first character. And simultaneously, I'll try to decide what is the next character my group`1`

needs. Obviously, if s[0]='1' this means my group 1 will need a 0, otherwise it'll need a 1.

Now here comes the second element. I just need to check, whether it'll fulfill the

`hunger`

of group 1 (that is it gives the required element), or whether it'll just make another group 2 of it's own and wait for a suitable match. This is the way, we need to do for all the rest of the positions.Now to solve it effectively, I just need to maintain the group numbers for those waiting for

`0s`

and`1s`

, and so I'll need a`map`

and I'll make a map of`sets`

, because of its `O(log n) time insert and delete`

(I'll need these operations, because the groups will keep swapping their needs between 0s and 1s). My Submission : 89003229

Congratulations on great editorial!

In problem E1 I got tle on test case 2 when used set and after replacing set by priority queue it got AC in 0.2 sec.

Can somebody explain it clearly how too much time difference

TLE code- https://mirror.codeforces.com/contest/1399/submission/89021834

AC code- https://mirror.codeforces.com/contest/1399/submission/89025348

Upd: faker_faker thanks got it!!

`set`

doesn't have duplicates, so you go into an infinite loop. AC on making it

`multiset`

: 89075373Greedy solution to E2 using two priority queues (PQ1 for cost 1 edges, PQ2 for cost 2 edges). My while loop logic (sum > S) was (select 1 of the following):

1) If decreasing the best cost-1 edge is sufficient, do so and end immediately.

2) If decreasing the best cost-2 edge is sufficient, do so and end immediately. Note doing this check after check (1) is safe, since if (1) failed, we need to pay at least 2 in any case.

3) Easy case: if either of the priority queues is empty, you have to choose the one that has edges.

4) Determine the reduction in sum (call RED1) if you choose edges from PQ1 twice. (Edge case: remember that the second edge chosen might be the same as the first edge, after reducing by half). Determine the reduction in sum (call RED2) if you choose one edge from PQ2. If RED1 >= RED2, then perform the operation on the best edge in PQ1. Else perform the best edge in PQ2.

Submission: https://mirror.codeforces.com/contest/1399/submission/89076498

In (4) is it not sufficient to check if RED2/2 >=RED1(here red1 is choosing from pq1 once) and if it is true perform operation on pq2 else on pq1 ? can you give a counter example of it?

The proof for my original logic seemed more straightforward whereas I couldn't quickly reason why your alternative (which I also considered) would work. And indeed, I get WA when trying the alternative.

Unfortunately, I can't seem to think of a (small) counterexample.

It turns out that this alternative is also correct. I tried this approach and got WA on test 21(like you). I also managed to prove it but still something was wrong. Finally, after a few hours of analysing this problem I found out that I(and you) was missing a one little case when the last edge you are decreasing is a cost-2 edge and you can undo the last cost-1 edge-decreasing. Hope that helps :D

On the standings page, what is the difference between a “+” and a “+1” for a problem?

Does anyone have a working python solution for E1?

Why 89023478 got a TLE?(Solution to D) Can anyone tell please?

You are erasing the first element of vector.

`vector.erase()`

has a time complexity equal to the length of the vector because when you delete the first element you have to shift the entire vector to the left one position. As you're calling`erase()`

multiple times inside a loop, when the vector length becomes large, you get TLE. Instead of erasing the first element, you can erase the last element using`pop_back()`

which has a constant time complexity, because the order in which you insert '0' and '1' into the existing subsequences doesn't matter (as long as you're maintaining the minimum number of subsequences). Thank you bro! I couldn't find why! Thanks a lot!

In problem D,each character of the string belongs to exactly one subsequence. In the second line print n integers a1,a2,…,an (1≤ai≤k), where ai is the number of subsequence the i-th character of s belongs to. Ex: 4 0011 Ans: 2 1 2 2 1 I don't understand where 1 2 2 1 (second line print) come from. You divide 0011 to 01 and 01 ( 2 subsequence ) and each character should belongs to 1 subsequence. So should it print 1 1 1 1 instead of 1 2 2 1? Am I misunderstood something?

.....where ai is the (index) number of (the) subsequence, the i-th character of s belongs to.

Even if it wasn't clear to you, it was kinda obvious :)

Actually here, all the characters (0 or 1) belong to exactly one sub-sequence. So, from your point of view the result should always be 1 1 1 1... right? But no, what is actually meant here is that if there are n sub-sequences, you number them from 1 to n, then print which character belongs to which sub-sequence. For example: 4 0011 has 2 valid sub-sequences; let's number them as the 1st sub-sequence and the 2nd sub-sequence. Say the first 0 belongs to the 1st sub-sequence; then the 2nd 0 should obviously belong to the 2nd sub-sequence. Now, the first 1 can belong either to the 1st sub-sequence or the 2nd, and the last 1 would be in the alternate sub-sequence. So the result can be 1 2 2 1 or 1 2 1 2 or 2 1 1 2 or 2 1 2 1. I think you have got this now, right?

Regarding problem F, I can't figure out how does it mean. The first example↓, why the output is 3? In my understanding, it should be 1, because only (2,3) or (2,4) can be chosen. Otherwise, we will have the intersecting, ex: (2,4) & (2,3) have intersecting.

`input: 4 1 5 2 4 2 3 3 4 output: 3`

Is there anyone can help? THANKS.`Your task is to choose the maximum by size (the number of segments) subset of the given set of segments such that each pair of segments in this subset either non-intersecting or one of them lies inside the other one.`

what does this statement mean? so confused. ><

This test in E1 problem 10 28 8 2 8 5 1 4 6 1 10 10 2 7 7 2 1 9 2 1 2 1 5 4 1 9 3 2 5 Why output is 7 ?

Can someone please explain how the answer for this test case is 7 instead of 8??

The methodology I seem to get is :-

Can someone explain as to how is it possible to do any better than this ? Thanks in advance!

Can Anyone explain the proof of solution D? I didn't get the proof given in the editorial.

Can anyone tell will this work in E2?? In E2 can we maintain two multisets ,say one for overall costs and second only for edges with cost 1.Now we proceed similarly to E1 ,when we will encounter a edge with cost one we will simply include that in answer and continue. If we encounter a edge with cost two ,we will check for best two edges with cost 1 ,if we get better result than the cost 2 edge we will include thiese two otherwise cost 2 will only be taken.

I think it will work. I am applying same thing but storing only edge with cost 1 in priority_queue and cost 2 in other priority_queue.

Yes, It worked.

RIP C :(

Can somebody tell me a test-case where my solution for D fails ? Link to solution : — https://mirror.codeforces.com/contest/1399/submission/89049595

Try this input:

Correct output:

Your output:

I'm just trying to help. pks18 and Greatest789, in such cases, where you want to find a test case, you can use

`stress testing`

with an AC solution.Take a look at this blog

Since yesterday I have been reading the question incorrectly. Until I saw the tutorial, I thought it was like that

"Find the minimum number of moves such that the sum of weights of the path from the root to every leaf is less than or equal

`S`

"Suppose root is

`a`

and leaves are`x`

and`y`

. Distance from`a`

to`x`

is`p`

and`a`

to`y`

is`q`

.`p`

and`q`

should be less than or equal to`S`

but`p+q`

can be greater than`S`

.Example5 50

1 3 100

1 5 10

2 3 123

5 4 55

ans is 7

By the way, how to solve this?

Can someone explain what's wrong with my code? (Problem E1)

https://mirror.codeforces.com/contest/1399/submission/89052647

I had used a max heap instead of ordered set but my approach was the same as that of the editorial. My solution gave TLE in Test 5.Is heap slower than set or did I do something wrong? Soln. Link: https://mirror.codeforces.com/contest/1399/submission/89023880

Heaps are actually faster than sets. Your solution is giving TLE because you are passing the graph (vector g) by value, which increases the time complexity. Pass the graph (vector g) by reference or make it a global variable.

can someone help me with 89027795 . I got TLE on test case 16 while in custom invocation its showing only 30 ms as well in my pc .

I have a very different solution to E2 (no binary search, only linear DP). You can read about it here:

SolutionI can try to explain and prove my solution ^_^

Let's define impact of an edge as $$$(w-\left\lfloor\frac{w}{2}\right\rfloor)*l$$$, where $$$w$$$ is the current weight of that edge, and $$$l$$$ is the number of leaves in the subtree of that edge. So w occurs in

`sum`

$$$l$$$ times.Now let's find out, after performing, say, $$$i$$$ moves on edges with $$$c==1$$$ and $$$c==2$$$, the maximum sum of impact we can obtain. The resultant

`sum`

obviously decreases by sum of impact.Now, we do dp. $$$dp[i].first=max(dp[i−1].first+b1[dp[i−1].second.first+1],dp[i−2].first+b2[dp[i−2].second.second+1)$$$ where, $$$dp[i]=$$$ {maximum sum of impact for cost $$$i$$$, {number of moves with $$$c==1$$$ type edges, number of moves with $$$c==2$$$ type edges}}

$$$b1[j]=$$$ maximum sum of impact with $$$j$$$ moves of $$$c==1$$$ type. Similar for $$$b2[j]$$$.

Now an insight: the number of moves is bounded above by $$$n\log(nW_{max}) \le 50n$$$ for the given constraints.

So now we can just do linear search and output the answer just when

`sum`

$$$−dp[i].first$$$ becomes less than or equal to $$$S$$$.Even though DP relation is intuitive, here is the proof:

Obviously, the last move performed is either of the $$$1$$$ or $$$2$$$ type, so we max over those. Now, let's say for the $$$1$$$ type, we do not take the $$$(dp[j−1].second.first+1)^{th}$$$ $$$1$$$-type move. UPD: A part of the previous proof was wrong. Here is the complete correct proof for the DP recurrence.

Structure theorem: Let number of $$$1$$$ and $$$2$$$-type moves $$$(n1[i],n2[i])$$$. Then for adjacent elements

$$$|n1[i]−n1[i−1]|≤1$$$

$$$0<=n2[i]−n2[i−1]<=1$$$ (can be proved by easy induction)

From here, we can easily compare the optimal solution and the solution of our DP+Greedy.

can someone tell me the testcase where my code is failing for E1

link :- https://mirror.codeforces.com/problemset/submission/1399/89096473

well it is failing at 22nd case of 2nd pretest but it is not visible so please help me out

Heyy, you need to use a custom comparator in your priority queue so that your top element is always the one that results in max decrease in the sum.

`class Compare { public: bool operator() (pair<int, int> a, pair<int, int> b) { int x = a.second/2; int y = b.second/2; return a.first * (a.second - x) < b.first * (b.second - y); } };`

Here the 1st element of the pair is the no of leaves and the 2nd element is the weight.

hello everyone!

Solutions of C given is in O(n^2). This can question can be solved in better complexity? If you have any idea suggest

No, I don't think so. We positively need to iterate over all possible values of sum, and for each sum, we need to find how many groups we can make. And after that, we'll find the maximum number of teams. So, the order has to be

`O(n^2)`

.how to install c++17 in local computer

If you're in Linux and you have the appropriate GCC version (>5), you can just use the flag

`-std=c++17`

to use C++17. No, I'm a Windows user.

Install a higher version of Mingw on windows. The lower versions don't support C++17.

For Problem F, why the following greedy algorithm is not correct (WA on test 2):

Code is here: https://mirror.codeforces.com/contest/1399/submission/89045515

problem D why my submission giving sometimes TLE and sometimes WA. https://mirror.codeforces.com/contest/1399/submission/89094614

you can see this test case says it all. 2 set was starting from 0 and your code is giving the next element as 0 also in the second subsequence.

yup I got it thanks

here is an alternative solution to problem C, simple brute force for all possible s, and then using 2sum to calculate.

vovuh Can you look into my submission where it is giving WA or can you provide me a test case where it is failed?

Easiest DIV3 Ever

Yes easiest when you cheat and your solution gets skipped ;)

Nice reply LoL.

Problem E1testcaseHow does this can be done in just 12 steps,

Maybe you are not taking the difference as the factor to decide which edge should be halved first.

Sorry if this is a stupid question, but can anyone tell me if my method of finding leaves is incorrect. I am just trying out all those elements whose adj[u].size()==1 Here is my function input, and check - vector<vector> adj(n+1); for(int i=0; i<n-1; i++) { int v, u, w; cin>>v>>u>>w; adj[v].pb(u); adj[u].pb(v); } for(int i=1; i<=n; i++) { if(i!=1&&adj[i].size()==1) { leaves.pb(i); } }

For n=2, it will consider both the vertices as leaves. In fact, for any tree where the root has only 1 degree will have it as a leaf. Ask yourself if that's what you really want or whether this makes a problem in your implementation (whatever you want to do with the leaves). You can start from 2 as you know that 1 is root. But yes, other than this corner case, your logic is ok.

Can anyone explain what is wrong in this prob-D I am getting wrong on (tc 70) of 2 test case:- https://ideone.com/yXMA6y

Same problem. I am getting answer as 4. While the jury's answer is 3. https://mirror.codeforces.com/contest/1399/submission/89116568

If anyone need detail explanation for D Here

The time limit for D is 2s, so can O(N^2) solution work for D? My solution for D

No, O(n^2) would work if n was around 10^3.

But the time limit for D is 2s, so shouldn't the cap be 10^10 approx?

Not at all. Your logic is correct if 2*(10^5) = 10^10 . lol

Can someone explain the meaning of this line of code given in the solution code for problem D. What is auto [to,id]? Are we iterating over pairs corresponding to g[v]? for (auto [to, id] : g[v]) { if (id == p) continue; dfs(to, id); if (p != -1) cnt[p] += cnt[id]; }

Yes, we are iterating over pairs corresponding to g[v].

I couldn't solve C because I was searching for the value of S rest of the time but I wrote my algorithm well...I didn't read the limit of the problem constraints well and feeling bad about it when it was accepted after just adding an extra loop now.....

## include <bits/stdc++.h>

using namespace std; typedef long long ll; typedef vector vi; typedef deque di; typedef pair<int,int> pi;

## define pb push_back

## define mp make_pair

## define mh make_heap

## define pf push_front

## define IOS ios::sync_with_stdio(0);cin.tie(0);cout.tie(0);

## define INF 1000000000

## define FOR(i,a,b) for(ll i=a;i<b;i++)

int main() { //freopen("contest.txt","r",stdin); //freopen("contest.txt","w",stdout); IOS ll t,n; cin>>t; while(t--) { cin>>n; ll a[n]; FOR(i,0,n) { cin>>a[i]; } sort(a,a+n); ll sum=0; ll best=-1; for(int index=2;index<=2*n;index++){ ll l=0,r=n-1; while(r>l) { if((a[l]+a[r])>index) r--; else if((a[l]+a[r])<index) l++; else { sum+=1; l++;r--; }

}

Spoiler...

Spoiler...

In a fair world you would have got 100 extra penalty for bad formatting.

Actually after pasting the code the fonts have changed like that along with the spaces and lines.Why is my format not good?

There are several options. Above the editor box there are some symbols, click them. A useful one is called "Spoiler", another one "Block".

This is a spoilerInitially hidden text. One can use formatting here, too.

vovuh Can you please explain about the time complexity of your solution of F?

In problem 1399C - Boats Competition, the valid range for the smallest distinct weight $$$i$$$, where $$$i < s-i$$$, could have been written as $$$[\max(1,s-n),\lfloor\frac{s+1}{2}\rfloor-1]$$$. 89178688

In question E1: Why dividing the max w[i]*cnt[i] does not work?

hey firstly nice thinking, but it will give incorrect output as we are not just dividing weight but also finding its floor.

here is an example.

Lets w[1]=2, cnt[1]=11 , and w[2]=5 , cnt[2]=4;

according to your approach: (2*11)-22/2=11, (5*4)-20/2=10 , as first one is greater therefore we will use it.

But actually:

w[i]*cnt[i]- floor(w[i]/2)*cnt[i] =>

(2*11)-(1*11)=11, (5*4)-(2*4)=12

here actually second one is greater. hopefully you understand, and got the point.

If my code passed during the contest, how can you give it TLE later? I knew a different method with sets which would have passed, but you gave me AC, which should have stayed as it is. Giving WA after the hacking period is totally OK, but TLE is not; please look into it.

Initially I got TLE in E1, after long long(hehe) search it turned out that I was(by mistake) using pair<long,long> instead of pair<long long,long long> in my PQ and looks like it's responsible for a huge runtime increase(900ms vs TLE).

Is just type conversion responsible for this?AC: 89159051

TLE: 89159067

only difference between these two is PQ data type

Hint: 89162649 is your submission resubmited with an assert. Can you see what the problem is?

Thanks for response,

I can now see that 0's coming out of PQ are resulting in TLE but I'm not sure whether it's a coincidence that they are 0's and not some random negative numbers as is usually the case with overflows.

Hint: 89166892

Try to guess why the 0's are coming. Experiment.

Can D be solved without any extra space pos0 and pos1?

I think the time complexity in E1 and E2 needs checking. The heap (or any other container) in E1 has only $$$n$$$ elements, corresponding to all the nodes of the tree, and the loop runs $$$O(nlogw)$$$ times. So the complexity should be $$$O(nlogw logn)$$$. In E2, though, the heaps

DOhave $$$O(nlogw)$$$ elemnts, and they run $$$O(nlogw)$$$ times as above. So the complexity is $$$O(nlogw log(nlogw))$$$.When I started writing this comment, the complexity in E1 was wrong, E2 was correct. Now I see that E1 has been fixed,

ANDcopied on E2 as well which made E2 incorrect. Am I missing something?The current complexity for both problems is correct. You have a good explanation for E1, and the explanation of E2 is obvious too. We run the same simulation as in E1, which runs in $$$O(n \log{n} \log{w})$$$ (because the worst case is the same and it does not depend on edges costs) and then do two pointers on

arrays, not heaps. These arrays really contain $$$O(n \log{n} \log{w})$$$ elements both but we process them in linear time of their length.Oops. I forgot about the 2 pointer part. I was just mimicing the E1 solution. Thanks.

In solution for problem F Yet Another Segments Subset if you modify the line

if(l>r) return dp[l][r];

to

if(l>r) return 0;

and make it the first line of cal(int l,int r)

then we will not need those ternary operators later on!!

Can someone tell me why 1LL was multiplied in the problem E1?

Can someone please explain solution of question F? I am not able to understand it after so many tries.

Problem E2: In the 3rd test case of the input:

`2 100`

`1 2 409 2`

Why is the answer "6"?

For 1st coin, 409 -> 204 -> 102 ->51 . This takes 3 steps and similarly, 3 steps for the 2nd coin gives 51 and summing both give 102 which is greater than 100.

Can anyone explain what am I missing?

Why is my solution using multisets giving tle(on test case 2) in problem E(easy version)?

89485849 1399E1 - Weights Division (easy version).

Using priority queues is NOT helpful either as it is giving tle on test case 5.

D using Binary Search

Spoiler...#include<bits/stdc++.h> using namespace std;

## define speed ios_base::sync_with_stdio(false); cin.tie(NULL)

## define ll long long int

## define ul unsigned long long int

## define PI 3.14159265

## define mod 1000000007

int main() { int t; cin>>t; while(t--) { int n; cin>>n; string s; cin>>s; vector v(n,-1); set o ,z; for(int i=0;i<n;i++) { if(s[i]=='0') z.insert(i); else o.insert(i); } int cnt = 1; for(int i=0;i<n;i++) { if(v[i] == -1) { // cout<<i<<'\n'; int a = i; v[i] = cnt; if(s[i] == '1') o.erase(o.find(a)); else z.erase(z.find(a)); char c = s[i]; while((c == '1' && z.upper_bound(a) != z.end()) || (c == '0' && o.upper_bound(a) != o.end())) { auto it1 = z.upper_bound(a) , it2 = o.upper_bound(a); if(c == '1' && z.upper_bound(a) != z.end()) c = '0', a = *it1, v[*it1] = cnt, z.erase(it1) ; else c = '1', a = *it2, v[*it2] = cnt, o.erase(it2) ; } cnt++; } }

return 0; }

What is the purpose of the line:

cerr << cur << endl;

In the solution of E1?

`I saw Geothermal got some memory issues because of using map`

He was not the only contestant!

Hi! For problem E2, my idea is: for edges cost 2 coins, it should be more expensive than the edge with similar weight to it, but cost only 1 coin. So I tried to maintain a set, which contains {g(i) , i} , where g(i) = (3 — c[i]) * (w[i] * cnt[i] — w[i] / 2 * cnt[i]) , where 1 <= c[i] <= 2 , w[i] being the weight of the i-th edge, and cnt[i] being the number of leaves under the subtree of the i-th edge. What my code does is, whenever the current sum is greater than S, we should take the largest pair in the set, sum -= (w[top] * cnt[top] — w[i] / 2 * cnt[top]) and erase {g(top) , top} update w[top]:= w[top]/2, and re-insert {g(top) , top} into the set. After implementing this greedy, I failed at test case 21. I wonder why this greedy fails, if anyone happens to be able to give a counter-example to my approach, please let me know! My implementation: https://mirror.codeforces.com/contest/1399/submission/90381838

I think I figured out. if we can finish in reduction of 3, but 2 * 2 > 3 * 1 this is the counter example

Can someone help me to find what's wrong in my code(I also provided a little bit explanation in the code) : 90432879

https://mirror.codeforces.com/contest/1399/submission/93405575

i got TLE on tc 6, but i thought it was O(N log N) , any one can tell me where's my fault

I'm pretty sure it has got to do with the use of erase(). A comment above mentioned this: if you have an array of

n length and you erase at index 0, you will have to move all n-1 elements to the left by one. This is perhaps why there's TLE. I don't understand how the time complexity is $$$O(n^2)$$$ in

F. Can someone please tell me how the total number of transitions is $$$O(n^2)$$$. Thanks in advance. vovuhIn problem E2 it is also possible to just fix parity of the answer and solve it separately for odd and even result, and in each turn adding a cost of 2 either by the best with the cost of 2 or by two bests of a cost of 1. My code is not the prettiest but just in case I will leave a link to the submission.

Way simpler approach for problem F:

Do coordinate-compression on bounds of segments. We then iterate over segments in increasing order of size. For each segment, we will run O(n) computation which will find the answer for the problem if the required range was that of the segment.

How to compute the answer for each segment? Just sort segments by right border and store dp[r] = maximal number of segments we can get in [currentSegmentLeftBound, r]. Transitions are also very simple. Suppose we try to include segment s into the selection; the transition is dp[s.R] = max(dp[s.R], dp[s.L − 1] + score[s]).

Implementation: link