Idea: BledDest

**Tutorial**

Tutorial is loading...

**Solution (awoo)**

```
# For each test case: letter chr(ord('A') + i) counts as "solved" when it
# appears at least i + 1 times in the log string; print how many letters do.
for _ in range(int(input())):
    n = int(input())
    s = input()
    solved = 0
    for i in range(26):
        if s.count(chr(ord('A') + i)) > i:
            solved += 1
    print(solved)
```

1914B - Preparing for the Contest

Idea: Roms

**Tutorial**

Tutorial is loading...

**Solution (BledDest)**

```
#include<bits/stdc++.h>
using namespace std;
// One test case: print a permutation of 1..n with exactly k adjacent
// ascents.  Start from the strictly decreasing sequence n, n-1, ..., 1,
// then make its last k + 1 elements increasing — that creates exactly k
// positions where a[i] < a[i + 1].
void solve()
{
    int n, k;
    std::cin >> n >> k;
    std::vector<int> a(n);
    for (int i = 0; i < n; ++i)
        a[i] = n - i;
    std::reverse(a.end() - (k + 1), a.end());
    for (int i = 0; i < n; ++i) {
        if (i)
            std::cout << ' ';
        std::cout << a[i];
    }
    std::cout << std::endl;
}
// Read the number of test cases and run solve() for each one.
int main()
{
    int t;
    std::cin >> t;
    while (t--)
        solve();
    return 0;
}
```

Idea: Roms

**Tutorial**

Tutorial is loading...

**Solution (Neon)**

```
// For each test case: spend at most k hours; the first i + 1 hours may read
// pages a[0..i], and the remaining (k - i - 1) hours each reuse the best
// repeatable value b seen so far.  Track the best total over all prefixes.
fun main() = repeat(readLine()!!.toInt()) {
    val (n, k) = readLine()!!.split(" ").map { it.toInt() }
    val a = readLine()!!.split(" ").map { it.toInt() }
    val b = readLine()!!.split(" ").map { it.toInt() }
    var best = 0
    var prefix = 0
    var bestRepeat = 0
    for (i in 0 until minOf(n, k)) {
        prefix += a[i]
        bestRepeat = maxOf(bestRepeat, b[i])
        best = maxOf(best, prefix + bestRepeat * (k - i - 1))
    }
    println(best)
}
```

Idea: BledDest

**Tutorial**

Tutorial is loading...

**Solution (awoo)**

```
def get_best3(a):
    """Return the indices of the three largest values of `a` (ties broken by
    earlier index).  Assumes len(a) >= 3, so no returned index is -1."""
    mx1, mx2, mx3 = -1, -1, -1
    for i in range(len(a)):
        if mx1 == -1 or a[i] > a[mx1]:
            mx3, mx2, mx1 = mx2, mx1, i
        elif mx2 == -1 or a[i] > a[mx2]:
            mx3, mx2 = mx2, i
        elif mx3 == -1 or a[i] > a[mx3]:
            mx3 = i
    return (mx1, mx2, mx3)


# Fix: get_best3 was re-defined inside the test-case loop on every iteration;
# it is a pure helper, so define it once at module level.
for _ in range(int(input())):
    n = int(input())
    a = list(map(int, input().split()))
    b = list(map(int, input().split()))
    c = list(map(int, input().split()))
    ans = 0
    # An optimal triple (x, y, z) with pairwise distinct indices always uses
    # one of the top-3 indices of each array, so 27 combinations suffice.
    for x in get_best3(a):
        for y in get_best3(b):
            for z in get_best3(c):
                if x != y and x != z and y != z:
                    ans = max(ans, a[x] + b[y] + c[z])
    print(ans)
```

1914E1 - Game with Marbles (Easy Version)

1914E2 - Game with Marbles (Hard Version)

Idea: BledDest

**Tutorial**

Tutorial is loading...

**Solution (adedalic)**

```
#include<bits/stdc++.h>
using namespace std;
// Short loop / size macros used by this solution.
#define fore(i, l, r) for(int i = int(l); i < int(r); i++)
#define sz(a) int((a).size())
typedef long long li;
// Key observation for problem E (score S = Alice's marbles - Bob's marbles):
// S = sum a_i - sum b_i
// if Bob made all steps: then S = 0 - sum (b_i - 1)
// each Alice step: S += (a_i - 1) + (b_i - 1) i. e. the bigger (a_i + b_i) the better
// Global per-test input: n marble types, Alice's counts a, Bob's counts b.
int n;
vector<int> a, b;
// Read one test case into the globals n, a, b.
// Returns false when there is no more input.
inline bool read() {
    if (!(cin >> n))
        return false;
    a.assign(n, 0);
    b.assign(n, 0);
    for (int i = 0; i < n; i++)
        cin >> a[i];
    for (int i = 0; i < n; i++)
        cin >> b[i];
    return true;
}
inline void solve() {
vector<int> ids(n);
iota(ids.begin(), ids.end(), 0);
sort(ids.begin(), ids.end(), [&](int i, int j) {
return a[i] + b[i] > a[j] + b[j];
});
li S = 0;
fore (i, 0, n) {
if (i & 1)
S -= b[ids[i]] - 1;
else
S += a[ids[i]] - 1;
}
cout << S << endl;
}
// Driver: reads t test cases; under _DEBUG also redirects stdin from a file
// and reports per-case wall time on stderr (local-testing scaffolding only).
int main() {
#ifdef _DEBUG
freopen("input.txt", "r", stdin);
int tt = clock();
#endif
// ios_base::sync_with_stdio(false);
// cin.tie(0), cout.tie(0);
// Harmless here (outputs are integers); kept from the author's template.
cout << fixed << setprecision(15);
int t;
cin >> t;
for(int i = 0; i < t; i++)
{
if(read()) {
solve();
#ifdef _DEBUG
cerr << "TIME = " << clock() - tt << endl;
tt = clock();
#endif
}
}
return 0;
}
```

1914F - Programming Competition

Idea: BledDest

**Tutorial**

Tutorial is loading...

**Solution (Neon)**

```
#include <bits/stdc++.h>
using namespace std;
// Problem F storage (tree rooted at vertex 0).
const int N = 222222;
int n;
int sz[N];        // sz[v] = size of the subtree rooted at v (set by init)
vector<int> g[N]; // g[v] = children of v
// Post-order DFS computing subtree sizes into the global sz[] array.
void init(int v) {
    sz[v] = 1;
    for (int child : g[v]) {
        init(child);
        sz[v] += sz[child];
    }
}
// calc(v, k): maximum number of pairs formable from the vertices strictly
// below v, assuming k vertices of v's subtree are already matched (we are
// free to choose which ones).  Two vertices may be paired only if neither
// is a superior (ancestor) of the other.
int calc(int v, int k) {
// tot = vertices strictly below v; mx = child with the heaviest subtree.
int tot = 0, mx = -1;
for (int u : g[v]) {
tot += sz[u];
if (mx == -1 || sz[mx] < sz[u]) mx = u;
}
// Leaf: nothing below v to pair up.
if (tot == 0) return 0;
// It is optimal to charge the k already-matched vertices to the heaviest
// subtree.  If it then no longer dominates the rest, everything can be
// paired greedily (classic "most frequent type" pairing argument).
if (sz[mx] - k <= tot - sz[mx])
return (tot - k) / 2;
// Otherwise pair every vertex outside mx's subtree with one inside it, then
// recurse into mx.  One of those matched partners can be mx itself, so only
// k + add - 1 matched vertices remain strictly below mx (hence the "- 1").
int add = tot - sz[mx];
return add + calc(mx, max(0, k + add - 1));
}
// Driver: rebuild the tree for each test case (parents are 1-indexed in the
// input), compute subtree sizes, and print the answer for the whole tree.
int main() {
    std::ios::sync_with_stdio(false);
    std::cin.tie(0);
    int t;
    std::cin >> t;
    for (int tc = 0; tc < t; ++tc) {
        std::cin >> n;
        for (int v = 0; v < n; ++v)
            g[v].clear();
        for (int v = 1; v < n; ++v) {
            int p;
            std::cin >> p;
            g[p - 1].push_back(v);
        }
        init(0);
        std::cout << calc(0, 0) << '\n';
    }
}
```

1914G1 - Light Bulbs (Easy Version)

1914G2 - Light Bulbs (Hard Version)

Idea: BledDest

**Tutorial**

Tutorial is loading...

**Solution (BledDest)**

```
#include<bits/stdc++.h>
using namespace std;
// Modulus for counting the number of minimum sets.
const int MOD = 998244353;

// Modular addition; tolerates a slightly negative intermediate sum.
int add(int x, int y)
{
    int s = (x + y) % MOD;
    if (s < 0)
        s += MOD;
    return s;
}

// Modular multiplication through a 64-bit intermediate.
int mul(int x, int y)
{
    long long p = (long long)x * y;
    return (int)(p % MOD);
}
// Deterministically seeded 64-bit PRNG used for XOR hashing.
std::mt19937_64 rnd(98275314);

// Draw a uniformly random non-zero 64-bit value (re-draw on the
// astronomically unlikely zero).
long long gen()
{
    long long x;
    do {
        x = rnd();
    } while (x == 0);
    return x;
}
// Global color array and jump links.  g[l] = r records that the prefix hash
// value at position l reappears at position r (set in solve), i.e. the
// colors in [l, r) apparently all occur an even number of times there.
std::vector<int> c;
std::vector<int> g;

// Count the number of valid bulb choices inside one independent block [l, r):
// positions covered by a jump contribute nothing, every other position adds one.
int process_block(int l, int r)
{
    int choices = 0;
    while (l < r) {
        bool can_jump = (g[l] != -1 && g[l] < r);
        if (can_jump) {
            l = g[l];
        } else {
            ++choices;
            ++l;
        }
    }
    return choices;
}
// One test case of problem G via XOR hashing: give every color a random
// 64-bit value; a prefix XOR that returns to a previously seen value bounds
// a segment where every color occurs an even number of times.
void solve()
{
int n;
scanf("%d", &n);
// size = number of independent blocks (minimum |S|);
// cnt  = number of minimum-size sets S, accumulated modulo MOD.
int size = 0, cnt = 1;
c.resize(n * 2);
g.resize(n * 2, -1);
for(int i = 0; i < 2 * n; i++)
{
scanf("%d", &c[i]);
--c[i];
}
// Random non-zero hash per color.
vector<long long> val(n);
for(int i = 0; i < n; i++) val[i] = gen();
// last[h] = most recent prefix length whose running XOR equals h.
map<long long, int> last;
long long cur = 0;
last[0] = 0;
for(int i = 0; i < n * 2; i++)
{
cur ^= val[c[i]];
if(cur == 0)
{
// Prefix hash back to 0: an independent block ends at position i + 1.
size++;
cnt = mul(cnt, process_block(last[0], i + 1));
last.clear();
}
else if(last.count(cur))
{
// Repeated hash: the in-between segment cancels out — record a jump link.
g[last[cur]] = i + 1;
}
last[cur] = i + 1;
}
printf("%d %d\n", size, cnt);
// Reset globals for the next test case (resize refills g with -1).
c.clear();
g.clear();
}
// Driver: run solve() once per test case.
int main()
{
    int t;
    scanf("%d", &t);
    while (t--)
        solve();
}
```

I love long and detailed tutorials which help me understand the problems better. Thank you awoo, BledDest, Roms!

G2 is great.

but difficult

D can also be solved using DP. Let

Let dp[day][i][j][k] be the maximum number of friends we have met after "day" days (if i == 1, it means we have already done the 1st activity; if i == 0, we haven't; and similarly for j and k). Every day we have one of these options:

if i != 1, we can do the 1st activity,

if j != 1, we can do the 2nd activity,

if k != 1, we can do the 3rd activity,

or do nothing and just stay at home.

So:

we can go from dp[day][0][j][k] to dp[day + 1][1][j][k]: dp[day + 1][1][j][k] = max(dp[day + 1][1][j][k], dp[day][0][j][k] + action[0][day + 1]); similarly from dp[day][i][0][k] to dp[day + 1][i][1][k], and from dp[day][i][j][0] to dp[day + 1][i][j][1]; and we can always stay: dp[day + 1][i][j][k] = max(dp[day + 1][i][j][k], dp[day][i][j][k]).

Total complexity: O(8 * n). My solution: 237985070

God solution

just do bitmask dp kappa who needs 4D dp when you can do it in 2D

Sure, we can use bitmask DP there, but it's DIV3 contest and 4th problem, so I don't think, that every one, who tried to solve this problem, knows what bitmask DP is. My solution is much more easier than bitmask dp

i thought the same but couldn't implement it. Thank you for the insight.

Hey, I used bitmask DP during my upsolving. Please provide valuable feedback, highly appreciated. Here's my submission : 238357460

Looks good) I advise you to input 2D arrays using one more for, like:

It's better, because u don't need to think about "how many time should I input 1D vector". And, of course, it takes less time on writing

Hmm, thnx

hm.. which clowns downvote my comments and why lol

How to solve F ? read the editorial but failed to grasp it properly.

I solved it in an easier (but equivalent) way. Note that the graph of organization is a tree since everyone has exactly one superior. Also, in the BFS tree of the organization, you can always match two people on the same level as they are not superior to each other(there are no cycles). Also, for a lower level node to be matched in an higher level, it has lvl[i] — 1 choices.(with one of them being its ancestor)

Now, you can match two people on the same level or match two people from different levels such that one is not the ancestor of another. You can do this by matching at max lvl[i] — 1 unpaired nodes of the lower level with the nodes of the current level first, and then matching the rest of the nodes in the same level together.

Submission for reference

This can be proved by taking the invariant that we only have the nodes on the same path from a root that are unpaired during this process and then using contradiction.

"This can be proved by taking the invariant that we only have the nodes on the same path from a root that are unpaired during this process and then using contradiction.". Can you please elaborate on the proof?

A node in a higher level has only one ancestor in a lower level. So, we can make cross edges between an unpaired node from a higher level with all except one node in the current level(Step 1). After this, we pair all the nodes at the same level (Step 2).

Now, if there were more than one path from a node to a leaf with an unpaired node, there would be two cases:

If unpaired nodes are at the same level, but this was taken care of in step 2

If they are on different levels, this was taken care of in step 1(Note that one is not an ancestor of another since we have two paths).

Hence, this invariant holds for all levels up to the root. Also, these unpaired nodes occur at the lowest possible level(since higher unpaired nodes are matched first).

Now, if this answer weren't optimal, we would have at least two nodes of this path in the solution. Since they are at the lowest possible level, they have only two choices, like in the above two steps, which leads to a contradiction. Hence, our solution is optimal.

Nice solution. I thought of matching levelwise during the contest but couldn't think of approach on how to handle unmatched node from a level, because it can't be matched with any of it's ancestors. I couldn't think that we can easily handle it like this because atmost one node would be unmatched at a level and it can be matched with any other node on next level other than it's parent node.

In your solution, why do you have to go iterate from n to 2 to find the answer?. Isnt the answer will be only (sum of lvl2 to lvln)/2?

Edit : Lol I think i know the answer. I forgot to put or consider when a node is paired with its ancestor. We have to iterate to handle that case

It won't work. A graph that is like a straight line is one extreme counterexample for that.

G can also be solved using Strongly Connected Components. Using the same logic as the editorial, we need to separate the array into blocks. After we have the array separated into blocks, we need 2 things to compute the answer, for every x (1 <= x <= n) we need to see how far left we can go (ill call that dp_l) and how far right we can go (ill call that dp_r) when turning x ON.

Lets make a graph consisting of N nodes, let first[x] be the first time x appears in the array, and last[x] be the last time x appears in the array. We create 2 directed edges for every node x, one going from x to the leftmost first[j] and one going from x to the rightmost last[j] (where first[x] <= j <= last[x]). this can be done using any RMQ data structure.

By running any SCC algorithm, we can transform this graph into a DAG. After that, its easy to compute dp_l and dp_r by just running two dfs on our DAG, one to maximize dp_r, and another to minimize dp_l

Then, for each group let its left border be L and rightborder R, also let cnt[group] be all the nodes in this group such that it has dp_l = L and dp_r = R. To get the answer, we multiply the cnt value for every group.

for reference here's my submission

Awesome solution! I just would like to note that you don't actually need SCCs, as those 2 edges you create for every node are bidirectional: if you can go from x to last[j] and first[j] (first[x] <= j <= last[x]), you can also go from last[j] to x and first[j] to x.

That's because j is of course inside the interval [first[x], last[x]] and it is guaranteed that last[j] is at least last[x] and first[j] is at most first[x]. This implies the following inequalities: first[x] <= j <= last[x] <= last[j] and first[j] <= first[x] <= j <= last[x], which makes the edges bidirectional.

Omg you're right

Great observation.

I really enjoyed this solution because it displays the incremental ideas and findings I -likely others too- got while working on the problem.

In case anyone is curious about how the solution looks like without the SCC, I followed up by using DisjointSet and merging x with a[left_most] and a[right_most] for every x. Essentially replaced "create 2 directed edges for every node x" with "merge both endpoints with x".

Then it's easy to get dp for each group and compute answer. for reference here's my submission

I use SCC to solve this problem, too. First, create a digraph of n nodes. Then, for every color x, create the edges from x to every color in the interval (first[x], last[x]). That is, connect color x to the colors between them. After the graph is built, run an SCC Algorithm. Also, use a map to find the largest closed interval, and the number we can choose in the interval is the size of SCC. The number of edges may reach O(n^2), which cannot pass the hard version, so we need to use a data structure, like a segment tree, to reduce the edges and keep the SCCs. My submission

I understood the overall logic but could you kindly explain in a bit more detail ?

First, we separate the original array into several subarrays, the endpoints of which can light all the intervals. For example, for array 3 1 2 1 3 2 4 5 5 4, we can separate it into two arrays 3 1 2 1 3 2 and 4 5 5 4, since after lighting 3, all the subarray 3 1 2 1 3 2 can be lit. It can be shown that the separated subarrays here are independent. Then, for each separated subarray, if color v can be lighted after u is lighted, we create an edge u->v in the digraph. That is, we need to create the edges from u to the colors between two lights of color u. In this digraph, if there is a path from u to v, then after lighting u, v can be lit, too. If all the vertices of a subarray are reachable from u, then in order to light all the vertices in the interval, we just need to light u. All the vertices in the same SCC of u can light all the vertices, since they are mutually reachable. Also, the endpoints of an interval (e.g. 3 and 2 in 3 1 2 1 3 2) can light all the vertices, so we just need to find the size of SCC of an endpoint, and the number of lights can be chosen in this subarray is the size of SCC of an endpoint.

Could you explain how are you using segment tree to reduce the number of edges in the graph?

Create a segment tree, and we can consider this tree as a digraph. Each node of the tree is a vertex in the graph, and the leaf represents the original lights. If we want to add edges from u to the vertices in the interval [l, r], just connect u to some vertices on the segment tree, similar to set the lazy tag, which is O(log n) and still keeps the strong connectivity.

I think the definition of superior line is misleading because in clause 2, you are implying x can be a superior of the direct superior. This is saying we can consider a superior only if it is parent (direct superior) or grandparent (superior of the direct superior). I think the line should have been "is a superior of a superior" not "direct superior". I actually spent lot of time trying to solve the case where we can pair a node with its great grandparents and above. Didn't end up solving.

This is a regular definition of a proper ancestor in the tree.

Suppose $$$A$$$ is the direct superior of $$$B$$$, $$$B$$$ is the direct superior of $$$C$$$, $$$C$$$ is the direct superior of $$$D$$$. Is $$$A$$$ a superior of $$$D$$$? According to the definition, then either $$$A$$$ should be a direct superior of $$$D$$$ (which is false) or a superior of its direct superior, which is $$$C$$$. And it's quite easy to see that the second condition is true.

Oh I see, so it's a recursive definition. I read it as "direct superior of direct superior" who is grandparent only. Thanks for clarification should have asked during competition :)

When the difference between sz[mx] and the second largest subtree size is > k, how does the sz[mx] - k <= tot - sz[mx] condition work? Shouldn't subtracting k from sz[mx] change the new max to the second largest subtree size?

Resolved.

Stcodeforce Could you kindly elaborate how do you understand

`sz[mx] - k <= tot - sz[mx]`

condition? I cannot understand it till now :(

Assume we are solving the problem for a node u with k already matched vertices in the subtrees of u. We are free to decide which vertices are already matched. After making those decisions, we want sz[mx] <= tot - sz[mx]. In other words, we want sz[mx] as far below tot - sz[mx] as possible.

Suppose we match a vertex that isn't in the subtree mx. Then sz[mx] stays the same and tot decreases which brings sz[mx] closer to tot — sz[mx]. If we match a vertex that does belong to the subtree mx, then tot — sz[mx] stays the same and sz[mx] decreases. This brings sz[mx] farther from tot — sz[mx].

Therefore it's always optimal to match nodes from subtree mx and we arrive at the condition sz[mx] — k <= tot — sz[mx].

Stcodeforce I finally got it. Thanks a lot!

In E, what if ai+bi =aj+bj for some pair? The answer changes on which order we place i and J. How does the editorial handle this? (2,7)&(7,2) when placed in different order will result in different answers.

It gives the same answer. If A = [2, 7] & B = [7, 2]

If Alice picks (2,7) first then we have A =[1, 0] & B = [0, 1], so A-B = 0

If Alice picks (7,2) first then we have A = [0, 6] & B = [6, 0], so A-B = 0

We can also use Kosaraju's Algorithm for solving G1 in O(n²). :)

Like this : 238163692

I solved it using kosaraju in O(n)

we only need 2n edges in our graph the others are redundant

for each endpoint from 1 to 2n, I added an edge from the last range that contains this endpoint to this endpoint's range.

here: 237980086

Oh thanks!! I learned a new thing about graphs today!

Thanks for sharing!

excellent solution. Took me 2 hours to understand this but it's very enlightening how you reduced the complexity of the graph.

can u explain the reason behind deg array , why u incremented for only those edges , which are not in same component ,and to find final ans , u took only those deg whose value is 0??

deg[u] = the in-degree of the SCC u (number of edges that connect another SCC != u to u)

to calculate the final answer we need to turn on any light in each SCC where the in-degree is 0 because if it is greater than zero then turning on any light in this SCC is not sufficient to turn on all other lights in the component

The problems were enjoyable and informative. F and G especially as they clear important graph concepts. Thank you for the great contest!

"I'm still a bit confused about the conclusion below. Can anyone provide an explanation or proof?" Thanks a lot. " This is a well-known problem with the following solution. Let tot be the total number of objects and mx be the type that has the maximum number of objects (maximum value of sz ). If the number of objects of type mx is at most the number of all other objects (i. e. szmx≤tot−szmx ), then we can pair all objects (except 1 if the total number of objects is odd). Otherwise, we can create tot−szmx pairs of the form (mx,i) for all i except mx . "

A similar logic is found in this recent problem: 1907C - Removal of Unattractive Pairs.

Am also trying to figure it out myself... Here's my thought process:

Pair up nodes belonging to the tree of size $$$sz_{max-1}$$$ with the tree of $$$sz_{max-2}$$$, the leftovers are then paired with the nodes belonging to the tree of size $$$sz_{max-3}$$$ etc. until we reached either $$$sz_{max}$$$ or $$$sz_{max}-1$$$ left, depending on whether it's odd or even. Then pair all those nodes with the nodes belonging to the tree of $$$sz_{max}$$$.

Claim: We can always do it until the base case is reached. Suppose not, that means we're left with nodes from a singular subtree of size > $$$sz_{max}$$$. This leads to a contradiction.

Do let me know if there are any mistakes as I only roughly thought about it.

i first thought about it too. But consider this case: assume their sizes are

`10 4 4 4`

following your calculation, it becomes:

`6 4 4`

->`2 4`

->`2`

so 2 nodes are left and cant be paired up. But in this case we can actually pair all them up.

Sorry if it wasn't clear. Here is what I meant using your example:

In sorted order, the size of the tree would be 4,4,4,10. We ignore 10 for now [Note the proof was that we pair the tree with size $$$sz_{max-1}$$$ not $$$sz_{max}$$$ ], and repeatedly pair the nodes using the procedure until we have 10 left. (Lets name the subtrees be T1, T2, T3, T4)

We pair T2 and T3 until we have 10 left [Note, we do not start pairing from T4]. In this case, just pairing 1 from each tree would be enough, then pair the remaining 10 from T1,T2,T3 with the nodes from T4.

thank u sir

Hello, kelzzin2_fan. Your proof helped me a lot. But I couldn't understood the condition used in the problem(

`sz[mx] - k <= tot - sz[mx]`

). Could you kindly elaborate on the condition(`sz[mx] - k <= tot - sz[mx]`

) please?`(i. e. szmx≤tot−szmx ), then we can pair all objects (except 1 if the total number of objects is odd).`

In problem F, why is it possible to pair them up all in this case? Could anyone prove it is true?

If sz_mx is smaller than the number of elements outside its subtree, then you can choose sz_mx elements from the tot - sz_mx elements outside it (call them remaining elements) and use them to pair with the sz_mx elements. For example, let's consider several groups of sizes 7 2 2 2 2 2 2.

Here sz_mx = 7 and there are 12 remaining elements; so you can take elements from (2,2,2,2,2,2) one by one and pair them with each element of the group of 7. (It won't matter which group you take an element from to pair it — you are only concerned with the number of groups.)

after you have paired 7 elements you are still left with 1, 2,2.

Out of these elements, you can again use the above analogy to pair 2 elements with the other 2 elements.

and all you will be left with is 1 element(as total elements were odd, 1 element will always be left behind).

And also take example of 7, 2, 2

here you can match only remaining 4 with 7 to get 4 pairs.

3 will be left out. for which you have to see recursively

First, consider two rows of boxes with length x. We will put 2x colored balls in these boxes, and match those in the same column if their colors differ, and try to make as many pairs as possible (note that the maximum number of pairs that we can make is x, and if the total size is odd it is obvious that one is always left over, so we are considering an even-numbered case).

Now, let's consider the maximum number of balls with the same color (which corresponds to "szmx"). If szmx is below x (the number of columns), we can completely contain szmx in the first row. Therefore, if we pack the same color of balls in order from the front of the row, the colors of balls in the same columns will be all different: since szmx was the maximum number (and below x), other colors will also have fewer balls than x and will never fill the same column if we pack them consecutively.

On the other hand, if szmx is larger than x, no matter how we pack these balls it doesn’t fit in one row, and It will be optimal to pair szmx with the rest one-on-one (note that if szmx is larger than x, the sum of others will be below x).

There is a nice problem using ideas similar to this in AtCoder, so I suggest you solve it if you are interested : https://atcoder.jp/contests/abc227/tasks/abc227_d

Nice way of thinking about it, thanks!

Awesome Visualization! Thanks

can someone explain the intuition behind process block function of g2. Is there any general set of problems I can study on this topic?

What does this means in problem G1?

"the number of sets S of minimum size"

I am not able to understand how it's calculated, for the given testcases.

Thanks

Suppose S is a set that satisfies the condition that all the light bulbs are on in the end by performing the given operations. Then, out of all possible 'S', some would have a minimum size; we need to tell that minimum size and how many such 'S' are there.

Thanks Man

Can you tell me why the answer of the following testcase is 16, not 32?

5

4 1 2 3 4 2 1 3 5 5

How to solve E1 using only brute force approach, without the use of sorting.

store 3 max elements and there index by maintaining three variables for each array and at last manually compare all cases.

I asked for problem E1(Game with Marbles (Easy Version))

you can do it in n! ways which is the number of ways to arrange it.

Can use recursion or maybe next_permuation method and simulate and find maximum.

Use DFS or next_permutation(C++) or something and brute force the order.

Yes, but by using it only different scores can be calculated, how the optimal score be calculated?

Use DFS (or next_permutation), and calculate all patterns reachable from the current situation (assuming that both players act optimally) and adopt the optimal value for the current player.

This "foreseeing" brute force method is quite usual, and it can be applied to other game problems (It is called "retrograde analysis").

Code : https://mirror.codeforces.com/contest/1914/submission/238277270

You can use recursion, and handle alice or bob chance alternatively. If it's alice turn, you would compute all possible elements you can choose and choose the one that gives max answer to you, and if it's bob's turn, you choose the one that gives minimum

how to estimate probability of case when XOR-hashing fails in G2? i understand that intuitively this bound is small and everything should work well but have no idea how to find this bound

For any $$$n$$$ numbers, the probability of any two different subsets having equal XOR is approximately $$$1/2^k$$$, where $$$k$$$ is the size of the XOR-basis of those $$$n$$$ numbers.

Which I have proven it here.

Now, if those $$$n$$$ numbers are chosen

randomlyfrom $$$1$$$ to $$$1e18$$$, then there is a high chance that at least $$$k \geq 20$$$ (size of basis). I can give you a rough idea of what the XOR basis is and why $$$k \geq 20$$$ below.(you can know more about xor-basis here.)

Say $$$a_1$$$ is the first random number, $$$a_2$$$ is the second random number.

Then the probability that $$$a_2 == a_1$$$ is $$$1/10^{18}$$$ (forget them being equal).

$$$a_3$$$ = 3rd => the probability that $$$a_3$$$ can be represented as any subset XOR of $$${a_1, a_2}$$$ is $$$2^2/10^{18}$$$ (again forget it).

.

.

.

.

$$$a_{20}$$$ = 20th random number. The probability that $$$a_{20}$$$ can be represented as any subset XOR of $$${a_1, a_2, ..., a_{19}}$$$ is $$$2^{19}/1e18 = 1/2^{44}$$$. (which is again too low.)

[An XOR basis is a smallest set of $$$k$$$ numbers $$$x = \{x_1, x_2, ..., x_k\}$$$ using which you can represent each of your $$$n$$$ numbers as the XOR of some subset of $$$x$$$. All of the 20 (or more) numbers above actually form part of an XOR basis for the $$$n$$$ random numbers.] So, you get the idea: the size of the XOR basis is at least 20, and therefore the probability of failing is at most $$$1/2^{20}$$$.

(although 20 is very lenient number., it will be around 50+ but <= 64.)

thanks!!

could someone tell me why is there 'return add + calc(mx, max(0, k + add — 1));' -1 done in the k+add-1 parameter which is passed in the calc function? What's the reason for subtracting 1 from there?

one of the matched nodes is taken to be the

mxnodethus now, in the subtree under mx, there will

one lessmatched nodes.Can you explain why k is also added in this? I think it should be just

`add - 1`

: the number of matched nodes in the subtree under mx nowMy doubt concerning the provided solution to F in the editorial: BledDest

According to the tutorial, we are assuming the

kmatched nodes are from the subtree undermx, as it is comparing:sz[mx]-k >= tot-sz[mx]If the size of the

mxandmx-1children are comparable, this can lead to a case where extra teams are counted as shown in the example:see the illustration of the counter example...

Here k = 4: removes the 4 nodes from the mx branch. Now this leaves 2 nodes in the mx branch. However mx-1 branch has 6 nodes left, all one under the other. Thus it will lead to making of only 4 teams. But the formula computes 5 teams.

A

more optimal strategyis to distribute the k matching nodes in the order ofdecreasing subtree size.Eg if the subtree sizes are 7 4 4 2, and

if k = 3: distribute as 7(1), 4(1), 4(1), 2(0)

if k = 6: distribute as 7(2), 4(2), 4(1), 2(1)

This leaves out all the different types of nodes for matching.

Would like to get opinion on this from others...

we always look to distribute k optimally. but in the case when the condition is not met:

`sz[mx] - k > tot - sz[k]`

, all the k matched nodes belong to mx-th child.Funnily enough,before reading the editorial I thought that my solution was different from the one that was planned(

can someone please guide me to some blogs regarding the xor hashing technique that has been used in G2. I am not able to understand how generating random makes it obvious that the segment would become 0 only when each value is present even number of times like (0,1,1,0). But let's assume that for (0,1,2...) the randomly generated numbers are (1,14,15..) then the subarray (1,14,15) would also become zero and it would create problems in our solution.

It would be really helpful if you could suggest some blogs or give the explanation of above doubt.

I have same query also look array=[1,2,3,4] it is creating xor = 0 at 3 (1^2^3). It is making no sense?

actually, the fact is that it is based on the probabilities. So , what we do is that we first change each arr[i] to random_value(arr[i]). Now if we take two of these random values and perform xor operation then the number we get is also random.Now that probability that the this number equal to 0 (when we don't want it to become 0) is very very low as the number hence generated is uniformly random.Since the number is uniformly random the probability of it becoming 0 is 1/10^18 (which is total numbers that are possible). Now notice that this probability increases as we perform the operation more and more times. In this question we are doing around 10^10 checks at max , so our probability will become ~1/10^8 which is pretty low for our solution to fail.

Note that it is possible to generate a test case on which the above solution might fail because probability is low but not equal to 0.

However since the number generation is random , so to find such a test case is very very difficult.So basically you should make sure that the random numbers generated must be different each time so that someone would not able to generate a test case for your solution.

I didnt get in the

Fsolution, in the last line ofcalcfunction: why`k + add - 1`

is passed?Shouldn't it be just

`add - 1`

? Because`add - 1`

will be the number of matched nodes now in the subtree undermx.k also needs to be passed further

this is because of the condition did not meet:

`sz[mx] - k > tot - sz[mx]`

hence, all the k matched nodes still assigned to the mx-th child

Had this condition been met, it was not necessary all these nodes belong to the mx-th child node, rather would be in a way so as to maximize pairing.

I've observed a discrepancy in the rating increase on my account. After solving three questions, my rating only increased by 32 points (from 970 to 1002). However, I've noticed other users who have gained a higher rating increase, such as 53 points (from 1065 to 1118) after solving two questions. I want to express my concern and kindly request a reevaluation or clarification on the rating calculation process. I understand the importance of fairness and accuracy in such systems. Could you please assist me in understanding the reason behind this difference? I appreciate your attention to this matter and look forward to a resolution. Thank you.

Most probably, it is because that person you are talking about(1065-->1118) had given 5 or lesser contests before this one, the rating changes are much more favourable to the user in the first 6 contests.

I didn't understand problem E2.Can someone help me with this please

Explaining the logic why sorting according to

`ai + bi`

makes sense.Consider current score as S.

Now, think of two index i and j, where values in a and b are still > 0.

Let these two index be such that Alice takes one, and Bob takes the other.

Final effect on Score after the 2 moves:

Case1 (Alice takes ith, Bob takes jth marble) :

`S1 = S + (a[i]-1) — (b[j]-1)`

Case2 (Alice takes jth, Bob takes ith marble) :

`S2 = S + (a[j]-1) — (b[i]-1)`

If S1 > S2:

`S + (a[i]-1) — (b[j]-1) > S + (a[j]-1) — (b[i]-1)`

`(a[i]-1) + (b[i]-1) > (a[j]-1) + (b[j]-1)`

`(a[i] + b[i]) > (a[j] + b[j])`

Therefore, if both players move optimally,

They take the marble from the index which has the largest sum, a[k] + b[k]

Hence, the approach:

Sort and construct the score based on

`(a[k] + b[k])`

is a correct approach.

What does "given m types of objects" mean in the editorial of F? I still don't understand it. What is the object?

In E2, instead of subtracting 1 each time, you can just subtract n%2 at the end, because the number of moves is n and Alice starts first.

Nothing much but it could be useful in another problem.

For reference: 238021477

For Problem F, Just iteratively paired 2 leaves of lowest depth, and removed them.

Submission : 238023214

i did same. any idea why it worked?

about F. we can search for the heaviest chain on the tree in the first place. call nodes that's on the chain chain_node, and the others match_node.

for any chain_node, all of its children that's not on the chain is not available until we come to the node.

cruising down the chain from the root node 1, if we have any match_node, we use it to match the current chain_node.

during the process if it occurs that we run out of match_nodes, it means so far on the chain,no matter how hard we try we are destined to leave one more chain_node unmatched. so just skip the node.

having finished, if we have some match_nodes left, we can definitely match them optimally, which is res/2(round down).

why? just imagine there comes more nodes appending the rear of the chain, just as many as the rest match_nodes. So now we use all the match_nodes. And to get to the original tree, we withdraw the extra chain_nodes one by one, and at the same time match_nodes get released one by one. Since we can freely choose how the match_nodes get returned, just return them evenly.

hello Can anyone tell why it gives me Runtime Error here in problem D? mySubmission

The comparator you provided does not satisfy

strict weak ordering. To be specific, an element can never be

smaller than itself. Also, if a < b is true, then b < a can never be true.

Check out this 238882105 and compare with yours.

thx alot!

The solutions for problem F are mind-blowing — like a new world. But can anyone explain why, when a[mx] <= tot - a[mx], the answer would be tot / 2? I can see it when sketching it out, but I can't prove it.

That's because you can find

`tot / 2`

pairs of nodes such that the nodes within each pair come from different children. You can prove it by induction.

Assuming initially there is no child has more nodes than all other children combined. Each time, you can pick two nodes from two children with maximum sizes and make a pair with them. The rest of the nodes still satisfy the initial conditions. So you can keep taking two nodes at a time until you run out of nodes.

thank you

In the solution of G2, the author suggested xor hashing. Can you suggest some more useful hashing techniques?

Please help me solve this query: if I have arr = [1, 2, 3, 4], we get the xor of the first three numbers as 0, but it is known that 0 appears when all elements in arr occur an even number of times. What if the random numbers generated in the gen() function happen to be like this array?

nowadays every editorial has hints , please start giving hints in your editorial , humble request awoo

BledDest Is there any simple implementation for G1 with inner loop to find minimal closed segments and inner closed segments ? I tried the naive method but am getting TLE and not sure how to eliminate the extra O(n) in the inner loop.

my submission: 239199046

Why in problem F, for test case n=5 and p={1,2,3,4} we have answer=0 if we can form a pair with 2 and 5? Please explain if I am wrong.

Solved G2 using Disjoint Sets 239790750

In author's solution of G2, if I understand correctly, sequences like [1, 2, 3] will have XOR as 0, even though it ain't a minimal closed segment. So to reduce the probability of this happening, we replace each color with a random 64bit number. But there is always a chance of it failing? Hmm.. I don't like this. Please correct me if I am wrong.

G1(easy) code(c++) with comments:

My code — my submission: 239986061

nice explanation for G1 and G2

BledDest My G2 solution uses DSU:

https://mirror.codeforces.com/contest/1914/submission/240883989

Does anyone's solution for this test case for problem C in this contest give a result of 48 compared to the intended result of 79?

8 1 10 6 3 9 5 1 10 5 8 8

Can someone explain for this test case how the correct program chooses the right quest next?

Here is the submission:

240879100

For the D question I have written like this but it is showing memory limit exceeded. but why? how to optimise it?

Hi BledDest I don't understand the intuition behind the recursive idea in Problem F. Let's say we have $$$sz_1, sz_2 , \dots sz_m$$$ and assume that $$$sz_1$$$ is the maximum subtree. Assume that $$$sz_1 > tot - sz_1$$$. In that case we are saying that we take solution for subtree 1 (saying that $$$k$$$ nodes inside this subtree are already matched with something outside).

But, let's say we could match some nodes from remaining subtrees $$$[2 .. m]$$$ among themselves. For example, some nodes from subtree 2 could get matched to some nodes of subtree 4 and so on. In that case, the $$$k$$$ that would be passed to the recursive function could be lesser. Any matching within subtrees $$$[2 .. m]$$$ reduces the pairings by 1, but decreases $$$k$$$ by 2 meaning that subtree 1 can now use two additional vertices and could potentially increase the answer by 2. Just thoughts. Maybe there is some flaw in my argument

Solved G in a bit different way. First we split array into minimal closed segments staring from the first element, then solve the problem for every segment separately. It's enough to count the number of positions such that, if we light them up, eventually light the first element of segment. Let the segment be $$$(a_0, a_1, ..., a_{2m-1})$$$, then consider

colour segments $$$[l_i; \;r_i]$$$ with equal elements on their borders, i.e. $$$a_{l_i} = a_{r_i}$$$ $$$(i = \overline{1,m})$$$, and they are sorted in increasing order by $$$l_i$$$. Then position $$$i \in \{0, 1, ..., len-1\}$$$ with corresponding colour segment $$$[l_j; \;r_j]$$$ ($$$i = l_j$$$ or $$$i = r_j$$$) is "good" if and only if $$$i = l_1$$$ OR $$$i = r_1$$$ OR $$$r_1 \in [l_j; \;r_j]$$$ OR $$$[l_j; \;r_j]$$$ contains a "good" position. We can maintain

`std::set`

with indices of "good" positions and`std::set`

with segments between "good" positions. First we insert left and right borders of segments containing $$$r_1$$$. Then for every between-segment $$$[l; \;r]$$$ we check if at least one of colour segments with left border in $$$[l; \;r]$$$ intersects with $$$r+1$$$ (we can do this with segment tree for maximum containing right borders for every left border). If such segment exists, we increment the answer by 2, add its left and right borders in set of "good" positions, recalculate the between-segments, else we just discard the segment because none of its positions can light up the entire closed segment.It's not the easiest way to solve the problem but it was understandable for me. The implementation much harder than in edutorial: https://mirror.codeforces.com/contest/1914/submission/278439465