Thanks for participating in the round! I hope you enjoyed the problems!
#include <bits/stdc++.h>
using namespace std;
int main(){
    int t; cin >> t;
    for(int i = 0; i < t; i++){
        long long n, s;
        cin >> n >> s;
        //Every element is either at most n-1 or exactly n^2, and there are only n+1 of them,
        //so the non-n^2 elements sum to at most (n+1)(n-1) < n^2.
        //Hence the number of elements equal to n^2 is s / n^2 (integer division).
        cout << s / (n * n) << "\n";
    }
    return 0;
}
It can be proven that (try to prove it!) if some $$$k$$$ works, then $$$k=\displaystyle \left \lfloor \frac{n-1}{2} \right \rfloor$$$ has to work. This means that there is always an answer using $$$n$$$ or $$$n-1$$$ elements, depending on parity.
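A possible argument, in case you want to check your own proof afterwards: sort the array so that $$$a_1 \le a_2 \le \cdots \le a_n$$$, and for a given $$$k$$$ paint the $$$k+1$$$ smallest elements blue and the $$$k$$$ largest red. Going from $$$k$$$ to $$$k+1$$$ (which requires $$$2k+3 \le n$$$) adds $$$a_{k+2}$$$ to the blue sum and $$$a_{n-k}$$$ to the red sum, and $$$a_{k+2} \le a_{n-k}$$$ because $$$k+2 \le n-k$$$. So if the inequality holds for some $$$k$$$, it keeps holding as $$$k$$$ grows, and in particular it holds for the largest valid $$$k=\left\lfloor \frac{n-1}{2} \right\rfloor$$$.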
#include <bits/stdc++.h>
using namespace std;
int main(){
    ios::sync_with_stdio(0);cin.tie(0);cout.tie(0);
    int t; cin >> t;
    for(int test_number = 0; test_number < t; test_number++){
        int n; cin >> n;
        vector <long long> a(n);
        for(int i = 0; i < n; i++){
            cin >> a[i];
        }
        sort(a.begin(), a.end());
        //prefix_sums[k] = sum of the k smallest elements
        vector <long long> prefix_sums = {0};
        for(int i = 0; i < n; i++){
            prefix_sums.push_back(prefix_sums.back() + a[i]);
        }
        //suffix_sums[k] = sum of the k largest elements
        vector <long long> suffix_sums = {0};
        for(int i = n - 1; i >= 0; i--){
            suffix_sums.push_back(suffix_sums.back() + a[i]);
        }
        bool answer = false;
        //Paint the k+1 smallest elements blue and the k largest elements red
        for(int k = 1; k <= n; k++){
            if(2 * k + 1 <= n){
                long long blue_sum = prefix_sums[k + 1];
                long long red_sum = suffix_sums[k];
                if(blue_sum < red_sum){
                    answer = true;
                }
            }
        }
        if(answer) cout << "YES\n";
        else cout << "NO\n";
    }
    return 0;
}
1646C - Factorials and Powers of Two
Actually, there is no problem in repeating $$$1$$$, $$$2$$$, or any other power of two. This is because it can be proven that (try to prove it!) those sums are not optimal.
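One way to see it (a sketch, if you want to compare with your own proof): in this enumeration the only values that can appear twice are $$$1$$$ and $$$2$$$ (once as $$$1!$$$ or $$$2!$$$, once as $$$2^0$$$ or $$$2^1$$$), and both are powers of two. Since $$$2^a + 2^a = 2^{a+1}$$$, any sum that repeats a power of two can be merged (possibly cascading, like carries in binary addition) into a valid sum with strictly fewer summands, so a sum with repetitions never achieves the minimum.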
#include <bits/stdc++.h>
using namespace std;
const long long MAXAI = 1000000000000ll;
int get_first_bit(long long n){
    return 63 - __builtin_clzll(n);
}
int get_bit_count(long long n){
    return __builtin_popcountll(n);
}
int main(){
    ios::sync_with_stdio(0);cin.tie(0);cout.tie(0);
    int t; cin >> t;
    for(int test_number = 0; test_number < t; test_number++){
        long long n; cin >> n;
        //Computing factorials <= MAXAI
        vector<long long> fact;
        long long factorial = 6, number = 4;
        while(factorial <= MAXAI){
            fact.push_back(factorial);
            factorial *= number;
            number++;
        }
        //Computing masks of factorials
        vector<pair<long long, long long>> fact_sum(1 << fact.size());
        fact_sum[0] = {0, 0};
        for(int mask = 1; mask < (1 << fact.size()); mask++){
            auto first_bit = get_first_bit(mask);
            fact_sum[mask].first = fact_sum[mask ^ (1 << first_bit)].first + fact[first_bit];
            fact_sum[mask].second = get_bit_count(mask);
        }
        long long res = get_bit_count(n);
        for(auto i : fact_sum){
            if(i.first <= n){
                res = min(res, i.second + get_bit_count(n - i.first));
            }
        }
        cout << res << "\n";
    }
    return 0;
}
#include <bits/stdc++.h>
using namespace std;
const int MAXN = 400005;
vector<int> g[MAXN];
bool vis[MAXN];
int pa[MAXN];
//DFS to compute the parent of each node
//parent of node i is stored at pa[i]
void dfs(int v){
    vis[v] = 1;
    for(auto i : g[v]){
        if(!vis[i]){
            pa[i] = v;
            dfs(i);
        }
    }
}
pair<int, int> dp[MAXN][2];
//Computes the value of function f, using dp
//the second coordinate of the pair is negated (to take maximums)
pair<int, int> f(int x, int y){
    pair<int, int> & res = dp[x][y];
    if(res.first >= 0) return res;
    res = {y, y ? -((int) g[x].size()) : -1};
    for(auto i : g[x]){
        if(i != pa[x]){
            pair<int, int> maxi = f(i, 0);
            if(y == 0) maxi = max(maxi, f(i, 1));
            res.first += maxi.first;
            res.second += maxi.second;
        }
    }
    return res;
}
vector<int> is_good;
//Recursive construction of the answer
//is_good[i] tells whether vertex i is good or not.
void build(pair<int, int> value, int v){
    if(value == f(v, 0)){
        is_good[v] = 0;
        for(auto i : g[v]){
            if(i != pa[v]){
                build(max(f(i, 0), f(i, 1)), i);
            }
        }
    }else{
        is_good[v] = 1;
        for(auto i : g[v]){
            if(i != pa[v]){
                build(f(i, 0), i);
            }
        }
    }
}
int main(){
    ios::sync_with_stdio(0);cin.tie(0);cout.tie(0);
    int n; cin >> n;
    for(int i = 0; i < n - 1; i++){
        int u, v;
        cin >> u >> v; u--; v--;
        g[u].push_back(v);
        g[v].push_back(u);
    }
    if(n == 2){
        cout<<"2 2\n1 1\n";
        return 0;
    }
    pa[0] = -1;
    dfs(0);
    for(int i = 0; i < n; i++) dp[i][0] = {-1, -1}, dp[i][1] = {-1, -1};
    pair<int, int> res = max(f(0, 0), f(0, 1));
    cout << res.first << " " << -res.second << "\n";
    is_good.resize(n);
    build(res, 0);
    for(int i = 0; i < n; i++){
        if(is_good[i]) cout << g[i].size() << " ";
        else cout << "1 ";
    }
    cout << "\n";
    return 0;
}
This solution uses the fact that $$$m\le 10^6$$$; other solutions do not use this and work for much larger values of $$$m$$$ (like $$$m\leq 10^{18}$$$, taking the answer modulo some big prime number). Try to solve the problem with this new constraint!
#include <bits/stdc++.h>
#define fore(i,a,b) for(ll i=a,ggdem=b;i<ggdem;++i)
using namespace std;
typedef long long ll;
const int MAXM = 1000006;
const int MAXLOGN = 20;
bool visited_mul[MAXM * MAXLOGN];
int main(){
    ll n, m; cin >> n >> m;
    //mul_quan[k] = number of distinct products i*j with 1 <= i <= k and 1 <= j <= m
    vector<ll> mul_quan(MAXLOGN);
    ll current_vis = 0;
    fore(i, 1, MAXLOGN){
        fore(j, 1, m+1){
            if(!visited_mul[i * j]){
                visited_mul[i * j] = 1;
                current_vis++;
            }
        }
        mul_quan[i] = current_vis;
    }
    //Group the rows by their smallest base; a group with power_quan rows (powers of
    //the same base) contributes mul_quan[power_quan] distinct values.
    //res starts at 1 to account for the value 1 (the first row).
    ll res=1;
    vector<ll> vis(n + 1);
    fore(i, 2, n+1){
        if(vis[i]) continue;
        ll power = i, power_quan = 0;
        while(power <= n){
            vis[power] = 1;
            power_quan++;
            power *= i;
        }
        res += mul_quan[power_quan];
    }
    cout << res << "\n";
    return 0;
}
1646F - Playing Around the Table
#include <bits/stdc++.h>
#define fore(i, a, b) for(int i = a; i < b; ++i)
using namespace std;
const int MAXN = 1010;
//c[i][j] = number of cards player i has, with the number j
int c[MAXN][MAXN];
//extras[i] is the stack of repeated cards for player i.
vector<int> extras[MAXN];
int main(){
    ios::sync_with_stdio(0);cin.tie(0);cout.tie(0);
    int n; cin >> n;
    fore(i, 0, n){
        fore(j, 0, n){
            int x; cin >> x; x--;
            c[i][x]++;
            if(c[i][x] > 1) extras[i].push_back(x);
        }
    }
    vector<vector<int>> res;
    //First part
    while(true){
        //oper will describe the next operation to perform
        vector<int> oper(n);
        //s will be the first non-diverse player
        int s = -1;
        fore(i, 0, n){
            if(extras[i].size()){
                s = i;
                break;
            }
        }
        if(s == -1) break;
        //last_card will be the card that the previous player passed
        int last_card = -1;
        fore(i, s, s + n){
            int real_i = i % n;
            if(extras[real_i].size()){
                last_card = extras[real_i].back();
                extras[real_i].pop_back();
            }
            oper[real_i] = last_card;
        }
        res.push_back(oper);
        fore(i, 0, n){
            int i_next = (i + 1) % n;
            c[i][oper[i]]--;
            c[i_next][oper[i]]++;
        }
        fore(i, 0, n){
            int i_next = (i + 1) % n;
            if(c[i_next][oper[i]] > 1) extras[i_next].push_back(oper[i]);
        }
    }
    //Second part
    fore(j, 1, n){
        vector<int> oper;
        fore(i, 0, n) oper.push_back((i - j + n) % n);
        fore(i, 0, j) res.push_back(oper);
    }
    cout << res.size() << "\n";
    for(auto i : res){
        for(auto j : i) cout<< j + 1 << " ";
        cout << "\n";
    }
    return 0;
}
Additionally, ak2006 has made video editorials for problems B, C and D.
For Problem B, in Java, the Arrays.sort(a) method got TLE for int[], while it worked for Integer[]. Wasted more than an hour without realising this :(
That is due to the different algorithms used in the two sorting methods.
I used the sort() function and it didn't give me any TLE in C++.
C++'s sort() always runs in $$$O(N \log N)$$$, while Java's Arrays.sort() on primitive arrays has a worst-case time complexity of $$$O(N^2)$$$.
If you are/were getting a WA/RE verdict on any of the problems from this contest, you can get a small counter example for your submission on cfstress.com
Problems added: "A, B, C, D, E, F".
If you are not able to find a counter example even after changing the parameters, reply to this thread, mentioning the contest_id, problem_index and submission_id.
Wow that's amazing.
The editorials for D and E feel more complicated than the problems, so they're not much help. I think not solving D and E is caused mostly by a lack of math skills. Unfortunately, the same lack of math skills makes it hard to understand the editorials.
I was able to solve E, and I think it's not that difficult. Here's how I solved it
Submission should be easy to understand 148377896
Thanks.
D's editorial seems fine. If someone has at least tried the problem, they will be able to connect with it.
Where do you need math in D's editorial?
Failed C system tests with the correct solution using pypy :(
This solution for problem E works for much larger values of $$$m$$$ (like $$$m\leq 10^{18}$$$). We just need to take modulo at some places.
E can also be solved with the inclusion-exclusion principle and also works with large $$$m$$$. 148397426
Can you share some resources on the Inclusion-Exclusion Principle, please :)
I'm not an expert on that, and this is the only English resource that I've read so far: https://cp-algorithms.com/combinatorics/inclusion-exclusion.html#the-verbal-formula.
For problem E, the same solution can be extended to work with large $$$n$$$ as well. Observe that in the part where you do the inclusion-exclusion, the only thing that matters is the $$$lcm$$$ of all the numbers. Therefore if you store only the lcm, you can get an $$$O(polylog(n))$$$ algorithm.
This solution can solve the problem for $$$n \leq 10^{15}, m \leq 10^{18}$$$.
Code: 148407574
Woah, but how does that work?
Can someone please explain why I am getting TLE in C?
My solution is: https://mirror.codeforces.com/contest/1646/submission/148397036
Most likely quite late already, but it fails because you are using int for 'i1' and looping until it reaches 1e12, while INT_MAX is 2^31 - 1 (about 2.1e9).
There might be other conditions that make it fail, but I only checked up to this point, since this alone should already make it fail.
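A minimal sketch of that failure mode (hypothetical variable names, not the actual submission):
#include <bits/stdc++.h>
using namespace std;
int main(){
    const long long LIMIT = 1000000000000LL; // 1e12 fits in a long long
    // for(int i1 = 1; i1 <= LIMIT; i1 *= 2)   // BAD: an int can never exceed INT_MAX (2^31 - 1),
    //                                         // so i1 overflows (undefined behavior) long before 1e12
    for(long long i1 = 1; i1 <= LIMIT; i1 *= 2) // OK: 64-bit counter
        cout << i1 << "\n";
    return 0;
}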
Don't know why my solution Math.floor(s/(n*n)) in JavaScript didn't work for Problem A, anybody know why?
Try n = 1, s = 10^18 - 1 on your solution (it outputs 10^18). I don't know JS, but it's obviously due to typing and precision errors.
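A quick way to see it (a C++ sketch of the same computation; JS behaves analogously, since its numbers are 64-bit doubles):
#include <bits/stdc++.h>
using namespace std;
int main(){
    long long n = 1, s = 999999999999999999LL; // s = 10^18 - 1
    // Exact integer division: prints 999999999999999999
    cout << s / (n * n) << "\n";
    // Through a 64-bit double (what s/(n*n) does in JS): 10^18 - 1 is not representable,
    // it rounds to 10^18, so this prints 1000000000000000000
    cout << fixed << setprecision(0) << floor((double) s / (double) (n * n)) << "\n";
    return 0;
}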
Actually, there is no problem in repeating 1, 2, or any other power of two. This is because it can be proven that (try to prove it!) those sums are not optimal.
I spent about 30 minutes on C just adding extra handling to my implementation to avoid the cases where I have two 1's or two 2's. If I had noticed this, I would probably have finished the problem within 20 minutes. Thanks for the editorial!
You can consider that you are supposed to represent the number as a sum of powers of two or factorials of numbers greater than 2. In other words, don't consider 1 and 2 as factorials. This way you won't have to worry about repetition.
An addition for problem E: I used inclusion-exclusion, and it works for m <= 1e18: https://mirror.codeforces.com/contest/1646/submission/148374136
yay, editorial!!!
Hi, I wanted to write this yesterday, but I went to sleep, so I am writing it now in the hope that no one else has discovered it (which should have a pretty low chance).
So F is from the All-Russian MO. See this, and this as well if you want.
I had an alternate solution to F during testing.
First, given the input, let's rearrange it in such a way that each column is a permutation of $$$[1,n]$$$; we can do this using $$$n$$$ bipartite matchings in $$$O(n^4)$$$ or $$$O(n^3 \sqrt n)$$$. This way we can already assign which card goes to which player. This reduces the problem to solving $$$n$$$ permutations of cards, but doing so in parallel, where on the $$$i$$$-th operation, player $$$(i+j) \% n$$$ will pass one of the cards of the $$$j$$$-th permutation to the next player.
It is not too hard to figure out how to solve each permutation in $$$n(n-1)$$$ moves using a simple dfs.
code: 148411890
Why doesn't greedy work in C? We always take the closest powerful number and compute the answer. Where does it fail? Something like this: 148381259
Try this case: 1 768
The answer should be 2 :) I tried greedy for a full two hours.
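To spell the counterexample out (assuming the greedy repeatedly takes the largest powerful number that still fits): the largest powerful number not exceeding $$$768$$$ is $$$6! = 720$$$, and $$$768 - 720 = 48 = 32 + 16$$$, so greedy uses $$$3$$$ summands, while $$$768 = 512 + 256$$$ needs only $$$2$$$.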
For D, I discovered that there was this: https://courses.grainger.illinois.edu/cs473/sp2011/lectures/09_lec.pdf. However, the lecture does not completely solve the problem; you still need to figure out which nodes were chosen from the tree dp.
For 1646E - Power Board, even if we use an unordered_set in place of visited (used in the editorial's code), it gives a TLE for m = 1e6.
unordered_set operations are said to be O(1), right? Then why is that happening, or am I calculating the complexity wrong?
code
https://mirror.codeforces.com/blog/entry/62393
Also, unordered_map gave me an MLE during the contest, so I had to switch to a vector or bitset for marking the numbers.
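In case it is useful, here is a sketch of the randomized hash suggested in the linked blog post (to avoid anti-hash tests). Even with it, unordered containers keep a large constant factor, so for a dense range like the roughly 2e7 products in this problem, a plain vector of flags or a bitset (as in the editorial) is usually both faster and smaller:
#include <bits/stdc++.h>
using namespace std;
struct custom_hash {
    static uint64_t splitmix64(uint64_t x){
        x += 0x9e3779b97f4a7c15;
        x = (x ^ (x >> 30)) * 0xbf58476d1ce4e5b9;
        x = (x ^ (x >> 27)) * 0x94d049bb133111eb;
        return x ^ (x >> 31);
    }
    size_t operator()(uint64_t x) const {
        static const uint64_t FIXED_RANDOM = chrono::steady_clock::now().time_since_epoch().count();
        return splitmix64(x + FIXED_RANDOM);
    }
};
int main(){
    unordered_set<long long, custom_hash> visited;
    visited.reserve(1 << 22); // reserving up front avoids repeated rehashing
    visited.insert(42);
    cout << visited.count(42) << "\n";
    return 0;
}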
My code for problem D is working fine on my local machine but gives the wrong output on the judge. Can anyone please point out my mistake? My Submission
Multiply by -1LL wherever you want to negate.
Thanks a lot, buddy. But can you please tell me why it was giving such an output? I have just changed (-graph[vertice].size()) to (-1ll*graph[vertice].size()) and it passed ^_^
The size() method returns an unsigned integer, which is converted to signed in the second expression, but not in the first.
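A minimal snippet showing the effect:
#include <bits/stdc++.h>
using namespace std;
int main(){
    vector<int> g = {1, 2, 3};
    // size() returns an unsigned type, so the negation wraps around and the whole
    // expression stays unsigned: this comparison prints 0 (false)
    cout << (-g.size() < 0) << "\n";
    // Forcing signed arithmetic (with a cast, or the -1LL trick mentioned above)
    // gives the intended -3
    long long negated = -(long long) g.size();
    cout << (negated < 0) << " " << negated << "\n"; // prints "1 -3"
    return 0;
}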
thanks
I made video Solutions for A-D in case someone is interested.
you are wonderful !
How do you solve the additional comment for problem E? I need help.
I was having a hard time understanding the PIE (inclusion-exclusion) solutions, but I think I figured it out. Here's my explanation.
Following the editorial, the problem boils down to counting how many distinct integers are in the set $$$S = \{\, i \cdot j : 1 \le i \le k,\ 1 \le j \le m \,\}$$$.
For an integer $$$x$$$ in $$$S$$$, let $$$mask(x)$$$ be a $$$k$$$-bit number where the $$$i$$$-th bit is on iff $$$x = (i+1)y$$$ for some integer $$$y\leq m$$$. Let $$$f(i)$$$ be the number of integers $$$x$$$ such that $$$mask(x) = i$$$. The final answer is $$$f(1) + f(2) + f(3) + \cdots + f(2^{k}-1)$$$.
But $$$f$$$ is hard to compute directly; instead we will do PIE on $$$f$$$ using another value that we can compute easily. Let $$$g(i)$$$ be the sum of $$$f(j)$$$ over all $$$j$$$ such that $$$i$$$ is a submask of $$$j$$$; then $$$f(1) + f(2) + f(3) + \cdots + f(2^{k}-1) = \sum_{n=1}^{2^{k}-1}{(-1)^{popcount(n)+1}g(n)}$$$. To compute $$$g(i)$$$, let $$$a_{0},a_{1},a_{2},\cdots$$$ be the divisors corresponding to the bits that are on in $$$i$$$ (sorted in increasing order). If $$$x$$$ is counted by $$$g(i)$$$, then $$$x$$$ has to be divisible by $$$lcm(a_{0},a_{1},a_{2},\cdots)$$$, since $$$x = a_{0}b_{0} = a_{1}b_{1} = a_{2}b_{2} = \cdots$$$. Thus $$$g(i)$$$ counts how many distinct $$$x$$$ in $$$S$$$ can be written as $$$lcm \cdot y$$$. We can notice that $$$y\leq \frac{m}{lcm/a_{0}}$$$, thus $$$g(i) = \left\lfloor \frac{m}{lcm/a_{0}} \right\rfloor$$$, which can be computed in $$$O(k)$$$. Back to the original problem, we can precompute and solve the above subproblem for all possible $$$k$$$ in $$$O(k2^{k})$$$, yielding a total complexity of $$$O(k2^{k} + n)$$$. $$$k$$$ is at most $$$\log n$$$, so this runs fast enough.
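For concreteness, here is a minimal sketch of the subproblem above for a single $$$k$$$ (the function and variable names are only for illustration); it assumes $$$k$$$ is small enough that the lcm fits in 64 bits:
#include <bits/stdc++.h>
using namespace std;
typedef long long ll;
// Counts the distinct integers of the form i*j with 1 <= i <= k and 1 <= j <= m.
// For a non-empty mask of rows, every x counted by g(mask) is a multiple of
// L = lcm(rows in the mask) and is at most a_min * m (a_min = smallest row in the mask),
// so g(mask) = floor(a_min * m / L); summing with sign (-1)^(popcount+1) gives the answer.
ll count_distinct_products(int k, ll m){
    ll res = 0;
    for(int mask = 1; mask < (1 << k); mask++){
        ll L = 1, a_min = -1;
        for(int bit = 0; bit < k; bit++){
            if(mask >> bit & 1){
                ll a = bit + 1;
                if(a_min == -1) a_min = a; // smallest row in the mask
                L = L / __gcd(L, a) * a;   // running lcm
            }
        }
        ll g = a_min * m / L;
        res += (__builtin_popcount(mask) & 1) ? g : -g;
    }
    return res;
}
int main(){
    // Tiny sanity check: k = 2, m = 2 gives the products {1, 2, 4}, so it prints 3
    cout << count_distinct_products(2, 2) << "\n";
    return 0;
}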
Thank you very much! I have understood your explanation! PIE refers to the inclusion-exclusion principle, right? (I hadn't heard of that name, but I guessed it from your formula ahahahaha). Besides, on the basis of your explanation, I found an important detail during implementation: we can't do PIE simply on the range [1, k*m]; it must be done in blocks: [1, m], [m+1, 2*m], ..., [(k-1)*m+1, k*m]. That's because divisor 1 isn't legal in the range [m+1, 2*m], divisor 2 isn't legal in the range [2*m+1, 3*m], and so on. But the complexity didn't change.
I'd be grateful if you could briefly explain why $$$y\le m/(\frac {lcm}{a_0})$$$.
upd: I realized it soon after. First, the biggest $$$b_i$$$ must belong to $$$a_0$$$, since $$$x$$$ is the same; and for $$$a_0b_0$$$ we have $$$b_0\le m$$$, so $$$x\le a_0m$$$, which means $$$y \cdot lcm\le a_0m$$$, and so on.
Why does greedy fail in problem C?
" In case both options (making it good or not) work, you have to choose to not make it good, as you do not know if its father was good or not."
How does this not affect making the maximum number of good nodes?
If you are constructing the answer in the subtree of $$$x$$$, it has to have value $$$v$$$, with $$$f(x,0)=f(x,1)=v$$$ (both options work). Then if you choose not to make vertex $$$x$$$ good, the maximum is not affected: since $$$f(x,0)=f(x,1)$$$, there is still a way to get the same number of good vertices.
Please, can anyone explain this for problem 1646A Square Counting?
When I first submitted the code lli n,m; cin>>n>>m; lli ans=m/pow(n,2); cout<<ans<<nl; it gave me a wrong answer.
But when I submitted the code below, it worked fine: lli n,m; cin>>n>>m; lli ans=m/(n*n); cout<<ans<<nl;
This is because the pow function returns a floating-point type. Using floating-point numbers in this problem could produce a wrong answer due to precision issues, or because you are printing a non-integer number as the answer.
Can someone explain how the solution for problem A works in cases where sum/(n*n) is greater than n+1? E.g., if sum is 100 and n = 4, then the aforementioned solution will yield ans = 6, but the size of the array is just n+1 (i.e. 5). Here's my code for this particular problem: 148312599
That is because the statement says:
It is guaranteed that the value of s is a valid sum for some sequence satisfying the above constraints.
So, n = 4 and s = 100 is not a valid input.
In problem C, what do __builtin_clzll() and __builtin_popcountll() do? Help O:
__builtin_clz() returns how many leading 0s there are when writing a number in binary, and __builtin_popcount() returns how many 1s there are.
For example, 5 is 101 in binary, and an int has 32 bits, so __builtin_clz(5) is 29 and __builtin_popcount(5) is 2.
__builtin_clzll() is the long long version of __builtin_clz(); __builtin_clzll(5) is 61, because a long long has 64 bits. The same goes for __builtin_popcountll().
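A tiny demo to check the values mentioned above:
#include <bits/stdc++.h>
using namespace std;
int main(){
    // 5 is 101 in binary
    cout << __builtin_clz(5) << "\n";        // 29: leading zeros in a 32-bit int
    cout << __builtin_popcount(5) << "\n";   // 2: number of set bits
    cout << __builtin_clzll(5) << "\n";      // 61: leading zeros in a 64-bit value
    cout << __builtin_popcountll(5) << "\n"; // 2
    return 0;
}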
Oh o: Thanks a lot!
Could anybody explain the logic behind problem C? thanks in advance!!
Did you check out the video by ak2006? It may be useful to you.
Hi there! Sorry for writing in English.. Our friends from all around the world here will understand that I am tired.
I am pretty sure that you can do 1646F in O(n^2) time, so better than the tutorial solution. Here's the thinking.
Afiu (as far as I understand), 1646F can also be done with max-flow, right? Just add a 1-capacity edge piece[player: i, card: c] --> piece[player: i+1, card: next_expected_card_after_[c]]. There are at most O(n^2) nodes, and thus with lift-to-front you can get O(n^6) complexity. But with the simplification above, that the matching of "cards of player i" to "cards of player i+1" is in fact independent of the matchings for [i+1, i+2] and [i-1, i], you can do bipartite matching and get just n * O(n^3) = O(n^4).
In fact, taking advantage of the fact that, for a solution to exist, all cards next[c] (over all cards c of player i) need to be among the cards of player i+1, you can simplify the bipartite matching step to a simple hash mapping: next[c] --> the position where next[c] is found for player i+1. This way the bipartite matching step goes down to O(n), and the total running time to O(n^2), right? So better than the tutorial solution.
What do you think about this problem --> generalization --> optimization (particularization) --> solution "simplification" of this problem to get to a better solution? Am I missing something in my argument?
Mircea
PS. The original line of thinking was: "Now, for 1646F, can't you get O(n^3) by simply doing a heuristic matching all the way up until you are left with, say, a maximum "non-solidity depth" of 2 or 3 (that is, the maximum number of cards some player still needs to become solid)? And then just do bipartite matching with FF flow, which works in O(n^3) since the maximum flow is O(n) and E = O(n^2)? And in the average case it may perform as well as O(n^2), since there should be around O(1) outgoing edges per card per player, no?"
PS2. So the bipartite matching in this problem (1646F) is actually a sort of bipartite "multimatching" over a set of just n semantically distinct values {1, 2, ..., n}.
Hey, in problem B, suppose we have two 10s; does that mean the count of 10s is 1?