Idea: BledDest
Solution (BledDest)
#include <bits/stdc++.h>
using namespace std;

void solve()
{
    string s;
    cin >> s;
    int n = s.size();
    // find the last position where two equal adjacent characters occur
    int idx = -1;
    for (int i = 0; i + 1 < n; i++)
        if (s[i] == s[i + 1])
            idx = i;
    if (idx == -1)
    {
        // no equal adjacent pair: append a character different from the last one
        if (s.back() == 'a') cout << (s + "b") << endl;
        else cout << (s + "a") << endl;
    }
    else
    {
        // break the equal pair by inserting a different character between them
        string t = "a";
        if (s[idx] == 'a') t = "b";
        cout << s.substr(0, idx + 1) + t + s.substr(idx + 1) << endl;
    }
}

int main()
{
    int t;
    cin >> t;
    for (int i = 0; i < t; i++)
        solve();
}
Idea: BledDest
Solution (Neon)
#include <bits/stdc++.h>
using namespace std;

int main() {
    int t;
    cin >> t;
    while (t--) {
        int n;
        cin >> n;
        vector<string> s(2);
        for (auto& x : s) cin >> x;
        int ans = 0;
        for (int i = 1; i < n - 1; ++i) {
            // column i must be empty, and the two neighbouring columns must each
            // contain exactly one blocked cell, both on the same row
            bool ok = true;
            ok &= (s[0][i] == '.' && s[1][i] == '.');
            ok &= (s[0][i - 1] != s[1][i - 1]);
            ok &= (s[0][i + 1] != s[1][i + 1]);
            ok &= (s[0][i - 1] == s[0][i + 1]);
            ans += ok;
        }
        cout << ans << '\n';
    }
}
Idea: BledDest
Solution (adedalic)
import java.util.LinkedList

fun main() {
    repeat(readln().toInt()) {
        val n = readln().toInt()  // length of the string (read to consume the line)
        val s = readln()
        var ans = 0L
        val bracketPositions = LinkedList<Int>()
        for (i in s.indices) {
            var c = s[i]
            // an erased character becomes '(' if there is nothing to close, ')' otherwise
            if (c == '_') {
                c = if (bracketPositions.isEmpty()) '(' else ')'
            }
            if (c == ')') {
                // close the most recent unmatched '(' and add the distance to the answer
                ans += i - bracketPositions.pollLast()
            }
            else
                bracketPositions.addLast(i)
        }
        println(ans)
    }
}
Idea: BledDest
Solution (Neon)
#include <bits/stdc++.h>
using namespace std;

const int INF = 1e9;

int main() {
    ios::sync_with_stdio(false); cin.tie(0);
    int t;
    cin >> t;
    while (t--) {
        int n;
        cin >> n;
        vector<int> a(n);
        for (auto& x : a) cin >> x;
        vector<vector<int>> g(n);
        for (int i = 1; i < n; ++i) {
            int p;
            cin >> p;
            g[p - 1].push_back(i);
        }
        // check(v, x): x is the amount the subtree of v has to afford; a non-root
        // vertex with a[v] < x increases the requirement for its children by x - a[v],
        // and every leaf must satisfy x <= a[leaf]
        auto check = [&](auto&& self, int v, int x) -> bool {
            if (x > INF) return false; // the requirement only grows going down, so cut it off
            bool isLeaf = true;
            if (v) x += max(0, x - a[v]);
            for (auto u : g[v]) {
                isLeaf = false;
                if (!self(self, u, x)) return false;
            }
            return (!isLeaf || x <= a[v]);
        };
        // binary search on the amount added to the root
        int l = 1, r = INF;
        while (l <= r) {
            int mid = (l + r) / 2;
            if (check(check, 0, mid)) {
                l = mid + 1;
            } else {
                r = mid - 1;
            }
        }
        cout << a[0] + l - 1 << '\n';
    }
}
Idea: BledDest
Solution 1 (awoo)
#include <bits/stdc++.h>
#include "ext/pb_ds/assoc_container.hpp"

#define forn(i, n) for (int i = 0; i < int(n); i++)

using namespace __gnu_pbds;
using namespace std;

template <typename T> using ordered_set = tree<T, null_type, less<T>, rb_tree_tag, tree_order_statistics_node_update>;

struct query{
    int i, j;
};

int main() {
    cin.tie(0);
    ios::sync_with_stdio(false);
    int n, m;
    cin >> n >> m;
    vector<int> a(n);
    forn(i, n) cin >> a[i];
    vector<vector<query>> q(n + 1);
    forn(j, m){
        int i, x;
        cin >> i >> x;
        --i;
        q[x].push_back({i, j});
    }
    forn(i, n + 1){
        sort(q[i].begin(), q[i].end(), [](const query &a, const query &b){
            return a.i > b.i;
        });
    }
    vector<int> ord(n);
    iota(ord.begin(), ord.end(), 0);
    sort(ord.begin(), ord.end(), [&](int i, int j){
        return a[i] > a[j];
    });
    vector<int> cur(n + 1);
    vector<char> ans(m);
    ordered_set<int> alive(ord.begin(), ord.end());
    for (int lvl = 1; lvl <= n; ++lvl){
        for (int k = 1; k <= n; ++k){
            if (cur[k] >= n) break;
            int x = alive.order_of_key(cur[k]);
            int nxt = x + k - 1 >= int(alive.size()) ? n : *alive.find_by_order(x + k - 1);
            while (!q[k].empty() && q[k].back().i <= nxt){
                ans[q[k].back().j] = (a[q[k].back().i] >= lvl);
                q[k].pop_back();
            }
            cur[k] = nxt + 1;
        }
        while (!ord.empty() && a[ord.back()] == lvl){
            alive.erase(ord.back());
            ord.pop_back();
        }
    }
    for (auto x : ans) cout << (x ? "YES" : "NO") << '\n';
    return 0;
}
Solution 2 (awoo)
#include <bits/stdc++.h>

#define forn(i, n) for (int i = 0; i < int(n); i++)

using namespace std;

struct query{
    int i, j;
};

int main() {
    cin.tie(0);
    ios::sync_with_stdio(false);
    int n, m;
    cin >> n >> m;
    vector<int> a(n);
    forn(i, n) cin >> a[i];
    vector<vector<query>> q(n + 1);
    forn(j, m){
        int i, x;
        cin >> i >> x;
        --i;
        q[x].push_back({i, j});
    }
    forn(i, n + 1) sort(q[i].begin(), q[i].end(), [](const query &a, const query &b){
        return a.i > b.i;
    });
    int P = min(1000, n + 1);
    vector<char> ans(m);
    for (int k = 1; k < P; ++k){
        int cur = 1;
        int cnt = 0;
        forn(i, n){
            bool fl = false;
            if (a[i] >= cur){
                ++cnt;
                fl = true;
                if (cnt == k){
                    ++cur;
                    cnt = 0;
                }
            }
            while (!q[k].empty() && q[k].back().i == i){
                ans[q[k].back().j] = fl;
                q[k].pop_back();
            }
        }
    }
    vector<int> sum1(n), sum2(n);
    int p2 = ceil(sqrt(n + 2));
    auto add = [&](int i){
        int bl = i / p2;
        for (int j = bl + 1; j * p2 < n; ++j)
            ++sum1[j];
        for (int j = i; j < (bl + 1) * p2 && j < n; ++j)
            ++sum2[j];
    };
    int mx = n / P + 5;
    vector<vector<int>> pos(mx);
    forn(i, n){
        if (a[i] < mx)
            pos[a[i]].push_back(i);
        else
            add(i);
    }
    for (auto &it : pos) reverse(it.begin(), it.end());
    for (int k = P; k <= n; ++k){
        while (true){
            int mn = n;
            int who = -1;
            forn(lvl, mx) if (!pos[lvl].empty()){
                int i = pos[lvl].back();
                if (mn < i) continue;
                int cnt = sum1[i / p2] + sum2[i];
                if (a[i] >= cnt / k + 1){
                    mn = i;
                    who = lvl;
                }
            }
            if (who == -1) break;
            add(mn);
            pos[who].pop_back();
        }
        for (auto it : q[k]){
            int lvl = a[it.i];
            ans[it.j] = (lvl >= mx || pos[lvl].empty() || pos[lvl].back() > it.i);
        }
    }
    for (auto x : ans) cout << (x ? "YES" : "NO") << '\n';
    return 0;
}
Idea: BledDest
Solution (BledDest)
#include <bits/stdc++.h>
using namespace std;

const int MOD = 998244353;

int add(int x, int y)
{
    x += y;
    while (x >= MOD) x -= MOD;
    while (x < 0) x += MOD;
    return x;
}

int mul(int x, int y)
{
    return (x * 1ll * y) % MOD;
}

int main()
{
    int n, x, m;
    cin >> n >> x >> m;
    vector<int> fib = {0, 1};
    for (int i = 2; i <= 30; i++)
        fib.push_back(fib[i - 1] + fib[i - 2]);
    int max_sum = fib[x] * n;
    // dp[j][k] — the number of ways to place k chips on the first i points
    // (multiple chips per point allowed) so that the total value is j,
    // where a chip on point i contributes fib[i]
    vector<vector<int>> dp(max_sum + 1, vector<int>(n + 1));
    dp[0][0] = 1;
    for (int i = 1; i <= x; i++)
        for (int j = 0; j < max_sum; j++)
            for (int k = 0; k < n; k++)
            {
                if (j + fib[i] <= max_sum)
                    dp[j + fib[i]][k + 1] = add(dp[j + fib[i]][k + 1], dp[j][k]);
            }
    // cost[j] — the minimum number of Fibonacci numbers that sum to j
    vector<int> cost(max_sum + 1, 1e9);
    cost[0] = 0;
    for (int j = 1; j <= max_sum; j++)
        for (int i = 1; i <= 30; i++)
            if (j >= fib[i])
                cost[j] = min(cost[j], cost[j - fib[i]] + 1);
    int ans = 0;
    for (int i = 0; i <= max_sum; i++)
        if (cost[i] == m)
            ans = add(ans, dp[i][n]);
    cout << ans << endl;
}
Problem D can be solved in $$$O(n)$$$ with just a basic DFS. The idea is to maximize the minimum in each subtree. Submission: 273547075
Just what I did.
Thanks for the great solution. Can you please explain why, when node == 0, we are not using this part?
You only care about the final value in node 0 (node 1 in 1-indexed terms). So you maximize the minimum in node 0's subtree (excluding node 0) and then use the operation as many times as the minimum allows.
Thanks. Can you provide the intuition for your algorithm and explain how and why it works? I was on almost the same path for a few minutes in the contest but didn't quite connect the dots. Can you please explain?
This was my thought process. I want to use the operation on node 1 as many times as possible. The number of times I will be able to do it is equal to the minimum value in node 1's subtree (excluding node 1 itself). To extend the logic, the maximum number of times I will be able to do it is equal to the minimum value achievable in the subtrees of node 1's children. This logic naturally looks like dynamic programming.
DP transitions: if the value of the current node is greater than the value of $$$dp[node]$$$, we don't change anything; otherwise, we use the operation while $$$a[node] < dp[node]$$$, in other words $$$dp[node] = \lfloor\frac{a[node] + dp[node]}{2}\rfloor$$$.
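A minimal sketch of the DFS described above (my own illustration of the commenter's transitions, not their exact submission). Here dfs(v) returns the maximum value that the minimum over the subtree of v (including a[v] itself) can reach; the answer adds the minimum over the root's children to a[1]:

#include <bits/stdc++.h>
using namespace std;

int n;
vector<long long> a;
vector<vector<int>> g;

// maximum achievable minimum over the subtree of v (including v itself)
long long dfs(int v) {
    if (g[v].empty()) return a[v];                // leaf: nothing below to balance with
    long long mn = LLONG_MAX;
    for (int u : g[v]) mn = min(mn, dfs(u));      // best minimum among the children's subtrees
    if (a[v] >= mn) return mn;                    // no reason to operate at v
    return (a[v] + mn) / 2;                       // balance a[v] against the subtree minimum
}

int main() {
    int t;
    cin >> t;
    while (t--) {
        cin >> n;
        a.assign(n, 0);
        for (auto &x : a) cin >> x;
        g.assign(n, {});
        for (int i = 1; i < n; i++) {
            int p;
            cin >> p;
            g[p - 1].push_back(i);
        }
        long long mn = LLONG_MAX;
        for (int u : g[0]) mn = min(mn, dfs(u));  // the root just adds the full minimum below it
        cout << a[0] + (g[0].empty() ? 0 : mn) << '\n';
    }
}

Note that the recursion is as deep as the tree, so on long chains it may need an iterative rewrite or a larger stack.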
Thank you so much, mate.
Thank you for your solution. I have difficulty understanding the (a[node] + dp[node]) / 2 part.
Is this because we try to evenly distribute values between a node and its parent, so that we can ensure the root node gets the maximum value?
Yeah, it is just a faster way of applying the operation one step at a time while $$$a[node] < dp[node]$$$, since if you don't do anything, the value in the current node will become the minimum, and $$$\lfloor \frac{a[node] + dp[node]}{2} \rfloor > a[node]$$$.
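A one-line derivation of that closed form (my own addition, taking the transition above at face value): applying the operation $$$t$$$ times at the node raises its value to $$$a[node] + t$$$ while the minimum below drops to $$$dp[node] - t$$$, so the best guaranteed value is

$$$\max_{t \ge 0} \min\big(a[node] + t,\; dp[node] - t\big) = \left\lfloor \frac{a[node] + dp[node]}{2} \right\rfloor$$$ for integer $$$t$$$, assuming $$$a[node] \le dp[node]$$$.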
Can you please explain the dp state that you defined?
$$$dp[node]$$$ means the minimum value in the subtree of $$$node$$$.
So for
3
0 3 3
3 1
why can't its maximum be 3 instead of 2?
I created a video editorial for D: Maximize the Root.
Same solution without using additional dp array: https://mirror.codeforces.com/contest/1997/submission/273543097
Can you explain the format of the input in D? I didn't get who the parent is. Please explain using the first test case: who is whose parent?
Input:
4
0 1 0 2
1 1 3
Tree:
Note: The given array is $$$p_2, p_3, p_4, ..., p_n$$$, in this case the array is $$$1, 1, 3$$$, that means that node 2's parent is node 1, node 3's parent is 1 and node 4's parent is node 3.
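In code, building the children lists from that parent array looks like this (a small illustrative snippet for a single tree, 1-indexed; the names children and p are my own):

#include <bits/stdc++.h>
using namespace std;

int main() {
    int n;
    cin >> n;
    vector<int> a(n + 1);
    for (int i = 1; i <= n; i++) cin >> a[i];
    vector<vector<int>> children(n + 1);
    for (int i = 2; i <= n; i++) {   // the parent array is given for nodes 2..n
        int p;
        cin >> p;                    // p is the parent of node i
        children[p].push_back(i);
    }
    // for the sample "4 / 0 1 0 2 / 1 1 3": children[1] = {2, 3}, children[3] = {4}
}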
Oh, got it, thanks.
Can anyone tell me what is wrong with this solution? I got an overflow error even though everything is in long long: 273894619
Got it: just put lazy < 2e9, then flag = 0, or else the lazy value will propagate to very high values. 273916460
Nvm, I got it. Thanks for pointing out that it is because of overflow.
Yeah, just put if (dec > 2e9) return 0.
276099991
You are correct. But you don't even need the memoization. A single pass down the tree suffices. Here's a slightly cleaned up version of my contest solution: 273914043
Cool! I appreciate your style.
Check out my clean solution: 273559093
Why did my $$$O(n \log^2 n)$$$ solution time out on E?
I'm wondering a similar thing. Why do $$$O(n\cdot\log^4(n))$$$ solutions time out but $$$O(n \sqrt{n})$$$ solutions pass?
$$$log^4(200000) \approx 790$$$ and $$$\sqrt{200000} \approx 447$$$. They really aren't that different. Plus, $$$\log$$$ is a tiny thing compared to square root.
You took the logarithm in base 10; $$$\log_2^4(200000) \approx 96161$$$.
Ah yes, thanks for pointing that out. It looks like google defaults to $$$\log_{10}$$$. No wonder why $$$O(n \cdot \log^4 (n))$$$ solutions don't pass lol
log is base 2, not 10; $$$\log_2(200000) \approx 17.6$$$.
because
it's because of iq
I FSTed D and E :(
I hope there will be strong pretests next time.
I enjoyed all of the problems I saw in the contest
A was easy, but it is still a good problem for a Div 2 A.
My only complaint is that for problem B, it wasn't really emphasized that there was one connected component at the start. It was there in the text, but the diagram had more than one component. Ideally that part would have been in bold.
For problem C, I misread the problem (which was completely my fault) and thought that the data corruption affected both random even and odd characters. If anyone has a solution to that problem, I'd be interested. The greedy doesn't work, as it is possible that by adding a closing bracket at your current position you stop the bracket sequence from becoming a regular bracket sequence. (Funnily enough, I actually had the working code for this problem as a part of my attempt at a solution for the version where random even and odd characters are corrupted.)
Problem D was a classic tree DP problem that I almost solved, but I ran out of time because of how much time I wasted on problems B and C.
Very nice and educational contest even though I completely under-performed.
D was binary search, not DP, I thought.
You can solve it with both. To me DP was more intuitive but the intended solution was binary search.
I solved it by recognising a pattern for the optimal filling of the brackets (for the given constraints).
IDK why it works, maybe I shall prove it sometime.
280823450
B was basically an IQ problem
Why are you so obsessed with IQ? You also literally solved it in the contest.
I am not so obsessed with IQ. It is just an interest of mine.
B being an IQ problem doesn't really have to do with whether or not I solved it. It is an IQ problem because you clearly can't train for such problems. I bet you can't even find a similar problem on codeforces.
Anyway, it seems like most div. 2 A-C are IQish problems. To me, it just felt like this B was a full-on IQ problem. I bet you could put that problem on an IQ test and no one would bat an eye.
Isn't your whole point that you can never reach red because your IQ isn't high enough? Or am I mistaken? So if you can solve so-called "IQ problems", then why can't you reach red?
Also I think that your claim that you can't train for such problems is clearly ridiculous. Many GMs were stuck at Newbie for months and couldn't solve div.2 A-C. Has their IQ changed?
Well, to be fair, it was an easier IQ problem. There are harder IQ problems out there, like that one B2 from a few contests ago. Though I'd classify that B2 as an IQish problem, not a full-on IQ problem. I was just letting the authors know that they made (whether on accident or on purpose) an IQ problem.
The sad thing is that the IQish problems don't seem to go away completely as you search for higher-rated problems. There is no feeling like going to a problem's editorial and seeing that it was an IQ/IQish problem all along. Of course, there are worse feelings, like getting sent to hell probably, but there is no feeling quite like failing to solve an IQ problem.
Probably, because most of them started codeforces when they were like 12. On average, cognitive ability increases substantially from 12-25, but it mostly stops increasing by age 18. Show me the profile of one of these users, though. I'd be happy to be proven wrong.
According to Emory School of Medicine, "IQ is an abbreviation for Intelligence Quotient. “Intelligence,” as measured by IQ tests is rather narrowly defined. An IQ is intended as a predictor of the level of abilities a child will need to be successful in school. In the general population this score becomes relatively stable after about four years of age." So no, IQ doesn't generally change when you're an adolescent.
Here are two profiles that I think have inspiring rating graphs.
What they mean by that is that individuals' IQ scores generally stabilize by the age of 4. This means that, if a person scores 115 at age 4 (this is in reference to other 4-year-olds), they will likely score around 115 at age 34 (this is in reference to people around the age of 34). It doesn't mean that cognitive ability doesn't increase past 4. Of course a 34 year old is gonna be smarter than a 4 year old, on average.
With that being said, I think that both of those users you have mentioned started way before they were 18. So they had lower cognitive ability when they started than they do now.
These are called "ad-hoc problems."
You are overusing the term IQ problem.
The given grid contains at most 1 connected region
I missed the above statement in B because my mind was filled with the pictures in the statement; I assumed there could be multiple components, and a lot of time was wasted because of that.
Exactly bro
Exactly bro, I got it.
Amazing contest! But I think there is a problem: rating changes were applied to all participants, even those whose rating is greater than 2100. This could be a mistake, maybe.
Problem B was not framed correctly; the ambiguity in the sentences was high.
Alternative approach to problem E.
I observed that if we do not fight against monster i for some k = x, then we will not fight against it for k < x either. So I found the maximum k for each monster that would make it run away from the fight, and for each query, if x <= res[i], then we do not fight it. My proof was prayers, so anyone is welcome to either prove it right or wrong.
The rest is straightforward: we do a binary search for each monster. Let's take some fixed k. If we will not fight monster i, then we have already fought at least k*a[i] monsters out of the first i-1. If that's the case, then we should look for a greater k. Now we must be able to check how many of them we did not fight, meaning how many of them had res[j] >= k for j < i. We can do it with a simple sum segment tree, keeping how many monsters had k = x in the leaf nodes.
The reason I cannot prove the first paragraph is that, when we check k < x, it's true that we will have more frequent level-ups; however, it's also true that we will skip more monsters, so if we create an equation, both sides would be decreasing.
Accepted solution: 273561881
A proof in plain language:
Consider two k's, k_0 < k_1; they walk through the sequence simultaneously.
When k_0 levels up, k_1 still stays on the old level, because more monsters are required for him. This will result in k_0 skipping some monsters that k_1 fought.
But that's fine, because either k_1's level never catches up with k_0, or, if it catches up, at that time k_0 has already made some progress at that level, and from then on they fight the same monsters.
So k_1 can never fight more monsters than k_0 does.
Another way to do this with a similar approach is to store an ordered multiset of all res[j] for j < i and find the order of key k.
Proof for the first paragraph:
Proposition to be proved is $$$(\forall i \exists ans[i] $$$ such that $$$query(i, x) \leftrightarrow ans[i] < x)$$$. Assume that $$$i$$$ is the minimum index for which the statement does not hold. Hence $$$\exists k$$$ such that $$$query(i, k) \wedge \neg query(i, k + 1)$$$. Therefore $$$card(j < i, ans[j] = k) \geq a[i] + 1$$$. We define $$$t := max(j < i, ans[j] = k)$$$. $$$(k*a[t] \leq \Sigma_{k' = 0}^{k - 1} card(j < t, ans[j] = k') \wedge k*a[t] + a[t] > \Sigma_{k' = 0}^k card(j < t, ans[j] = k'))$$$ which means that $$$a[t] > a[i] \wedge \neg query(t, k)\Rightarrow \neg query(i, k)$$$. Thus we achieve proof by contradiction.
My post-contest solution with explanatory comments. Your explanation helped. Thanks <3
https://mirror.codeforces.com/contest/1997/submission/273966239
In problem F, can anyone explain this line to me, and why it is optimal:
This is just checking that the minimum number of Fibonacci numbers used to represent an integer is equal to m.
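For intuition, that minimum count can also be computed greedily: the greedy (Zeckendorf) decomposition, which repeatedly takes the largest Fibonacci number that still fits, is known to use the fewest terms. A small sketch of that check (my own illustration; the editorial computes the same quantity with the cost[] DP instead):

#include <bits/stdc++.h>
using namespace std;

// Minimum number of Fibonacci numbers summing to v, via the greedy
// (Zeckendorf) decomposition: always take the largest Fibonacci <= v.
int minFibTerms(long long v) {
    vector<long long> fib = {1, 2};
    while (fib.back() <= v) fib.push_back(fib[fib.size() - 1] + fib[fib.size() - 2]);
    int cnt = 0;
    for (int i = (int)fib.size() - 1; i >= 0 && v > 0; i--)
        if (fib[i] <= v) { v -= fib[i]; cnt++; }
    return cnt;
}

int main() {
    // e.g. 10 = 8 + 2 -> 2 terms; 19 = 13 + 5 + 1 -> 3 terms
    cout << minFibTerms(10) << ' ' << minFibTerms(19) << '\n';
}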
I had a solution for problem E that's a bit simpler conceptually. (It doesn't require the harmonic-series observation or changing the timeframe from i to k.)
It's based on the following observation: if a monster is fought when k = k_0, it will be fought when k is larger.
Given the above, we can walk from position 1 to n and maintain the number of monsters fought for each k. For every new monster, based on the observation, we only need to increase the count by 1 for a suffix. That can be done with a BIT or your favourite range-query data structure.
Now we still need to decide what the smallest k is for each position, i.e. which suffix we should operate on. Again based on the observation, this can be found by binary search. When checking whether we will fight that monster for k, we make a query to our BIT to calculate the current level for k.
The overall complexity is still O(n log^2 n).
You can also query the BIT directly (find first), and the complexity will be $$$O(n \log n)$$$.
How do you find pref[x] / x < a in log(n) with point updates?
On each segment store the rightmost value. Apply updates lazily. If the binary search condition is false for rightmost, then it is false for the entire segment. Find the first element by crawling the tree.
Submission — https://mirror.codeforces.com/contest/1997/submission/273770957
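The comment above keeps a slightly different invariant (the rightmost value per segment, with lazy updates), but the underlying "crawl the tree" idea is the generic segment-tree descent. A sketch of that pattern with a max tree and point updates (my own illustration, not the linked submission):

#include <bits/stdc++.h>
using namespace std;

// Max segment tree with point updates, plus a descent query that returns the
// leftmost index i >= l with value >= x in logarithmic time.
struct SegTree {
    int n;
    vector<long long> mx;
    SegTree(int n) : n(n), mx(4 * n, LLONG_MIN) {}
    void update(int v, int tl, int tr, int pos, long long val) {
        if (tl == tr) { mx[v] = val; return; }
        int tm = (tl + tr) / 2;
        if (pos <= tm) update(2 * v, tl, tm, pos, val);
        else update(2 * v + 1, tm + 1, tr, pos, val);
        mx[v] = max(mx[2 * v], mx[2 * v + 1]);
    }
    void update(int pos, long long val) { update(1, 0, n - 1, pos, val); }
    // leftmost index in [l, n-1] with value >= x, or -1 if none
    int firstAtLeast(int v, int tl, int tr, int l, long long x) {
        if (tr < l || mx[v] < x) return -1;            // the whole segment fails
        if (tl == tr) return tl;                        // reached a leaf that satisfies the condition
        int tm = (tl + tr) / 2;
        int res = firstAtLeast(2 * v, tl, tm, l, x);    // prefer the left half
        return res != -1 ? res : firstAtLeast(2 * v + 1, tm + 1, tr, l, x);
    }
    int firstAtLeast(int l, long long x) { return firstAtLeast(1, 0, n - 1, l, x); }
};

int main() {
    SegTree st(5);
    for (int i = 0; i < 5; i++) st.update(i, i * 2);    // values 0 2 4 6 8
    cout << st.firstAtLeast(0, 5) << '\n';              // prints 3 (first value >= 5 is 6)
}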
Hmm can you explain more? Thanks!
IMO one log is from the binary search and another is from the BIT; it's not clear to me how we can drop one of the two.
[Edit] I figured out how to do it... Basically you look at 1, 10, 100, 1000... (power-of-two jumps) and combine finding the sum and checking the binary search condition.
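That trick is the standard Fenwick-tree descent (a lower_bound on prefix sums): walk down from the highest power of two, accumulating the partial sum as you go, so the binary search and the sum query share one $$$O(\log n)$$$ walk. A self-contained sketch, my own illustration; it assumes all stored values are non-negative:

#include <bits/stdc++.h>
using namespace std;

// Fenwick tree over 1..n with a descent that finds the smallest index pos
// such that prefix_sum(pos) >= target, in a single O(log n) walk.
struct Fenwick {
    int n, LOG;
    vector<long long> f;
    Fenwick(int n) : n(n), f(n + 1, 0) { LOG = 1; while ((1 << LOG) <= n) LOG++; }
    void add(int i, long long v) { for (; i <= n; i += i & -i) f[i] += v; }
    // smallest pos with sum(1..pos) >= target, or n + 1 if the total is too small
    int lowerBound(long long target) {
        int pos = 0;
        long long cur = 0;
        for (int k = LOG; k >= 0; k--) {
            int nxt = pos + (1 << k);
            // jump only if the prefix up to nxt is still strictly below target
            if (nxt <= n && cur + f[nxt] < target) {
                pos = nxt;
                cur += f[nxt];
            }
        }
        return pos + 1;
    }
};

int main() {
    Fenwick fw(8);
    long long vals[9] = {0, 3, 1, 4, 1, 5, 9, 2, 6};
    for (int i = 1; i <= 8; i++) fw.add(i, vals[i]);
    // prefix sums: 3 4 8 9 14 23 25 31 -> first prefix >= 10 is at index 5
    cout << fw.lowerBound(10) << '\n';   // prints 5
}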
A simpler implementation of C without stack/vector. 273589519
I thought of brackets reducing in pairs of 2 and guessed by looking at the pretests, although I am not very clear on why this works; it would be great if someone could explain it.
Problem C can be solved in a single pass with constant extra space using the formula len(s)/2 + 2*s.count('(') for an input string s: just keep a counter of open parentheses and update it as the input comes in.
If anyone has a good proof, I would be interested in seeing it.
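Taking the commenter's formula at face value (I have not proved it either), a direct transcription looks like this; the input format (t test cases, then n and s) is the one the editorial's solution for C reads:

#include <bits/stdc++.h>
using namespace std;

int main() {
    int t;
    cin >> t;
    while (t--) {
        int n;
        string s;
        cin >> n >> s;
        // answer = n / 2 + 2 * (number of '(' characters given in the input)
        long long open = count(s.begin(), s.end(), '(');
        cout << n / 2 + 2 * open << '\n';
    }
}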
I'm really curious to know why my linear $$$O(n + m)$$$ solution passes in ~500 ms while, at the same time, binary search solutions with time complexity $$$O(n \log_2 n + m \log_2 n)$$$ pass in ~200 ms.
Solution For C
I can't explain much about the solution. I just observed a pattern, coded it up, and it worked.
Can someone please explain why my solution for D doesn't work? I am just using a basic DFS, trying to find the maximum value I can add to vertex 1. Submission: https://mirror.codeforces.com/contest/1997/submission/273756060
Can someone please explain problem B? I really don't understand it from either the tutorial or the video solution.
There are only two layouts in which blocking a cell creates 3 regions. Try brute forcing every cell and you will notice that only two layouts can be added to the answer.
oh f yeah, thanks
Merge Sort Tree for E
For problem E, can someone explain this part:
int nxt = x + k - 1 >= int(alive.size()) ? n : *alive.find_by_order(x + k - 1);
I just realised B says at most 1 connected component, and here I was thinking of using articulation points and all, lol.
E can be solved in $$$O(n \log n)$$$ using the "binary search on Fenwick tree" approach. You can see 273651754.
Another solution for E is a parallel binary search / divide-and-conquer approach, also in $$$O(n \log n)$$$. My solution: 273896973
Can anyone tell me why my submission for D overflows the stack? As far as I can tell, I only visit each node once.
Problem E can be solved in $$$O(n \log n)$$$. I've written a blog for it.
Can anybody explain how to find the k-th element greater than x with a Fenwick tree? :)
Process the queries offline. Sort the queries by decreasing x.
Make an array of [value, index] pairs from the original array and sort it by decreasing value. Maintain a pointer into that array, initially at 0. Now, to solve a query [x, k] in log^2(n) time:
while the pointer points at a value greater than x, update the Fenwick tree at that index from 0 to 1; then binary search for the correct position (a sketch follows below).
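A sketch of that offline technique (my own illustration; the array, query format and names below are made up for the example): activate the positions whose value exceeds the current query's x, then find the k-th activated position with the Fenwick descent described above.

#include <bits/stdc++.h>
using namespace std;

// Offline: for each query (x, k), report the position (1-indexed) of the k-th
// element of a[] that is strictly greater than x, scanning positions left to right.
int main() {
    vector<long long> a = {5, 1, 7, 3, 9, 2};               // example array
    vector<pair<long long,int>> queries = {{2, 2}, {4, 3}}; // (x, k) pairs
    int n = a.size(), LOG = 1;
    while ((1 << LOG) <= n) LOG++;
    vector<int> fen(n + 1, 0);
    auto add = [&](int i) { for (; i <= n; i += i & -i) fen[i]++; };
    // Fenwick descent: smallest pos with prefix count >= k, i.e. the k-th active position
    auto kth = [&](int k) {
        int pos = 0;
        for (int j = LOG; j >= 0; j--)
            if (pos + (1 << j) <= n && fen[pos + (1 << j)] < k) {
                pos += 1 << j;
                k -= fen[pos];
            }
        return pos + 1;                                     // n + 1 if fewer than k are active
    };
    // sort values and queries by decreasing x, activate values > x, answer with kth(k)
    vector<int> byVal(n); iota(byVal.begin(), byVal.end(), 0);
    sort(byVal.begin(), byVal.end(), [&](int i, int j){ return a[i] > a[j]; });
    vector<int> byX(queries.size()); iota(byX.begin(), byX.end(), 0);
    sort(byX.begin(), byX.end(), [&](int i, int j){ return queries[i].first > queries[j].first; });
    vector<int> ans(queries.size());
    int ptr = 0;
    for (int qi : byX) {
        auto [x, k] = queries[qi];
        while (ptr < n && a[byVal[ptr]] > x) add(byVal[ptr++] + 1);
        ans[qi] = kth(k);
    }
    for (int v : ans) cout << v << '\n';                    // expected output: 3 and 5
}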
Do you know any log(n) solution with a Fenwick tree? :( The binary search trick is classic; in this case the complexity becomes O(n log^3(n)), not O(n log^2(n)) (task E).
Hello! Can someone help me find the error in my submission: 273811843? It shows a runtime error on test 6.
Maybe my solution for problem D is the simplest one, with only 30 lines. Just use dfs(p) to denote how much can be offered by all of p's children. This solution is very fast. Code
Bro used one-liner conditions so that it looks pretty short; however, it doesn't count as the "simplest one", as it is nothing more than the greedy approach most people already know.
I solved problem D with a basic DFS traversal: https://mirror.codeforces.com/contest/1997/submission/273877479
I have a doubt in F. Can you please explain where I am wrong? After placing all chips on the line and doing some operations, we always end up with 1 or 2 chips in the end, so the cost is always 1 or 2.
Proof: let's say some i > 2 has an even number of chips at i, say 4. By op 1 it becomes 1 3 1; op 2: 0 2 2; op 2: 1 1 1; op 2: 0 0 2. Like this, every even pile can be reduced to 2, and every odd pile to 1. After this, the ones at different positions can be combined into 1 or 2: 10001 goes to 110111 after a few applications of the first operation, and again this can be combined and made into 1 or 2.
So how can there be a solution for m > 2?
E and F are so difficult, I had no idea for them ;-;
Can anyone tell me what is wrong with this solution for problem D? I got an overflow error even though everything is in long long: 273894619
I have one doubt regarding problem E. When I used a segment tree, my solution exceeded the time limit on test case 12, but when I did the same with a Fenwick tree, it passed. Can anyone tell me the reason behind it? The update and query time complexity for both data structures is the same, that is, O(log N). 273965828 (Fenwick tree), 273964980 (segment tree).
A Fenwick tree has a better constant factor than a segment tree.
Problem D can also be solved in O(n) by using basic divide and conquer. Submission: 274014881
Thank you for such a well-detailed solution to problem E, BledDest.
I just wanted to know whether problem E allows $$$n (\log\log n) \log^2 n$$$, because I am getting TLE on test case 7.
Am I getting something wrong here in B? Suppose the input is this: n=7
...x...
...x.x.
Now wouldn't this configuration give us 3 connected regions, even though the answer for the given setup is 0 (is one free cell alone a connected region?):
...xx..
...x.x.