Idea: shorya1835 Preparation: wakanda-forever
Solution
Tutorial is loading...
Code
#include <bits/stdc++.h>
using namespace std;
using ll = long long;

// Problem A: for each test case, count the positions where the string
// differs from its sorted copy and print half of that count.
int main(){
    int testCount;
    cin >> testCount;
    while(testCount--){
        int len;
        string str;
        cin >> len >> str;
        string sortedStr = str;
        sort(sortedStr.begin(), sortedStr.end());
        int mismatched = 0;
        for(int pos = 0; pos < len; ++pos){
            if(str[pos] != sortedStr[pos]) ++mismatched;
        }
        cout << mismatched / 2 << '\n';
    }
    return 0;
}
2140B - Another Divisibility Problem
Idea: wakanda-forever Preparation: wakanda-forever
Solution
Tutorial is loading...
Code
#include<bits/stdc++.h>
using namespace std;

// Problem B: for each x, print y = 2*x (per the editorial, x concatenated
// with 2x is divisible by x + 2x = 3x).
int main(){
    int cases;
    cin >> cases;
    for(; cases > 0; --cases){
        int x;
        cin >> x;
        cout << 2 * x << '\n';
    }
    return 0;
}
Idea: shorya1835 Preparation: wakanda-forever
Solution
Tutorial is loading...
Code
#include <bits/stdc++.h>
using namespace std;
using ll = long long;
// Problem C: reads n values and maximizes an alternating signed sum after
// one allowed modification.  Starts from the plain alternating sum `init`
// and evaluates each candidate improvement in O(n) using running prefix
// minima over odd and even indices.
int main(){
int tt;
cin >> tt;
while(tt--){
int n;
cin >> n;
vector<long long> a(n);
for(int i = 0; i < n; i++){
cin >> a[i];
}
// Alternating sum: +a[i] at even indices, -a[i] at odd indices.
ll ans = 0;
for(int i = 0; i < n; i++){
if(i % 2) ans -= a[i];
else ans += a[i];
}
ll init = ans;
// Candidate answers of the form init + i - (i % 2) for every index i.
// NOTE(review): appears to cover the gains that need no sign pairing --
// confirm against the problem statement.
for(int i = 0; i < n; i++) ans = max(ans, init + i - (i % 2));
// Prefix minima per parity; LLONG_MAX / 2 as sentinel so adding other
// terms to it below cannot overflow.
ll min_even = LLONG_MAX / 2, min_odd = LLONG_MAX / 2;
for(int i = 0; i < n; i++){
if(i % 2){
// Pair the current odd index with the best earlier even index.
ans = max(init + i + a[i] + a[i] - min_even, ans);
min_odd = min(min_odd, i - a[i] - a[i]);
}
else{
// Pair the current even index with the best earlier odd index.
ans = max(init + i - a[i] - a[i] - min_odd, ans);
min_even = min(min_even, i + a[i] + a[i]);
}
}
cout << ans << '\n';
}
return 0;
}
2140D - A Cruel Segment's Thesis
Idea: shorya1835 Preparation: shorya1835
Solution
Tutorial is loading...
Code
#include<bits/stdc++.h>
using namespace std;
typedef long long ll;
typedef vector<int> vi;
typedef vector<ll> vl;
typedef set<int> si;
typedef pair<int, int> pi;
typedef pair<ll, ll> pl;
#define fo(i, a, b) for (ll i = a; i < b; i++)
#define F first
#define S second
#define pb push_back
#define mp make_pair
// Problem D: segments are ranked by l+r; the n/2 smallest by l+r give up
// their right-end contribution.  For odd n one segment is left unmatched
// and the best leftover candidate is chosen by a linear sweep.
int main() {
cin.tie(0)->sync_with_stdio(0);
cin.exceptions(cin.failbit);
ll T, n;
cin >> T;
while (T--) {
cin >> n;
vector<pl> a(n);
fo(i, 0, n)cin >> a[i].first >> a[i].second;
// Base contribution: each segment's own closed length (r - l + 1).
ll ans = 0;
fo(i, 0, n)ans += a[i].second - a[i].first + 1;
ans += n / 2;
// b[i] = (l_i + r_i, (l_i, r_i)), sorted ascending by l+r.
vector<pair<ll, pl>> b(n);
fo(i, 0, n) {
b[i].second = a[i];
b[i].first = a[i].first + a[i].second;
}
sort(b.begin(), b.end());
if (n % 2 == 0) {
// Even n: add every right end, then subtract (l+r) of the n/2
// segments with the smallest l+r.
fo(i, 0, n) {
ans += a[i].second;
}
fo(i, 0, n / 2) {
ans -= b[i].first;
}
cout << ans - n - n / 2 << "\n";
}
else {
// Odd n: one segment stays unmatched.  w mirrors the even-n total
// over the n/2+1 smallest; k tracks the best unmatched choice.
ll k = 0, w = 0;
fo(i, 0, n) {
w += a[i].second;
}
fo(i, 0, n / 2+1) {
w-= b[i].first;
}
// Case 1: unmatched segment among the n/2+1 smallest by l+r --
// its l+r was subtracted, so adding back l leaves only -r removed.
fo(i, 0, n / 2 + 1) {
k = max(k, w + b[i].S.F);
}
// Case 2: unmatched segment in the larger half -- restore b[n/2]
// to the subtracted set and drop that segment's right end.
w += b[n / 2].first;
fo(i, n / 2 + 1, n) {
k = max(k, w - b[i].S.S);
}
cout << ans + k - n - n / 2 << "\n";
}
}
}
2140E1 - Prime Gaming (Easy Version)
Idea: Divine_Spark Preparation: shorya1835
Solution
Tutorial is loading...
Code
#include <bits/stdc++.h>
using namespace std;
typedef long long ll;
// Deletes bit `bit` from mask `n`: bits below `bit` stay in place and all
// higher bits shift down by one position (masks are assumed < 2^22).
int removebit(int n, int bit)
{
    const int low_part = n & ((1 << bit) - 1);       // bits [0, bit)
    const int keep_mask = (1 << 22) - (1 << bit);    // bits [bit, 22)
    const int high_part = (n >> 1) & keep_mask;      // bits above `bit`, shifted down
    return high_part | low_part;
}
const ll MOD = 1e9+7;
// Modular integer: keeps an ll value in [0, MOD) and overloads the usual
// arithmetic operators modulo MOD (1e9+7, prime).  `^` performs modular
// exponentiation and `/` multiplies by the modular inverse.
class mint
{
public:
ll val;
// a^b mod MOD by binary exponentiation, O(log b).
static ll mod_exp(ll a, ll b)
{
ll res = 1;
a = a % MOD;
while (b > 0)
{
if (b % 2 == 1)
res = (res * a) % MOD;
b /= 2;
a = (a * a) % MOD;
}
return res;
}
// Extended Euclid: returns gcd(a, b) and writes x, y with a*x + b*y == gcd.
static ll gcdExtended(ll a, ll b, ll *x, ll *y)
{
if (a == 0)
{
*x = 0, *y = 1;
return b;
}
ll x1, y1;
ll gcd = gcdExtended(b % a, a, &x1, &y1);
*x = y1 - (b / a) * x1;
*y = x1;
return gcd;
}
// Modular inverse of a (valid when gcd(a, MOD) == 1; MOD is prime).
static ll modInverse(ll a)
{
ll x, y;
ll g = gcdExtended(a, MOD, &x, &y);
g++; // NOTE(review): dead statement -- g is never read after this.
ll res = (x % MOD);
if (res < 0)
res += MOD;
return res;
}
mint() { val = 0; }
// Normalizes any ll (including negatives) into [0, MOD).
mint(ll x)
{
val = x % MOD;
if (val < 0)
val += MOD;
}
mint &operator+=(const mint &other)
{
val += other.val;
if (val >= MOD)
val -= MOD;
return (*this);
}
mint &operator-=(const mint &other)
{
val -= other.val;
if (val < 0)
val += MOD;
return (*this);
}
mint &operator*=(const mint &other)
{
val = (val * other.val) % MOD;
return (*this);
}
// Modular division: multiply by the inverse of the divisor.
mint &operator/=(const mint &other)
{
val = (val * modInverse(other.val)) % MOD;
return (*this);
}
mint &operator=(const mint &other)
{
val = other.val;
return (*this);
}
mint operator+(const mint &other) const { return mint(*this) += other; }
mint operator-(const mint &other) const { return mint(*this) -= other; }
mint operator*(const mint &other) const { return mint(*this) *= other; }
mint operator/(const mint &other) const { return mint(*this) /= other; }
bool operator==(const mint &other) const { return val == other.val; }
// Increment / decrement wrap around at MOD.
mint operator++()
{
++val;
if (val == MOD)
val = 0;
return (*this);
}
mint operator++(int)
{
val++;
if (val == MOD)
val = 0;
return mint(val - 1);
}
mint operator--()
{
--val;
if (val == -1)
val = MOD - 1;
return (*this);
}
mint operator--(int)
{
val--;
if (val == -1)
val = MOD - 1;
return mint(val + 1);
}
// ^ has very low precedence, careful!!
template <typename T>
mint &operator^=(const T &other)
{
val = mod_exp(val, other);
return (*this);
}
template <typename T>
mint operator^(const T &other) const { return mint(*this) ^= other; }
mint &operator^=(const mint &other)
{
val = mod_exp(val, other.val);
return (*this);
}
mint operator^(const mint &other) const { return mint(*this) ^= other; }
template <typename T>
explicit operator T() { return (T)val; }
// Mixed-type friends so expressions like `2 + m` convert the lhs to mint.
template <typename T>
friend mint operator+(T other, const mint &M) { return mint(other) + M; }
template <typename T>
friend mint operator-(T other, const mint &M) { return mint(other) - M; }
template <typename T>
friend mint operator*(T other, const mint &M) { return mint(other) * M; }
template <typename T>
friend mint operator/(T other, const mint &M) { return mint(other) / M; }
template <typename T>
friend mint operator^(T other, const mint &M) { return mint(other) ^ M; }
friend std::ostream &operator<<(std::ostream &output, const mint &M) { return output << M.val; }
// Stream extraction reduces the raw input modulo MOD (negatives keep
// the sign of operator% here; constructor-style normalization is not
// applied).
friend std::istream &operator>>(std::istream &input, mint &M)
{
input >> M.val;
M.val %= MOD;
return input;
}
};
const int maxn = 20;
const int maxm = 30;

// Binary exponentiation: returns a^b mod MOD.
ll modpow(ll base, ll exp) {
    base %= MOD;
    ll acc = 1;
    while (exp > 0) {
        if (exp & 1) acc = (acc * base) % MOD;
        base = (base * base) % MOD;
        exp >>= 1;
    }
    return acc;
}
// E1: game DP over bitmasks.  dp[turn][i][mask] holds the game value (0/1)
// when i elements remain, encoded in mask, and it is `turn`'s move; `good`
// lists the 1-based positions a player may remove.
int main()
{ int t;
cin >> t;
while(t--){
bool pl = 1; // player to move at the root of the game
int n, m;
cin >> n >> m;
int k,x;
cin >> k;
vector<int> good;
for(int i=0;i<k;i++){
cin >> x;
good.push_back(x);
}
// With m == 1 every element is 1, so the final value is always 1.
if(m==1){
cout << 1<< "\n";
continue;
}
vector<vector<vector<bool>>> dp(2, vector<vector<bool>>(n + 1));
int cntt = 0; // work counter; never used in the answer
for (int i = 1; i <= n; i++)
{
dp[0][i].resize(1 << i);
dp[1][i].resize(1 << i);
if (i == 1)
{
// Base case: with one element left, the mask bit is the result.
for (int j = 0; j <= 1; j++)
{
for (int k = 0; k <= 1; k++)
{
dp[j][i][k] = k;
}
}
}
else
{
// Player 0 ANDs over all moves; player 1 ORs over all moves.
for (int msk = 0; msk < (1 << i); msk++)
{
bool ans0 = 1;
bool ans1 = 0;
for (auto x :good)
{
if (x <= i)
{
cntt++;
int finmsk = removebit(msk, x - 1);
ans0 &= dp[1][i - 1][finmsk];
ans1 |= dp[0][i - 1][finmsk];
}
else
{
// Early exit assumes `good` is sorted ascending -- TODO confirm.
break;
}
}
dp[0][i][msk] = ans0;
dp[1][i][msk] = ans1;
}
}
}
// Each of the 2^n masks is one initial array (consistent with m == 2
// after the early-out above); final answers are 1 + dp since values
// are 1-based.
mint finans = 0;
for (int r = 0; r < (1<<n); r++)
{
finans += 1+dp[pl][n][r];
}
cout << finans << endl;
}
}
2140E2 - Prime Gaming (Hard Version)
Idea: Divine_Spark Preparation:wakanda-forever, shorya1835
Solution
Tutorial is loading...
Code
#include <bits/stdc++.h>
using namespace std;
typedef long long ll;
const ll MOD = 1000000007;

// Deletes bit `bit` from mask `n`: bits below `bit` stay in place and all
// higher bits shift down by one position (masks are assumed < 2^22).
int removebit(int n, int bit)
{
    const int low_part = n & ((1 << bit) - 1);
    const int keep_mask = (1 << 22) - (1 << bit);
    const int high_part = (n >> 1) & keep_mask;
    return high_part | low_part;
}
const int maxn = 20;
const int maxm = 30;

// Binary exponentiation: a^b mod m (modulus defaults to MOD).
ll binpow(ll a, ll b, ll m = MOD){
    ll result = 1;
    while (b){
        if (b & 1) result = (result * a) % m;
        a = (a * a) % m;
        b >>= 1;
    }
    return result;
}
// E2: the same mask game DP as E1, but instead of enumerating arrays,
// count winning masks by popcount (cnt) and weigh each count by the number
// of value assignments producing that mask, for every threshold k.
int main()
{
    int t;
    cin >> t;
    while (t--) {
        bool pl = 1; // player to move at the root of the game
        int n, m;
        cin >> n >> m;
        int k, x;
        cin >> k;
        vector<int> good; // removable 1-based positions, in input order
        for (int i = 0; i < k; i++) {
            cin >> x;
            good.push_back(x);
        }
        // dp[turn][i][mask]: game value (0/1) with i elements remaining.
        vector<vector<vector<bool>>> dp(2, vector<vector<bool>>(n + 1));
        // cnt[turn][i][p]: number of winning i-bit masks with popcount p.
        // BUGFIX: the original initializer contained an accidental comma
        // operator `(n + 1, vector<...>(...))` whose first operand was
        // silently discarded; this spells out the construction that
        // actually ran: dimensions [2][n+1][n+1], zero-initialized.
        vector<vector<vector<int>>> cnt(2, vector<vector<int>>(n + 1, vector<int>(n + 1, 0)));
        int cntt = 0; // work counter; never used in the answer
        for (int i = 1; i <= n; i++)
        {
            dp[0][i].resize(1 << i);
            dp[1][i].resize(1 << i);
            if (i == 1)
            {
                // Base case: with one element left, the mask bit is the result.
                for (int j = 0; j <= 1; j++)
                {
                    for (int k = 0; k <= 1; k++)
                    {
                        dp[j][i][k] = k;
                    }
                    cnt[j][1][1]++;
                }
            }
            else
            {
                for (int msk = 0; msk < (1 << i); msk++)
                {
                    // Player 0 ANDs over all moves; player 1 ORs over all moves.
                    bool ans0 = 1;
                    bool ans1 = 0;
                    for (auto pos : good)
                    {
                        if (pos <= i)
                        {
                            cntt++;
                            int finmsk = removebit(msk, pos - 1);
                            ans0 &= dp[1][i - 1][finmsk];
                            ans1 |= dp[0][i - 1][finmsk];
                        }
                        else
                        {
                            // Early exit assumes `good` is sorted ascending -- TODO confirm.
                            break;
                        }
                    }
                    dp[0][i][msk] = ans0;
                    dp[1][i][msk] = ans1;
                    // __builtin_popcount replaces the libstdc++-internal name
                    // __popcount used originally: same value, portable builtin.
                    cnt[0][i][__builtin_popcount(msk)] += ans0;
                    cnt[1][i][__builtin_popcount(msk)] += ans1;
                }
            }
        }
        // A winning mask with r ones corresponds to values >= k in r
        // positions ((m-k+1)^r ways) and < k elsewhere ((k-1)^(n-r) ways);
        // summing over all thresholds k yields the sum of final answers.
        ll finans = 0;
        for (int r = 1; r <= n; r++)
        {
            for (int k = 1; k <= m; k++) {
                finans += (((binpow(k - 1, n - r) * binpow(m - k + 1, r)) % MOD) * (cnt[pl][n][r])) % MOD;
                finans %= MOD; // keep the accumulator reduced to rule out overflow
            }
        }
        cout << finans % MOD << '\n'; // '\n' instead of endl: no flush per test
    }
}
Bonus
Solve the problem when piles must have distinct stones, $$$m \le 10^9$$$ and there is no constraint on sum of $$$m$$$.
Idea: shorya1835 Preparation: wakanda-forever, shorya1835
Solution
Tutorial is loading...
Code
#include<bits/stdc++.h>
using namespace std;
typedef long long ll;
typedef vector<int> vi;
typedef vector<ll> vl;
typedef set<int> si;
typedef pair<ll, ll> pl;
#define fo(i, a, b) for (ll i = a; i < b; i++)
#define pb push_back
// Euclid's algorithm (iterative form of the original recursion);
// gcd(0, b) == b.
ll gcd(ll a, ll b){
    while(a != 0){
        ll rem = b % a;
        b = a;
        a = rem;
    }
    return b;
}
// Shared per-test-case state for the bonus solution below.
vl a;        // pile sizes (sorted ascending by main before good() is called)
ll lc, s, n; // lc: lcm of 2..n (set in main, not read here); s: sum of a; n: count
// Returns whether the configuration is "good": s is divisible by n and,
// for n <= 23, all elements are pairwise congruent modulo every i in
// [2, n); for n > 23 it instead requires all elements to be equal.
bool good(){
bool v=1;
if(n>23){
fo(i,0,n){
if(a[i]!=a[0]){
v=0;
break;
}
}
}
else{
fo(i,2,n){
fo(j,0,n){
if((a[j]%i)!=(a[0]%i)){
v=0;
break;
}
}
// NOTE(review): the break above leaves only the inner loop; the
// remaining i iterations are wasted work but harmless, v stays 0.
}
}
v&=(s%n==0);
return v;
}
// Bonus-problem driver: reads each test case, sorts the piles, then
// answers s if the configuration is already good, s-1 after removing one
// stone from the smallest pile, otherwise -1.
int main() {
cin.tie(0)->sync_with_stdio(0);
cin.exceptions(cin.failbit);
ll T;
cin >> T;
while (T--) {
cin >> n;
a.clear();
a.resize(n);
s=0;
fo(i,0,n){
cin >> a[i];
s+=a[i];
}
sort(a.begin(),a.end());
// lc = lcm(2..n) for small n, large sentinel otherwise.
// NOTE(review): lc is never read after this -- dead computation?
lc=1;
if(n>23){
lc=1e10;
}
else{
fo(i,2,n+1){
lc=(lc*i)/gcd(lc,i);
}
}
if(good()){
cout << s<< "\n";
continue;
}
// Otherwise try removing one stone from the smallest pile (a[0] after sort).
a[0]--;
s--;
if(good()){
cout << s<< "\n";
continue;
}
cout << -1 << "\n";
}
}
Thanks to satyam343 for his help throughout the round.








noooo I want yesterday's rating please this one killed me:(
noooo I want yesterday's rating please this one killed me:( The problem E in yesterday is easy and I hoped to increase much rating by that competition.
same
i did same for a but it give me wrong aaaaahhh
btw great tutorial like what i am thinking thanks
A was very tricky for me to prove that such a strategy gives the minimum number of moves. Yes — in low-rated questions, proving is often trickier than solving.
Questions were really good.
For the case 110100, we can take i = 1, j = 3, k = 4 and the string will become 111000 which is sorted but the answer given in test case is 2 steps
000111 is considered sorted not 111000
$$$f(m) = \sum_{i=0}^m {i^c \cdot (m-i)^{n-c}}$$$ is a polynomial of degree $$$n+1$$$ so just do polynomial interpolation.
That's for non distinct piles, bonus asks for distinct.
It's possible to do use some binomials which would still make it a polynomial of degree $$$n+1$$$ so the most braindead approach is just using some dp to solve for m from 1 to $$$2(n+1)$$$ and interpolating.
It's not that complicated, you can derive something closed form combined with the dp
My principle if that if I don't need to think I don't think.
bruh
instead of $$$f(m) = \sum_{i=0}^m i^c \cdot (m-i)^{n-c}$$$
it should be $$$f(m) = \sum_{i=0}^m \binom{i}{c} \binom{m-i}{n-c}$$$ which to me looks like the coeficient of $$$x^{m-n}$$$ in $$$\frac{1}{(1-x)^{c+1}} \cdot \frac{1}{(1-x)^{n-c+1}} = \frac{1}{(1-x)^{n+2}}$$$ so it should be $$$\binom{n+1+m-n}{n+1}$$$.
It might be off by one somewhere since I did it by just looking at the formulas without checking that carefully.
That's correct
Got clapped but round was nice, thanks !
lol
Hey can anyone tell me what is wrong in my approach. And please while pointing out my mistake, tell me how to think in the way to get to the approach mentioned in the editorial. It would be of great help to me. 337853452
Check it for this tc
Answer should be 3 (swap first and last element), but your code is giving 2 on this tc
Thanks for pointing that out. I faced this testcase during the contest only. I want to know how to build intuition for the solution as given in the editorial for C.
What's case 1 in editorial for F? Doesn't
[3, 0, 1] fall into case 1, while allowing only a single operation? — Array $$$a$$$ is assumed to be sorted in the blog; updating the blog.
337862621
any clue why my code keeps wrong? give me hint plz
If you look at the failing 26th test case, it says:
3 2 2 1
You can see where your logic has fault. Hint: If you do mx-mn then you are also doing index(mx) — index(mn). This will be correct when mx lies to the right of mn, but that's not the case here.
For B , I just saw the consistent occurances of 12/3 , 102/3 , 1002/3 , then i realized that 3 would always divide the numbers having sum of digits factors of 3 , so therefore 2.x is the best solution here
I solved B by observing the pattern and got
y = 10^9 - 1 - x always works — can anyone prove this? Someone did: https://mirror.codeforces.com/blog/entry/146147?#comment-1307743
Yes! let we have chosen y ...d=(length of y****)+1,,and mod=x+y. Then (x#y) = x*(10^d)+y.Then x#y is divisable by x+y;it will hold iff (x*(10^d)+y)%mod=0 i.e (x*(10^d)%mod+y%mod=0 as mod>x then y%mod=y so (x*(10^d))%mod must be x to hold the criteria of (x#y)%mod=0; we already know mod>x and mod>y; so if we can make 10^d %mod==1 we can fillup the criteria....so it will become 1 when mid=10^d-1 i.e x+y=10^d-1;.....thus it is ovbious for every d<i<=9; max value of d=9,,,,we can choose choose y=999999999-x everytime and it will pass every testcase!**____**
We want
We know
Hence for the above to be zero, we need
We also know
Therefore
A number of the form 10^k - 1 will always give remainder 1.
As we want y to have the same number of digits as k, we take the largest possible k, which is $9$.
2140B - Another Divisibility Problem can be solved with other values of $$$y$$$ as well, for example $$$y = 999... - x$$$ depending on $$$x$$$. Because,
$$$x \text{#} y = x \cdot 10^d + y = x \cdot (10^d − 1) + (x + y)$$$
So, to make the $$$10^d - 1$$$, which is basically $$$9999...$$$ for $$$d$$$ times, to be divisible by $$$x + y$$$, we can make $$$x + y$$$ itself be of this shape. For $$$x$$$ to be any $$$k$$$-digit number, $$$x + y$$$ can be of shape with any $$$k$$$ or more digits of $$$9$$$ s. Since the constraints state that $$$x \lt 10^8$$$ and $$$y \lt 10^9$$$, we can set $$$x + y$$$ to be $$$999,999,999$$$ i.e. $$$y = 10^9 - 1 - x$$$.
Also, obviously, $$$y = 8 \cdot x$$$ works for the same reason as tutorial.
Upd: can anybody explain why the downvotes? Cause I'm pretty sure there's nothing wrong in terms of logic cause I literally got AC with this approach. 337798106
E2 Bonus Solution:
Idea:
From this, we could easily compute the number of permutations of $$$n!$$$ with $$$ans = x$$$ (for every x from 1 to N) (i.e.) $$$(ans \ge x) - (ans \ge (x+1))$$$
For a general $$$M \ge 1e9$$$:
If we fix a integer $$$x$$$ as our final answer: For every r (1 to N), Say r is relative ordering of x in the array, (x is rth smallest element in the array)
We need to sum the following:
If observed carefully, $$$\sum x * C(x-1,r-1) * C(m-x,n-r)$$$ when expanded on variable x, it is $$$\sum$$$ ((n+1)^th degree polynomial) , so this is a polynomial of degree n+2.
We can use interpolation, compute n+3 datapoints, and determine the value at $$$x = m$$$ in $$$O(n^2)$$$
So total timecomplexity $$$O(n 2^n + n^3)$$$ which would pass comfortably.
Do let me know if I have made any errors, mistakes, thank you.
As $$$n$$$ is small, we can avoid interpolation using this idea.
For even n, suppose that we have chosen the n/2 segments from which we take the start of the new formed segments. Call this set of segments S1 and the remaining segments S2. Then swapping any segments between these 2 sets cannot increase the answer. On solving the inequality, we get the condition that (l+r) for any segment in S2 > (l+r) for any segment in S1.
What's wrong with my code ? 337868493
Love the reference to my favorite song — "A Cruel Angel's Thesis" by Yoko Takahashi ‧ 1995
"Zankoku na Tenshi no Teeze" from Neon Genesis Evangelion
My solution to Problem D works as follows: the original problem is equivalent to finding a matching among the intervals $$$1, \cdots, n$$$ that maximizes the value
Using the identity $$$\max(A, B) = \frac{A + B}{2} + \frac{|A - B|}{2}$$$, this expression can be rewritten as
For even $$$n$$$, the first term is constant (equal to half the total sum of the lengths of the intervals), and the second term is maximized by partitioning the array $$$a[j] = r_j + l_j$$$ into two halves and subtracting the sum of the smaller half from the sum of the larger half. This can be computed efficiently using prefix sums in $$$\mathcal{O}(1)$$$ time per query after preprocessing.
For odd $$$n$$$, one interval (say $$$i$$$) remains unmatched. We enumerate each $$$i = 1, \cdots, n$$$ as the unmatched interval, and the value in equation $$$(\star)$$$ can be easily evaluated in $$$\mathcal{O}(1)$$$ time using a similar prefix sum approach with minor modifications, resulting to an $$$\mathcal{O}(n)$$$ solution.
This method is basically identical to the one in the editorial, but more clear to me.
the second problem was a humbling experience for me
can someone explain this part This can be solved by maintain prefix minimum over odd and even indexes.
https://mirror.codeforces.com/contest/2140/submission/337817036
for problem E,
dp[i][mask][1] is actually the same as dp[i][~mask][0]; you can just maintain one. — Can someone help me understand why this code of mine for problem C is giving a wrong answer? I am not able to understand.
For problem C, I used a DP approach. Surprisingly, when I implemented it with a regular array (initialized using memset), it resulted in TLE. However, switching to a map for memoization led to an AC. Could someone explain why this happens?
You initialize the whole DP table with $$$8 \cdot 10^5$$$ elements for every test case. Since there are $$$10^4$$$ test cases at most, you're overwriting $$$8 \cdot 10^9$$$ elements in sum, which is too slow.
Oh, I see now. This means my array-based solution ends up doing about $$$2 \cdot 10^5$$$ operations for each test case, which ignores the fact that "the sum of $$$n$$$ over all test cases does not exceed $$$2 \cdot 10^5$$$". In contrast, the map-based solution only stores and processes the states that are actually needed, so its complexity is closer to $$$O(n)$$$ overall. Is that correct?
You're correct. You don't need the whole DP array for a test case with smaller $$$n$$$. If you are given $$$n$$$ for a test case, make sure you only touch $$$O(n)$$$ elements; then your solution won't time out.
I see, thanks!
I have faced so many WAs over the years for this reason that I always take extra care when clearing/initializing arrays. Always clear only n+5 (or 2n + 5) elements, don't touch higher indices since you don't need them.
You'll get used to it haha. Good luck!
Hello Osama, i want to show you a trick you can use when you come across dp problems that involve test cases, as someone previously replied clearing your dp each case is too slow, but also the map memo isn't the best practice when it comes to this sort of problems.
There are two main ways to to this better, first is to give each test case an id, and to distinguish if you have been in this state before in the same test case you check if vis[state] equals the current id. 338980284
Another way is to clear exactly the size you need for this test case, after taking input n, loop and set it to the value you want.
338981249
Hope this helps.
Thanks a lot, Adham — that was really helpful. I like how these approaches let us stick with arrays instead of maps and still avoid TLE. There’s just one thing I’m not fully clear on: in the first solution, the
vis array isn’t initialized. Is it safe to compare its values like that? If not, then initializing it once like int vis[(int)(2e5 + 5)][2][2]{}; should be enough, right? — You're welcome, glad to help.
When an array is declared globally it’s initialized with zeros, same for integers.
Oh, that’s new to me — I thought it would be initialized with garbage like local arrays. Appreciate the clarification, bro :)
where balanced paranthesis come from in d editorial??? (*cry emoji)
How can u directly find out left out segment in O(1) in D editorial ??
Oh WOW, that's a fast editorial.
Loved the problems, and I FINALLY became a specialist.
I got the idea of C, but for those who didn't, I think the change in the statement of C gave it away. B was a little hard tho
2140B extended The editorial was prepared together with my coach, after he tried during the contest to brute-force all divisors of $$$10^k-1$$$ and $$$x$$$ and ran into TLE. Submission.
I like how noone mentioned that question 2140D was just an evangelion reference
bro I didn't got yesterday's contest's rating
This is what I got for B and it got accepted lol. If anyone could lmk why this works that would be cool. I was basing this off the fact that both these numbers will be divisible by 3 as both their digit sums would be divisible by 3 and just kinda tested from there and it worked:
What is wrong in this solution for D:
First we initially compute the sum of all segments , Initial_sum = $$$S = \sum_{i=1}^{n} y_i - x_i$$$ then we want to compute the additional segments length formed by marking these segments, if we sort the $$$l_i$$$'s and $$$r_i$$$'s and then take the sum of (highest $$$r_i$$$ — least $$$l_i$$$) + (2nd highest $$$r_i$$$ — 2nd least $$$l_i$$$) + ..... and add this sum to the Initial_sum
You probably mean the highest r_i — least l_j for distinct i and j. The problem arises in the case when a segment with the largest r_i contains all other segments. In that case we have a choice between r_i of largest segment — second highest l_i and second highest r_i — l_i of largest segment.
Consider these segments: [1, 100], [5, 90], [98, 99]. You select the first two, but the optimal way is to take the first and the third segments.
Okay yeah got it.. thanks!
Can someone please explain the solution of E2 after calculating the dp values. How is it calculating the original answer from binary(0, 1)?
For a configuration where alice wins in easy version with the bits that are 1 here are values that are >= k (for some k we are looping through) and other(0) values are < k then Alice can Force Bob into making the final Answer >=k as it forced Bob into making the final value 1 for the corresponding 0-1 mask. so we just need to count for every k the sum of masks * no of ways to generate this mask (1 filled by >=k value and 0 filled by <k values)
(k−1)^n−r⋅(m−k+1)^r⋅cnt[r]
isn't this line just the number of ways to create a mask with (n-r) places with < k and r places with >=k, cnt[r] times. So then how how are we accounting for the sum of the actual values left at last? What am i missing here?
it is clever way of doing things, it is basically >=1 + >=2 + >= 3 ... which can be re written as 1*(==1) + 2*(==2) + 3*(==3)... which what we are required to find.
You can think of this approach : let's choose y=k*x; Now you can so this :
// you can use all values generated below. you can change 1000 to your number // for 1000 these are valid : 2 8 26 36 100 302 908 // basicaaly formula is to count = 10k/k+1 should be divisible. here 10k is concatination of 10 and k // but here y has limit that can atmost 10*x so we can use only 2 and 8 in this code.
// void solve(){ // ll num=10; // for(ll i=1;i<=1000;i++){ // if(i/num>0) num=num*10; // ll x=num*10+i; // if(x%(i+1)==0){ // cout << i << " "; // } // } // cout << endl; // } ~~~~~ Your code here... ~~~~~
For problem D, a very "neat" way of reaching the greedy conclusion is by expanding the formula of the chosen segments' sum. Let's name the of chosen segments $$$C$$$ and the rest be $$$R$$$.
Our formula is :
If we manage to eliminate contribution of group $$$R$$$ from this equation it will be easy to select group $$$C$$$ to maximize this expression. To do so we can expand the second term, based on this substitution :
Where $$$n$$$ is the input size, by substitution our formula becomes :
Now it's clear to see that maximizing this expression is equivalent to maximizing sum of right and left of a segment, since summation of all lefts is constant.
thanks for the straight forward explanation
Glad to help.
Can anyone explain what they mean in the solution for problem C when it says
This can be solved by maintain prefix minimum over odd and even indexes."
This is very confusing to me.
"In Problem D editorial it says
you can directly find the left out segment in O(1) — can someone explain how to actually do that part?" When I write something/2, I mean the smaller (floored) value (e.g. 7/2 = 3); I do not know how to write that type of brackets in a comment.
Well, when n is odd, from the first sum (with all r) you need to take away one r and n/2 l+r. So, you have n choices for taking away r. Lets sort aray by l+r. Now, find a sum of all r minus sum of first n/2 l+r, I called that difference tmp. If that choice is NOT in first n/2 elements, you should just take away r. You find a minimum the same way you would normally do in an array, just with mathematical expression tmp-r.
Now, when you search in first n/2 elements, first you take away l+r from tmp (before taking away r). But now, you substracted n/2-1 l+r's. That means you have to substract l[n/2]+r[n/2] too.
Thats it — you can count the sum for some i in O(1) because you precalculated some "part" of the sum in tmp. You just have to be careful when examining if element is in first n/2 elements or not. For this I used two for loops because ifs looked messy.
Hope I made it clear. :) Here is the code that worked.
After removing the first $$$\lfloor n/2 \rfloor$$$ elements with the smallest $$$l_i+r_i$$$, we need to remove the element with the smallest $$$r$$$ value among the elements contributing their right end or you can keep this element and remove some element from the first $$$\lfloor n/2 \rfloor$$$ elements (It's the one with the highest $$$l$$$ value).
Here's a brainf*ck solution of 2140B - Another Divisibility Problem for no reason
(Assuming all cells are 32-bit instead of 8-bit as normal, if someone can implement an 8-bit solution I'm more than excited to see it)
I don't understand the complexity in E1, isn't is supposed to be $$$O(n^2 \cdot 2^n)$$$, because of the for loop over "good" vector?
$$$ \sum_{i=1}^{n} 2^i = 2^{n+1}-2$$$
Thank you!!!
taskkill /f /im libmain.exe && taskkill /f /im xhost.exe
Hello, My account was recently flagged for cheating in this round. I would like to clarify my situation.
I did not share my code with anyone, nor did I copy from others. I usually keep my solutions locked, but I understand now that similarity in code can happen naturally since many problems have standard approaches. This might have caused suspicion.
I respect Codeforces and its rules, and I never intended to violate them. If my actions caused unintentional leakage or misunderstanding, I sincerely apologize. I will be more careful in the future to avoid such issues.
I kindly request the admins to review my case. Thank you for your time and understanding.
Similar problem for B from Project Euler. Editorial
In the editorial for problem D, I dont understand:
"The sequence of segments selected can also form a valid balanced parenthesis sequence, and thus a valid pairing."
"If the resulting sequence is not a valid balanced parenthesis sequence, then we can always improve it by swapping a segment from the left end to the right end and another from the right end to the left end."
My question being
how we can swap segments without changing the assumed best possible answer in the case that the sequence is not a balanced parenthesis sequence?
Thanks a lot in advance!
I am not sure, what your question is, can you elaborate?
I understand that part of the editorial
When we finish calculating the best answer from the approach laid out in the editorial, we do not consider whether we can form a valid pairing or not, because, as stated in the editorial, that is always the case.
And then the editorial explains why that is always the case, by first assuming we somehow have an invalid pairing at first. After which, we can fix that invalid pairing, step by step, by swapping a segment from the left-end group with a segment from the right-end group.
When swapping 2 segments: $$$i$$$ from left-end group and $$$j$$$ from right-end group, then $$$ans$$$ for the new pairing should be changed by $$$l_{j}+r_{i}-l_{i}-r_{j}$$$. And so, my question being: how is it possible to always find such a swapping sequence to transform an invalid pairing to a valid pairing, with the net change for $$$ans$$$ is $$$0$$$?
Sorry if I misunderstood the editorial. And please excuse my English as well.
The editorial gives the best possible upper bound for the answer, the claim is that if the bracket sequence formed for this answer is not valid, then that implies there is some right end that comes before its left end, which we can swap, but this swap leads to the answer being improved, thus, going above the upper bound, which is not possible.
As for why a right end comes before a left end, if the sequence is not valid, that is implied from the definition of a regular bracket sequence (prefix sum has to be $$$ \ge 0$$$).
Ah, I got it now, thanks a lot!
Problem C: I'm not quite sure how you're meant to use prefix sums. Can anyone explain?
I instead (338651248) used an approach along the lines of "for each + element, what's the best — element to swap it with?". Explaining exactly how I did it is difficult, but essentially it works by considering how the distances (i.e. abs(l-r)) between + elements and — elements change as you iterate over the + elements.
For problem F, is y taken to be the absolute value of x — floor(x/k)*k? I'm not sure if it's ever possible to get infinitely decreasing sum otherwise.
for problem "another divisibility problem" doesnt x#x % x = 0? so returning x always works, nowhere in the problem can I find something that forbids you from doing that. it can be mathematically proven that x#x % x = 0:
let x be = 42 for example
4242 % 42 = (42 *10^2 + 42) % 42
42(10^2+1) % 42
this equals to zero since 10^2 +1 is an integer, that number multiplied by 42 is divisible by 42. this works for all numbers as long as 2 is substituted by the number of digits x has.
It's x#y % (x+y) == 0 that they're asking, not x#y % x == 0 though
I think there's a slight logical error in Case 2 in the editorial for F. The editorial asserts that if any operation can be performed on the last n-1 elements of the array, then performing that operation will take us to Case 1. This is not true when the operation only modifies one element and that element is equal to the minimum of the array. For example, consider the array [1, 1, 3, 3]. We can perform an operation on the three larger elements, which makes the array [0, 1, 3, 3], which does not fall under Case 1.
The reason the model solution still works is that if this happens, then we can perform the same operation again, replacing the element that was decreased with the original minimum element of the array. At this point, we have two occurrences of the original minimum — 1, so we have an element other than $$$a_1$$$ with the opposite parity of the elements of the original array, putting us into Case 1.
Yes, i am aware of that case, in my rough editorial i had written sometime ago, operation wasn't singular but plural. I had mistakenly changed it, when formalizing it. Thanks for pointing it out.
When receiving the following input, the given code for problem F outputs
-1, but I cannot figure out a way to achieve a sum of less than $$$12$$$. Maybe there are still some special cases when $$$n=3$$$? — Do the operation on the whole array to get a=[1,1,10]; after that you can repeat the operation on the 1st and 3rd elements, and on the 1st and 2nd elements, infinitely.
Sorry, it seems that I misunderstood the problem. I thought it was "decrease the y-th element", haha. Thank you for helping out!
I don't understand this :
Why is the result calculated this way?
what is wrong with this approach for C-