The final 24 hours of preparing this contest have been a wild ride! We really hope you liked the problemset we gave today!
1737A - Ela Sorting Books
Author: low_
...
void execute(int test_number)
{
    cin >> n >> k >> str;
    // count the occurrences of each letter in str
    vector<int> count_char(26, 0);
    for (char c : str) count_char[c - 'a']++;
    string ans = "";
    // for each small letter i, append it while the number of still-unfilled
    // positions exceeds count_char[i]
    for (int i = 0; i < min(25, n / k); i++) {
        while (k - ans.size() > count_char[i]) {
            ans.push_back(i + 'a');
        }
    }
    // pad the remaining positions up to length k with the character min(n / k, 25)
    char c = 'a' + min(n / k, 25);
    while (k > ans.size()) {
        ans += c;
    }
    reverse(ans.begin(), ans.end());
    cout << ans << "\n";
}
...
1737B - Ela's Fitness and the Luxury Number
Author: low_
...
ll l, r;
// binary search for floor(sqrt(x)); avoids floating-point inaccuracy on large values
ll bs_sqrt(ll x) {
    ll left = 0, right = 2000000123;
    while (right > left) {
        ll mid = (left + right) / 2;
        if (mid * mid > x) right = mid;
        else left = mid + 1;
    }
    return left - 1;
}
// main solution goes here:
void execute(int test_number)
{
    cin >> l >> r;
    ll sql = bs_sqrt(l), sqr = bs_sqrt(r);
    ll ans;
    if (sql == sqr) {
        // l and r share the same floor square root: check the three candidates directly
        ans = 0;
        for (int i = 0; i < 3; i++) {
            if (l <= sql * (sql + i) && sql * (sql + i) <= r) ans++;
        }
    } else {
        // every value strictly between sql and sqr contributes exactly 3 luxury numbers;
        // the two boundary values are checked explicitly
        ans = (sqr - sql - 1) * 3;
        for (int i = 0; i < 3; i++) {
            if (l <= sql * (sql + i) && sql * (sql + i) <= r) ans++;
            if (l <= sqr * (sqr + i) && sqr * (sqr + i) <= r) ans++;
        }
    }
    cout << ans << "\n";
}
...
1737C - Ela and Crickets
Author: low_
...
int n;
int x[3], y[3];
int u, v;
// the "corner" cell of the L-shape: its x appears twice among the crickets, and so does its y
pii centralSquare() {
    int a = (x[0] == x[1]) ? x[0] : x[2];
    int b = (y[0] == y[1]) ? y[0] : y[2];
    return {a, b};
}
// main solution goes here:
void execute(int test_number)
{
    cin >> n;
    for (int i = 0; i < 3; i++) cin >> x[i] >> y[i];
    cin >> u >> v;
    int cx = centralSquare().first, cy = centralSquare().second;
    if ((cx == 1 || cx == n) && (cy == 1 || cy == n)) { // "corner" case, literally
        // the crickets can only reach cells on the two border lines that already contain
        // at least two crickets, i.e. the row and the column through the central square of the L
        cout << ((u == cx || v == cy) ? "YES\n" : "NO\n");
    } else {
        if ((cx + cy) % 2 == (u + v) % 2) {
            // same color as the central square: reachable iff the coordinate parity also matches
            cout << (cx % 2 == u % 2 ? "YES\n" : "NO\n");
        } else {
            // can be proven to always be reachable, since we have ways to align
            // two crickets on the same diagonal as the target
            cout << "YES\n";
        }
    }
}
...
1737D - Ela and the Wiring Wizard
Author: low_
#include <bits/stdc++.h>
#define ll long long
#define db long double
#define ull unsigned long long
#define x first
#define y second
#define mp make_pair
#define pb push_back
#define all(a) a.begin(), a.end()
using namespace std;
#define pper(a) cerr << #a << " = " << a << endl;
void per() { cerr << endl; }
template<typename Head, typename... Tail> void per(Head H, Tail... T) { cerr << H << ' '; per(T...); }
template<class T> bool uin(T &a, T b) { return a > b ? (a = b, true) : false; }
template<class T> bool uax(T &a, T b) { return a < b ? (a = b, true) : false; }
template<class U, class V>
ostream& operator<<(ostream& out, const pair<U, V>& a) {
return out << "(" << a.x << ", " << a.y << ")";
}
template<class U, class V>
istream& operator>>(istream& in, pair<U, V>& a) {
return in >> a.x >> a.y;
}
template<typename W, typename T = typename enable_if<!is_same<W, string>::value, typename W::value_type>::type>
ostream& operator<<(ostream& out, const W& v) { out << "{ "; for (const auto& x : v) out << x << ", "; return out << '}'; }
template<class T>
void readArr(T from, T to) {
for (auto i = from; i != to; ++i) cin >> *i;
}
mt19937 mrand(1337);
unsigned int myRand32() {
return mrand() & (unsigned int)(-1);
}
unsigned ll myRand64() {
return ((ull)myRand32() << 32) ^ myRand32();
}
const int mod = 1000000007;
void add(int& a, int b) {
a += b; if (a >= mod) a -= mod;
}
void dec(int &a, int b) {
a -= b; if (a < 0) a += mod;
}
int mult(int a, int b) {
return a * (ll)b % mod;
}
int bp(int a, int b) {
int res = 1;
while (b > 0) {
if (b & 1) res = mult(res, a);
a = mult(a, a);
b >>= 1;
}
return res;
}
const int N = 507;
ll f[N][N];
int main(){
#ifdef LOCAL
    freopen("N_input.txt", "r", stdin);
    //freopen("N_output.txt", "w", stdout);
#endif
    ios_base::sync_with_stdio(0);
    cin.tie(0);
    int t;
    cin >> t;
    for (int a = 0; a < t; ++a) {
        int n, m;
        cin >> n >> m;
        // f[i][j] = minimum number of edges on a path between i and j (every edge counts as 1)
        for (int i = 0; i < n; ++i) {
            for (int j = 0; j < n; ++j) {
                f[i][j] = 1e18;
            }
            f[i][i] = 0;
        }
        vector<tuple<int, int, int> > ed;
        for (int i = 0; i < m; ++i) {
            int u, v, w;
            cin >> u >> v >> w;
            ed.pb(make_tuple(u - 1, v - 1, w));
            f[u - 1][v - 1] = 1;
            f[v - 1][u - 1] = 1;
        }
        // Floyd-Warshall on the unweighted graph
        for (int k = 0; k < n; ++k) {
            for (int i = 0; i < n; ++i) {
                for (int j = 0; j < n; ++j) {
                    f[i][j] = min(f[i][j], f[i][k] + f[k][j]);
                }
            }
        }
        ll ans = 1e18;
        for (auto x : ed) {
            int u = get<0>(x);
            int v = get<1>(x);
            int w = get<2>(x);
            // per(ans, u, v, w);
            // case 1: drag one endpoint of the edge to vertex 1 and the other to vertex n
            ans = min(ans, (ll) w * (f[0][u] + f[n - 1][v] + 1));
            ans = min(ans, (ll) w * (f[0][v] + f[n - 1][u] + 1));
            // per(ans, u, v, w);
            // case 2: drag an endpoint to an intermediate vertex i, form a self-loop there,
            // then expand the loop towards vertex 1 and vertex n
            for (int i = 0; i < n; ++i) {
                ans = min(ans, (ll) w * (f[v][i] + 1 + f[i][0] + f[i][n-1] + 1));
                ans = min(ans, (ll) w * (f[u][i] + 1 + f[i][0] + f[i][n-1] + 1));
            }
        }
        cout << ans << '\n';
    }
}
1737E - Ela Goes Hiking
Author: ngfam_kongu
...
// data preprocessing: (e.g.: divisor generating, prime sieve)
ll POW2[mn];
void preprocess()
{
    POW2[0] = 1;
    for (int i = 1; i < mn; i++) POW2[i] = POW2[i - 1] * 2 % mod;
}
// global variables:
ll n;
// modular exponentiation; POW(x, mod - 2) gives the modular inverse of x
ll POW(ll u, ll v) {
    if (v == 0) return 1;
    ll mid = POW(u, v / 2);
    mid = (mid * mid) % mod;
    return (v & 1) ? (mid * u % mod) : mid;
}
// main solution goes here:
void execute(int test_number)
{
    cin >> n;
    if (n == 1) {
        cout << "1\n";
        return;
    }
    // ans[i]: answer for index i; sufsum[i]: suffix sum of ans over [i, n]
    vector<ll> ans(n + 1, 0), sufsum(n + 1, 0);
    sufsum[n] = ans[n] = POW(POW2[(n - 1) / 2], mod - 2);
    for (int i = n - 1; i > 1; i--) {
        ans[i] = POW(POW2[(i + 1) / 2], mod - 2);
        if (2 * i <= n) ans[i] = ans[i] * (1 - sufsum[i * 2] + mod) % mod;
        sufsum[i] = (sufsum[i + 1] + ans[i]) % mod;
    }
    for (int i = 1; i <= n; i++) cout << ans[i] << "\n";
}
...
1737F - Ela and Prime GCD
Author: blobugh
Observation: assume that x is a composite number and a divisor of n. Then, among all multiples of x, the number that are also divisors of n must be at most m/2.
First, factorize n. If n has a divisor w of the form a^4, a^3b^2, or a^2b^2c^2 (with a, b, c distinct primes), it can be proved that there is no answer.
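For example (reading m as the total number of divisors being arranged): if n = a^4 for a prime a, then among the divisors a, a^2, a^3, a^4 the three divisors a^2, a^3, a^4 are multiples of the composite a^2, which is more than half of them (whether or not 1 is counted), contradicting the observation above; the other two forbidden shapes can be ruled out by a slightly more careful argument along the same lines.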
Otherwise, there are two cases.
If the maximum possible exponent of a prime factor is 2, place the divisors like this: 1 a^2b^2 b a^2b b^2 ab a^2 ab^2 a / 1 a^2 a (the pattern before the slash is for two primes with exponent 2, the one after it for a single such prime). Then expand the sequence with each new prime factor as follows:
- Repeat the current sequence twice: 1 a^2b^2 b a^2b b^2 ab a^2 ab^2 a 1 a^2b^2 b a^2b b^2 ab a^2 ab^2 a / 1 a^2 a 1 a^2 a.
- Multiply the odd-indexed elements of the first half and the even-indexed elements of the second half by the new prime factor (index 1 is an exception): 1 a^2b^2 bc a^2b b^2c ab a^2c ab^2 ac c a^2b^2c b a^2bc b^2 abc a^2 ab^2c a / 1 a^2 ab b a^2b a.
- If more prime factors exist, jump to the "Otherwise" case below.
Otherwise, place the divisors like this: 1 a^3 a a^2 / 1 a. Now the exponents of all other prime factors are 1, and we can expand the sequence as follows:
- Repeat the current sequence twice: 1 a^3 a a^2 1 a^3 a a^2 / 1 a 1 a.
- Multiply the even-indexed elements of the first half and the odd-indexed elements of the second half by the new prime factor: 1 a^3b a a^2b b a^3 ab a^2 / 1 ab b a.
- If the maximum exponent is 3, swap a and b: 1 a^3b b a^2b a a^3 ab a^2. Otherwise, swap b and ab: 1 b ab a.
In this way, we can expand the sequence one new prime factor at a time.
#include <bits/stdc++.h>
#define endl '\n'
using namespace std;
int m, t, b[18], check[18], cnt[5];
vector<int>v;
vector<vector<int>>a;
void initialize(int m)
{
fill(b, b + m + 1, 0);
fill(check, check + m + 1, 0);
fill(cnt, cnt + 5, 0);
v.clear();
a.clear();
}
void insert1(int p1, int c1)
{
v[p1] = c1;
a.push_back(v);
}
void insert2(int p1, int p2, int c1, int c2)
{
v[p1] = c1;
v[p2] = c2;
a.push_back(v);
}
void f1(int x)
{
int n = a.size();
for(int i = 0; i < n; i++)a.push_back(a[i]);
for(int i = 0; i < n; i += 2)
{
a[i + 1][x] = 1;
a[i + n][x] = 1;
}
swap(a[n / 2], a[n]);
}
void f2(int x)
{
int n = a.size();
for(int i = 0; i < n; i++)a.push_back(a[i]);
for(int i = 1; i < n; i += 2)
{
a[i + 1][x] = 1;
a[i + n][x] = 1;
}
a[n][x] = 1;
}
void f3(int x)
{
int n = a.size();
for(int i = 0; i < n; i++)a.push_back(a[i]);
for(int i = 0; i < n; i += 2)
{
a[i + 1][x] = 1;
a[i + n][x] = 1;
}
swap(a[n], a[2 * n - 1]);
}
int main()
{
ios::sync_with_stdio(0);
cin.tie(0);
for(cin >> t; t--;)
{
cin >> m;
for(int i = 1; i <= m; i++)
{
cin >> b[i];
cnt[min(b[i], 4)]++;
}
if(cnt[4] || cnt[3] >= 2 || cnt[3] && cnt[2] || cnt[2] >= 3)
{
cout << -1 << endl;
initialize(m);
continue;
}
for(int i = 1; i <= m; i++)v.push_back(0);
a.push_back(v);
if(cnt[2])
{
int p1 = -1, p2 = -1;
for(int i = 1; i <= m; i++)
{
if(b[i] == 2)
{
if(~p1)p2 = i - 1;
else p1 = i - 1;
}
}
if(~p2)
{
insert2(p1, p2, 2, 2);
insert2(p1, p2, 0, 1);
insert2(p1, p2, 2, 1);
insert2(p1, p2, 0, 2);
insert2(p1, p2, 1, 1);
insert2(p1, p2, 2, 0);
insert2(p1, p2, 1, 2);
insert2(p1, p2, 1, 0);
check[p1 + 1] = check[p2 + 1] = 1;
}
else
{
insert1(p1, 2);
insert1(p1, 1);
check[p1 + 1] = 1;
}
for(int i = 1; i <= m; i++)
{
if(check[i])continue;
if(a.size() % 2)f2(i - 1);
else f3(i - 1);
}
}
else
{
if(cnt[3])
{
int p = 0;
for(int i = 1; i <= m; i++)
{
if(b[i] == 3)p = i - 1;
}
insert1(p, 3);
insert1(p, 1);
insert1(p, 2);
check[p + 1] = 1;
}
else
{
insert1(0, 1);
check[1] = 1;
}
for(int i = 1; i <= m; i++)
{
if(!check[i])f1(i - 1);
}
}
for(auto &v: a)
{
if(*max_element(v.begin(), v.end()))
{
for(auto &p: v)cout << p << ' ';
cout << endl;
}
}
initialize(m);
}
}
1737G - Ela Takes Dancing Class
Author: magnified
Do you have the proof for C, that "somehow" they can reach point (x, y)?
How did you come to this conclusion?
C is the worst competitive programming problem I've ever seen.
Glad that you learned something new today!
learnt what? casework? man, make better problems.
You haven't learned it enough if you cannot do it on your own. I can never stress enough how important casework is, even for the simplest tasks!
A word from a guy with multiple years of experience in the workforce ^_^
you stressed enough today
This task makes no sense in the context of competitive programming. One might say that corner cases have a really important meaning in the development of larger projects, but it's weird and utterly useless to make a CP task based only on corner cases. Every task should have at least some creative idea and should not be straightforward, annoying paperwork. I find it really sad that instead of taking critique and trying to improve, you just fixed your opinion on casework being cool.
I want to add that I do consider how hard it is to come up with good and creative problems, but that is no excuse for being closed to everyone else's opinion. I really hope that next time you will be able to come up with an amazing problemset to everyone's liking! (Today I really enjoyed problem D, but the problems up to D were honestly not to my taste.)
It's actually a task based on invariance properties in game theory and discrete math. The idea is based on the "Grasshopper" chess variant on Chess.com; I came up with it with my co-workers in our brainstorming sessions and had them scratching their heads for the whole afternoon!
The only casework was the "corner" case (which is pretty direct, tbh). So obviously, it's not a task "based on" corner cases.
I think you're upset about how the problem slowed you down. But hey, people wonder what if they had done better all the time. =)
another day to be sad
same my friend, same
No, we didn't. Please, don't make contests again.
Can't agree more
why are all the critiquing comments removed?
we aren't on web3 yet
Someone removed the level 1 comment, and then brought the whole subthread down to the void :D
The person who deleted the comment probably prevented a long comment-war thread XD, so it might have been for a good reason.
Is there any efficient way to count the number of ordered partitions of $$$[n]$$$ with each part having fewer than $$$k$$$ elements? What I am thinking for E is that:
Conditions for the i-th ant to win:
I was also thinking along these lines, but this misses a crucial point. We don't need the ant $$$i$$$ to be bigger than all of the ants going to the right on its left because ant $$$i$$$ will get bigger each time it eats a new one! For example, if the ant has size $$$3$$$ and it's facing a series of ants of sizes $$$2, 4, 5, 10$$$, it will end up surviving.
So the relevant combinatorial question isn't the number of ordered partitions of $$$[n]$$$ with each part containing $$$\leq k$$$ elements, but rather something like: in an $$$(n-k)$$$-bit binary string, what's the probability that there's a run of length at least $$$r+k$$$ starting at position $$$r$$$?
OMG you are right... I missed that observation :-(
my head sucks :).
It is probably due to floating point inaccuracy for large numbers.
From the tutorial:
Also note that, since large numbers can cause inaccuracies in floating point computation, we should use binary search to find the floor-value of a square root, instead of using the sqrt function in any language.
But the error is not happening on my PC.
Probably your Python environment or version or something is different from what Codeforces uses in the judge environment. It's possible that your PC would fail a test case that succeeds on Codeforces.
In general, you need to be aware of such floating-point inaccuracies for large numbers.
Yes, I used integer sqrt; it is accepted now.
Accept the flaw and learn from it. Never repeat it.
yeah you are right
If we don't use binary search in problem B, the answer would overflow, right? (in a very "long long" test case)
https://mirror.codeforces.com/blog/entry/107722?#comment-960613
Ohh alright, the precision error was the problem here. Thanks!
Sad to use sqrt instead of sqrtl in problem b ಥ‿ಥ
huge gap between C and D
B: How come
((ll)sqrt(x) * (ll)sqrt(x))
can be greater than x?
AC: 175053872 WA: 175045921
Floating-point precision loss.
Maybe you could use
sqrtl(x)
to get $$$\lfloor\sqrt x\rfloor$$$ [but we won't]
If you can prove it, do it...
I didn't understand the equation of problem B. How did we get it? Can anyone explain?
(1 2 3) (4 6 8) (9 12 15) (16 20 24) and so on. If x = 15, sqrt(15) = 3, ans = 3*3 = 9 => (1,2,3, 4,6,8, 9,12,15).
This is just an example; what I mean is, how did we conclude it?
Let's say that $$$\left \lfloor{\sqrt{x}}\right \rfloor=y.$$$ That means that $$$y \leq \sqrt{x}<y+1$$$, so $$$y^2 \leq x < (y+1)^2 = y^2+2y+1$$$. Obviously the smallest valid value of $$$x$$$ divisible by $$$y$$$ is $$$y^2$$$, the next one is $$$y \cdot (y+1)$$$, then the next one is $$$y \cdot (y+2)$$$, and $$$y \cdot (y+3)$$$ is too large.
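If it helps, here is a tiny brute-force check of that derivation (my own sketch, not from the editorial): for every $$$y$$$ it verifies that the values $$$x$$$ with $$$\lfloor\sqrt{x}\rfloor = y$$$ and $$$y \mid x$$$ are exactly $$$y^2$$$, $$$y(y+1)$$$, $$$y(y+2)$$$.
#include <bits/stdc++.h>
using namespace std;
int main() {
    // brute force over every y: list all x in [y^2, (y+1)^2) that are divisible by y
    for (long long y = 1; y <= 1000; y++) {
        vector<long long> lux;
        for (long long x = y * y; x < (y + 1) * (y + 1); x++)
            if (x % y == 0) lux.push_back(x);
        // expect exactly y^2, y*(y+1), y*(y+2)
        assert((lux == vector<long long>{y * y, y * (y + 1), y * (y + 2)}));
    }
    cout << "the claim holds for every y up to 1000\n";
}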
thank you dorijanlendvaj, you helped me too
Can someone explain test case 1 of D? Why, after the change, is the travel time from machine 1 to 8 equal to 6?
Disconnects $$$(2, 3)$$$ and connects $$$(2, 8)$$$.
What is this &mdash; in the solution code of E?
Judging from the editorial, it means a minus sign "-". I don't know what went wrong, but in some places "-" seems to have been turned into "&mdash;".
Difficult Round
Can anyone please explain the 2nd case in the editorial of D (Ela and the Wiring Wizard)?
If you try to find the answer for sample test case #3 of D, you will find that case 1 from the solution cannot produce the correct answer.
In this test case, the correct solution would be:
- take the edge connecting vertices (2, 5) and connect it to (2, 6);
- take the edge now connecting (2, 6) and connect it to (2, 4);
- take the edge now connecting (2, 4) and connect it to (4, 4);
- take the now self-loop edge at (4, 4) and connect it to (4, 7);
- take the edge now connecting (4, 7) and connect it to (4, 1);
- take the edge now connecting (4, 1) and connect it to (1, 8).
The answer is (22 * 6 + 22) = 154.
So what happened is: we saw that the edge connecting vertices (2, 5) might be just what we need for the best possible path, but if we apply case 1 to it, the total time won't be the shortest we could get. We could instead take one of its endpoints and connect it to an intermediate vertex x (a vertex that lies on the shortest path from 1 to n), and then create a self-loop on this intermediate vertex (total time up to now: dist[u][x] + 1).
Now we can just expand this self-loop on both sides: dist[x][1] + dist[x][n].
And one last move for actually travelling this edge: 1.
Total time => (dist[u][x] + 1 + dist[x][1] + dist[x][n] + 1) * w.
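For reference, here is how those two cases map to code; this is only a condensed sketch of the candidate evaluation already performed in the editorial's solution above, with my own names (dist[i][j] = minimum number of edges between i and j, vertices 0-based).
#include <bits/stdc++.h>
using namespace std;
typedef long long ll;
// evaluate both cases for every edge (u, v, w); dist holds 1-weighted shortest path lengths
ll best_answer(int n, const vector<array<ll, 3>>& edges, const vector<vector<ll>>& dist) {
    ll best = LLONG_MAX;
    for (auto [u, v, w] : edges) {
        // case 1: drag one endpoint of the edge to vertex 1 and the other to vertex n
        best = min(best, w * (dist[0][u] + dist[v][n - 1] + 1));
        best = min(best, w * (dist[0][v] + dist[u][n - 1] + 1));
        // case 2: drag an endpoint to an intermediate vertex x, spend +1 to form a self-loop,
        // expand it towards vertices 1 and n, then +1 to finally cross the edge
        for (int x = 0; x < n; x++) {
            best = min(best, w * (dist[u][x] + 1 + dist[x][0] + dist[x][n - 1] + 1));
            best = min(best, w * (dist[v][x] + 1 + dist[x][0] + dist[x][n - 1] + 1));
        }
    }
    return best;
}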
I get that by applying the formula for case 1, the answer would be suboptimal. But how do you apply the case 1 to sample test case 3 in the graph? How to connect both 1 to u and v to n for the edge between (2,5)?
1 to u will be (1,7,6,5) and v to n will be (2,5,6,4,8). The total time becomes (3+4+1)*22 = 176,
which is suboptimal, so we go for case 2 and get a better answer.
That's fine what I want to know is how you break the edge. If you break it the way you mentioned above, that would cost 65 each time (since the edge between 1 and 7 costs 65). Do you get what I want to convey?
We break the edge between u and v. In the case I described, we break the edge between (2,5) and connect it to (2,6), then (2,7), then (2,1). Its cost becomes dist[1][5] * 22.
Then we take the edge (2,1), break it to join it to (1,5), then (1,6), then (1,4), then (1,8). The cost becomes dist[2][8] * 22.
This is for case 1. For case 2 we also start by breaking the edge between (2,5), but once u reaches an intermediate vertex we like, we create a self-loop and expand it to both sides.
Firstly, you broke the edge between (2,5), so 2 and 5 are no longer connected. Then later on you connected 1 to 5 directly by breaking the edge between (2,1). How is that possible, since 5 is not connected to 2 anymore?
Yeah, you are right, it is not possible like this. Good find.
Though it is still possible to achieve the same result: I would first break (2,5) to make a self-loop on (2,2), then expand one side to 1 and the other to 8.
I think this isn't possible either, because the self-loop on (2,2) can't be expanded: there is no edge connected to 2 other than the (2,5) one, which we broke.
Oh sorry, I didn't look at the graph again before replying. It should be a self-loop on (5,5); I misremembered the graph.
Can someone please help clarify what the second and third cases for problem C are talking about and why they work? Thanks in advance.
On the 4th line they might have meant to say: "Assuming that the non-central pieces are on the dark squares, we consider 3 cases:"
^ That made things clear to me by picturing/dry-running it on a chessboard: https://lichess.org/editor/8/8/1p6/8/4Q3/3QQ3/8/8_w_-_-_0_1?color=white
Why does the solution say that $$$[1,2 * i - 1]$$$ and $$$[2 * i,n]$$$ are independent in problem E?
Why does __int128 give CE on C++14 and AC on C++20?
GNU G++14 on Codeforces generates 32-bit executables, apparently. I don't think __int128 is available on 32-bit targets yet.
How did the author write such an obscure topic?
Can you point out which one is obscure?
In D, how do you show the following things? 1. It's never optimal to touch any edge apart from the one that we are trying to put in the 1-n spot. 2. The lower bounds provided are always feasible, even in the simplest case where you never "merge" the ends of the edge that is being moved. How do you prove that the path "will not break" or something along those lines during the process? This doesn't seem very hard, but I have some trouble finding a clean proof.
Maybe I have an answer to Q1.
Suppose the shortest path a->c is a->b->c; WLOG let dist(a,b) < dist(b,c) (cases with more edges are similar). So the cost is dist(a,b) + dist(b,c).
However, we can rewire a->b to get a->c; then the total cost is 2*dist(a,b), which is less than the previous one. Based on this, we can always reduce the optimal answer to only one edge.
Yeah, that's clear. I'm rather asking about cases where we rewire other edge(s) to deliver "our" edge to the 1-n spot faster (while we still have only one edge from 1 to n in the end).
Oh, sorry for misunderstanding.
So in general the answer is (number of rewiring moves) * (edge weight), and now you are concerned about reducing the first term instead of simply using the Floyd shortest path, right?
Roughly, yes. And I can see some "sketch" of a proof of that, but there are so many (nested) cases etc. that it doesn't convince me at all. I assume there should exist something concise.
If it's optimal to use any other edge, then this is not the edge that will give us the optimal answer, and that only happens when there is an edge with a smaller weight than the current one on this path. I wrote a proof for a similar question from my friend, so I am copy-pasting it here.
We can prove that it is always better to make an edge directly connect 1 and n. (quoting editorial)
Can someone give proof of that? (Problem D)
Imagine you have a sequence of edges in a path, say $$$1, 2, 3, \ldots, k$$$ for simplicity. If the minimum weight among $$$w_1, w_2, w_3, \ldots, w_k$$$ is $$$x$$$, then $$$w_1 + w_2 + w_3 + \ldots + w_k \geq x \cdot k$$$, and the cost of rewiring everything so that 1 and n are connected by the edge with weight $$$x$$$ is $$$x \cdot (k - 1)$$$, so you can always achieve $$$x \cdot k$$$ by rewiring; it never hurts.
Note this is true for any path; since we are checking all the edges, the path which gives the optimal answer will also be included.
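A tiny illustration with made-up numbers: take a path $$$1 \to a \to b \to n$$$ with edge weights $$$5, 2, 7$$$, so $$$k = 3$$$ and $$$x = 2$$$. Walking the path directly costs $$$5 + 2 + 7 = 14$$$, while performing $$$k - 1 = 2$$$ rewiring operations with the weight-$$$2$$$ edge and then crossing it costs $$$2 \cdot 2 + 2 = x \cdot k = 6$$$, which is indeed no worse.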
I don't think you answered my question. Have you seen the discussion above? I'm not concerned about the fact that the optimal solution consists of exactly one edge, but rather about whether the fixed edge could possibly be delivered to the 1-n spot faster using other rewires.
I guess you only read the "Question" part. My proof did contain it even though he didn't ask for it. I will be more specific this time.
Let's say there was a segment $$$a - b - c$$$ (possibly $$$a = c$$$) in some path; after one operation we can "contract" it and make it $$$a - c$$$. This doesn't induce any new paths but rather contracts already existing ones. So we can say the final edge is just a contraction of one of the initial paths from $$$1$$$ to $$$n$$$. Note this is true for any path, not necessarily the shortest one.
But what's the optimal way to contract a path? That's what I did in my proof: you only need the one edge that has minimum weight on that particular path.
Let's say you need to use another edge with weight $$$x$$$ to rewire the current edge; then obviously $$$x$$$ is smaller than the weight of the current edge. So the cost of rewiring everything on this path with the weight-$$$x$$$ edge is less than the cost of rewiring with the current edge, and it's better not to use the current edge at all.
Note this is also true for any path. By induction, this proves you only need one edge. ¯\_(ツ)_/¯
I was asleep, this reply is a bit late.
If I understood correctly,
you say that if we can wire our current edge from 1 to n faster by using another smaller-weighted edge, why don't we use that edge to speed up our "rewiring",
and badlad says that if we can use some other smaller-weighted edge to speed up our "rewiring" process, there is no point in using our current edge to wire 1 and n,
because if we can use some edge to speed up our rewiring, that means this edge is on the same path as our current edge and its weight is smaller than our current edge's,
and if we have another smaller-weighted edge on a path that contains our current edge, there is no point in trying to rewire our current edge (the proof is given by badlad in the first reply).
For the final edge $$$f$$$ which will be between $$$1-n$$$ at the end, I think we can prove that using it only is optimal as follows:
When we re-wire we have $$$2$$$ options:
Conclusion of point $$$2$$$:
We can always achieve a bypasses count by $$$f$$$ alone no greater than when there are other edges as well contributing in the bypasses count.
More elaboration on why we can use only $$$1$$$ edge in all the re-wires:
Observe that re-wiring works bidirectionally, e.g., for $$$e_1(u-v)$$$ and $$$e_2(v-w)$$$, we can either use $$$e_1$$$ or $$$e_2$$$ to achieve the same new end points $$$u-w$$$. This means that whenever we make $$$2$$$ rewires with different edges sharing an end-point, e.g., for $$$e_1(u-v)$$$, $$$e_2(v-w)$$$, and $$$e_3(w-x)$$$:
then we could have always chosen the cheapest edge to perform both the re-wires while achieving the same new end points. e.g., if the cheapest is $$$e_2$$$, the sequence of re-wires could be:
I don't have an answer for you, but I agree that these are points that should be addressed more carefully. For example, it is not true that for an arbitrary edge $$$e$$$, the cheapest way to move this edge to go between $$$1$$$ and $$$n$$$ involves operations with $$$e$$$ only. Intuitively, the problem might still be okay because, in these cases, any helper edges which are moved just end up being better choices as the final edge. (That is not a proof, of course.)
The point is, if the rewiring could be sped up by using some other edge, then our choice of the optimal edge to place between 1 and n was wrong, which is why this method works: it takes the minimum over all possible edges.
To state it more formally: if an edge e can be placed faster between 1 and n by using an intermediate edge f to speed up the process, then edge f is a better choice to place between 1 and n.
Isn't that true for a path, though? If its initial length was k, then no matter how you reduce it to "the final one edge", its cost will always be a sum of k terms. So the best you can do is just pick the one edge that has minimum weight.
Or am I assuming something wrong here?
Why the downvotes? If you can explain why I am wrong, then please do; I want to know.
A bit late but you can prove it like this:
Consider
for a fixed edge. You can prove that with any operation it decreases by at most 1 with a small case work.
Hi low_, sorry for the ping.
Can you please explain how you came up with the idea for problem C? The observation was quite simple, but it took me a long time to spot it, so I want to know how you thought of it in the first place, so that next time I can try that approach too.
Sorry for bad English.
I put 3 pawns on a chessboard, moved them around like the crickets, and looked at the pattern.
I got the idea from the grasshopper chess variant, which you can find on chess.com.
Thanks for the reply. Really enjoyed the contest.
And after finding some pattern on a chessboard, you thought, "oh cool, this will make a nice competitive programming problem"? It has absolutely nothing to do with competitive programming.
What do you know abt competitive programming? XD
I don't know much, but I know you're not a good problem setter.
well if C isn't related to competitive programming, then I don't know what is.
Well I can't ever argue with unranked ppl, can I? XD
in problem D, "We can prove that it is always better to make an edge directly connect 1 and N."
Here is the proof if anyone’s not getting the hang of it.
The same code gets AC when I use isqrt (https://mirror.codeforces.com/contest/1737/submission/175080288), while using sqrt gives WA on test 4 (https://mirror.codeforces.com/contest/1737/submission/175044806). I don't know why.
We can prove that it is always better to make an edge directly connect 1 and n.
Can someone give proof of that? (Problem D)
Imagine you have a sequence of edges in the path, say $$$1, 2, 3, \ldots, k$$$ for simplicity. If the minimum weight among $$$w_1, w_2, w_3, \ldots, w_k$$$ is $$$x$$$, then $$$w_1 + w_2 + w_3 + \ldots + w_k \geq x \cdot k$$$, and the cost of rewiring everything so that 1 and n are connected by the weight-$$$x$$$ edge is $$$x \cdot (k - 1)$$$, so you can always achieve $$$x \cdot k$$$ by rewiring; it never hurts. Note this is true for any path; since we are checking all the edges, the path which gives the optimal answer will also be included.
Trash problem. Coordinators should survey authors better. We've reached new peaks of stupidity.
Marinush
little boy crying how cute
Thank you for floppily bullying me. I will speedrun the days until you correct your paths, stop feeling so superior. Apologies if this does not fit your view.
Marinush
In B you should not need to write a new sqrt; if you use C++ you can use
int r2 = sqrtl(r)
then r2 will be the floor of sqrt(r).
Be careful to use sqrtl, not sqrt, because the input is long long, not int.
I didn't understand why we should use sqrtl.
sqrt returns a double, which is accurate enough for int-sized inputs,
but sqrtl returns a long double, so it's better to use it for accuracy on long long inputs and to avoid losing precision.
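A minimal sketch of that approach (my own, not from the editorial): take sqrtl's result and correct it by a step or two so that it is exactly floor(sqrt(x)) even for values near 10^18.
#include <cmath>
typedef long long ll;
// floor(sqrt(x)) for 0 <= x <= 1e18: start from sqrtl and fix possible rounding errors
ll isqrt_ll(ll x) {
    ll s = (ll)sqrtl((long double)x);
    while (s > 0 && s * s > x) s--;          // sqrtl rounded up
    while ((s + 1) * (s + 1) <= x) s++;      // sqrtl rounded down
    return s;
}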
When I used sqrt I got WA, but when I used sqrtl I got AC. Why did I get it wrong with sqrt? Submission: 175144961
Proof for $$$C$$$:
Assuming that the central cell is labeled $$$X$$$ and the other two cells are labeled $$$O$$$, for the following initial configuration (and equivalently for the other three valid configurations as well):
We can cover the following cells (A similar pattern can be achieved vertically as well):
After analyzing this pattern we can see that all white diagonals (equivalently, all white cells) can be covered, and that all black cells in a column with the same parity as $$$X$$$'s initial column (equivalently, in a row with the same parity as $$$X$$$'s initial row) can be covered. Since each move shifts a cricket by 2 rows or 2 columns, black cells in columns (equivalently rows) of the other parity are unreachable by definition.
However, the previous pattern requires at least one of $$$X$$$'s initial row and column to be neither $$$1$$$ nor $$$n$$$ (i.e., the initial $$$X$$$ must not be in a corner); otherwise the pattern cannot be built and only the initial row and column of $$$X$$$ can be covered.
A little late, but I think my approach is exactly the same as yours.
If you could take a look at this and tell me what I am doing wrong.
UPD: Got it
Can anyone please explain test case 3 of problem D? OK, never mind, found it above.
Can someone tell me why it is optimal to directly connect 1 to n, and how we came up with using shortest paths where every edge has weight 1? Please help.
Can anyone explain the need for creating the self-loop in problem D?
Can anyone tell me how to solve C-level [1400-1600 rated] problems faster? What I have observed is
Can someone explain the 3rd sample test case of problem D?