Several unexpected Kuhn solutions passed for D1F. Could you please discuss your solutions in the comments and prove their correctness, or provide counter-examples? The author's solution uses flows with Dinic.
Editorial is not completed yet. Problem D1F will be added later. Hope you enjoyed the problemset!
Editorial was/will be written by bthero and BledDest.
Our tester namanbansal013 has made amazing video-tutorials on YouTube for problems D2D/D1B and D2E/D1C. Make sure to check them out and show him some love!
Finally added the editorial for D1E. Currently it is very complicated and error-prone.
Div2A by bthero
// chrono::system_clock::now().time_since_epoch().count()
#include<bits/stdc++.h>
#define pb push_back
#define eb emplace_back
#define mp make_pair
#define fi first
#define se second
#define all(x) (x).begin(), (x).end()
#define debug(x) cerr << #x << " = " << x << endl;
using namespace std;
typedef long long ll;
typedef pair<int, int> pii;
const int MAXN = (int)1e3 + 5;
int n, k;
int arr[MAXN];
void solve() {
    // Div2A: pick the global minimum pile; every other pile i can receive
    // copies of arr[mn] while it stays <= k.  The number of additions for a
    // pile is floor((k - arr[i]) / arr[mn]) -- computed in O(1) here instead
    // of the original O(k / arr[mn]) repeated-addition loop.  Dropping the
    // in-place mutation of arr[i] is safe: no later iteration reads it
    // (mn is fixed and arr[mn] is never modified, since i != mn).
    scanf("%d %d", &n, &k);
    for (int i = 1; i <= n; ++i) {
        scanf("%d", &arr[i]);
    }
    int mn = min_element(arr + 1, arr + n + 1) - arr;  // index of the minimum
    int ans = 0;
    for (int i = 1; i <= n; ++i) {
        if (i == mn) {
            continue;
        }
        // a_i >= 1 per the problem constraints, so arr[mn] > 0 and the
        // division below is well-defined (the original loop would not
        // terminate on arr[mn] == 0 either).
        if (arr[i] + arr[mn] <= k) {
            ans += (k - arr[i]) / arr[mn];
        }
    }
    printf("%d\n", ans);
}
int main() {
    // Multi-test driver: read the number of test cases, then run each one.
    int testCount;
    scanf("%d", &testCount);
    for (int i = 0; i < testCount; ++i) {
        solve();
    }
    return 0;
}
Div2B by nkamzabek
#include <bits/stdc++.h>
#define len(v) ((int)((v).size()))
#define all(v) (v).begin(), (v).end()
#define rall(v) (v).rbegin(), (v).rend()
#define chmax(x, v) x = max((x), (v))
#define chmin(x, v) x = min((x), (v))
using namespace std;
using ll = long long;
void solve() {
    // Div2B: split elements into two groups (printed as 0/1) so that no
    // group contains a pair summing to tar.  Elements below tar/2 go to
    // group 0, elements above go to group 1, and exact halves of an even
    // tar are alternated between the two groups.
    int n, tar;
    cin >> n >> tar;
    int halfSeen = 0;  // how many elements equal to tar/2 were seen so far
    for (int i = 0; i < n; ++i) {
        int x;
        cin >> x;
        int color;
        if (tar % 2 == 0 && 2 * x == tar) {
            // alternate the "exact half" elements between groups
            color = halfSeen & 1;
            ++halfSeen;
        } else if (2 * x < tar) {
            color = 0;
        } else {
            color = 1;
        }
        cout << color << " \n"[i == n - 1];
    }
}
int main() {
    // Fast iostream setup, then run every test case.
    ios::sync_with_stdio(false), cin.tie(0);
    int t;
    cin >> t;
    while (t-- > 0) {
        solve();
    }
}
Div2C/Div1A by nkamzabek
// chrono::system_clock::now().time_since_epoch().count()
#include<bits/stdc++.h>
#define pb push_back
#define eb emplace_back
#define mp make_pair
#define fi first
#define se second
#define all(x) (x).begin(), (x).end()
#define debug(x) cerr << #x << " = " << x << endl;
using namespace std;
typedef long long ll;
typedef pair<int, int> pii;
const int MAXN = (int)3e5 + 5;
int f[MAXN], last[MAXN], arr[MAXN], ans[MAXN];
int n;
void solve() {
    // Div2C: for every value, f[val] ends up as the largest gap between
    // consecutive occurrences of val (including the array borders); val is
    // then present in every window of length >= f[val].  ans[len] is the
    // smallest such value for each window length, filled as a suffix.
    scanf("%d", &n);
    fill(f + 1, f + n + 1, 0);
    fill(last + 1, last + n + 1, 0);
    fill(ans + 1, ans + n + 1, -1);
    for (int pos = 1; pos <= n; ++pos) {
        scanf("%d", &arr[pos]);
    }
    // Widest gap between consecutive occurrences of each value.
    for (int pos = 1; pos <= n; ++pos) {
        int val = arr[pos];
        f[val] = max(f[val], pos - last[val]);
        last[val] = pos;
    }
    for (int val = 1; val <= n; ++val) {
        // Account for the gap between the last occurrence and the array end.
        f[val] = max(f[val], n + 1 - last[val]);
        // Values are iterated in increasing order, so once a length is
        // claimed it already holds a smaller (or equal) value -- stop there.
        int len = f[val];
        while (len <= n && ans[len] == -1) {
            ans[len] = val;
            ++len;
        }
    }
    for (int pos = 1; pos <= n; ++pos) {
        printf("%d%c", ans[pos], " \n"[pos == n]);
    }
}
int main() {
    // Read the test-case count and dispatch.
    int cases;
    scanf("%d", &cases);
    for (; cases > 0; --cases) {
        solve();
    }
    return 0;
}
Div2D/Div1B by nkamzabek
// chrono::system_clock::now().time_since_epoch().count()
#include<bits/stdc++.h>
#define pb push_back
#define eb emplace_back
#define mp make_pair
#define fi first
#define se second
#define all(x) (x).begin(), (x).end()
#define debug(x) cerr << #x << " = " << x << endl;
using namespace std;
typedef long long ll;
typedef pair<int, int> pii;
const int MAXN = (int)1e4 + 5;
vector<array<int, 3>> ans;
int arr[MAXN];
int n;
void go(int x, int y, int z) {
    // Apply and record one operation: move x*z units from pile x to pile y.
    int moved = x * z;
    arr[x] -= moved;
    arr[y] += moved;
    ans.push_back({x, y, z});
}
void solve() {
    // Div2D/Div1B: equalize all piles using at most 3n operations.
    // Phase 1 drains piles 2..n into pile 1 (topping each up to a multiple
    // of its index first, paid from pile 1); phase 2 redistributes the
    // per-pile target back out.
    scanf("%d", &n);
    for (int i = 1; i <= n; ++i) {
        scanf("%d", &arr[i]);
    }
    ans.clear();
    int total = accumulate(arr + 1, arr + n + 1, 0);
    if (total % n != 0) {
        // The sum is invariant under operations, so it must split evenly.
        puts("-1");
        return;
    }
    int target = total / n;
    // Phase 1: make arr[i] divisible by i, then move it all to pile 1.
    for (int i = 2; i <= n; ++i) {
        int rem = arr[i] % i;
        if (rem != 0) {
            go(1, i, i - rem);  // top-up is affordable: a_1 >= i-1 here
        }
        go(i, 1, arr[i] / i);
    }
    // Phase 2: hand the target amount back to every other pile.
    for (int i = 2; i <= n; ++i) {
        go(1, i, target);
    }
    for (int i = 1; i <= n; ++i) {
        assert(arr[i] == target);
    }
    assert((int)ans.size() <= 3 * n);
    printf("%d\n", (int)ans.size());
    for (auto &op : ans) {
        printf("%d %d %d\n", op[0], op[1], op[2]);
    }
}
int main() {
    // Standard multi-test harness.
    int rounds;
    scanf("%d", &rounds);
    while (rounds > 0) {
        solve();
        --rounds;
    }
    return 0;
}
Div2E/Div1C by DimmyT
#include <bits/stdc++.h>
#define mp make_pair
#define pb push_back
#define f first
#define s second
#define ll long long
#define forn(i, a, b) for(int i = (a); i <= (b); ++i)
#define forev(i, b, a) for(int i = (b); i >= (a); --i)
#define VAR(v, i) __typeof( i) v=(i)
#define forit(i, c) for(VAR(i, (c).begin()); i != (c).end(); ++i)
#define all(x) (x).begin(), (x).end()
#define sz(x) ((int)(x).size())
#define file(s) freopen(s".in","r",stdin); freopen(s".out","w",stdout);
using namespace std;
const int maxn = (int)5e6 + 100;
const int maxm = (int)1e6 + 100;
const int mod = (int)1e9 + 7;
const int P = (int) 1e6 + 7;
const double pi = acos(-1.0);
#define inf mod
typedef long double ld;
typedef pair<int, int> pii;
typedef pair<ll, ll> pll;
typedef vector<int> vi;
typedef vector<ll> Vll;
typedef vector<pair<int, int> > vpii;
typedef vector<pair<ll, ll> > vpll;
int n, t[2][maxn], id = 1;
ll dp[2][30];
vi g[maxn];
void add(int x, int pos){
int v = 0;
forev(i, 29, 0){
int bit = ((x >> i) & 1);
if(!t[bit][v]) t[bit][v] = id++;
v = t[bit][v];
g[v].pb(pos);
}
}
void go(int v, int b = 29){
int l = t[0][v], r = t[1][v];
if(l) go(l, b - 1);
if(r) go(r, b - 1);
if(!l || !r) return;
ll res = 0;
int ptr = 0;
for(auto x : g[l]){
while(ptr < sz(g[r]) && g[r][ptr] < x) ptr++;
res += ptr;
}
dp[0][b] += res;
dp[1][b] += sz(g[l]) * 1ll * sz(g[r]) - res;
}
void solve(){
scanf("%d", &n);
forn(i, 1, n){
int x;
scanf("%d", &x);
add(x, i);
}
go(0);
ll inv = 0;
int res = 0;
forn(i, 0, 29){
inv += min(dp[0][i], dp[1][i]);
if(dp[1][i] < dp[0][i])
res += (1 << i);
}
printf("%lld %d", inv, res);
}
int main () {
    // Single test case (the multi-test scanf is intentionally disabled).
    int t = 1;
    //scanf("%d", &t);
    for (; t > 0; --t) {
        solve();
    }
}
Div2F/Div1D by DimmyT
// chrono::system_clock::now().time_since_epoch().count()
#include<bits/stdc++.h>
#define pb push_back
#define eb emplace_back
#define mp make_pair
#define fi first
#define se second
#define all(x) (x).begin(), (x).end()
#define debug(x) cerr << #x << " = " << x << endl;
using namespace std;
typedef long long ll;
typedef pair<int, int> pii;
const int MAXN = (int)5e5 + 5;
const int MAXM = (int)3e5 + 5;
const int MAXQ = (int)5e5 + 5;
pii e[MAXM], que[MAXQ], t[MAXN << 2];
vector<int> adj[MAXN];
int tin[MAXN], tout[MAXN], timer;
int par[MAXN], arr[MAXN];
bool del[MAXM];
int n, m, q;
int getPar(int x) {
    // DSU find with full path compression, written iteratively: the
    // recursive version can overflow the stack, since uni() keeps growing n
    // with chain-like internal nodes (up to roughly 2x the original n).
    // Final parent state and return value match the recursive original.
    int root = x;
    while (par[root] != root) {
        root = par[root];
    }
    // Second pass: point every node on the path directly at the root.
    while (par[x] != root) {
        int nxt = par[x];
        par[x] = root;
        x = nxt;
    }
    return root;
}
void uni(int a, int b) {
    // Union by creating a brand-new node that becomes the parent of both
    // roots; adj[] thus records a binary "merge tree" over the components.
    int ra = getPar(a);
    int rb = getPar(b);
    if (ra == rb) {
        return;  // already in the same component
    }
    int fresh = ++n;  // allocate a new internal tree node
    par[fresh] = fresh;
    par[ra] = fresh;
    par[rb] = fresh;
    adj[fresh].push_back(ra);
    adj[fresh].push_back(rb);
}
void dfs(int v) {
tin[v] = ++timer;
for (int to : adj[v]) {
dfs(to);
}
tout[v] = timer;
}
pii segMax(int v, int tl, int tr, int l, int r) {
    // Range-maximum query over [l, r]; entries are (value, position) pairs,
    // so max() also yields the position of the maximum.  (0, 0) is the
    // neutral element returned for empty/disjoint ranges.
    if (l > r || r < tl || tr < l) {
        return std::make_pair(0, 0);
    }
    if (l <= tl && tr <= r) {
        return t[v];  // node fully covered by the query
    }
    int mid = tl + (tr - tl) / 2;
    pii fromLeft = segMax(2 * v, tl, mid, l, r);
    pii fromRight = segMax(2 * v + 1, mid + 1, tr, l, r);
    return std::max(fromLeft, fromRight);
}
void updPos(int v, int tl, int tr, int p, pii x) {
    // Point assignment t[leaf p] = x, recomputing maxima on the way back up.
    if (tl == tr) {
        t[v] = x;
        return;
    }
    int mid = tl + (tr - tl) / 2;
    int leftChild = 2 * v;
    int rightChild = 2 * v + 1;
    if (p <= mid) {
        updPos(leftChild, tl, mid, p, x);
    } else {
        updPos(rightChild, mid + 1, tr, p, x);
    }
    t[v] = std::max(t[leftChild], t[rightChild]);
}
void solve() {
// Div2F/Div1D "Graph and Queries", solved offline:
//   type-1 query v: report the max value reachable from v, then zero it;
//   type-2 query b: delete edge b.
// Plan: build components from never-deleted edges, then replay queries
// BACKWARDS so deletions become insertions.  Each uni() creates a fresh
// internal node, yielding a merge forest in which "component of v at query
// time i" == "subtree of the node getPar(v) had during the backward sweep".
// An Euler tour turns subtrees into ranges, and a (value, position)
// segment tree answers max-and-erase per type-1 query.
scanf("%d %d %d", &n, &m, &q);
for (int i = 1; i <= n; ++i) {
scanf("%d", &arr[i]);
}
for (int i = 1; i <= m; ++i) {
int u, v;
scanf("%d %d", &u, &v);
e[i] = mp(u, v);
}
for (int i = 1; i <= q; ++i) {
int a, b;
scanf("%d %d", &a, &b);
que[i] = mp(a, b);
if (a == 2) {
// Mark edge b as deleted at some point during the query sequence.
del[b] = 1;
}
}
for (int i = 1; i <= n; ++i) {
par[i] = i;
}
// Final state of the graph: only edges that are never deleted.
for (int i = 1; i <= m; ++i) {
if (!del[i]) {
uni(e[i].fi, e[i].se);
}
}
// Backward sweep: a type-2 "delete" becomes an insertion; for a type-1
// query, remember which merge-tree node represents v's component then.
for (int i = q; i > 0; --i) {
int tp = que[i].fi;
if (tp == 2) {
int id = que[i].se;
uni(e[id].fi, e[id].se);
}
else {
que[i].se = getPar(que[i].se);
}
}
// Euler-tour every tree of the forest.  NOTE: n has grown past the
// original vertex count -- it now includes all internal merge nodes.
for (int i = 1; i <= n; ++i) {
if (getPar(i) == i) {
dfs(i);
}
}
// Seed the segment tree: leaf tin[i] holds (value, Euler position).
// Internal merge nodes carry arr[i] == 0 and never win a max.
for (int i = 1; i <= n; ++i) {
updPos(1, 1, n, tin[i], mp(arr[i], tin[i]));
}
// Forward pass: answer the type-1 queries against the subtree ranges.
for (int i = 1; i <= q; ++i) {
int tp = que[i].fi;
if (tp == 1) {
int v = que[i].se;
pii tmp = segMax(1, 1, n, tin[v], tout[v]);
if (tmp.fi == 0) {
printf("0\n");
}
else {
printf("%d\n", tmp.fi);
// Zero out the reported vertex, as the problem requires.
updPos(1, 1, n, tmp.se, mp(0, 0));
}
}
}
}
int main() {
    // This problem has a single test case.
    solve();
    return 0;
}
Div1E by bthero
// chrono::system_clock::now().time_since_epoch().count()
#include<bits/stdc++.h>
#define __DBG 1
#define pb push_back
#define eb emplace_back
#define mp make_pair
#define fi first
#define se second
#define all(x) (x).begin(), (x).end()
#define debug(x) if (__DBG) cerr << #x << " = " << x << endl;
using namespace std;
typedef long long ll;
typedef pair<int, int> pii;
typedef pair<ll, ll> pll;
const int MAXN = (int)5e5 + 5;
int arr[MAXN];
int n;
void solve() {
// Div1E.  NOTE(review): the editorial itself describes this solution as
// "very complicated and error-prone", so the code is left untouched and
// only annotated.  High-level state, as far as the visible code shows:
//   zero    - count of positions fully resolved so far (each adds to ans);
//   one     - set of disjoint index intervals [fi, se] of still-viable
//             candidates, stored in a shifted coordinate system;
//   shifter/sign - lazy affine map between stored indices and actual
//             values: actual = shifter + sign * idx (sign is +1 or -1),
//             updated in O(1) per element instead of rewriting the set;
//   two     - a pending "x/2 was hit exactly" candidate, or -1.
// The final answer printed is 2*n - ans.
scanf("%d", &n);
for (int i = 1; i <= n; ++i) {
scanf("%d", &arr[i]);
}
int zero = 0, two = -1;
set<pll> one;
ll shifter = 0;
int sign = 1;
for (int i = 1; i <= n; ++i) {
int x = arr[i];
if (two != -1) {
// A pending exact-half candidate resolves here: bank two positions
// and restart the interval set relative to the current element.
zero += 2;
sign = 1;
shifter = 0;
one.clear();
if (two < x) {
one.insert(mp(x - two, x - two));
}
}
else if (!one.empty()) {
zero++;
// Prune candidates whose mapped value would reach or exceed x,
// honouring the current orientation of the affine map.
if (sign == -1) {
// shifter - idx >= x
// shifter - x >= idx
// idx <= shifter - x
ll lim = shifter - x;
while (!one.empty() && one.begin() -> se <= lim) {
one.erase(one.begin());
}
if (!one.empty() && one.begin() -> fi <= lim) {
// Partially clip the first interval at the limit.
pll it = mp(lim + 1, one.begin() -> se);
one.erase(one.begin());
assert(it.fi <= it.se);
one.insert(it);
}
}
else {
// shifter + idx >= x
// idx >= x - shifter
ll lim = x - shifter;
while (!one.empty() && one.rbegin() -> fi >= lim) {
one.erase(--one.end());
}
if (!one.empty() && one.rbegin() -> se >= lim) {
// Partially clip the last interval at the limit.
pll it = mp(one.rbegin() -> fi, lim - 1);
one.erase(--one.end());
assert(it.fi <= it.se);
one.insert(it);
}
}
// Reflect the coordinate system around x in O(1).
shifter = x - shifter;
sign *= -1;
}
else {
// Empty candidate set: reseed it with indices 1..lim, where lim is
// bounded by both the previous element and the current one.
sign = -1;
shifter = x;
int lim = min(arr[i - 1] - 1, x - 1);
if (1 <= lim) {
one.insert(mp(1, lim));
}
}
// consider x/2!
two = -1;
if (x % 2 == 0) {
int y = x / 2;
// Map the value y back into stored-index space; sign is +-1 so the
// division is exact up to the intended direction.
ll val = (y - shifter) / sign;
auto it = one.lower_bound(mp(val + 1, (ll)-1e18));
bool found = 0;
if (it != one.begin()) {
--it;
// Check whether val falls inside the preceding interval.
if (it -> fi <= val && val <= it -> se) {
found = 1;
}
}
if (found) {
two = y;
}
else {
one.insert(mp(val, val));
}
}
}
// Final accounting: a pending half-candidate is worth 2, a non-empty
// candidate set is worth 1.
int ans;
if (two != -1) {
ans = zero + 2;
}
else if (!one.empty()) {
ans = zero + 1;
}
else {
ans = zero;
}
printf("%d\n", 2 * n - ans);
}
int main() {
    // Multi-test driver.
    int cases;
    scanf("%d", &cases);
    for (int i = 0; i < cases; ++i) {
        solve();
    }
    return 0;
}
#include <bits/stdc++.h>
#define int long long
#define len(v) ((int)((v).size()))
#define all(v) (v).begin(), (v).end()
#define rall(v) (v).rbegin(), (v).rend()
#define chmax(x, v) x = max((x), (v))
#define chmin(x, v) x = min((x), (v))
using namespace std;
using pii = pair<int, int>;
// Set of candidate values surviving repeated reflections x -> balance - x.
// Representation: one contiguous interval `seg` (kept in ACTUAL coordinates)
// plus discrete `points` kept in STORED coordinates behind a lazy affine map
// actual = cbase + cdir * stored (cdir is +-1), so rotate() is O(1) instead
// of rewriting the whole set.  Note: `int` is #defined to long long above.
struct EndSet {
set<int> points;
// Empty interval by convention: first > second.
pii seg = {1, 0};
// Lazy transform: identity initially.
int cbase = 0, cdir = 1;
// Reset to the full interval [1, mx] with no discrete points.
void setFree(int mx) {
points.clear(), cbase = 0, cdir = 1;
seg = {1, mx};
}
// Reset to the single value x (empty interval + one point).
void setPush(int x) {
setFree(0);
points.insert(x);
}
// Reflect every contained value: v -> balance - v, in O(1) via the lazy map.
void rotate(int balance) {
cbase = balance - cbase;
cdir = -cdir;
seg = {balance - seg.second, balance - seg.first};
}
// Stored coordinate -> actual value.
int tr(int x) {
return cbase + cdir*x;
}
// Actual value -> stored coordinate (inverse of tr, since cdir^2 == 1).
int anti(int x) {
return cdir * (x - cbase);
}
// Membership test against both the point set and the interval.
bool in(int x) {
if (points.count(anti(x)))
return true;
return (seg.first <= x && x <= seg.second);
}
// Add the actual value x as a discrete point.
void push(int x) {
points.insert(anti(x));
}
// Remove every contained value greater than x (clamp the set to <= x).
void popBelow(int x) {
seg.second = min(seg.second, x);
// The extreme stored point in actual-value order depends on cdir.
auto nextIt = [&] {
return (cdir == 1 ? prev(points.end()) : points.begin());
};
while (!points.empty() && tr(*nextIt()) > x)
points.erase(nextIt());
}
// True when neither the point set nor the interval holds any value.
bool empty() {
return points.empty() && seg.first > seg.second;
}
};
void solve() {
// Alternative Div1E solution built on EndSet.  curAns accumulates the
// per-element savings (0, 1, or 2); the printed answer is 2*n - curAns.
// NOTE(review): the exact invariant maintained in `util` mirrors the
// author's interval/point solution above -- annotated from the visible
// operations only.
int n; cin >> n;
EndSet util;
int curAns = 0;
for (int i = 0; i < n; ++i) {
int x; cin >> x;
// Discard candidates that no longer fit under the new element.
util.popBelow(x-1);
if (x % 2 == 0 && util.in(x/2)) {
// Exact half is reachable: best case, worth 2, and the candidate
// set collapses to that single value.
curAns += 2;
util.setPush(x/2);
} else if (util.empty()) {
// No surviving candidates: reseed from the current element alone.
if (x % 2 == 0)
curAns += 1, util.setPush(x/2);
else
util.setFree(x-1);
} else {
// Candidates survive: worth 1; an even x also contributes its half
// as an extra discrete candidate.
curAns += 1;
if (x % 2 == 0)
util.push(x/2);
}
// Reflect all candidates around x for the next iteration.
util.rotate(x);
}
cout << 2*n - curAns << "\n";
}
signed main() {
    // `signed main` because `int` is #defined to long long in this file.
    ios::sync_with_stdio(false), cin.tie(0);
    int t;
    cin >> t;
    while (t-- > 0) {
        solve();
    }
}
Div1F by bthero
...
// chrono::system_clock::now().time_since_epoch().count()
#include<bits/stdc++.h>
#define pb push_back
#define eb emplace_back
#define mp make_pair
#define fi first
#define se second
#define all(x) (x).begin(), (x).end()
#define debug(x) cerr << #x << " = " << x << endl;
using namespace std;
typedef long long ll;
typedef pair<int, int> pii;
typedef vector<int> vi;
typedef vector<vi> vvi;
const int dx[] = {0, 1, 0, -1};
const int dy[] = {1, 0, -1, 0};
const char dc[] = {'R', 'D', 'L', 'U'};
const int INF = (int)1e6;
const int MAXN = (int)3e5 + 5;
// Dinic max-flow, plus bookkeeping (cap[0]/cap[1]) used by the caller to
// enforce mandatory ("lower bound") flow through auxiliary terminals
// newS/newT: addEdgeLim only records a required unit; solve() later adds
// the matching newS->v / u->newT edges and checks they are saturated.
namespace {
struct Edge {
// v = tail, to = head, f = current flow, c = capacity.
int v, to, f, c;
Edge() {
v = to = f = c = 0;
}
Edge(int v, int to, int c) : v(v), to(to), c(c) {
f = 0;
}
};
vector<Edge> e;            // edge list; e[i^1] is the reverse of e[i]
vector<int> adj[MAXN];     // per-vertex indices into e
int ptr[MAXN], d[MAXN], q[MAXN];  // Dinic current-arc, BFS levels, BFS queue
int S, T, newS, newT, V;   // real and auxiliary terminals, vertex count
int cap[2][MAXN];          // accumulated mandatory in/out units per vertex
// Reset all flow-network state for a fresh test case.
void prep() {
e.clear();
for (int i = 0; i < V; ++i) {
adj[i].clear();
ptr[i] = d[i] = q[i] = 0;
cap[0][i] = cap[1][i] = 0;
}
}
// Add directed edge u->v with capacity c plus its zero-capacity reverse.
void addEdge(int u, int v, int c) {
//printf("E %d %d %d\n", u, v, c);
adj[u].pb((int)e.size());
e.pb(Edge(u, v, c));
adj[v].pb((int)e.size());
e.pb(Edge(v, u, 0));
}
// Record a MANDATORY unit u->v; no edge is added here -- the caller later
// wires newS->v and u->newT with these accumulated demands.
void addEdgeLim(int u, int v) {
//printf("F %d %d\n", u, v);
++cap[0][v];
++cap[1][u];
}
// BFS phase: build the level graph from newS; true if newT is reachable.
bool bfs() {
fill(d, d + V, -1);
d[newS] = 0;
int l = 0, r = 0;
q[r++] = newS;
while (l < r) {
int v = q[l++];
for (int id : adj[v]) {
if (e[id].f < e[id].c) {
int to = e[id].to;
if (d[to] == -1) {
d[to] = d[v] + 1;
q[r++] = to;
}
}
}
}
return d[newT] != -1;
}
// DFS phase: push blocking flow along level-increasing residual arcs,
// using ptr[] as the current-arc optimization.
int dfs(int v, int flow = INF) {
if (!flow || v == newT) {
return flow;
}
int sum = 0;
for (; ptr[v] < (int)adj[v].size(); ++ptr[v]) {
int id = adj[v][ptr[v]];
int to = e[id].to;
int can = e[id].c - e[id].f;
if (d[to] != d[v] + 1 || can == 0) {
continue;
}
int pushed = dfs(to, min(flow, can));
if (pushed > 0) {
e[id].f += pushed;
e[id ^ 1].f -= pushed;  // paired reverse edge
sum += pushed;
flow -= pushed;
if (flow == 0) {
return sum;
}
}
}
return sum;
}
// Standard Dinic loop: alternate BFS level builds and blocking flows.
int maxFlow() {
int ret = 0;
while (bfs()) {
fill(ptr, ptr + V, 0);
while (int pushed = dfs(newS)) {
ret += pushed;
}
}
return ret;
}
}
vvi arr, follow;
int n, m;
bool inside(int x, int y) {
    // Grid-bounds check for an n x m board.
    return x >= 0 && y >= 0 && x < n && y < m;
}
int id(int x, int y) {
    // Row-major flattening of cell (x, y) into a single vertex index.
    return y + x * m;
}
void solve() {
// Div1F.  Each cell either "follows" a strictly smaller neighbour
// (follow[i][j] = direction) or is "important" (no smaller neighbour) and
// must be paired with an equal-valued neighbour.  Equal-value pairs are a
// bipartite matching on the checkerboard colouring; important cells carry
// a MANDATORY matched edge, enforced as flow lower bounds via newS/newT.
scanf("%d %d", &n, &m);
arr = follow = vvi(n, vi(m, -1));
// Vertex layout: cells 0..n*m-1, then S, T, newS, newT.
S = n * m;
T = S + 1;
newS = T + 1;
newT = newS + 1;
V = newT + 1;
prep();
for (int i = 0; i < n; ++i) {
for (int j = 0; j < m; ++j) {
scanf("%d", &arr[i][j]);
}
}
for (int i = 0; i < n; ++i) {
for (int j = 0; j < m; ++j) {
for (int dir = 0; dir < 4; ++dir) {
int ni = i + dx[dir], nj = j + dy[dir];
if (inside(ni, nj)) {
if (arr[ni][nj] < arr[i][j]) {
// A strictly smaller neighbour exists: this cell can follow it.
follow[i][j] = dir;
}
else if ((i + j) % 2 == 0 && arr[ni][nj] == arr[i][j]) {
// Matching edge between equal cells, added once (from the
// even-coloured endpoint only).
addEdge(id(i, j), id(ni, nj), 1);
}
}
}
if (follow[i][j] == -1) {
// important vertex
// Must be matched: register a mandatory terminal unit.
if ((i + j) % 2) {
addEdgeLim(id(i, j), T);
}
else {
addEdgeLim(S, id(i, j));
}
}
else {
// May optionally be matched: ordinary capacity-1 terminal edge.
if ((i + j) % 2) {
addEdge(id(i, j), T, 1);
}
else {
addEdge(S, id(i, j), 1);
}
}
}
}
// Wire the accumulated lower-bound demands through newS/newT.
for (int i = 0; i <= T; ++i) {
if (cap[0][i] > 0) {
addEdge(newS, i, cap[0][i]);
}
if (cap[1][i] > 0) {
addEdge(i, newT, cap[1][i]);
}
}
// Close the S-T circulation so mandatory units can route freely.
addEdge(T, S, INF);
maxFlow();
// Feasible iff every newS edge (i.e. every mandatory unit) is saturated.
for (int id : adj[newS]) {
if (e[id].f != e[id].c) {
printf("NO\n");
return;
}
}
vvi ansv, ansc;
ansv = ansc = vvi(n, vi(m));
// Default output: followers drop to their smaller neighbour's value.
for (int i = 0; i < n; ++i) {
for (int j = 0; j < m; ++j) {
int dir = follow[i][j];
if (dir != -1) {
int ni = i + dx[dir], nj = j + dy[dir];
ansv[i][j] = arr[i][j] - arr[ni][nj];
ansc[i][j] = dir;
}
}
}
// Overwrite matched pairs: saturated unit edges between two cell vertices.
for (Edge it : e) {
int v = it.v, to = it.to;
if (max(v, to) < n * m && it.f == it.c && it.c == 1) {
int ax = v / m, ay = v % m;
int bx = to / m, by = to % m;
ansv[ax][ay] = arr[ax][ay] - 1;
ansv[bx][by] = 1;
// Find the direction connecting the two cells (and its opposite).
for (int dir = 0; dir < 4; ++dir) {
if (mp(ax + dx[dir], ay + dy[dir]) == mp(bx, by)) {
ansc[ax][ay] = dir;
ansc[bx][by] = (dir + 2) % 4;
}
}
}
}
printf("YES\n");
for (int i = 0; i < n; ++i) {
for (int j = 0; j < m; ++j) {
printf("%d%c", ansv[i][j], " \n"[j == m - 1]);
}
}
for (int i = 0; i < n; ++i) {
for (int j = 0; j < m; ++j) {
printf("%c%c", dc[ansc[i][j]], " \n"[j == m - 1]);
}
}
}
int main() {
    // Multi-test harness.
    int total;
    scanf("%d", &total);
    for (int t = 0; t < total; ++t) {
        solve();
    }
    return 0;
}
Auto comment: topic has been updated by bthero (previous revision, new revision, compare).
A really great tutorial. Also one of the fastest tutorials.
Video Editorials: Problem D: Make Them Equal Problem E: XOR Inverse
@namanbansal013 Thanks bro.Keep it up.
IN div 2 B solution:: There is a mistake ::-- f(X)=f(Y)=0 I think this should be f(X)=f(Z)=0.
Fixed, thanks.
thanks for instant editorial
it was posted 38 hours ago !
These dudes are time travelers bro, just in case you were not aware of this already
D was really a good one... Thank you for such an amazing round :)
Yep agreed 100%, I was thinking in terms of a linear combination of factors and stuff, later realized how amazingly they exploited the fact that we have 3 * n operations!
Thanks for the super fast editorial!
Can anyone explain C i still don't get it.
Okay, here are some observations that might help you.
I think these two observations were enough for me to reach at solution.You can try too or I can explain if you are not able to reach to algorithm.
Please elaborate more. It will be very helpful.
Okay so here is what we can conclude from above observations.
Here is my code — Submission.
I hope it helped.
yes it is a very nice explanation.
if x has to be the minimum number in all segments of length k then in input array between every occurrence of x and its previous occurrence there should not be another segment of length >=k. For instance in {1,2,2,1,1} for k=2 and x=1 we have a segment {2,2} of length two and hence not all segment s of length 2 have '1' in common. Note: We have to consider index -1 and n+1 also as value equal to chosen x.(segment from beginning and end);
We can calculate this by maintaining a previous index for all the values we see traversing through array and updating the max distance between two same values. Then we can sort and find the min for every k.
Thanks for the quick editorial!
For div2b explanation, is the part "It is clear that f(X) = f(Y) = 0" a typo (since f(Z)=0, not f(Y))?
Loved Div2C. Purely algorithmic and appropriate difficulty.
.
Okay, so let's make an observation if, for each distinct element, I know it's indices (could be stored by using a map to a set/vector), so let's say my current element is 'a' and it's distributed in the following manner in my array
x x x a x x x x a x x a x a x x x
(where x is an arbitrary element (x!=a) and a is our desired element)
so now let's compute the maximum difference between consecutive indices and the distance of first 'a' from the start and the distance of last 'a' from the end.
The consecutive differences are
[4 (dist. from start), 5, 3, 2, 4 (dist. from end)]
now the max diff from the above array is 5, so now for every sliding window of length 5 & more, I can ensure that at least one 'a' is present in each of the sliding window. (Can easily be visualized)
Thus, now as in an ordered_map, it's sorted according to its keys, so for each element, when I calculate the max diff for the indices of this element (let's say it's y), I can mark the answer for all length from y to n, as my current element. In the above loop (used for marking my answer), I would iterate only till a point where my answer isn't marked (as whenever I encounter a marked index, all the indices from that index to n would already be marked & as it's a map, it would definitely be marked with a smaller element!), and thus after that, I break the loop.
For all the indices that aren't marked, the answer would be -1.
.
Isn't the solution for 1B missing something ? The statement says that after each operation, all elements of the array should be non-negative. Edit: I now see that $$$a_1 \ge i-1$$$ at the $$$i$$$-th iteration.
I think that this solution takes that into account. All array elements start greater or equal to 1. If we start accumulating the sum at $$$a[2]$$$, then we are guaranteed to at least add 2 to $$$a[1]$$$, meaning $$$a[1] \geq 2$$$. Then even if $$$a[3]\mod 3 \equiv 1$$$, it is guaranteed that we are able to add 2 to bring it to a multiple of 3, and turn $$$a[3]$$$ into 0. In this way the entire sum can be moved into $$$a[1]$$$.
Dimmmy must've used his time travel capabilities again to bring such a fast editorial!
Hehe... You're pretty smart
Div2D/Div1B
"Otherwise, we have to make it divisible by transferring i−(ai mod i) from a1 to ai. Note that this operation does not break a condition on non-negativity because all ai are initially positive."
Why does this not break the condition? How are we guaranteed that a1 >= i-(ai mod i)?
EDIT: just figured it out after posting this comment. Since we iterate in increasing order of i, each element we've seen will contribute at least 1, so by the time we are at index i, a1 will have at least (i-1).
Imagine you are at position pos and have successfully accumulated all 2..pos-1 piles into the pile at position 1. Since v[i] for all i is at least 1 you'll have at least pos-1 value in the first position. Notice that v[pos]%pos can be at max at pos-1. (although pos-(v[i]%pos) can be equal to pos but that case is handled separately). Hence if we iterate from left to right it'll always be possible to do this.
Basically at each 'i' , subtract at max (i-1) from 1, and add atleasr 'i' at 1 , hence a[0]+= (1 or more for each i). Then as robinz62 edit.
i think that In Div2E/Div1C editorial
let's construct a trie on our array integers
— "trie" is a typo and "tree" is the intended word. https://en.wikipedia.org/wiki/Trie
Trie is amazing!
Save this for the "what data structure should I study to reach blue in CF" questions :)
Good
Please can you tell me good source to study trie data structure ?
Well, a trie is also a tree lol.
That was too fast. Thanks!
Solved C quickly but failed on B. I think it a rare condition in this round.
The elegance of the solutions made me happy.
Div2C is a very elegant solution
In Div2D/Div1B, is it possible to use a ternary search to find the value we want all the elements in the list to assume?
That value is already known, say it's x. After every move, array sum will remain constant so, in the end
sum = nx
, so x = sum / n. That's why sum % n == 0
is a necessary condition. Indeed. Now I see it, thanks!
"Otherwise, there always exists a solution which uses no more than 3n queries." What is the solution of [0,1,1,1,2], please? sum=5,divisible by n=5.
Read statements carefully: "You are given an array a consisting of n positive integers"
Got it, thanks!
Div2D
Why can't we just take $$$\frac{a_i}{i} * i$$$ from each $$$2 \leq i \leq n$$$, making values equal to $$$a_i := a_i\mod i$$$ and transfer it to $$$a_1$$$, using exactly $$$n - 1$$$ operations, then transfer needed values back from $$$a_1$$$ to all elements, using another $$$n-1$$$ operations?
I've desperately tried to find some counter-exapmle during last 1.5 hours, but I couldn't.
UPD: Got it, thank you all
It's wrong if there is i and a[i] % i > sum / n
Can you try to find such example?
n = 4, a = [3 1 1 3]. x in every step needs to be more than 0.
Got it, thank you =)
Have a huge number of ones so that the average is low and one approriate value high
1 1 1 1 9 1 1 1
$$$a_i$$$ can become more than the target value in this case but still less $$$i$$$
1 1 2 2 3 3
First phase wont change any values, so you dont have enough in a[1] for second phase
Can anyone explain div2C problem in simpler way with some example
Consider the distance between 2 positions (j > i), as d = (j — i — 1).
For each number x that may be present in the array, calculate the following distances:
1- Between the first element of the array and the position of the first occurrence of x.
2- Between pairs of positions (j > i), such that a [i] = x and j is the position of the element equal to x closest to i on right.
3- Between the last element of the array and the position of the last occurrence of x.
Now take the largest of these values for each x value, consider this value equal to k.
The number x appears in all subsegments of size >= k.
then we have the answer :)
my code: https://mirror.codeforces.com/contest/1417/submission/93998385
Nice div1C
Also, thanks for strong pretests <3
UPD: I really don't understand this contests with 3 pretests.
UPD2: I actually understand why we don't have full tests. But when we have so weak pretests, contests are just BlindForces. Why don't remove all pretests at all in this case? At least participants will get same experience, IMO
RIP.
They do that when the problem allows many tests per test file to make the queue faster.
It is very cool, but they can at least make them stronger. Why in problems with several testcases they make pretests with only 1 testcase per pretest? BTW, my two problems failed because of typos, so stupid typos, that I am not sure how this passed event 3 pretests. That is the point in doing many-testcase input, if pretests are actually 1 test-case? I thought they do test-case input to speed up testing process.
Tutorial for Div2C is hard to understand. I solved Div2C, maybe the way the tutorial explains it, I am not sure.
So I think if somebody did not solve it, it is completly not understandable. What is meant by "we can update the suffix [some formular]..."?
For every array element x, they compute the minimum window size k which is guaranteed to contain x. If the minimum k contains x, then all window sizes greater than k also contain x. Then, they sort the array elements in increasing order. For every array element, they have computed a minimum k which it covers. So we try to update the suffix [k..n] with this array element. If we find some k' in the suffix [k..n] which is already updated, then we don't need to update any further because every element in the suffix [k', n] has already been updated by a smaller array element because we're iterating through the elements in increasing order.
How to update "the suffix", whatever it is?
We make an array which maps the window size k to the smallest number which occurs in all windows of size k. For example if the array size is 3, then we will have 3 window sizes. So, let the array(1-indexed) "K" be [-1, -1, -1]. Then, we sort the array given in the problem in non-decreasing order. Let's say the array given in the problem is [1, 2, 3](already sorted). If it wasn't, then sort it.Let's also assume that we have computed the smallest window size k for each of these elements. For 1, this window size is 3. For 2 this window size is 2. For 3 this window size is 3.
Now, we will update the suffixes of the "K" array while iterating through the array we sorted.
When iterating through the array we sorted, we will run into 1, 2, 3 in that order. For 1, the smallest window which works is 3. So, we will update each k >= 3 in the K array with 1. Now the K array is [-1, -1, 1].
Then we'll run into 2 while iterating. The best minimum window size which works is 2. So, we'll update the "suffix" with indices [2..3] in the K array with the value 2. But, when we try to update position 3, we see that it has already been updated. So, we can stop and we don't need to go any further. Now the "K" array is [-1, 2, 1].
Then we run into 3 while iterating. The smallest window which works is 3. So, we can update the suffix [3..3], but it has already been updated by a smaller value 1. So, we can stop.
Thanks!
Can someone tell me how I passed pretest and managed to get runtime error on test "1" during system testing?
here is my submission: 93984302
Usually this is undefined behaviour, some index out of bounds. But I cannot see in which line.
But I at least passed pretest(which had 4 tests). Very confused, hope I don't get ruined by some mysterious problem.
I compiled and run it on my computer, it is a out of bounds.
You could try to use compiler options which make your code fail in such cases.
nah I re-submitted and got AC, idk
That is the "undefined" in undefined behaviour. Sometimes it works, sometime it does not.
Hmm, very interesting. I guess I will prompt to static arrays more to avoid this kind of problem(maybe will help? idk).
Anyway, thank you for trying my code.
Why would using static arrays help?
It may sound rude and it is rude
But people who don't understand what UB is should not code in c++. It should literally be repeated on every c++ lesson
I will try to study what UB is. It is correct that I didn't take C++ lesson though.
Don't take my comment personally
It just happens again and again.
People create posts or comments and always ping Mike or contest authors regarding "It passes locally but fails with the same example in tests" or something like that. Though it is completely normal for their language Really painful.
Also it is not the only ub you can get. There are plenty of other options to run into it.
What is ub?
Undefined Behavior
Yes you did get AC from undefined behaviour, but you may get RE during systests. To avoid that, compile with
-D_GLIBCXX_DEBUG
. This will catch all out-of-bounds accesses (in STL containers), and you will therefore not get any UB for that reason at least. DimmyT asking for help
xD I don't know what to do... Maybe MikeMirzayanov can help? In every problem, pretests are prefix of all tests.
yes but I surely passed pretest so I don't know why...
Yeah I submitted the exact same code again and got accepted, MikeMirzayanov sorry to bother but please help me TAT
This contest made me purple for the first time :) Thanks for the good problems
On this contest I had big troubles with problem E. I used the idea with the mergesort + checking every bit of answer from last to first. It uses O(30 * n * logn) time, and with n = 3e5 it is approximately 1e8, is it really that slow for time limit = 2 seconds? If anyone can get AC with my code, please share my mistake. (94018657)
My solution was with almost the same idea as yours and also got TLE on test case 12, when I optimized my code it runned locally on 3 seconds for the worst test case.
However I think you should use
long long
for computing the amount of inversions. A great contest, but very weak pretests for B (Div 2).
Div2-B my code What's wrong in this I'm unable to figure it out. If anyone could help me. Initially it is assigning all the elements of array as '0' and putting current index(j) in map if it's not making 'T' with another previous element i<j, but when it founds a[i]+a[j]==T 'j' is assigned to 1.
What about the case when a[i] = T/2???
Testcases were very weak for Div2 C. I saw a lot of submissions where the test case 1 1 3 would fail, but somehow got accepted. I submitted my code at 1h 6 mins(all pretests passed). Went on ahead to try D, but at 1h 52 mins, I realised, that I've missed the case for n = 1, So I resubmitted it. Turns out they didn't have a basic case like n = 1. My ranking fell from expected 1200 to 1765. Please do something about the resubmission rule.
When you submit solution in the last min and net fucks you up and then you submit that after contest and it ACs. FML
Weak test cases for problem (Div. 2) A. A solution with 10^7 operations (in the worst case) in Java passed; it just has 3 test cases, which are also randomly made, I guess. Look at this solution. I know it's just problem A, but it still decreases the quality of the contest.
"Several unexpected Kuhn solutions passed for D1F. Could you please discuss your solutions in the comments and prove its correctness or provide any counter-examples. Author's solution uses flows with Dinic."
Don't worry, it is almost impossible to hack Kuhn (especially on those, specific graphs). Even if it is possible, it is VERY hard. Of course, I don't have any proof of my words, but I know it from my experience.
This is a strange question, but: how to solve this problem with Kuhn? I don't doubt its time efficiency, but I don't know how to find the maximum matching which saturates some given set of vertices without using circulations (which is the model solution).
Alright, so you need to find the matching that covers some set of vertices.
Using Kuhn in proper order, find any maximum matching that covers all vertices of the left part from the set, let's say that the set of covered vertices of the left part is $$$L$$$ (some vertices from the set + something else).
Using Kuhn in proper order, find any maximum matching that covers all vertices of the right part from the set. Similarly, define $$$R$$$.
The claim is: there is a maximum matching on the set of vertices $$$L \cup R$$$.
You can prove it using alternating paths.
And then you can just leave only those vertices and find the maximum matching.
Note: this algorithm also can be used for solving the maximum vertex-weighted matching where you have weights on both parts. I will leave it to you as an exercise.
That's really cool (though I need some time to make sure I understand why this is correct). Thank you!
which Kuhn's algorithm is this and what does it do? He has too many...
Oh, excuse me! In Russia, we call the 'Kuhn's algorithm' a DFS-like approach for maximum matching in the bipartite graph, which goes like that: for $$$v \in L$$$ in some order, run $$$dfs(v)$$$ (i.e. simplified and faster version of Ford-Fulkerson, which appears to be quite powerful).
Here is the Russian (unfortunately) page with code and description of this algorithm (you can use a translator if you really want): e-maxx..
Thank you!
So u use that claim just to cut down the number of vertices and edges? Or am i missing something?
Can you please explain how to find maximum weighted max matching? I know about fast Kuhn, but I always thought that it can’t be used for finding maximum weighted matching.
Another way (I didn't participate today, though, so I'm not 100% sure). We can model our subproblem (given a bipartite graph, find a matching covering a given subset of vertices) as a maximum-cost flow: connect a source with all vertices of one side of the bipartite graph with oriented edges with capacity 1 and cost 0 or 1 (1 if the vertex must be covered by the matching). We analogously connect all vertices of the other part of the graph with the sink. Now, if we need to cover $$$R$$$ vertices, we can easily see that we are looking for any flow with cost $$$\geq R$$$.
But running a maximum-cost flow algorithm naively is probably too slow. In order to cope with that, we need to understand how the simplest maximum-cost flow algorithm works on our network:
Hi, Can anyone please tell me the time complexity of my code for D2B, according to me, it is approximately O(N log N). But it gave TLE on the system tests, which shows that I have approximated the complexity incorrectly. Here is the link to my submission:
https://mirror.codeforces.com/contest/1417/submission/93976053
Thanks in advance!
If the input looks like $$$a = [x,x,x,...,y,y,y,...]$$$ and $$$x + y = T$$$, every time your program sees an $$$x$$$ it will go through every position of $$$y$$$ and set the answer, so the run time becomes $$$O(n^2 \log n)$$$.
For problem D, how can it be told for sure that a[1] won't be negative after the operation? Let's say a[i]=8 and i=7; then it would take at least 7-(8%7)=6, but what will happen if a[1] is less than that? What am I missing?
Even I have the same doubt
Because he did the operation from i=2 to i=n
you can see that when we are processing i=k, a[1] is at least k-1, since a[1]..a[k-1] are all greater than zero; thus the method will work.
Aaaaaaaah! That feeling when a solution passes after contest by changing just ONE CHARACTER!!!!
Nice contest!
please someone help what's wrong in my code in div.2(B)? It failed in system testing(test#4). https://mirror.codeforces.com/contest/1417/submission/93983911
why I am downvoted why it's not right of the "pupil" to ask doubts
My solution for Div1 D using small-to-large merging passed in 1.4s. 94011971
The idea is that we simulate small-to-large merging for edges in reverse order and save which values were in a smaller set, so we could simulate the process in reverse order by removing the smaller set from the larger set.
You don't even need to use a set of pairs to answer queries.
With the same idea you can maintain the vectors during the small-to-large merging and sort them afterwards, then you can simply rollback the edges which you are supposed to erase. To answer queries you go from the back of the vector and pop elements which are already deleted (equal to 0) or aren't in the same disjoint set anymore. So for all queries you get O(n) amortized (After sorting for NlogN of course).
This made my code run in about 650ms.
Submission
Wouldn't there be $$$O(N \log N)$$$ elements in total which we need to sort? Therefore the complexity would still be $$$O(N \log^2 N)$$$, but with a better constant.
Yeah, i didn't notice that, you're correct.
You can get the complexity down to $$$O(N \log N)$$$ if you sort all vectors using counting sort (consider all vectors that contain 1, then all vectors that contain 2..., and proceed like this to reconstruct all vectors in the correct order). See 94166796
Cool. I spent a day optimizing this solution with vectors to $$$O(n \log n)$$$ and didn't come up with it.
In DIV2B , why can't we just put all pairs whose sum add up to the target into different multi sets?
Was $$$O(n \log n \cdot \log(\max(A_i)))$$$ not intended for Div2E, or does my solution just have a poor constant / bad implementation at that time complexity?
My O(n log A) with hashmap failed on pretests so I assume that anything slower than O(n log A) wasn't intended but I might be wrong
The time limits were tight here. My solution failed with map too. Unordered_map barely passes (but I needed a custom hash function — they may have some anti-hash test cases).
EDIT: Actually your complexity can work (for example see tourist's solution), But constants matter a lot here.
"Otherwise, we have to make it divisible by transferring $$$i - (a_i \bmod i)$$$ from $$$a_1$$$ to $$$a_i$$$. Note that this operation does not break the condition on non-negativity because all $$$a_i$$$ are initially positive." I don't know why this sentence is true. If $$$a_1$$$ is small, isn't it a negative number? Or am I too stupid?
Since we are considering the indices $$$i$$$ in ascending order, and all $$$a_i > 0$$$, when we start dealing with $$$a_i$$$, $$$a_1 \ge i - 1 \ge a_i \bmod i$$$.
I understand, thank you very much!
Rating changes working correctly? 2000 rank made me a specialist again
Dropped to specialist before, seems like 2000 rank is not enough for a steady blue title.
Can any one give a formal proof for Div 2 A?
Can someone please explain div2b, how we can separate array a in white and black balls based on given unlucky integers T?
Here two array elements, say x and y, will make a bad pair when x+y = T. There are two cases:
Case 1 (x != y): in this case you can paint all array elements with value x white and all elements with value T-x black (if such numbers exist).
Case 2 (x == y): say there are Cnt such elements; it is optimal to paint Cnt/2 of them white and the rest black.
That's exactly what i did but it shows WA, code: https://mirror.codeforces.com/contest/1417/submission/93987002
Changed your code a bit its ac now ...
https://mirror.codeforces.com/contest/1417/submission/94035994
Some video solutions (for 2A-F), in case you like those
I might be wrong here, but isn't your solution for D wrong with an input of:
The second operation from your code
1 3 1
would reduce the first element by 1 and increment the third element by 1, but the first element is still 0 at that stage. — Note the constraint A[i] >= 1, so that input is invalid.
For Div1C / Div2E I have another solution without using a trie.
Here is my submission: 94030812
Explanation:
Let f(v) be a function that calculates the number of inversions of the array v. We can calculate that while doing a MergeSort in O(n log n).
We keep track of the minimum number of inversions in ans, and the number x that creates the answer when doing xor to the vector.
Then we update bit by bit x, from the highest bit to the lowest bit. If adding that bit to x, the answer decrease, we keep that bit set, else we continue.
Pseudocode:
Cheers, I tried a mergesort solution after the contest but it would always be too slow, your submission + explanation was a nice reference :)
I did think of the solution on these lines but couldn't come up with a concrete proof of why it would work... good to know it works
Your solution's time complexity is O(30*n*log(n)) right ? I have made similar submissions but they all got TLE, did you use any optimization ?
can someone tell me what is wrong with my solution....what i did was take input as pairs(to remember the index after sorting by value) and loop until i find two index that gives the sum "k" and the remember the indices of the two value. Now just make half of the subarray 1 and others zero then sort by indices to finally print the answer. submission : 94032531
For div2 B it shows WA, My solution was to divide them like this: paint X white and T-X black if it exists and distribute all x=T/2 equally to black and white. What is wrong with this approach.
My Solution: https://mirror.codeforces.com/contest/1417/submission/93987002
maybe trying not declaring big arrays in main? that sometimes causes WA
Div1 C TL not friendly :( My solution with hash table didn't pass.
Is div2D solution wrong? consider 1 1 1 1 3 3 4 the sum is divisible by 7, but it seems to be unsolvable...... pls help me with this case :)
1 1 1 1 3 3 4 >>> 0 2 1 1 3 3 4 >>> 2 0 1 1 3 3 4 >>> 0 0 3 1 3 3 4 >>> 3 0 0 1 3 3 4 >>> 0 0 0 4 3 3 4 >>> 4 0 0 0 3 3 4 >>> 2 0 0 0 5 3 4 >>> 7 0 0 0 3 4 >>> 4 0 0 0 6 4 >>> 10 0 0 0 0 4 >>> 7 0 0 0 0 7 >>> 14 0 0 0 0 0 >>> 12 2 0 0 0 0 0 >>> 10 2 2 0 0 0 0 >>> 8 2 2 2 0 0 0 >>> 6 2 2 2 2 0 0 >>> 4 2 2 2 2 2 >>> 2 2 2 2 2 2 2
Oops i gave the wrong data......
0 1 1 1 3 3 5
should this be unsolvable or not ?
thx
0 1 1 1 3 3 5 is not a valid testcase
A[i] should be larger than 0
Oh thx for the fastest reply i ever seen......
I should make sure that I am able to read problem statements carefully for every contest :( It's so hard since I'm in GMT+8 and Codeforces contests are often held late at night.
anyway thanks @..vince
In Div2B if we consider this test case
5 9
1 3 3 3 8
According to solution provided it would be 0 0 0 0 1
But optimal solution would be 0 0 0 1 1
Am I correct nkamzabek ?
What are you asking for?
Both coloring you gave have f(c)+f(d)=0
Yes He's(theHermit) wrong ,but in my test case I consider the optimal solution to be right whereas nkamzabek's version gives another answer.
Nope, in your case both are optimal.
In Div1 B , the condition:"$$$a_i\geq 1$$$" is really important.At first,I ignore it,so I am stuck for a very long time.
Compared to B,Div1 C is much classical.
I made an unnecessary resubmission for missing that. I found out about that constraint when I wanted to hack someone.
According to problem Div2D: Number 1 is the most powerful number :))
The trick of an array of adhoc problems
Construct a trie to solve problem E — I could never have thought of such a great idea! Here's my solution: 94045226, without using a trie. The code may be a little long, but it got accepted!
Auto comment: topic has been updated by bthero (previous revision, new revision, compare).
Auto comment: topic has been updated by bthero (previous revision, new revision, compare).
Div 2B: Even after reading editorials I don't see any error. Can someone plz tell me what's wrong with my code? Solution B. My solution was to divide them like this: paint X white and T-X black if it exists and distribute all x=T/2 equally to black and white.
Is the rating change unusual this time for everyone or only me?
We (me, gyh20, Time_tears) have a totally different (and much easier, we think) solution to D1D.
First add the edges from the last operation to the first, and at the same time use disjoint-set-unions (with merging the small one into the big one) to add the numbers a connected component contain into a vector and keep the total size under $$$n\log n$$$.
After getting done with the vectors, sort each of them from small to big. Let $$$bel[i]$$$ be the connected component the point with value $$$i$$$ is now in. For all $$$i$$$ from $$$1$$$ to $$$n$$$, let $$$bel[a_i]\leftarrow getfather(i)$$$. (getfather is the dsu function)
Then we answer the queries:
The solution runs a lot faster than the author's. Without any optimizes, it only uses 452ms. Also it doesn't need any data structures.
My code is here: 94056232
can anyone explain what does this line in editorial for DIV2 F mean, If the current query is of first type, remember the "boss" of the corresponding vertex. How is the question transformed to subtree-maximum query on DSU tree ?
Why does my O(n*30) solution for E give TLE?
94058521
https://mirror.codeforces.com/contest/1417/submission/94186120 Use reserve, as the hash table requires a lot of rehashing and resizing.
Can Someone please tell me what's wrong in my solution for div2-B
https://mirror.codeforces.com/contest/1417/submission/94138194
I have simply changed the color of (k — number) to the opposite of the color of (number). This also ensures that all the n/2 numbers are equally distributed, but I'm still getting WA on test 4.
Got my mistake. For any one doing same mistake as me consider the test case
4 8
8 0 4 4
Thanks, bro! It was really messing up with my head, didn't realize that I am just plain dumb :)
The editorial of Div1E is missing. Could anyone share the idea of its solution?
Since the editorials of problem E and F are missing, I will share my approach to solve them:
E:
We are actually minimizing the number of neighbor pairs in $$$b$$$ that are different
Add numbers one by one. For each number $$$a_i$$$, we will decide if we want $$$b_{2i - 2}$$$ and $$$b_{2i - 1}$$$ to be different, and also $$$b_{2i - 1}$$$ and $$$b_{2i}$$$. (We can special handle the first element, of course).
If we want $$$b_{2i - 1}$$$ and $$$b_{2i}$$$ to be same, $$$a_i$$$ must be even and $$$b_{2i}$$$ $$$=$$$ $$$\frac{a_{i}}{2}$$$.
If we want $$$b_{2i - 2}$$$ and $$$b_{2i - 1}$$$ to be same, $$$1$$$ $$$\leq$$$ $$$b_{2i - 2}$$$ $$$<$$$ $$$a_{i}$$$. and $$$b_{2i}$$$ $$$=$$$ $$$a_i$$$ $$$-$$$ $$$b_{2i-2}$$$.
After first element is added, possible values of $$$b_2$$$ are either a range ($$$1$$$ $$$\leq$$$ $$$b_2$$$ $$$<$$$ $$$a_1$$$) (If $$$b_1$$$ $$$\neq$$$ $$$b_2$$$)
or a single possible value $$$\frac{a_1}{2}$$$.
It follows that after adding each number, the possible values of $$$b_{2i}$$$ is a range of values added by a set of individual values $$$S$$$.
If $$$b_{2i - 1}$$$ $$$=$$$ $$$b_{2i}$$$, possible values become a single value.
Otherwise if $$$b_{2i - 2}$$$ $$$=$$$ $$$b_{2i - 1}$$$, you will essentially flip the range and every value inside $$$S$$$.
Otherwise, possible values are $$$1$$$ $$$\leq$$$ $$$b_{2i}$$$ $$$<$$$ $$$a_{i}$$$.
You can now do a DP or greedy here:
For greedy, you can observe that when you add each element, you will want to always make $$$b_{2i - 2}$$$ $$$=$$$ $$$b_{2i - 1}$$$ or $$$b_{2i - 1}$$$ $$$=$$$ $$$b_{2i}$$$, or both.
If you can do both, it is always optimal to do both.
If you can do either, it is always optimal to do either. The result possible values will be a range of values added by the set of individual values $$$S$$$, and we will add at most one more individual value to $$$S$$$.
It is annoying but possible to maintain the range and the set $$$S$$$ in $$$O(n \log n)$$$ total time complexity.
A lot of details are omitted but that should be enough observations for a complete solution.
F:
If we construct the resultant graph based on directions, it's a cycle with some branches moving towards the cycle.
The sum value of all elements within cycle must be the same.
The sum value of other elements must be strictly larger than its next element.
All cycles can be decomposed to cycles of length 2.
Label squares with no neighbor strictly smaller than itself.
If we can match each of labelled squares with a neighbor element (not necessary labelled) with same value, such that no element is matched twice, we can form a solution.
The reverse is also true: If no such matching exists, we don't have a solution.
Since the board is bipartite, form a bipartite graph with odd parity elements on left, and even parity elements on right. (Parity based on value of $$$i+j$$$ for element $$$a_{i,j}$$$).
Create a lower bound max flow graph by connecting source to each element on left, and each element on right to sink with capacity 1 each.
If an element is labeled, make the lowerbound of the particular edge from source to element or from element to sink to be 1.
If $$$x$$$ is on left side, element $$$x$$$ and $$$y$$$ are neighbor in original graph, and their values are equal, make a edge from $$$x$$$ to $$$y$$$.
By running lowerbound maxflow, we will get a possible matching that fulfills the requirement, or notice that no such matching exists.
Lastly, maxflow on unit capacity graph have some complexity like $$$O(E \sqrt{V})$$$.
Thank you. Was waiting for this.
In the editorial of Div2E/Div1C,there's something I can't understand. In the tutorial, $$$a, b$$$ are the children of $$$v$$$, and $$$v$$$ has a depth of $$$k$$$. Then $$$a, b$$$ have a depth of $$$k-1$$$, and the highest bit which differs in both should be the $$$k-1$$$-th highest bit. So I think only the $$$k-1$$$-th highest bit of X is toggled, lists $$$S(a)$$$ and $$$S(b)$$$ will change their relative order, not $$$k$$$-th. Can anyone explain it?
Edit: has been fixed.
The maxn (4e6 + 100) in the code of Div2E/Div1C is small and can be hacked by this generator:
Nice contest. I'm new to the technique of adding fake node to build dsu tree where used in Div2F/Div1D problem. Is there any relevant problem can be solved by this technique?
Auto comment: topic has been updated by bthero (previous revision, new revision, compare).
Auto comment: topic has been updated by bthero (previous revision, new revision, compare).
Auto comment: topic has been updated by bthero (previous revision, new revision, compare).
Div2 A. The statement "any operation which increases the value of $$$min(a_1,a_2,…,a_n)$$$ is unoptimal" (the proper English word is non-optimal) is false. Besides, it is meaningless since not every operation that preserves the minimal value is optimal.
You can copy once regardless of whether you copy from $$$a_1$$$ to $$$a_2$$$ or from $$$a_2$$$ to $$$a_1$$$.
Why has it been downvoted? Is anything stated there incorrect? It referred to the original text of the editorial which has since been edited.
Thanks for Editorial
seeing space complexity for the first time in editorial!! one of the best round.
Auto comment: topic has been updated by bthero (previous revision, new revision, compare).
I am thrilled after understanding div2 F's solution
In the problem Div2 D, the editorial said that transferring i-(a[i]%i) from a[1] to a[i] does not break the non-negativity rule. What about the case where a[1] < i-(a[i]%i)? Will a[1] become negative?
In Div2E/Div1C we can convert all numbers to binary. Then we start with the whole array of n elements and look at the most significant bit. Iterate over the array, counting inversions and anti-inversions along the way. (Inversions are 1-0 pairs and anti-inversions are 0-1 pairs; 0-0 and 1-1 pairs are neither.) Split the array into two subsequences, placing the elements with a 0 bit in this position into one of them and elements with a 1 bit into the other, while preserving order within each subsequence. Repeat the same procedure recursively on each of the subsequences, with the next most significant bit. Inversions (as well as anti-inversions) at the same recursion depth should be added up. Recursion stops after processing the least significant bit. At the end we have the total number of inversions and the total number of anti-inversions per recursion level, i.e. per bit position. Now we can construct the required x, setting its bits to 1 at positions where the number of inversions is strictly larger than the number of anti-inversions, and to 0 in the others. Complexity is $$$O(n \log n)$$$.
Unofficial Div1F editorial (Dinic's maxflow solution):
Note that a satisfactory solution can be produced as follows: for every cell, exactly one of the following must be true:
Note that the second condition resembles the bipartite matching problem that arises from domino-tiling an arbitrarily-shaped chessboard (e.g. this problem from AtCoder). In fact, it can be framed as a bipartite matching, but with only a subset of nodes being mandatory participants for the matching. A node is mandatory iff its cell has no neighbors of lower value, otherwise it's optional.
Recall that to use Dinic's maximum flow to solve a bipartite matching, there are source-edges to one of the parts (the "white" cells on a chessboard), sink-edges from the other of the parts (the "black" cells), and directed edges from the first part to the second part based on potential matchings (i.e. between cells with equal value). To model mandatory nodes, we add a "demand" to its source or sink edge, modeling demands by the transformation detailed in this tutorial.
The answer is yes iff all demands are met by the flow. The solution can be reconstructed by matching cells by the second condition if there is flow between them. If a cell isn't matched this way, the first condition can be fulfilled instead; this is always possible as the node would have been marked mandatory otherwise.
Beware: Dinic's algorithm implementations that pass many other flow problems fine may TLE here due to a subtle bug. In particular, ensure your adjacency-list pointers (
ptr
in the author's solution, ornext
in my solutions [recursive | iterative]) only increment if its edge is exhausted, i.e. while the search in that node hasn't fulfilled its flow limit yet. — Separately: in problem 1D, the given solution code runs in about 1.3s (GNU C++17 (64)) and the TL is 1.5s?
All spoilers are broken. Please fix this.
I have alternate solution for div-2 F which has same TC as the editorial's solution, but slightly worse space complexity, $$$(O(n + q)\log{n})$$$.
Firstly, store all the queries, and delete all the edges given in the type-2 queries. Now, in this new graph, unite all the nodes in connected components into one dsu set each.
Now we will basically try to simulate the process in reverse. We can do it like this: Iterate from the last query to the first. Now all the type 2 queries are essentially unite queries (as we are going backwards), so store the set representatives of both the ends of the edge that was to be deleted in this query. Also, store the smaller set.
Now we will iterate from first to the last query, and answer queries too.
Implementation: link