Author: FelixMP Preparator: FelixMP
Solution
Code
#include<bits/stdc++.h>
using namespace std;
int main() {
    int t;
    cin >> t;
    while(t--) {
        int n;
        cin >> n;
        int minv = 1e9+1;
        int maxv = -1;
        int mini;
        int maxi;
        for(int i=0; i < n; ++i) {
            int a;
            cin >> a;
            if(a > maxv) {
                maxi = i+1;
                maxv = a;
            }
            if(a < minv) {
                mini = i+1;
                minv = a;
            }
        }
        cout << mini << " " << maxi << endl;
    }
}
Author: FelixMP Preparator: xpov1LL
Solution
Code
#include<iostream>
#include<vector>
#include<algorithm>
using namespace std;
int main() {
    int t;
    cin >> t;
    while(t--) {
        int n, a;
        cin >> n >> a;
        vector<int> v(n);
        for(int& x : v) cin >> x;
        bool ans = false;
        if(n == 1) ans = (v[0] == a);
        else {
            sort(v.begin(), v.end());
            int i = 0;
            int j = 1;
            while(j < n and i < n) {
                if(v[i] + abs(a) == v[j]) {
                    ans = true;
                    break;
                }
                else if(v[i] + abs(a) < v[j]) ++i;
                else ++j;
            }
        }
        cout << (ans? "YES" : "NO") << '\n';
    }
}
Author: FelixMP Preparator: FelixMP
Solution
Code
#include<bits/stdc++.h>
using namespace std;
typedef vector<int> vi;
int main() {
    int t;
    cin >> t;
    while(t--) {
        int n;
        cin >> n;
        vi a(n);
        for(int i=0; i < n; ++i) cin >> a[i];
        sort(a.begin(), a.end());
        bool one = false;
        bool consec = false;
        for(int i=0; i < n; ++i) {
            if(a[i] == 1) one = true;
            if(i < n-1 && a[i]+1 == a[i+1]) consec = true;
        }
        cout << ((one && consec) ? "NO" : "YES") << endl;
    }
}
Author: FelixMP Preparator: FelixMP
Solution
Code
#include<bits/stdc++.h>
using namespace std;
#define endl '\n'
typedef long long ll;
int main() {
    ios::sync_with_stdio(false);
    int T;
    cin >> T;
    while(T--) {
        ll n;
        cin >> n;
        ll x = n;
        while(x % 2 == 0) x /= 2;
        if(x == 1) {
            cout << -1 << endl;
        }
        else if(x <= 2e9 && (x*(x+1))/2 <= n) {
            cout << x << endl;
        }
        else {
            cout << 2*(n/x) << endl;
        }
    }
}
Author: FelixMP Preparator: FelixMP
Solution
Code
#include<bits/stdc++.h>
using namespace std;
typedef vector<int> vi;
typedef vector<vi> vvi;
vvi al;
vi ans;
void dfs(int u, int p, int c) {
    ans[u] = ((int)al[u].size())*c;
    for(int v : al[u]) {
        if(v != p) {
            dfs(v, u, -c);
        }
    }
}
int main() {
    int T;
    cin >> T;
    while(T--) {
        int n;
        cin >> n;
        al = vvi(n);
        for(int i=0; i < n-1; ++i) {
            int u, v;
            cin >> u >> v;
            u--;
            v--;
            al[u].push_back(v);
            al[v].push_back(u);
        }
        ans = vi(n);
        dfs(0, -1, 1);
        for(int i=0; i < n; ++i) {
            cout << ans[i];
            if(i < n-1) cout << " ";
        }
        cout << endl;
    }
}
Author: FelixMP Preparator: FelixMP
Solution
Code
#include<bits/stdc++.h>
using namespace std;
typedef long long ll;
typedef vector<ll> vi;
int main() {
    int T;
    cin >> T;
    while(T--) {
        ll n;
        cin >> n;
        vi a(n);
        ll tsum = 0;
        for(int i=0; i < n; ++i) {
            cin >> a[i];
            tsum += a[i];
        }
        sort(a.begin(), a.end());
        if(a[n-1]*(n-2) + tsum < 0 || a[0]*(n-2) + tsum > 0) {
            cout << "INF" << endl;
            continue;
        }
        ll cslope = a[n-1]*(n-2) + tsum;
        ll cvalue = -(n-1)*a[n-1]*a[n-1];
        ll ans = cvalue;
        for(ll i=1; i < n; ++i) {
            ll d = a[n-i]-a[n-i-1];
            cvalue += cslope*d;
            ans = max(cvalue, ans);
            cslope += a[0]-a[n-1];
        }
        cout << ans << endl;
    }
}
Author: FelixMP Preparator: FelixMP
Solution
Code
#include<bits/stdc++.h>
using namespace std;
typedef vector<int> vi;
typedef vector<vi> vvi;
vi obtain_p_permutation(const vi& a) {
    int n = a.size();
    vi pp(n, -1);
    vi p(n);
    int cp = 0;
    vi cnt(n);
    for(int i=0; i < n; ++i) {
        cnt[a[i]]++;
    }
    for(int i=0; i < n; ++i) {
        if(pp[a[i]] == -1) {
            if(cnt[a[i]] == 1) {
                p[i] = n/2;
            }
            else {
                p[i] = cp;
                pp[a[i]] = n-cp-1;
                cp++;
                cnt[a[i]]--;
            }
        }
        else {
            p[i] = pp[a[i]];
            pp[a[i]] = -1;
            cnt[a[i]]--;
        }
    }
    return p;
}
vvi find_cycles(vi p, vi& in_cyc) {
    int n = p.size();
    vi vis = vi(n);
    vvi cycles;
    for(int i=0; i < n; ++i) {
        if(!vis[i]) {
            vi cyc;
            int v = i;
            while(!vis[v]) {
                cyc.push_back(v);
                in_cyc[v] = cycles.size();
                vis[v] = true;
                v = p[v];
            }
            cycles.push_back(cyc);
        }
    }
    return cycles;
}
int find_set(vi& dsu, int x) {
    if(dsu[x] == x) return x;
    return dsu[x] = find_set(dsu, dsu[x]);
}
void solve() {
    int n;
    cin >> n;
    vi a(n);
    vi cnt(n);
    for(int i=0; i < n; ++i) {
        cin >> a[i];
        a[i]--;
        cnt[a[i]]++;
    }
    int odd_i = -1;
    for(int i=0; i < n; ++i) {
        if(cnt[i]%2 == 1) {
            if(odd_i == -1) {
                odd_i = i;
            }
            else {
                odd_i = -2;
            }
        }
    }
    if(odd_i == -2) {
        cout << "NO" << endl;
    }
    else if(odd_i != -1 && (cnt[odd_i] == 1 && odd_i == a[n/2])) {
        cout << "NO" << endl;
    }
    else {
        vi p = obtain_p_permutation(a);
        if(odd_i != -1 && p[n/2] == n/2) {
            for(int i=0; i < n; ++i) {
                if(i != n/2 && a[i] == odd_i) {
                    p[n/2] = p[i];
                    p[i] = n/2;
                    break;
                }
            }
        }
        vi rp(n);
        for(int i=0; i < n; ++i) {
            rp[p[i]] = i;
        }
        vvi cycles;
        vi in_cyc(n);
        cycles = find_cycles(p, in_cyc);
        vi dsu(n);
        for(int i=0; i < n; ++i) dsu[i] = i;
        for(int i=0; i < n; ++i) {
            if(find_set(dsu, in_cyc[i]) != find_set(dsu, in_cyc[n-i-1])) {
                dsu[find_set(dsu, in_cyc[i])] = find_set(dsu, in_cyc[n-i-1]);
                int j1 = rp[i];
                int j2 = rp[n-i-1];
                p[j2] = i;
                rp[i] = j2;
                p[j1] = n-i-1;
                rp[n-i-1] = j1;
            }
        }
        cycles = find_cycles(p, in_cyc);
        int nc = cycles.size();
        vi prev_p = vi(p);
        vi prev_rp = vi(rp);
        for(int i=0; i < nc-1; ++i) {
            int i0 = cycles[i][0];
            int ip1 = cycles[(i+1)][0];
            p[prev_rp[n-i0-1]] = ip1;
            rp[ip1] = prev_rp[n-i0-1];
        }
        p[prev_rp[n-cycles[nc-1][0]-1]] = n-cycles[0][0]-1;
        rp[n-cycles[0][0]-1] = prev_rp[n-cycles[nc-1][0]-1];
        for(int i=0; i < nc-1; ++i) {
            int i0 = cycles[i][0];
            int ip1 = cycles[(i+1)][0];
            p[prev_rp[i0]] = n-ip1-1;
            rp[n-ip1-1] = prev_rp[i0];
        }
        p[prev_rp[cycles[nc-1][0]]] = cycles[0][0];
        rp[cycles[0][0]] = prev_rp[cycles[nc-1][0]];
        cout << "YES" << endl;
        for(int i=0; i < n; ++i) {
            cout << rp[i]+1 << " ";
        }
        cout << endl;
    }
}
int main() {
    int T;
    cin >> T;
    while(T--) {
        solve();
    }
}
Author: FelixMP Preparator: FelixMP
Solution
Code
#include<bits/stdc++.h>
using namespace std;
typedef __int128 ll;
typedef vector<ll> vi;
typedef vector<vi> vvi;
ll read_ll() {
    string s;
    cin >> s;
    ll x = 0;
    for(int i=0; i < (int)s.length(); ++i) {
        x *= 10;
        x += ll(s[i]-'0');
    }
    return x;
}
void print_ll(ll x) {
    vector<int> p;
    while(x > 0) {
        p.push_back((int)(x%10));
        x /= 10;
    }
    reverse(p.begin(), p.end());
    for(int y : p) cout << y;
}
inline ll ctz(ll x) {
    long long a = x&((ll(1)<<ll(63))-ll(1));
    long long b = x>>ll(63);
    if(a == 0) return ll(63)+__builtin_ctzll(b);
    return __builtin_ctzll(a);
}
inline ll abll(ll x) {
    return x >= 0 ? x : -x;
}
ll gcd(ll a, ll b) {
    if(b == 0) return a;
    if(a == 0) return b;
    int az = ctz(a);
    int bz = ctz(b);
    int shift = min(az, bz);
    b >>= bz;
    while(a != 0) {
        a >>= az;
        ll diff = b-a;
        if(diff) az = ctz(diff);
        b = min(a, b);
        a = abll(diff);
    }
    return b << shift;
}
void init_st(vi& st, const vi& v) {
    int n = v.size();
    for(int i=0; i < n; ++i) {
        st[n+i] = v[i];
    }
    for(int i=n-1; i >= 1; --i) {
        st[i] = gcd(st[2*i], st[2*i+1]);
    }
}
void update_st(vi& st, int i, ll x) {
    int n = (int)st.size() / 2;
    int pos = n+i;
    st[pos] = x;
    while(pos > 1) {
        pos /= 2;
        st[pos] = gcd(st[2*pos], st[2*pos+1]);
    }
}
void solve(const vi& a, const vi& b, vector<bool>& ai, vector<bool>& bi) {
    int n = a.size();
    int m = b.size();
    vvi st_a = vvi(n, vi(2*m, 0));
    vvi st_b = vvi(m, vi(2*n, 0));
    for(int i=0; i < n; ++i) {
        vi af(m);
        for(int j=0; j < m; ++j) {
            af[j] = a[i]/gcd(a[i], b[j]);
        }
        init_st(st_a[i], af);
    }
    for(int i=0; i < m; ++i) {
        vi bf(n);
        for(int j=0; j < n; ++j) {
            bf[j] = b[i]/gcd(b[i], a[j]);
        }
        init_st(st_b[i], bf);
    }
    queue<int> dq;
    for(int i=0; i < n; ++i) {
        if(st_a[i][1] > 1) {
            dq.push(i);
            ai[i] = false;
        }
    }
    for(int i=0; i < m; ++i) {
        if(st_b[i][1] > 1) {
            dq.push(n+i);
            bi[i] = false;
        }
    }
    while(!dq.empty()) {
        int idx = dq.front();
        dq.pop();
        if(idx < n) {
            int i = idx;
            for(int j=0; j < m; ++j) {
                if(bi[j]) {
                    update_st(st_b[j], i, b[j]);
                    if(st_b[j][1] > 1) {
                        dq.push(n+j);
                        bi[j] = false;
                    }
                }
            }
        }
        else {
            int i = idx-n;
            for(int j=0; j < n; ++j) {
                if(ai[j]) {
                    update_st(st_a[j], i, a[j]);
                    if(st_a[j][1] > 1) {
                        dq.push(j);
                        ai[j] = false;
                    }
                }
            }
        }
    }
}
int main() {
    int T;
    cin >> T;
    while(T--) {
        int n, m;
        cin >> n >> m;
        vi a(n);
        vi b(m);
        for(int i=0; i < n; ++i) a[i] = read_ll();
        for(int i=0; i < m; ++i) b[i] = read_ll();
        vector<bool> ai(n, true);
        vector<bool> bi(m, true);
        solve(a, b, ai, bi);
        int as = 0;
        int bs = 0;
        for(int i=0; i < n; ++i) {
            if(ai[i]) as++;
        }
        for(int i=0; i < m; ++i) {
            if(bi[i]) bs++;
        }
        if(as == 0 || bs == 0) cout << "NO" << endl;
        else {
            cout << "YES" << endl;
            cout << as << " " << bs << endl;
            for(int i=0; i < n; ++i) {
                if(ai[i]) {
                    print_ll(a[i]);
                    cout << " ";
                }
            }
            cout << endl;
            for(int i=0; i < m; ++i) {
                if(bi[i]) {
                    print_ll(b[i]);
                    cout << " ";
                }
            }
            cout << endl;
        }
    }
}
Author: FelixMP Preparator: FelixMP
Solution
Code
#include<bits/stdc++.h>
using namespace std;
typedef vector<int> vi;
typedef vector<vi> vvi;
typedef pair<int, int> ii;
typedef vector<ii> vii;
typedef vector<vii> vvii;
struct graph {
    int n;
    int m;
    vvi al;
    vi morphism;
    vvi dfs_children;
    vi dfs_parent;
    vi dfs_num;
    vi dfs_low;
    int dfs_count;
    bool is_root_ac = false;
    bool bad_biccon = false;
    vii repr_edge;
    void dt_dfs(int v, int par) {
        dfs_parent[v] = par;
        dfs_num[v] = dfs_low[v] = dfs_count++;
        for(int u : al[v]) {
            if(u == par) {
                //Nothing
            }
            else if(dfs_num[u] == -1) {
                dfs_children[v].push_back(u);
                dt_dfs(u, v);
                dfs_low[v] = min(dfs_low[v], dfs_low[u]);
            }
            else {
                dfs_low[v] = min(dfs_low[v], dfs_num[u]);
            }
        }
    }
    ii min_repr_edge(ii a, ii b) {
        if(a.first == -1) return b;
        if(b.first == -1) return a;
        if(dfs_num[a.second] < dfs_num[b.second]) return a;
        if(dfs_num[a.second] > dfs_num[b.second]) return b;
        if(dfs_num[a.first] > dfs_num[b.first]) return a;
        if(dfs_num[a.first] < dfs_num[b.first]) return b;
        return a;
    }
    void dt_dfs_hamil(int v) {
        repr_edge[v] = ii(-1, -1);
        for(int u : dfs_children[v]) {
            dt_dfs_hamil(u);
            repr_edge[v] = min_repr_edge(repr_edge[v], repr_edge[u]);
        }
        for(int u : al[v]) {
            if(u == dfs_parent[v]) {
                //Nothing
            }
            else if(dfs_num[u] < dfs_num[v]) {
                repr_edge[v] = min_repr_edge(repr_edge[v], ii(v, u));
            }
        }
        if(dfs_parent[v] == -1) {
            //Root case
            //Nothing more to check, repr_edge will always be set correctly.
        }
        else {
            if(dfs_children[v].size() == 0) {
                //Nothing to check
            }
            else if(dfs_children[v].size() == 1) {
                //Lemma 4
                if(dfs_low[dfs_children[v][0]] != dfs_low[v] && dfs_low[dfs_children[v][0]] != dfs_num[dfs_parent[v]]) {
                    bad_biccon = true;
                }
            }
            else if(dfs_children[v].size() == 2) {
                //Lemma 3
                if(dfs_parent[dfs_parent[v]] != -1) {
                    if(!((dfs_low[dfs_children[v][0]] == dfs_num[dfs_parent[v]]) ^ (dfs_low[dfs_children[v][1]] == dfs_num[dfs_parent[v]]))) {
                        bad_biccon = true;
                    }
                }
                if(dfs_low[v] < min(dfs_low[dfs_children[v][0]], dfs_low[dfs_children[v][1]])) {
                    bad_biccon = true;
                }
            }
            else {
                bad_biccon = true;
            }
        }
    }
    void dt_dfs_cedges(vvii& cedges, vii& edge_stack, int v, int par) {
        dfs_parent[v] = par;
        dfs_num[v] = dfs_low[v] = dfs_count++;
        for(int u : al[v]) {
            if(u == par) {
                //Nothing
            }
            else if(dfs_num[u] == -1) {
                dfs_children[v].push_back(u);
                edge_stack.emplace_back(v, u);
                dt_dfs_cedges(cedges, edge_stack, u, v);
                dfs_low[v] = min(dfs_low[v], dfs_low[u]);
                if((par == -1 && is_root_ac) || (par != -1 && dfs_low[u] >= dfs_num[v])) {
                    vii comp;
                    while(edge_stack.back().first != v || edge_stack.back().second != u) {
                        comp.push_back(edge_stack.back());
                        edge_stack.pop_back();
                    }
                    comp.push_back(edge_stack.back());
                    edge_stack.pop_back();
                    cedges.push_back(comp);
                }
            }
            else {
                dfs_low[v] = min(dfs_low[v], dfs_num[u]);
                if(dfs_num[u] < dfs_num[v]) {
                    edge_stack.emplace_back(v, u);
                }
            }
        }
    }
    void generate_dfs_tree(int root) {
        dfs_children = vvi(n, vi());
        dfs_parent = vi(n);
        dfs_count = 0;
        dfs_num = vi(n, -1);
        dfs_low = vi(n, -1);
        dt_dfs(root, -1);
    }
    void generate_edge_partition(vvii& cedges, vii& edge_stack, int root) {
        generate_dfs_tree(root);
        is_root_ac = (dfs_children[root].size() > 1);
        dfs_children = vvi(n, vi());
        dfs_parent = vi(n);
        dfs_count = 0;
        dfs_num = vi(n, -1);
        dfs_low = vi(n, -1);
        dt_dfs_cedges(cedges, edge_stack, root, -1);
        if(!edge_stack.empty()) cedges.push_back(edge_stack);
    }
    void generate_hamil_dfs_tree(int root) {
        generate_dfs_tree(root);
        repr_edge = vii(n, ii());
        dt_dfs_hamil(root);
    }
};
vector<graph> partition_biconnected(graph& g) {
    vvii cedges;
    vii edge_stack;
    g.generate_edge_partition(cedges, edge_stack, 0);
    vector<graph> comp;
    for(vii& vec : cedges) {
        graph h;
        h.n = 0;
        h.m = vec.size();
        unordered_map<int, int> rmorph;
        for(ii e : vec) {
            if(rmorph.find(e.first) == rmorph.end()) {
                rmorph[e.first] = h.n++;
            }
            if(rmorph.find(e.second) == rmorph.end()) {
                rmorph[e.second] = h.n++;
            }
        }
        h.morphism = vi(h.n);
        h.al = vvi(h.n);
        for(ii e : vec) {
            int u = rmorph[e.first];
            int v = rmorph[e.second];
            h.morphism[u] = g.morphism[e.first];
            h.morphism[v] = g.morphism[e.second];
            h.al[u].push_back(v);
            h.al[v].push_back(u);
        }
        comp.push_back(h);
    }
    return comp;
}
void upwards_path(graph& g, vi& hc, int v, int tar);
void downwards_path(graph& g, vi& hc, int v, int tar);
void upwards_path(graph& g, vi& hc, int v, int tar) {
    if(g.dfs_children[v].size() == 2) {
        int u1 = g.dfs_children[v][0];
        int u2 = g.dfs_children[v][1];
        if(g.repr_edge[u1] == g.repr_edge[v]) swap(u1, u2);
        hc.push_back(v);
        downwards_path(g, hc, u1, g.repr_edge[u1].first);
        if(v != tar) {
            upwards_path(g, hc, g.dfs_parent[v], tar);
        }
    }
    else if(g.dfs_children[v].size() == 1) {
        int u = g.dfs_children[v][0];
        hc.push_back(v);
        if(g.repr_edge[u] != g.repr_edge[v]) {
            downwards_path(g, hc, u, g.repr_edge[u].first);
        }
        if(v != tar) {
            upwards_path(g, hc, g.dfs_parent[v], tar);
        }
    }
    else {
        hc.push_back(v);
        if(v != tar) {
            upwards_path(g, hc, g.dfs_parent[v], tar);
        }
    }
}
void downwards_path(graph& g, vi& hc, int v, int tar) {
    if(g.dfs_children[v].size() == 2) {
        int u1 = g.dfs_children[v][0];
        int u2 = g.dfs_children[v][1];
        if(g.repr_edge[u1] == g.repr_edge[v]) swap(u1, u2);
        upwards_path(g, hc, g.repr_edge[u1].first, u1);
        hc.push_back(v);
        downwards_path(g, hc, u2, tar);
    }
    else if(g.dfs_children[v].size() == 1) {
        int u = g.dfs_children[v][0];
        if(v == tar) {
            upwards_path(g, hc, g.repr_edge[u].first, u);
            hc.push_back(v);
        }
        else {
            hc.push_back(v);
            downwards_path(g, hc, u, tar);
        }
    }
    else {
        hc.push_back(v);
    }
}
vi hamiltonian_cycle(graph& g) {
    g.generate_hamil_dfs_tree(0);
    if(g.bad_biccon) return vi();
    vi hc;
    downwards_path(g, hc, 0, g.repr_edge[0].first);
    assert((int)hc.size() == g.n);
    return hc;
}
int comp_index;
bool cyclic_comparator(int a, int b) {
    if(a < comp_index) a += 1e7;
    if(b < comp_index) b += 1e7;
    return a < b;
}
graph sort_graph(const graph& g, const vi& hc) {
    graph h;
    h.n = g.n;
    h.m = g.m;
    h.morphism = vi(g.n);
    h.al = vvi(g.n);
    vi rhc(g.n);
    for(int i=0; i < g.n; ++i) {
        h.morphism[i] = g.morphism[hc[i]];
        rhc[hc[i]] = i;
    }
    for(int i=0; i < g.n; ++i) {
        for(int j : g.al[hc[i]]) {
            h.al[i].push_back(rhc[j]);
        }
        comp_index = i;
        //This is m log m, can be improved
        sort(h.al[i].begin(), h.al[i].end(), cyclic_comparator);
    }
    return h;
}
bool has_crossing(const graph& g) {
    vii bad_stack;
    for(int i=0; i < g.n; ++i) {
        comp_index = i;
        while(!bad_stack.empty() && i == bad_stack.back().first) bad_stack.pop_back();
        for(int j=(int)g.al[i].size()-2; j > 0; --j) {
            int u = g.al[i][j];
            if(!bad_stack.empty() && cyclic_comparator(bad_stack.back().first, u) && cyclic_comparator(u, bad_stack.back().second)) {
                return true;
            }
            if(u > i && (bad_stack.empty() || u != bad_stack.back().first)) {
                bad_stack.emplace_back(u, i);
            }
        }
    }
    return false;
}
void merge_ans(const graph& g, vvi& ans) {
    for(int i=0; i < g.n; ++i) {
        for(int j : g.al[i]) {
            ans[g.morphism[i]].push_back(g.morphism[j]);
        }
    }
}
void merge_single_edge_ans(const graph& g, vvi& ans) {
    int u = g.morphism[0];
    int v = g.morphism[1];
    ans[u].push_back(v);
    ans[v].push_back(u);
}
vvi solve(graph& input_graph) {
    vvi ans(input_graph.n);
    vector<graph> components = partition_biconnected(input_graph);
    for(graph g : components) {
        if(g.n == 1) {
            //Nothing
        }
        else if(g.n == 2) {
            merge_single_edge_ans(g, ans);
        }
        else {
            vi hc = hamiltonian_cycle(g);
            if(hc.empty()) {
                return vvi();
            }
            graph g2 = sort_graph(g, hc);
            if(has_crossing(g2)) return vvi();
            merge_ans(g2, ans);
        }
    }
    return ans;
}
graph read_graph() {
    graph g;
    int n, m;
    cin >> n >> m;
    g.n = n;
    g.m = m;
    g.al = vvi(n, vi());
    g.morphism = vi(n, 0);
    for(int i=0; i < n; ++i) {
        g.morphism[i] = i;
    }
    for(int i=0; i < m; ++i) {
        int u, v;
        cin >> u >> v;
        g.al[u].push_back(v);
        g.al[v].push_back(u);
    }
    return g;
}
void print_ans(const vvi& ans) {
    int n = ans.size();
    if(n == 0) {
        cout << "NO" << endl;
    }
    else {
        cout << "YES" << endl;
        for(int i=0; i < n; ++i) {
            for(int x : ans[i]) {
                cout << x << " ";
            }
            cout << endl;
        }
    }
}
int main() {
    int T;
    cin >> T;
    while(T--) {
        graph input_graph = read_graph();
        vvi ans = solve(input_graph);
        print_ans(ans);
    }
}
Congratulations to the winners!
What a contest! (☉_☉)
MathForces
and ConstructiveForces
used unordered_set for B, got TLE.
unordered_set can run in $$$O(n^2)$$$ in the STL. Use set.
What I see from the docs is that they mention linear time in the worst case and constant time on average. They also mention that it is faster than set, as the elements are not stored in sorted order.
The thing with unordered_set is that it adds new elements relatively slowly. Yes, it is guaranteed that it will run in constant time, but no one said that this constant will be less than a logarithm =)
Here is your submission with set instead of unordered_set: https://mirror.codeforces.com/contest/1656/submission/150817878
UPD: It doesn't work! (However, if you still want to use unordered_set, you can rehash (resize) it before using it. This is similar to what vector::reserve() does, but has more impact. Here is your submission with us.rehash(n) after the creation of us: https://mirror.codeforces.com/contest/1656/submission/150818489 It runs in just 78 ms. Conclusion: be careful with std::unordered_set and use rehash().)
Conclusion: don't use std::unordered_set
That is completely incorrect! The hashing used by std::unordered_set and std::unordered_map is fundamentally broken, and you can easily create $$$O(n^2)$$$ blow-ups, see this blog. Trying to use .rehash(), .max_load_factor(), or .reserve() etc. will not fix this. To fix it, you need to create your own custom hash.

Wow, that's mind-blowing! I've always used std::unordered_map with rehashing, it's never been a problem, and I've never even thought about these types of blow-ups. Thanks a lot for pointing this out!
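For reference, here is a minimal sketch of the custom-hash fix mentioned above, based on the widely shared splitmix64 recipe (the struct name and seeding choice here are illustrative, not from this thread):

#include <chrono>
#include <cstddef>
#include <cstdint>
#include <unordered_set>
using namespace std;

struct custom_hash {
    // splitmix64 is a strong 64-bit mixer, so inputs crafted against the
    // default (identity-like) hash no longer pile into a single bucket
    static uint64_t splitmix64(uint64_t x) {
        x += 0x9e3779b97f4a7c15;
        x = (x ^ (x >> 30)) * 0xbf58476d1ce4e5b9;
        x = (x ^ (x >> 27)) * 0x94d049bb133111eb;
        return x ^ (x >> 31);
    }
    size_t operator()(uint64_t x) const {
        // per-run random seed: an adversary cannot precompute colliding keys
        static const uint64_t SEED =
            chrono::steady_clock::now().time_since_epoch().count();
        return splitmix64(x + SEED);
    }
};

unordered_set<long long, custom_hash> safe_set;  // drop-in replacement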
Thanks for the fast editorial.
Proof for E?
For each edge, assign a positive end to one node and a negative end to the other. The positive node gets a value of +degree(node) and the negative node a value of -degree(node). Now you delete one node. Each disconnected region is then affected by exactly one deleted edge, and we know the sum of the values contributed by the edges which are not deleted is 0. So whatever sum the disconnected regions have, it will be -1 if the deleted node was positive, or +1 if the deleted node was negative.
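To put the same argument in symbols: color the tree so that adjacent vertices get opposite signs $$$s(v) \in \{+1, -1\}$$$ and set $$$w_v = s(v) \cdot \deg(v)$$$. For any component $$$C$$$ left after deleting $$$u$$$, every edge with both endpoints in $$$C$$$ contributes $$$+1$$$ to one end and $$$-1$$$ to the other, and exactly one edge of $$$C$$$ used to lead to $$$u$$$, so $$$\sum_{v \in C} w_v = 0 + s(\text{its endpoint in } C) = -s(u)$$$, the same nonzero value for every component.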
If you are/were getting a WA/RE verdict on problems from this contest, you can get the smallest possible counter example for your submission on cfstress.com. To do that, click on the relevant problem's link below, add your submission ID, and edit the table to increase/decrease the constraints.
I've also added a new feature to view all the hacks and small testcases for any CF problem. However, for this contest, only problem D has hack testcases of length less than 255.
View hacks for D: K-Good
If you are not able to find a counter example even after changing the parameters, reply to this thread, mentioning the contest_id, problem_index and submission_id.
The editorial to E reads to me like some kind of magic, where you must hope a higher power whispers the right construction into your ears. There's a straightforward way to reach this solution without guessing some construction.
Root the tree at node 1. Let $$$S_u$$$ be the subtree sum for vertex $$$u$$$ and $$$p_u$$$ be the parent of $$$u$$$. Consider an arbitrary vertex $$$u \neq 1$$$ and its children $$$c_1, c_2, \cdots, c_k$$$. Then, if you remove vertex $$$u$$$, it is clear that $$$S_{c_1} = S_{c_2} = \cdots = S_{c_k} = S_1 - S_u$$$.
The rightmost term is the sum of the rest of the tree, not counting $$$u$$$ and its children. From this, we find that for any $$$u$$$, $$$S_u + S_{p_u} = S_1$$$. This is how the idea of bicoloring becomes clear: the sums of $$$u$$$ and $$$p_{p_u}$$$ must be the same by this identity. Now, what should the sum for each colour be? Neither can be 0, since then the leaves might have value 0. So try $$$S_1 = 0$$$, with the sums of the two colours being 1 and -1. Through this, you reach the same conclusion as the editorial.
A straightforward solution of E for those who are bad at finding explicit constructions:
Let the weight in the root be $$$W$$$ and sum in each of the root’s subtrees be $$$S$$$
Weight of every node can be written as $$$aW+bS$$$
We can find $$$a$$$ and $$$b$$$ for every node by running a dfs, just keep track of (1) sum in the current subtree and (2) sum in the nodes outside of it
Now we can effectively compute weight of each node for fixed $$$W$$$ and $$$S$$$
At this point, just brute-force different combinations of $$$W$$$ and $$$S$$$ until you find one for which all the weights are non-zero and smaller than $$$10^5$$$
Thanks a lot. After reading the tutorial, I thought that it was impossible for me to transform the problem into that conclusion, too abstract for me. Your comment is very helpful for fully understanding this problem.
this is how the idea of bicoloring becomes clear
Could you elaborate a bit more on why we should use bicoloring here, and assign weights according to degree and color for each node?
#include<bits/stdc++.h>
// #include<ext/pb_ds/assoc_container.hpp> // #include<ext/pb_ds/tree_policy.hpp>
#define int long long int
#define F first
#define S second
#define pb push_back
#define endl "\n"
#define all(v) v.begin(),v.end()
using namespace std; // using namespace __gnu_pbds;
// typedef tree<pair<int, int>, null_type, less<pair<int, int>>, rb_tree_tag, tree_order_statistics_node_update > pbds;
void solve() {
    int n, k;
    cin >> n >> k;
    int arr[n];
    for(int i{}; i < n; i++) cin >> arr[i];
    sort(arr, arr+n);
    int i{}, j = n-1;
    bool f = 0;
    while(i < j) {
        if(arr[j]-arr[i] == k) { f = 1; break; }
        else if((arr[j]-arr[i]) > k) j--;
        else i++;
    }
    if(f) cout << "YES\n"; else cout << "NO\n";
}
int32_t main() {
    // the #ifndef ONLINE_JUDGE block and the test-case loop were garbled in
    // the original comment; restored minimally so the snippet runs
    int t; cin >> t;
    while(t--) solve();
}
Why don't you put the code in a spoiler, and why are you using float?
Sorry, I didn't know about that. I will keep it in mind next time. Actually, I just used the boilerplate from an earlier question where I was using a float variable; that's why.
Thanks for your suggestion. I changed float to int and it worked; I'm just confused why it gives a wrong result with float.
You should edit your comment and add code to a spoiler. It hinders scrolling down for others.
Float and int are the same number of bytes, but a float only uses 23 bits to store the precise value of the number (and the other 8 bits to store the exponent/magnitude + 1 bit for the sign), so for large numbers, a float can't store the exact value. For this problem, the values of $$$k$$$ are large enough to need 30 bits to be stored. So tl;dr, float causes precision issues here.
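A standalone snippet (not from the submission above) that makes this rounding visible; near $$$10^9$$$, consecutive floats are 64 apart:

#include <cstdio>
int main() {
    int k = 1000000001;      // needs 30 significant bits
    float f = (float)k;      // float keeps only 24 significant bits
    printf("%d\n", (int)f);  // prints 1000000000: the value was rounded
}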
I don't get why in D we consider sum of remainders as $$$ 1 + 2 + ... + k$$$, shouldn't it be $$$0 + 1 + 2 + ... + k - 1$$$?
positive integers
ah, indeed
Still, 0, 1, ..., k-1 are k distinct remainders, where k is a positive number (k > 0), so why shouldn't the remainders be 0, 1, ..., k-1?
$$$0 + ... + k - 1 \equiv 1 + ... + k$$$, if that's what you're saying. I don't really understand your comment to be honest.
No, I am not saying this. n will be the sum of k positive integers.
Say $$$n = 6$$$; then $$$6 = 1 + 2 + 3$$$ and the remainders are (0, 1, 2), hence the three distinct remainders are 0, 1, 2... so the sum of remainders is $$$0 + 1 + \dots + (k-1)$$$, in this case with $$$k = 3$$$.
To eliminate the condition of positive integers, they assume the remainder is k instead of 0. In your case, put 3 there instead of 0, as either way you are going to add some positive multiple of 3 to it.
Can you explain the rest of problem D to me? I am not getting it.
How do you make a checker for D?
Unless I’m misunderstanding the question, it seems like writing the checker should be pretty straightforward: the authors specify conditions for n to be k-good on the first few lines of the editorial, so you can just check if these conditions are satisfied (or, if the program prints -1, check if N was a power of two).
Oh, you're right. I misunderstood the editorial. My bad.
I think checking the following conditions would be fine:
$$$ (2 \cdot n) \% k = 0 $$$
$$$ \dfrac{2 \cdot n}{k} + 1 - k $$$ is positive and even.
Just check:
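Here is a minimal sketch of such a checker, written directly from the two conditions above (the function name is mine):

#include <bits/stdc++.h>
using namespace std;
typedef long long ll;
// n is k-good iff 2n is divisible by k and 2n/k + 1 - k is positive and even
bool is_k_good(ll n, ll k) {
    if (k < 2) return false;                // k must be at least 2
    if ((2 * n) % k != 0) return false;     // first condition
    ll r = (2 * n) / k + 1 - k;             // equals 2*(c + 1) for some c >= 0
    return r > 0 && r % 2 == 0;             // second condition
}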
Why did this fail for B? https://mirror.codeforces.com/contest/1656/submission/150765827
For question C, the editorial says that if 1 is not present in the array then the answer will always be YES, but what if the array contains only the two elements 7 and 8? What should the answer be then?
The author says that if we have elements with difference 1 along with a 1 in the array, such as 1, 7, 8, the answer is NO. But in the case you describe we get YES, as there is no 1 in the array and no 0. This is said in the first line of the editorial: if there is no 1 in the array, the answer is always YES.
Yes, because the first operation will be: 8 mod 8 = 0, 7 mod 8 = 7; and the second operation will be: 0 mod 7 = 0, 7 mod 7 = 0.
Can someone explain the solution to problem D to me in simpler language? I was able to figure out that for odd n a valid answer is always 2, and that if n is a power of 2 the answer is -1. But I was not able to figure out the case of even n that is not a power of 2. Can someone help?
I: It is easy to see that the graph doesn't satisfy the condition if it contains $$$K_4$$$ or $$$K_{2,3}$$$ as a minor, and graphs that don't contain those minor graphs are called outerplanar. It is also clear that outerplanar graphs have correct neighbor ordering, so the whole problem is almost equivalent to checking if the graph is outerplanar.
I used another solution to pass problem F, the idea is as follows:
First, prove (or write brute force to check) that $$$f(t)$$$ is a convex function.
Then we devise the following $$$\mathcal O(n)$$$ algorithm to calculate $$$f(t)$$$ given $$$t$$$:
Using Prim's algorithm, in each step, we want to find the unconnected node that is the cheapest to connect to, which we can see is either the one with the largest or the smallest value of $$$a_i$$$ in the unconnected nodes. We also see that the edge will come out from the connected set from either the one with the largest or the smallest value of $$$a_i$$$ among them. Therefore, we only have 4 possible cases to handle for each step and so we can construct the MST and its weight in $$$\mathcal O(n)$$$.
Now we can directly do a binary search for the optimal value of $$$t$$$, and do some special handling for the INF cases. Time complexity: $$$\mathcal O(n \lg n + n \lg a_i)$$$.
Problem B "since the previous substractions are cancelled", what does this sentence mean ?
Let's take four numbers a,b,c,d
{a, b, c, d} -> {a-b, c-b, d-b} -> {a-b-c+b, d-b-c+b} = {a-c, d-c}
You can see that the -b which was there in all the terms gets cancelled.
I have solved a similar observation based problem few days back: https://atcoder.jp/contests/arc135/tasks/arc135_c
Another way to think is that if there exists a pair that has a difference k, then it's always possible to do subtractions in some order because the relative difference between them is not going to change.
Can someone explain D in simple terms?
Was my solution right? It doesn't look right at all, but it passed the system tests. Link: https://mirror.codeforces.com/contest/1656/submission/150790452
Problem I can be analyzed somewhat simply using SPQR trees. (Some motivation: at a glance, the idea of ordering the edges incident to each vertex and looking at cycles is very reminiscent of picking a planar embedding of the graph. Alternatively, we can notice that 3 parallel nontrivial vertex-disjoint paths can get us into trouble because they form 3 cycles oriented different ways, so we are motivated to look at triconnectivity.)
Note that, similar to planar embeddings, if there exists a good neighbor ordering of the whole graph, there exists a good neighbor ordering of any graph minor (a subgraph formed by deleting vertices, deleting edges, or contracting paths into edges). This is straightforward to construct, we can make path contractions preserve the ordering by inserting the new edge into the orderings by the locations of its starting and ending edge.
Let's consider a single biconnected component at a time. An SPQR tree consists of the triconnected components of a graph, where each SPQR tree edge represents a 2-vertex cut of the graph, and each SPQR node represents a triconnected component with each adjacent triconnected component compressed into a virtual edge.
Consider a particular SPQR node. Note that each virtual edge is "spanned" by at least one real path in the adjacent component, so the triconnected component is a graph minor. Thus, we can restrict any global good neighbor ordering to a good neighbor ordering of this triconnected component.
Additionally, if the adjacent component is nontrivial (more than a single real edge), then there is a spanning path of length at least 2, which induces a "directionality" to the virtual edge: when going from top to bottom, we are forced to have either $$$v_1 <_{v_2} v_3$$$ or vice versa. This suggests that we should actually replace each nontrivial virtual edge with a path of length 2. Indeed, if we find a good neighbor ordering for each triconnected component with virtual edges replaced by paths of length 2, then we can uniquely glue the triconnected components back together into a good neighbor ordering of the whole graph, using the neighbor ordering of each virtual edge's midpoint to decide whether to reverse each component's ordering or not.
Hence, there is a global good neighbor ordering if and only if there is a good neighbor ordering of each triconnected component, with virtual edges replaced by paths of length 2.
Now, all that remains is to analyze each type of triconnected component (S, P, and R).
S nodes (cycles) are straightforward to order: we can pick either direction of the cycle, and order each node's predecessor before its successor.
P nodes (parallel edges) require a little analysis. We can check that a P node with at least 3 virtual edges is necessarily bad (2 vertices with 3 nontrivial vertex-disjoint paths), since the "middle" path cannot satisfy both the cycle with the earlier one and the cycle with the later one. In a P node with 2 virtual edges and 1 real edge, the real edge must be the middle one to prevent this issue.
R nodes each contain a $$$K_4$$$ as a graph minor. It is simple to verify that there is no good neighbor ordering of a $$$K_4$$$, so R nodes do not have good neighbor orderings.
To summarize, the SPQR tree of the graph must contain only S nodes and P nodes with 2 virtual edges and 1 real edge. Such graphs look like several cycles glued together on edges, forming a tree of cycles (kind of like a cactus, but glued at edges not vertices). If we walk along the exterior, we can find the Hamiltonian path as described in the editorial.
Here is a solution implementing this idea, using a (work-in-progress) linear time SPQR tree template. 150839781 We could also write a simpler implementation by just using the SPQR template to identify the Hamiltonian cycles, and then sorting the edges like the editorial.
I got TLE in B (failed system tests) because I used unordered_set in my program /(ㄒoㄒ)/~~. I couldn't believe it!
And I solved the problem by changing unordered_set into set. This used unordered_set: 150731295. This used set: 150837224.
Idiots! They're all idiots!
good job
Excuse me, what happened? I'm a little curious.
TheoryForces.
WeakPretestForces
Can someone explain this approach 150742723
See, according to the question, we need to write
$$$n = a_0 k + (a_1 k + 1) + \dots + (a_{k-1} k + (k-1))$$$, where $$$a_0 \ge 1$$$ while $$$a_i \ge 0$$$ for all $$$i \ge 1$$$.
Now, if we write $$$a_0$$$ as $$$1 + a_{00}$$$, then $$$n = (k + a_{00} k) + (a_1 k + 1) + \dots + (a_{k-1} k + (k-1))$$$,
which reduces to $$$n = k(1 + a_{00} + a_1 + a_2 + \dots + a_{k-1}) + k(k-1)/2$$$,
which can be rewritten as $$$n = kC + k(k+1)/2$$$, where $$$C = a_{00} + a_1 + \dots + a_{k-1}$$$.
Simplifying again, $$$n = k(k + 2C + 1)/2$$$,
i.e. $$$n$$$ has to be expressed as odd*even/2 or even*odd/2 [if the first factor is even, the second is odd, and vice versa], and
$$$k = \min(\text{even}, \text{odd})$$$.
That's why in the code he first takes out all the powers of 2, so the leftover part is odd, and then prints the min of the two; if the min is 1, the answer is -1. He multiplies an extra 2 into the final answer because $$$n = \text{even} \cdot \text{odd}/2$$$, i.e. $$$2n = \text{even} \cdot \text{odd}$$$.
Link to my code (in case you need it): 150843839
Why do we need min(even, odd)?
Because $$$k = \min(k, k + 2C + 1)$$$; remember $$$C \ge 0$$$.
But for the odd part, why are we taking the maximum odd factor of n? Say $$$n = 3 \cdot 5 \cdot 7 \cdot 2^p$$$; then the odd factor could have been 3 as well, instead of $$$3 \cdot 5 \cdot 7$$$. Can someone clarify?
Yes, you can, but there is no need to, because you check the odd factor only if $$$2^{x+1}$$$ fails, and the reason $$$2^{x+1}$$$ fails is that it is larger than roughly $$$\sqrt{2n}$$$, which means the odd part is smaller than that bound and hence small enough to be considered. The problem with considering smaller odd factors is that you would need to do prime factorization, which will give TLE as $$$n \le 10^{18}$$$.
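The derivation above fits in a few lines; a sketch of the $$$k = \min(\text{even}, \text{odd})$$$ idea (mine, equivalent to the editorial's code), assuming $$$n \le 10^{18}$$$:

#include <bits/stdc++.h>
using namespace std;
typedef long long ll;
// Write 2n = 2^(x+1) * odd. The smaller of the two factors is always a valid
// k; if the odd part is 1 (n is a power of two), there is no answer.
ll solve_k_good(ll n) {
    ll odd = n;
    int x = 0;
    while (odd % 2 == 0) { odd /= 2; ++x; }
    if (odd == 1) return -1;
    ll even = 2LL << x;      // 2^(x+1), so even * odd == 2n
    return min(even, odd);   // the factors differ in parity, so they differ
}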
How to prove E?
If you have read the editorial of E, you would see this:
Consider removing one vertex u. In each subtree, for each edge not incident with u, +1 will be added for one of the endpoints and −1 for the other endpoint, so the total contribution is 0. For the one edge in each subtree incident with u, the contribution will be +1 or −1 depending on the color of u. So the sums of the subtrees will all be equal to +1 or −1.
thx
see this -LINK
thx
In the editorial of D, I can't understand these words: "Note that, if $$$k$$$ is even, the second condition is $$$n \equiv \frac{k}{2} \pmod{k}$$$". Can anybody tell me why this is so, please?
Originally it was $$$n \bmod k = (k(k+1)/2) \bmod k$$$. Now we can rewrite this as
$$$n \bmod k = (((k/2) \bmod k) \cdot ((k+1) \bmod k)) \bmod k$$$; simplifying further,
$$$n \bmod k = (((k/2) \bmod k) \cdot 1) \bmod k$$$,
$$$n \bmod k = (k/2) \bmod k$$$.
Ok got it. Thanks !
It would be great for beginners like me if someone who solved E during the contest (or even after it, but without the help of the editorial) could share their thought process. I got the solution after reading the editorial, but I'm still confused about how to think of and reach this solution on my own. If anyone has some other solution, please share.
Thanks in advance.
You can think about it like this. Root the tree at $$$1$$$, and then consider any node $$$u$$$. When we take away $$$u$$$, we are left with $$$u$$$'s children's subtrees, and the rest of the tree above $$$u$$$. Let's call the sum of the whole tree $$$T$$$, the sum of any of $$$u$$$'s children's subtrees $$$m$$$ (note that $$$m$$$ is the same for all of $$$u$$$'s subtrees), $$$u$$$'s value $$$L$$$, and the count of $$$u$$$'s children $$$c$$$.
Because the component above $$$u$$$ must have the same sum as $$$m$$$, it must be the case that $$$T - (c \cdot m + L) = m$$$. Solving for $$$L$$$, we find that $$$L = T - (c + 1) \cdot m$$$. Notice that $$$c + 1$$$ is the degree of $$$u$$$. If we try to let $$$T = 0$$$ and set $$$m = 1$$$ for the bottom-most leaves, we find that the weights of the nodes $$$u \ge 2$$$ alternate between $$$c + 1$$$ and $$$-c - 1$$$. The root is the only exception, since setting it equal to either of those would make $$$|T| > 0$$$. The root's value $$$R$$$ must satisfy $$$R + \sum_{u \ge 2} w_u = 0$$$ (where $$$w_u$$$ is the weight of node $$$u$$$), so $$$R = -\sum_{u \ge 2} w_u$$$.
It is true that none of these nodes' values ever reach $$$0$$$ if we set $$$T = 0$$$ and $$$|m| = 1$$$. For any non-leaf $$$u \ge 2$$$, $$$|\pm (c + 1) \cdot m| > 0$$$ (for any leaf it is $$$\pm 1$$$). Since the root has at least $$$1$$$ subtree, its sum, being the negative of the sum of all of its subtrees, is not $$$0$$$.
thanks a lot!
I don't like that explanation for E because it doesn't show the ideas that lead to the solution. For me, this idea is the best way to explain what is happening:
Look at the graph and assume that vertex $$$V$$$ will be removed. Obviously, for every edge we would give +1 to one incident vertex and -1 to the other, but there is a problem: we remove the edges incident to $$$V$$$, so the sums cannot all be 0. We would have to swap +1 and -1 on some edges. That idea leads to applying graph coloring.
That's my issue with most editorials: they show the solution but not the "path" or thought process that can lead to that solution.
In the editorial of D, what is this architecture-specific function? I could not find it on Google.
In C++ you have __builtin_ctz(x), which counts the number of trailing zero bits of x, i.e. how many times 2 divides x.
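As a small illustration (not from the editorial), the loop "while(x % 2 == 0) x /= 2;" in the solution above can be replaced by a single shift:

#include <bits/stdc++.h>
using namespace std;
typedef long long ll;
ll odd_part(ll n) {
    // __builtin_ctzll(n) is the number of trailing zero bits of n (n != 0),
    // i.e. the exponent of 2 in its factorization
    return n >> __builtin_ctzll(n);
}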
Love problems like C
Why is this code failing for problem C? 150995742
checked your code:
should be put in
rather than
which is in yours
In editorial of D: "all odd candidates of k must be odd divisors of n ". Explain this please
Can someone explain F a little bit more? I don't get where $$$-t^2$$$ went?
Why does this condition check whether it goes to infinity?
What was the motivation behind the $$$4 \cdot 10^{36}$$$ constraint for H?
I guess people could use some highly-optimized prime factorization to factorize everything in $$$O(nU^3)$$$ or something rather than use the gcd trick to get "prime" factors in $$$O(n^2U)$$$, where by "prime" I mean, for example, 6 is not prime, but if everything in the input which is divisible by either 2 or 3 is also divisible by 6, then 6 can be treated as prime for the purpose of the problem.
But still, surprising to see such a large constraint. I think it is the first time I have seen a problem where int128 is required just to read the input.
If constraints were only up to $$$10^{18}$$$, all numbers could be factorized using the following strategy: first, take out all small ($$$\leq 10^6$$$) prime factors; then each number can have at most $$$2$$$ big prime factors, which, if they appear separately, can be found by calculating the gcd with all other numbers, and if they appear together, the product of the two factors can be considered a prime, as you said.
Wait, so using the gcd method to split apart the two big prime factors was not intended? In my solution (151180177), I used it to factor all of the numbers in $$$O(n^2U\log n)$$$. Maybe the constraints should have been $$$8 \cdot 10^{72}$$$ :)
Why would doubling $$$U$$$ prevent this solution?
It wouldn't, I'm just joking that they made unconventional constraints requiring int128 to read the input, just to prevent a specific kind of unintended solution, but actually they didn't prevent the solution.
Also on looking at my solution a second time, I think it's also $$$O(n^2U)$$$, I don't remember where the $$$\log n$$$ came from.
:O i see
although yeah 1e18 can be factored quickly enough to pass, so 1e36 was necessary
Why does problem I have a rating of only 1800?
How does one develop intuition for a problem like D ? I was able to solve the problem till the part where we had to check for $$$k_1$$$, but I got stuck after that. How did you guys figure out that we're supposed to consider a number $$$k_2 = \frac{2n}{k_1}$$$ and that it would give the correct answer? Some insight as to how to get these ideas naturally would be really helpful.
We did not magically construct that number, we were, in fact, looking for it.
We know that $$$n$$$ is $$$k$$$-good iff $$$n \geq k(k+1)/2$$$ (condition 1) and $$$n \equiv k(k+1)/2 \pmod{k}$$$ (condition 2).
(The editorial skips this segment.) Suppose $$$k$$$ is odd. We have $$$k(k+1)/2 = k \cdot \frac{k+1}{2} \equiv 0 \pmod{k}$$$, since $$$(k+1)/2$$$ is an integer.
Hence, any odd divisor of $$$n$$$ satisfies the second condition. What is the best candidate to satisfy the first condition? Answer: the smallest odd divisor of $$$n$$$.
However, finding the smallest odd divisor of $$$n$$$ is not a trivial task (in fact, prime detection would follow as a consequence of it, since the smallest odd divisor of a prime is the number itself). Of course, for this problem, when we talk about divisors, we mean numbers greater than 1.
So now, we have a correct but inefficient algorithm to find the answer when $$$n$$$ has at least one odd divisor. Since we are unable to progress further, let's move on to the second segment, i.e, $$$k$$$ is even.
For even $$$k$$$, the editorial does a good job of explaining that all valid candidates would be a divisor of $$$2n$$$, but not a divisor of $$$n$$$.
What does it look like visually? Write $$$n = 2^x \cdot pod$$$, where $$$pod$$$ is the odd part of $$$n$$$ (the product of its odd prime factors).
The smallest candidate is naturally $$$k = 2 \cdot 2^x$$$. (Note that the first 2 came from the left half of $$$2n$$$, and $$$2^x$$$ came from $$$n$$$.) If this candidate satisfies condition 1, we are done. Otherwise, no other candidate can satisfy that condition for an even $$$k$$$. At this point, the analysis for even $$$k$$$ ends, but since the editorial mentions $$$k_2 = \frac{2n}{k_1}$$$, it looks like this analysis is still ongoing (which is not true).
Now, we have a correct but slow algorithm for the original problem. The answer is an odd divisor of $$$n$$$ if it exists. If it does not, try $$$k = 2*2^x$$$ where $$$x$$$ is the exponent of 2 in the prime factorization of $$$n$$$. If this $$$k$$$ is not valid, the answer is $$$-1$$$.
To optimize the time complexity, notice that the first condition states that $$$n \geq k*(k + 1)/2$$$. Making some slight approximations, we can say that $$$n \geq k*k$$$ or $$$k \leq \sqrt{n}$$$. Hence, if we can somehow find ANY odd divisor of $$$n$$$ which is $$$\leq \sqrt{n}$$$, we are done.
Recall an obvious fact, for every divisor $$$div > \sqrt{n}$$$, the complement divisor $$$div ^\complement = \frac{n}{div}$$$ is $$$\leq \sqrt{n}$$$.
Let's circle back to the even $$$k$$$ case and recall that $$$k_1 = 2 \cdot 2^x$$$ was a possible candidate. If this candidate did not satisfy condition 1, it means that $$$k_1 > \sqrt{n}$$$, or roughly $$$2^x > \sqrt{n}$$$. Hence, the complement divisor of $$$2^x$$$ should be $$$\leq \sqrt{n}$$$. And voila, what's the complement divisor of $$$2^x$$$? It's $$$pod$$$ (the odd part that we had defined earlier). Now we have an odd divisor that satisfies condition 1, hence we have also solved the odd $$$k$$$ case efficiently. Also notice that this is the largest odd divisor, because the largest even divisor was $$$> \sqrt{n}$$$.
And by the way, did you notice that the fancy $$$k_2 = \frac{2n}{k_1}$$$ is nothing but $$$pod$$$ in our definition, i.e, $$$k_2$$$ is the number remaining when you remove all exponents of $$$2$$$ in the prime factorization of $$$n$$$.
In retrospect, it worked because of the property of complement divisor. If we couldn't find an answer for even $$$k$$$ case, we were destined to find the answer for odd $$$k$$$ by taking the complement divisor (or prove that the answer does not exist). Since you asked for intuition, I made some rough approximations here and there, (leaving out a factor of 2 while comparing with $$$\sqrt{n}$$$ but you can refer to the editorial for the actual proof).
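A concrete instance (numbers of my choosing): take $$$n = 48 = 2^4 \cdot 3$$$. The even candidate is $$$k_1 = 2 \cdot 2^4 = 32$$$, but $$$32 \cdot 33/2 = 528 > 48$$$, so it fails condition 1. Its complement in $$$2n = 96$$$ is $$$k_2 = 96/32 = 3$$$, the odd part, and indeed $$$3 \cdot 4/2 = 6 \le 48$$$ and $$$48 \equiv 0 \pmod 3$$$, so $$$k = 3$$$ works: $$$48 = 3 + 22 + 23$$$.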
Thank you for such a detailed reply. I also tried the smallest odd factor idea but soon realized that it would be difficult to compute in the appropriate time complexity. I think the main link that I was missing was the complement divisor idea, which you did a great job explaining. Thanks a lot.
Can anyone give a proper explanation of D?
(O(1) with some architecture-specific functions)
It's 2 * lowbit, I think.