Idea: adedalic
Tutorial
Solution (adedalic)
fun main() {
    repeat(readLine()!!.toInt()) {
        val (l, r) = readLine()!!.split(' ').map { it.toInt() }
        println(if (2 * l > r) "YES" else "NO")
    }
}
1437B - Reverse Binary Strings
Idea: adedalic
Tutorial
Solution (adedalic)
#include <bits/stdc++.h>
using namespace std;

#define fore(i, l, r) for(int i = int(l); i < int(r); i++)
#define sz(a) int((a).size())

const int INF = int(1e9);

int n;
string s;

inline bool read() {
    if (!(cin >> n >> s))
        return false;
    return true;
}

int cntSame(const string &s) {
    int ans = 0;
    fore (i, 1, sz(s))
        ans += (s[i - 1] == s[i]);
    assert(ans % 2 == 0);
    return ans / 2;
}

inline void solve() {
    int ans = INF;
    fore (k, 0, 2) {
        ans = min(ans, cntSame(string(1, '0' + k) + s + string(1, '1' - k)));
    }
    cout << ans << endl;
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(0), cout.tie(0);
    cout << fixed << setprecision(15);

    int tc; cin >> tc;
    while (tc--) {
        assert(read());
        solve();
    }
    return 0;
}
Idea: BledDest
Tutorial
Solution 1 (pikmike)
#include <bits/stdc++.h>

#define forn(i, n) for (int i = 0; i < int(n); i++)

using namespace std;

const int INF = 1e9;

void solve(){
    int n;
    scanf("%d", &n);
    vector<int> t(n);
    forn(i, n){
        scanf("%d", &t[i]);
        --t[i];
    }
    sort(t.begin(), t.end());
    vector<vector<int>> dp(n + 1, vector<int>(2 * n, INF));
    dp[0][0] = 0;
    forn(i, n + 1) forn(j, 2 * n - 1) if (dp[i][j] < INF){
        if (i < n) dp[i + 1][j + 1] = min(dp[i + 1][j + 1], dp[i][j] + abs(t[i] - j));
        dp[i][j + 1] = min(dp[i][j + 1], dp[i][j]);
    }
    printf("%d\n", dp[n][2 * n - 1]);
}

int main() {
    int q;
    scanf("%d", &q);
    forn(_, q) solve();
}
Solution 2 (pikmike)
#include <bits/stdc++.h>

#define forn(i, n) for (int i = 0; i < int(n); i++)

using namespace std;

template<typename T>
T hungarian(const vector<vector<T>>& cost) {
    const T INF = numeric_limits<T>::max();
    int n = cost.size(), m = cost[0].size();
    vector<T> u(n + 1), v(m + 1), dist(m + 1);
    vector<int> p(m + 1), way(m + 1), used(m + 1);
    for (int i = 1; i <= n; ++i) {
        p[0] = i;
        int j0 = 0;
        fill(dist.begin(), dist.end(), INF);
        do {
            used[j0] = i;
            int i0 = p[j0], j1 = -1;
            T delta = INF;
            for (int j = 1; j <= m; ++j) if (used[j] != i) {
                T cur = cost[i0 - 1][j - 1] - u[i0] - v[j];
                if (cur < dist[j]) dist[j] = cur, way[j] = j0;
                if (dist[j] < delta) delta = dist[j], j1 = j;
            }
            forn(j, m + 1) {
                if (used[j] == i) u[p[j]] += delta, v[j] -= delta;
                else dist[j] -= delta;
            }
            j0 = j1;
        } while (p[j0] != 0);
        for (int j1; j0; j0 = j1)
            p[j0] = p[j1 = way[j0]];
    }
    return -v[0];
}

void solve(){
    int n;
    scanf("%d", &n);
    vector<int> t(n);
    forn(i, n){
        scanf("%d", &t[i]);
        --t[i];
    }
    vector<vector<int>> cost(n, vector<int>(2 * n));
    forn(i, n) forn(j, 2 * n) cost[i][j] = abs(t[i] - j);
    printf("%d\n", hungarian(cost));
}

int main() {
    int q;
    scanf("%d", &q);
    forn(_, q) solve();
}
1437D - Minimal Height Tree
Idea: adedalic
Tutorial
Solution (adedalic)
#include <bits/stdc++.h>
using namespace std;

#define fore(i, l, r) for(int i = int(l); i < int(r); i++)
#define sz(a) int((a).size())
#define x first
#define y second

typedef long long li;
typedef pair<int, int> pt;

const int INF = int(1e9);
const li INF64 = li(1e18);

int n;
vector<int> a;

inline bool read() {
    if (!(cin >> n))
        return false;
    a.resize(n);
    fore (i, 0, n)
        cin >> a[i];
    return true;
}

inline void solve() {
    vector<int> h(n, INF);
    h[0] = 0;
    int lst = 0;
    fore (i, 1, n) {
        if (i - 1 > 0 && a[i - 1] > a[i])
            lst++;
        h[i] = h[lst] + 1;
    }
    cout << h[n - 1] << endl;
}

int main() {
#ifdef _DEBUG
    freopen("input.txt", "r", stdin);
    int tt = clock();
#endif
    ios_base::sync_with_stdio(false);
    cin.tie(0), cout.tie(0);
    cout << fixed << setprecision(15);

    int tc; cin >> tc;
    while (tc--) {
        read();
        solve();
#ifdef _DEBUG
        cerr << "TIME = " << clock() - tt << endl;
        tt = clock();
#endif
    }
    return 0;
}
Idea: Neon
Tutorial
Solution (Ne0n25)
#include <bits/stdc++.h>
using namespace std;

#define forn(i, n) for (int i = 0; i < int(n); ++i)

const int N = 500 * 1000 + 13;

int n, k;
int a[N], b[N];

int main() {
    scanf("%d%d", &n, &k);
    forn(i, n) scanf("%d", &a[i + 1]);
    a[0] = -1e9;
    a[n + 1] = 2e9;
    forn(i, n + 2) a[i] -= i;
    forn(i, k) scanf("%d", &b[i + 1]);
    b[k + 1] = n + 1;
    int ans = 0;
    forn(i, k + 1) {
        int l = b[i], r = b[i + 1];
        if (a[l] > a[r]) {
            puts("-1");
            return 0;
        }
        vector<int> lis;
        for (int j = l + 1; j < r; ++j) if (a[l] <= a[j] && a[j] <= a[r]) {
            auto pos = upper_bound(lis.begin(), lis.end(), a[j]);
            if (pos == lis.end()) lis.push_back(a[j]);
            else *pos = a[j];
        }
        ans += (r - l - 1) - int(lis.size());
    }
    printf("%d\n", ans);
}
Idea: BledDest
Tutorial
Solution (BledDest)
#include <bits/stdc++.h>
using namespace std;

const int N = 5043;
const int MOD = 998244353;

int dp[N][N];
int pdp[N][N];
int cntLess[N];
int lastLess[N];
int a[N];
int n;

int add(int x, int y)
{
    x += y;
    while (x >= MOD) x -= MOD;
    while (x < 0) x += MOD;
    return x;
}

int sub(int x, int y)
{
    return add(x, MOD - y);
}

int mul(int x, int y)
{
    return (x * 1ll * y) % MOD;
}

int main()
{
    cin >> n;
    for (int i = 0; i < n; i++)
        cin >> a[i];
    sort(a, a + n);
    for (int i = 0; i < n; i++)
    {
        cntLess[i] = 0;
        lastLess[i] = -1;
        for (int j = 0; j < n; j++)
            if (a[j] * 2 <= a[i])
            {
                lastLess[i] = j;
                cntLess[i]++;
            }
    }
    for (int i = 0; i < n; i++)
    {
        dp[i][1] = 1;
        pdp[i + 1][1] = add(pdp[i][1], dp[i][1]);
    }
    for (int k = 2; k <= n; k++)
        for (int i = 0; i < n; i++)
        {
            if (cntLess[i] + 1 >= k)
                dp[i][k] = add(mul(dp[i][k - 1], add(cntLess[i], sub(2, k))), pdp[lastLess[i] + 1][k - 1]);
            else
                dp[i][k] = 0;
            //cerr << i << " " << k << " " << dp[i][k] << endl;
            pdp[i + 1][k] = add(pdp[i][k], dp[i][k]);
        }
    cout << dp[n - 1][n] << endl;
}
Idea: BledDest
Tutorial
Solution 1 (pikmike)
#include <bits/stdc++.h>

#define forn(i, n) for (int i = 0; i < int(n); i++)

using namespace std;

const int AL = 26;

vector<int> val;
vector<int> pos;

struct aho_corasick {
    struct node {
        map<int, int> nxt, go;
        int p, pch;
        int suf, ssuf;
        multiset<int> vals;
        bool term;

        node() {
            nxt.clear();
            go.clear();
            suf = ssuf = -1;
            term = false;
            vals.clear();
            p = -1, pch = -1;
        }
    };

    vector<node> nodes;

    aho_corasick() {
        nodes = vector<node>(1, node());
    }

    int add(const string& s) {
        int v = 0;
        forn(i, s.size()) {
            int c = s[i] - 'a';
            if (!nodes[v].nxt.count(c)) {
                nodes.push_back(node());
                nodes[v].nxt[c] = int(nodes.size()) - 1;
                nodes.back().p = v;
                nodes.back().pch = c;
            }
            v = nodes[v].nxt[c];
        }
        nodes[v].term = true;
        nodes[v].vals.insert(0);
        return v;
    }

    int feed(const string &s){
        int v = 0;
        int ans = -1;
        forn(i, s.size()){
            int c = s[i] - 'a';
            v = go(v, c);
            int u = v;
            while (u != 0){
                if (!nodes[u].vals.empty())
                    ans = max(ans, *nodes[u].vals.rbegin());
                u = ssuf(u);
            }
        }
        return ans;
    }

    int go(int v, int c) {
        if (nodes[v].go.count(c))
            return nodes[v].go[c];
        if (nodes[v].nxt.count(c))
            return nodes[v].go[c] = nodes[v].nxt[c];
        if (v == 0)
            return nodes[v].go[c] = 0;
        return nodes[v].go[c] = go(suf(v), c);
    }

    int suf(int v) {
        if (nodes[v].suf != -1)
            return nodes[v].suf;
        if (v == 0 || nodes[v].p == 0)
            return nodes[v].suf = 0;
        return nodes[v].suf = go(suf(nodes[v].p), nodes[v].pch);
    }

    int ssuf(int v) {
        if (nodes[v].ssuf != -1)
            return nodes[v].ssuf;
        if (v == 0 || nodes[v].p == 0)
            return nodes[v].ssuf = 0;
        int s = suf(v);
        if (nodes[s].term)
            return nodes[v].ssuf = s;
        return nodes[v].ssuf = ssuf(s);
    }
};

aho_corasick ac;

int main() {
    int n, m;
    ios::sync_with_stdio(!cin.tie(0));
    cin >> n >> m;
    pos.resize(n);
    val.resize(n, 0);
    vector<int> tp2;
    ac = aho_corasick();
    forn(i, n){
        string s;
        cin >> s;
        pos[i] = ac.add(s);
    }
    forn(i, m){
        int t;
        cin >> t;
        if (t == 1){
            int j, x;
            cin >> j >> x;
            --j;
            ac.nodes[pos[j]].vals.erase(ac.nodes[pos[j]].vals.find(val[j]));
            val[j] = x;
            ac.nodes[pos[j]].vals.insert(val[j]);
        }
        else{
            string q;
            cin >> q;
            printf("%d\n", ac.feed(q));
        }
    }
    return 0;
}
Solution 2 (pikmike)
#include <bits/stdc++.h>

#define forn(i, n) for (int i = 0; i < int(n); i++)

using namespace std;

const int AL = 26;

vector<vector<pair<int, int>>> upd;
vector<int> ans;
int n, m;

struct segtree{
    vector<int*> where;
    vector<int> vals;
    vector<int> t;
    int n;

    segtree(){}

    segtree(int n) : n(n){
        t.assign(4 * n, -1);
        where.clear();
        vals.clear();
    }

    void updh(int v, int l, int r, int L, int R, int val){
        if (L >= R)
            return;
        if (l == L && r == R){
            where.push_back(&t[v]);
            vals.push_back(t[v]);
            t[v] = max(t[v], val);
            return;
        }
        int m = (l + r) / 2;
        updh(v * 2, l, m, L, min(m, R), val);
        updh(v * 2 + 1, m, r, max(m, L), R, val);
    }

    void upd(int l, int r, int val){
        updh(1, 0, n, l, r, val);
    }

    int geth(int v, int l, int r, int pos){
        if (l == r - 1)
            return t[v];
        int m = (l + r) / 2;
        if (pos < m)
            return max(t[v], geth(v * 2, l, m, pos));
        return max(t[v], geth(v * 2 + 1, m, r, pos));
    }

    int get(int pos){
        return geth(1, 0, n, pos);
    }

    void rollback(){
        *where.back() = vals.back();
        where.pop_back();
        vals.pop_back();
    }
};

segtree st;

struct aho_corasick {
    struct node {
        map<int, int> nxt, go;
        int p, pch;
        int suf, ssuf;
        vector<int> term, qs;

        node() {
            nxt.clear();
            go.clear();
            suf = ssuf = -1;
            term.clear();
            p = -1, pch = -1;
            qs.clear();
        }
    };

    vector<node> nodes;
    vector<vector<int>> g;

    aho_corasick() {
        nodes = vector<node>(1, node());
    }

    void add(const string& s, int id) {
        int v = 0;
        forn(i, s.size()) {
            int c = s[i] - 'a';
            if (!nodes[v].nxt.count(c)) {
                nodes.push_back(node());
                nodes[v].nxt[c] = int(nodes.size()) - 1;
                nodes.back().p = v;
                nodes.back().pch = c;
            }
            v = nodes[v].nxt[c];
        }
        nodes[v].term.push_back(id);
    }

    void feed(const string &s, int id){
        int v = 0;
        forn(i, s.size()){
            int c = s[i] - 'a';
            v = go(v, c);
            nodes[v].qs.push_back(id);
        }
    }

    int go(int v, int c) {
        if (nodes[v].go.count(c))
            return nodes[v].go[c];
        if (nodes[v].nxt.count(c))
            return nodes[v].go[c] = nodes[v].nxt[c];
        if (v == 0)
            return nodes[v].go[c] = 0;
        return nodes[v].go[c] = go(suf(v), c);
    }

    int suf(int v) {
        if (nodes[v].suf != -1)
            return nodes[v].suf;
        if (v == 0 || nodes[v].p == 0)
            return nodes[v].suf = 0;
        return nodes[v].suf = go(suf(nodes[v].p), nodes[v].pch);
    }

    void build_tree() {
        g.resize(nodes.size());
        forn(v, nodes.size()) {
            int u = suf(v);
            if (v != u)
                g[u].push_back(v);
        }
    }

    void dfs(int v){
        int cur = st.where.size();
        for (auto i : nodes[v].term){
            int lst = m;
            for (auto it : upd[i]){
                st.upd(it.first, lst, it.second);
                lst = it.first;
            }
            st.upd(0, lst, 0);
        }
        for (auto j : nodes[v].qs){
            ans[j] = max(ans[j], st.get(j));
        }
        for (int u : g[v]){
            dfs(u);
        }
        int nw = st.where.size();
        forn(_, nw - cur){
            st.rollback();
        }
    }
};

aho_corasick ac;

int main() {
    ios::sync_with_stdio(!cin.tie(0));
    cin >> n >> m;
    upd.resize(n);
    ans.resize(m, -1);
    vector<int> tp2;
    ac = aho_corasick();
    st = segtree(m);
    forn(i, n){
        string s;
        cin >> s;
        ac.add(s, i);
    }
    forn(i, m){
        int t;
        cin >> t;
        if (t == 1){
            int j, x;
            cin >> j >> x;
            --j;
            upd[j].push_back(make_pair(i, x));
        }
        else{
            string q;
            cin >> q;
            ac.feed(q, i);
            tp2.push_back(i);
        }
    }
    forn(i, n){
        reverse(upd[i].begin(), upd[i].end());
    }
    ac.build_tree();
    ac.dfs(0);
    for (auto it : tp2)
        cout << ans[it] << "\n";
    return 0;
}
Well, I'm not so sure about this, but I think another, more straightforward approach for B uses the facts below: in the optimal answer no index is switched more than once, we need to make the string alternating, and the numbers of ones and zeros are equal.
We just calculate the number of indices that need to be reversed, and if any adjacent indices need to be reversed we count them as one (since we can switch them with one move).
Once again, since I'm still a beginner I'm not fully confident in this approach; tell me if it has a problem.
I did this too, but I don't have a proof of why it works.
I saw that when there is an even-length contiguous block of bad indices, one reversal fixes that block. But it still works for odd-length blocks, and I don't know why; it seemed that the odd-length blocks can probably be paired up with other odd blocks, but I couldn't conclude much.
Can anybody prove this?
idc123 You are almost correct. The reason this works is that, since the number of indices that have to be reversed is always even (pretty straightforward), the number of odd blocks is also even. Two consecutive odd blocks can be made correct in 2 moves: in the first move, reverse the whole substring from the first odd block to the next odd block (both inclusive); then, in the second move, reverse only the middle part, which was correct before the first move.
So overall, 2 odd blocks require 2 moves, and they are even in number, so for the sake of counting we can say that we need 1 move per odd block; 1 move per even block is obvious. So the answer is the total number of blocks.
Ah, I see. I was unable to see why the number of odd blocks is even; it turns out it's because the total number of bad indices is even. I had a hard time realising this. Thank you for your comment!
I don't think it is always true that two consecutive odd blocks can be made correct in 2 moves.
Consider s = 11011011100000. Let the bad-index tracking string be p. Then p = 01110001001010.
Now if you do reverse(s[2..8]) (1-based), it leaves you with s = 11101101100000 with corresponding p = 01000111001010. Then doing reverse(s[3..5]) leaves you with s = 11101101100000 and p = 01000111001010. But at this step we should have corrected the adjacent bad odd segments and have had s = 10101010100000.
Not only do we not have this, but in this s, if it were correct, we would have basically generated new 0's, which should not be possible.
I think the issue is that since the 4 zeroes are at the end of s, it is not possible to correct the first two odd segments without including the 0's. Maybe instead it is correct to say that for every odd-length segment we can find another odd-length segment such that the two can be fixed in the way you described, leaving everything else unchanged.
Please correct me if I made a mistake.
Edit: In this case, if we reverse the segments s[2...13] and s[3...10], it reduces the 4 odd segments to 2. We can then fix the whole string by reversing s[6...9] and s[7...8].
Yup, you are right. I used this approach only. :-)
good contest
For G, can someone please explain why iterating over all the terminals in each query is $$$O(q \sqrt{n})$$$?
Not exactly $$$q \sqrt n$$$ but $$$(sum~of~lengths~of~q) \cdot \sqrt{sum~of~lengths~of~s_i}$$$. No more than $$$\sqrt{sum~of~lengths~of~s_i}$$$ words can end in each position and there are $$$(sum~of~lengths~of~q)$$$ positions in total.
Why $$$n^{1/2}$$$? I think it should be $$$n^{2/3}$$$.
Why? If we want to find out the maximum number of unique lengths then let's take the smallest possible lengths $$$1, 2, \dots, k$$$, one string per each length. The sum of them is $$$\frac{k(k+1)}{2}$$$, thus $$$k$$$ is $$$\sqrt{2 \cdot total\_length}$$$.
Let's consider the worst case: you query for a string $$$T$$$, say $$$T = $$$ abcd...z, and insert every substring of it into the automaton. There are $$$O(|T|^2)$$$ substrings, and their total length is $$$n = 1 \cdot |T| + 2 \cdot (|T|-1) + \dots + |T| \cdot 1 = O(|T|^3)$$$.
By substitution, we get $$$O(n^{2/3})$$$ in the worst case. Did I make some mistake?
I think you are mistaken about what we are summing up. What we are doing with the query is the following: add the first letter, save the current vertex and look at the terminals above the vertex (their count is $$$O(\sqrt{dictionary\_length})$$$). Then go over the next letter from the saved vertex, once again jumping no more than sqrt times. I don't really see how the substring count of the query is relevant here.
For each search you can also just keep a set of the words you have already matched in the current search (actually, a set of the terminals of the trie that have already matched). Thus, if you visit one again during the same search, you just ignore it. Then the first solution for G becomes $$$O((q+T) \log n)$$$, $$$T$$$ being the sum of lengths of the queries of type 2.
97259249
Why do you think the complexity is like that? I don't quite see how to construct a test such that there are a lot of distinct substrings of fixed total length but I don't see $$$O((q + T) \log n)$$$ either.
In E, I am not able to understand why we subtract i from each a[i] in the following step
forn(i, n + 2) a[i] -= i;
in the editorial solution. Can somebody help me? Thanks in advance!
Same here. I didn't get that part. For example, if the sequence is [2, 5, 3, 1], we can't keep [2, 3] as the LIS, but how does doing a[i] -= i take that distance constraint into account?
This explanation was good for problem E.
If you subtract i, it leaves room for you to make the other elements strictly increasing.
For example, if we have the array
1, 1, 2, 3
And we subtract i from each,
1, 0, 0, 0
You can see that if we make these elements all equal, then the original array will be strictly increasing. If we change everything to one and then add i back, we have
1, 1, 1, 1 -> 1, 2, 3, 4
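To make this concrete, here is a minimal sketch of the reduction (ignoring the forced positions $$$b$$$ from the problem; the function name is made up): after a[i] -= i, the kept elements must form a non-decreasing subsequence, so the minimum number of changes is n minus the length of the longest such subsequence.

#include <bits/stdc++.h>
using namespace std;

int minChangesToStrictlyIncreasing(vector<int> a) {
    for (int i = 0; i < int(a.size()); i++)
        a[i] -= i;                                    // the transformation being discussed
    vector<int> lis;                                  // longest non-decreasing subsequence of the transformed array
    for (int x : a) {
        auto pos = upper_bound(lis.begin(), lis.end(), x);
        if (pos == lis.end()) lis.push_back(x);
        else *pos = x;
    }
    return int(a.size()) - int(lis.size());
}

int main() {
    // [2, 5, 3, 1] -> [2, 4, 1, -2]; the longest non-decreasing subsequence is [2, 4],
    // so 2 changes are needed: keeping the original 2 and 3 is impossible because no
    // integer fits strictly between them at the index in the middle.
    cout << minChangesToStrictlyIncreasing({2, 5, 3, 1}) << endl; // prints 2
}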
Okay, I got it. Thanks for explaining kevin!
Hi, which programming language is this (the answer to A)? (I'm asking because it looks sexy.)
I guess it's Kotlin.
adedalic "The longer the segment [a2,a) is the better and the maximum we can take is a=2l."
Can you please explain this statement a bit more? The maximum possible $$$a$$$ is $$$2l$$$, but there can be an $$$a$$$ such that $$$a < l$$$ which satisfies the constraints, and we have to analyze that as well. Am I missing something?
It has been shown that $$$\left\lfloor\frac{l}{a}\right\rfloor = \left\lfloor\frac{r}{a}\right\rfloor$$$, and also that we must find $$$a$$$ such that $$$\frac{a}{2} \leq l \bmod a \leq r \bmod a < a$$$. So it must be obvious that $$$r - l < \frac{a}{2}$$$, if such an $$$a$$$ exists.
Case I: $$$r \geq 2l$$$
Here we must have $$$l \leq r - l < \frac{a}{2}$$$, that is, $$$a > 2l$$$. But this won't work, as $$$l \bmod a$$$ would then be less than $$$\frac{a}{2}$$$.
Case II: $$$r < 2l$$$
We can always choose $$$a = 2l$$$.
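A tiny brute-force sanity check of this criterion for small $$$l, r$$$ (just a sketch; LIM is an arbitrary small bound):

#include <bits/stdc++.h>
using namespace std;

int main() {
    const int LIM = 60; // small bound, enough for a sanity check
    for (int l = 1; l <= LIM; l++)
        for (int r = l; r <= LIM; r++) {
            // does there exist a pack size a with floor(l/a) == floor(r/a) and l mod a >= a/2 ?
            bool found = false;
            for (int a = 1; a <= 2 * r + 2 && !found; a++)
                found = (l / a == r / a) && (2 * (l % a) >= a);
            assert(found == (2 * l > r)); // matches the editorial's one-line criterion
        }
    cout << "criterion verified for all l <= r <= " << LIM << endl;
}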
I got what you said; it becomes easy as soon as you have the two cases ($$$r \geq 2l$$$ and the other one) to analyze. But what if we want to analyze the cases as below?
0) Obviously $$$a$$$ should not belong to $$$[l, r]$$$.
1) If it is possible to have the required $$$a$$$ such that $$$a > r$$$ — this gave me $$$2l > r$$$.
2) Then $$$a < l$$$ — this one I couldn't bring to any conclusion.
According to 0), either $$$a > r$$$ or $$$a < l$$$. If $$$a > r$$$, then obviously $$$a = r + 1$$$ is the best. Now we want to prove that if $$$a = r + 1$$$ does not satisfy the restriction, then no $$$a < l$$$ satisfies it either.
If $$$a = r + 1$$$ does not satisfy it, that means $$$2l < r + 1$$$, right? Because only in the situation $$$l \bmod (r+1) = l < \frac{r+1}{2}$$$ is the restriction not satisfied. Next, $$$2l < r + 1$$$ means $$$2l \leq r$$$. If $$$a < l$$$, you will easily see that for every such $$$a$$$ there exists a $$$k$$$ with $$$l \leq ka \leq r$$$, because $$$2l \leq r$$$. And if $$$l \leq ka \leq r$$$, the restriction is not satisfied. So we finally find that if $$$a = r + 1$$$ does not work, then no $$$a < l$$$ works either.
For Problem F, I've got an $$$O(n \log n \log \max a_i)$$$ solution. Code
Basically, $$$dp[i][j]$$$ means the number of ways to make fishermen $$$[1,i]$$$ emotional while $$$j$$$ of them are happy, considering the sequence in descending order.
Then use a segment tree with range multiply update and range sum query to speed up the solution.
G can also be solved with a suffix tree. I don't know about Aho-Corasick, but I heard the algorithm used in Aho-Corasick is similar.
It may be completely irrelevant, but I just want to ask one doubt. Should I upsolve problems rated above 2600 (for example F and G in this contest), or should I upsolve only problems which I currently feel comfortable with? (From experience, I can say I am able to understand a 2400-rated problem if given some hours.)
If you just want to be a high expert or low CM, you should probably just do easier tasks (<= 1900). In theory, doing hard problems is a lot better, but tasks that are too hard can demotivate you, make you look at the editorial too quickly, etc.
I would suggest upsolving up to 200-400 rating points above you, depending on how hard the problems feel to you.
Contests are about struggling with problems, so also make sure you struggle before reading the editorial.
Why would greedy fail for C?
What greedy?
Like, we use $$$T$$$ equal to $$$t_i$$$ for every distinct $$$t_i$$$, so the answer is 0 so far, and for every duplicate $$$t_i$$$ we use the smallest absolute difference to its left (greater than 0) or right which has not been used yet, and add it to the answer.
Any reason why this should work?
We are trying to minimize the answer, but now that it's known that greedy doesn't work, I just want to know why.
How will you choose which side you should take?
I will sort the initial array first, and then, if both sides have an equal absolute difference, I will choose the left side, as the right side may come in handy for further values of the array.
Just take this example: 1 3 4 4 6 7 7 8 9 10 11 12
According to your logic, you'd take 5 for the repeated 4 because 2 is farther, however, taking 2 would be more beneficial because then you'd have 5 available for the repeated 7 later. I hope it's clear.
Thank you. This is the test case I was missing.
And even your algorithm will not follow this: " I claim that there is an optimal answer such that the times T for each dish go in the increasing order. "
I was doing the same during the contest too, but I can't find why it is wrong: https://mirror.codeforces.com/contest/1437/submission/96912740. Help if anyone got it?
Look at the replies to my comment; it is clear now.
yes!!
Can anyone please explain the implementation of problem C's first approach? I understood the tutorial but didn't understand the implementation.
Why do we use dp[i][j + 1] = min(dp[i][j + 1], dp[i][j]);?
Let's see what we are doing while iterating the for loop in the first approach.
We are updating two values: dp[i+1][j+1] and dp[i][j+1].
We are updating dp[i+1][j+1] when it has a larger value than dp[i][j] + abs(t[i] - j), which is in fact always the case (so you can try changing min(dp[i + 1][j + 1], dp[i][j] + abs(t[i] - j)) to dp[i][j] + abs(t[i] - j)).
Now to the real question: we are updating dp[i][j+1] when it has a larger value than dp[i][j], since dp[i][j+1] currently stores dp[i-1][j] + abs(t[i-1] - j) (except for i = 0); if that is more than dp[i][j], we update it.
In simpler terms, we are just comparing dp[i][j] + abs(t[i] - j) and dp[i+1][j] and assigning the minimum of the two to dp[i+1][j+1].
Try building the dp matrix and dry-running the code against some sample test cases, you will surely get it. Correct me if I'm wrong :P
Here is an easy-to-understand approach. Let dp[i][T] denote the minimum cost of processing the first i dishes (remember we first need to sort them in increasing order), such that the $$$i^{th}$$$ dish is processed at time T.
We can simply calculate the answer for each state in O(n), hence the total time complexity will be O(n^3).
Note that we can move the abs(j - arr[i]) part out of the min() function, as it does not depend on k, and the code becomes roughly the sketch below. So finally we can optimise it to O(n^2) by observing that the innermost loop only calculates the prefix minimum of dp[i-1] (even O(n^3) easily passes the time limit though).
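A minimal sketch of such a dp (assuming the dishes are passed in arr, times range over [1, 2n], and minUnpleasantness is just a made-up wrapper name):

#include <bits/stdc++.h>
using namespace std;

int minUnpleasantness(vector<int> arr) {
    sort(arr.begin(), arr.end());
    int n = arr.size(), T = 2 * n;
    const int INF = 1e9;
    // dp[i][t] = minimum cost to put out the first i+1 dishes, the (i+1)-th exactly at time t
    vector<vector<int>> dp(n, vector<int>(T + 1, INF));
    for (int t = 1; t <= T; t++)
        dp[0][t] = abs(t - arr[0]);
    for (int i = 1; i < n; i++) {
        int best = INF;                              // prefix minimum of dp[i-1][1..t-1]
        for (int t = 1; t <= T; t++) {
            best = min(best, dp[i - 1][t - 1]);      // the O(n^3) version recomputes this with an inner loop
            if (best < INF)
                dp[i][t] = best + abs(t - arr[i]);
        }
    }
    return *min_element(dp[n - 1].begin(), dp[n - 1].end());
}

int main() {
    cout << minUnpleasantness({4, 2, 4, 4, 5, 2}) << endl; // prints 4 (times 1..6 for the sorted dishes)
}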
In the Hungarian implementation of Problem C, can anyone provide a general code that also includes finding which employee was allotted which job? (Or, in this case, what the final T was for each input value.) Thanks.
For B I used a very simple approach and I have no proof of it, but somehow it worked. If someone knows why, please tell me too. I just counted max(adjacent count of 0s, adjacent count of 1s) = answer... that's it. Maybe we only need to do work when something is consecutive; that's what I thought while solving.
I think it is proved in the editorial. Actually, adjacent count of 0s == adjacent count of 1s if you link a[0] with a[n-1]. So the answer is sum(s[i] == s[(i+1)%n]) / 2.
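A tiny sketch of that cyclic formula (a standalone program reading one string; it should give the same value as the editorial's minimum over the two padded strings):

#include <bits/stdc++.h>
using namespace std;

int main() {
    string s;
    cin >> s;
    int n = s.size(), same = 0;
    for (int i = 0; i < n; i++)
        same += (s[i] == s[(i + 1) % n]);   // count equal adjacent pairs, treating the string as cyclic
    cout << same / 2 << endl;               // each reversal can fix two such pairs
}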
Can anyone explain the second claim in B's editorial, which says there will be an equal number of 00-pairs and 11-pairs if we add a 1 to the left and a 0 to the right?
Either the numbers of consecutive 0-pairs and 1-pairs are already equal, or, if the number of 1-pairs is larger, it can only be larger by one and both ends will have a 0; the same holds with more 0-pairs than 1-pairs (that is my intuition). I think this can be proved using the fact that the numbers of 0s and 1s are equal, but I haven't tried to prove it yet. Sorry for my bad English.
Thanks!
welcome
There's a simpler dp for problem F that doesn't require any optimization. The state is just $$$dp[i]$$$, which is just the number of ways in which we can choose fisherman $$$i$$$ to be happy (again, we have the fishermen sorted in non-decreasing order). The overall running time is $$$O(n^2)$$$.
My submission.
If you've got any doubts about what's going on here, feel free to ask :)
I don't understand anything in there, can you explain it?
Yeah, my bad. I should've realized that code is hard to read. I'll try my best to explain it.
Firstly, the fishermen are sorted in non-decreasing order.
Secondly, $$$smaller[i]$$$ is the number of $$$j < i$$$ such that $$$2 * a[j] > a[i]$$$. These are fishermen that we cannot place to the left of $$$i$$$ should we choose $$$i$$$ to be a happy fisherman. When I say some $$$j$$$ belongs to $$$smaller[i]$$$, it just means that $$$j < i$$$ and $$$2 * a[j] > a[i]$$$.
Thirdly, when we have reached any state $$$i$$$, we have already placed all elements $$$j$$$ such that $$$j \le i$$$ excluding the elements which belong to $$$smaller[i]$$$, because as stated before, these are elements that we cannot place in front of $$$i$$$, so we must decide their positions later on after choosing some other happy fisherman (after changing maximums i.e. transitioning to another state).
From here, I will describe the transitions in the dp ($$$dp[i]$$$ is the number of ways in which we can make fisherman $$$i$$$ happy). $$$i$$$ will denote the current state we are in, and $$$j$$$ the state we want to transition to. ($$$i < j$$$).
When we make this transition, we place element $$$j$$$ in the first unoccupied position, and so there is only one way to place element $$$j$$$. The reason the first unoccupied position is always given to $$$j$$$ is that all the other elements yet to be placed can only come after $$$j$$$, which in turn is because we cannot place them directly to the right of $$$i$$$ (either because they belong to $$$smaller[i]$$$, in which case we would get a content fisherman, or because they are just larger than $$$i$$$ and would result in a change of maximum).
The other elements that we can place (and have to place) when making a transition from $$$i$$$ to $$$j$$$ are those belonging to $$$smaller[i]$$$ and those strictly between $$$i$$$ and $$$j$$$ but not belonging to $$$smaller[j]$$$. Let the number of such elements be $$$count$$$. Now we have some number of free positions that hasn't been occupied yet. This number is exactly $$$n - i + smaller[i] - 1$$$ (because we have already placed all elements before $$$i$$$ except for those belonging to $$$smaller[i]$$$, and have also placed $$$j$$$).
We can freely place all $$$count$$$ elements in any order we wish in these positions. The variable $$$factor$$$ basically gives the number of ways to do this. We pick exactly $$$count$$$ of the free positions, and then we can freely permute the order of the fishermen in those positions, thereby giving what you see in the code.
Then we just sum over all valid $$$j$$$ (i.e for all $$$j$$$ such that $$$a[j] \ge 2 * a[i]$$$) for each $$$dp[i]$$$ thereby giving an $$$O(n^2)$$$ solution. The answer is just $$$dp[0]$$$ (according to how I've implemented it), and by default I take $$$a[0] = 0$$$.
There's also an O(sort fishermen) + O(n) solution which relies on mod being prime.
Explanation is commented: https://mirror.codeforces.com/contest/1437/submission/99661882
Problem F is solvable in $$$O(n \log n)$$$. Here is the code link: https://mirror.codeforces.com/contest/1437/submission/96998991.
I cannot understand the editorial of problem A; can someone please explain it in a more detailed way?
For a contiguous interval, the results of the modulo operation are also contiguous.
So just let a = r + 1 and check if l >= a / 2.
Can somebody please explain to me why we can reach the lower bound in B?
Check out the simple approach for B in this submission.
Can you please explain the idea behind that code? Thanks in advance.
In D, what exactly did we need to do? My approach:
int c = 1;
for (int i = 2; i < n; i++) {
    if (v[i] < v[i - 1]) c++;
}
cout << c << endl;
We do not increase the depth every time v[i] < v[i-1]; we have to find the minimum depth.
So if there are more nodes in the level above, we can just switch to another node (a sibling of the parent node).
We increase the depth only when there is no node with zero children left in the level above.
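A standalone sketch of that rule (it mirrors the editorial's solve() above; minHeight is just a made-up wrapper name):

#include <bits/stdc++.h>
using namespace std;

int minHeight(const vector<int>& a) {                 // a is the given BFS order, a[0] is the root
    int n = a.size();
    vector<int> depth(n, 0);
    int parent = 0;                                   // index (in BFS order) of the vertex we attach children to
    for (int i = 1; i < n; i++) {
        if (i - 1 > 0 && a[i - 1] > a[i])             // children of one vertex appear in increasing order,
            parent++;                                 // so a descent means we move on to the next parent
        depth[i] = depth[parent] + 1;
    }
    return depth[n - 1];                              // the last vertex of the BFS order lies on the deepest level
}

int main() {
    cout << minHeight({1, 4, 3, 2}) << endl;          // prints 3: the root can take only 4 as a child, and so on down
}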
Problem A was rated 800. Is it a joke? I know it's a one-liner, but just look at the number of people who solved it during the contest. This problem is at least 1000, probably 1100. People got angry because of the div. 4 rating inflation, and now they increased the difficulty of all Div 2 contests while leaving the problem ratings as before. Amazing job, guys.
Can someone explain the "slope trick" mentioned in the editorial of problem c. I do get the o(n^2) solution. The o(nlogn) solution seems to be interesting
I've implemented it here: https://mirror.codeforces.com/contest/1437/submission/97535399
Problem B was too tricky for a Div 2 B.
For Problem C, I used a greedy approach after looking at the tags on the problem statement. But greedy actually doesn't work. In that case, are the people above 1900 misleading us with the tags, or is there actually a greedy solution for problem C? Please let me know. Thanks.
I guess sorting by the time taken for each dish is the greedy part.
The B solution is so precise. I learned a lot from it!
G made me review the Aho-Corasick automaton... but I got TLE on test 70 :( Maybe it's because the pointers in my ugly code spend too much time...
Passed! I used a heuristic method: maintain a pointer "nex", which points to the closest node in the fail tree that is the end of some $$$s_i$$$. When initializing the fail pointers of the automaton, the nex pointer can be initialized along the way. Then we can jump faster. I don't know whether the worst-case complexity is still $$$O(q\sqrt n)$$$, but it works.
For problem G, why is "Thus, that's bounded by the square root of the total length." true? (Actually, I'm also confused by what's meant by "that").
Problem B: can someone explain why the answer is the number of consecutive equal pairs divided by 2?