Hello everyone!
Below is the editorial for the informatics division of CAMA along with the solution code to every problem. We have also uploaded PDFs with the editorial both in Spanish and English on our website. The contest can be found on this gym.
The problems were prepared by Esomer, danx, Hectorungo_18, javiergm06 and Rigobertus. We would also like to thank our VIP tester BernatP for his dedication towards the contest.
A. Saving the cinema
You should do what the problem says with an if statement. Be careful to add the final dot.
#include <bits/stdc++.h>
using namespace std;
int main(){
string s; cin >> s;
if(s == "Yoda") cout << "Pertenece a Star Wars." << endl;
else if(s == "Spock") cout << "Pertenece a Star Trek." << endl;
else cout << "No pertenece ni a Star Wars ni a Star Trek." << endl;
}
B. Operation
First we need to realize that the subtraction and division of positive integers only make the result smaller, whereas the multiplication and addition do not.
So since there are just two possible options to optimize the solution we can just compare both of them and output the bigger one.
#include<iostream>
using namespace std;
int main(){
    // For positive integers only a+b or a*b can be optimal,
    // so the answer is simply the larger of the two.
    int x, y;
    cin >> x >> y;
    cout << max(x + y, x * y) << "\n";
}
C. Maximum profit
To solve this problem we need to sort the array in non-decreasing order and then calculate the sum of the last k elements.
Complexity of the solution: O(n⋅logn).
#include<bits/stdc++.h>
using namespace std;
int main(){
int n, k; cin >> n >> k;
vector<int> v(n);
for(auto &i : v) cin >> i;
sort(v.begin(), v.end());
reverse(v.begin(), v.end());
int sum = 0;
for(int i = 0; i < k; i++) sum += v[i];
cout << sum << endl;
}
D. jbum
To find m, that is, the minimum number of minutes Javier needs to wait until he can perform with the desired weight, we just need to calculate \lceil \log_2(n) \rceil, because in the i-th minute we can add 2^{i-1} discs if we always put all the discs we have into the duplicator.
Once we know m, we need to find the lexicographically smallest sequence x, where x_i is the number of discs that Javier puts into the duplicator the i-th time. We will call a_i the number of discs we have in the i-th minute. So we have to find an array a that satisfies a_{i+1} \le 2 \cdot a_i, but as we need the lexicographically smallest among all possible x, a also needs to satisfy a_i = \lceil \frac{a_{i+1}}{2} \rceil. This happens because if a_i > \lceil \frac{a_{i+1}}{2} \rceil it means that we could decrease a_i until it becomes equal to \lceil \frac{a_{i+1}}{2} \rceil. And as we know that a_m = n, we can calculate how many discs Javier needs to put into the duplicator in each minute, first calculating the (m-1)-th one, and ending with how many discs we have to put into the duplicator in the first minute.
Final complexity: O(logn).
#include<bits/stdc++.h>
using namespace std;
int main(){
ios_base::sync_with_stdio(0);
cin.tie(0);
int n; cin >> n;
vector<int> ans;
while(n > 1){
int x = n / 2;
ans.push_back(x);
n -= x;
}
reverse(ans.begin(), ans.end());
cout << (int)ans.size() << endl;
for(int x : ans) cout << x << " "; cout << endl;
}
E. Looking for palindromes
The answer is 10^{\lceil \frac{n-2}{2} \rceil} \cdot 90 \cdot \lceil \frac{n-1}{2} \rceil. This formula is basically the number of palindromes of length n−2, multiplied by the number of positions and combinations of the newly added digits.
Final complexity: O(logn) or O(n), depending on how you do the exponentiation.
#include<bits/stdc++.h>
using namespace std;
const int MOD = 1e9 + 7;
// Modular power: computes a^b (mod MOD) with b sequential multiplications.
long long pw(int a, int b){
    long long res = 1;
    while(b-- > 0){
        res = (res * a) % MOD;
    }
    return res;
}
// E. Looking for palindromes — per test case, evaluates the editorial's
// closed-form count modulo 1e9+7.
int main(){
ios_base::sync_with_stdio(0);
cin.tie(0);
int tt; cin >> tt;
while(tt--){
int n; cin >> n;
// Length 1 has no valid construction, answer is 0 by definition.
if(n == 1) {cout << "0\n"; continue;}
// 10^{floor((n-1)/2)} — the free digits of the inner palindrome
// (equals 10^{ceil((n-2)/2)} from the editorial formula).
long long palindromes = pw(10, (n-1)/2);
// 90 combinations of the newly added pair of digits (see editorial).
palindromes *= 90;
palindromes %= MOD;
// floor(n/2) = ceil((n-1)/2) positions for the added digits.
cout << (palindromes * (n / 2)) % MOD << "\n";
}
}
F. Harry Potter in CMS
This problem is solved using the set data structure, or another Balanced Binary Search Tree.
If we let s be the set which stores the first query that gets each subtask correct, the answer is the size of s. The idea was to store, for each subtask, the queries in which Harry got that subtask correct, and add the first of those queries to s. Then, for the removing query, we update the set of the subtasks that were correct in that query and update set s accordingly. This part was mostly implementation; we recommend looking at the code if you still have doubts.
Final complexity: O(qlogq+klogk).
#include<bits/stdc++.h>
using namespace std;
const int maxInd = 2e5;
// F. Harry Potter in CMS — maintains, per subtask, the set of query indices
// that solved it; `mn` holds the earliest solving query of each currently
// solved subtask, so |mn| answers a type-3 query.
int main(){
ios_base::sync_with_stdio(0);
cin.tie(0);
int tt; cin >> tt;
while(tt--){
int q; cin >> q;
// occ[x] = sorted indices of the queries in which subtask x was correct.
vector<set<int>> occ(maxInd + 1);
// subtasks[i] = subtasks reported correct by (type-1) query i.
vector<vector<int>> subtasks(q+1);
// mn = earliest solving query per solved subtask; its size is the answer.
set<int> mn;
for(int ind = 1; ind <= q; ind++){
int type; cin >> type;
if(type == 1){
int k; cin >> k;
while(k--){
int x; cin >> x;
subtasks[ind].push_back(x);
// The representative of x may change; drop the old one first.
if((int)occ[x].size() > 0) mn.erase(*occ[x].begin());
occ[x].insert(ind);
// Re-insert the (possibly unchanged) earliest query solving x.
mn.insert(*occ[x].begin());
}
}else if(type == 2){
// Undo query i: remove it from every subtask it had solved.
int i; cin >> i;
for(int x : subtasks[i]){
mn.erase(*occ[x].begin());
occ[x].erase(i);
// If x is still solved by some other query, restore its representative.
if((int)occ[x].size() > 0) mn.insert(*occ[x].begin());
}
}else cout << (int)mn.size() << "\n";
}
}
}
G. XOR + Constructive = Love
First of all, we would like to apologize for misjudging the difficulty of this problem. It turned out to be much harder than what we expected.
Firstly, instead of using the array b mentioned in the statement, we will use another array cnt which will count the amount of elements that have each bit, similar to the described array a in the statement, which is easier to work with. Because of the first condition, for all i, cnti should be at least ai, so we initially set cnti=ai for i<30, and cnti=0 for i≥30. Then, it is also easy to check the XOR condition for each bit. If bit i isn't satisfying the XOR condition, we add 1 to cnti.
Once we have done this, we should only worry about meeting the sum condition, knowing that we have to add bits in pairs, i.e. the parity of each cnti should remain constant.
Now, one of the main observations is that, if there exists an answer, the optimal size will be at most \max\limits_i cnt_i + 2. This makes it easier because you can iterate over the possible sizes and check if there exists a solution of that size.
To do this, let the current size be m and let left be s - \sum\limits_{0 \le i < 60} cnt_i \cdot 2^i. If left is negative, it is impossible to fulfil the condition. Otherwise, we just iterate over the bits from most significant to least significant, and add as many as we can. Because we don't want to exceed left and we can only add at most m - cnt_i to each bit i, we could add \min(\lfloor \frac{left}{2^i} \rfloor, m - cnt_i) to bit i. Additionally, we should check once again if the XOR condition is met (it would stop being fulfilled if we added an odd number of occurrences for a bit), and if it isn't, subtract 1 from the amount we are adding. We update left and repeat the process for the next bit. If at the end of the process left = 0, then it is possible to build an array b of size m.
If we didn't find any possible size, we should print -1.
Final complexity: O(\log s).
#include<bits/stdc++.h>
using namespace std;
typedef long long ll;
// G. XOR + Constructive = Love — a[] doubles as the editorial's cnt[]:
// per-bit element counts, patched so each bit's parity matches x, then the
// remaining sum `left` is distributed greedily over candidate sizes m.
int main(){
ios_base::sync_with_stdio(0);
cin.tie(0);
int tt; cin >> tt;
while(tt--){
ll s, x; cin >> s >> x;
// a[i] = how many elements must contain bit i (bits 30..59 start at 0).
vector<ll> a(60, 0);
ll sum = 0;
ll mx = 0;
for(int i = 0; i < 30; i++){
cin >> a[i];
// Fix the parity of a[i] so the per-bit XOR matches bit i of x.
if(a[i]%2){
if(!((1ll << i)&x)) a[i]++;
}else{
if((1ll << i)&x) a[i]++;
}
sum += (1ll << i) * a[i];
mx = max(mx, a[i]);
}
// Even the mandatory bits already exceed s: no solution.
if(sum > s){
cout << -1 << "\n";
continue;
}
bool found = false;
// Candidate array sizes: at most mx + 2 (see editorial observation).
for(int m = mx; m <= mx + 2; m++){
ll left = s - sum;
// Greedily absorb `left` from the highest bit down, keeping parity
// of each bit intact (extra occurrences are added in pairs).
for(int i = 59; i >= 0; i--){
ll mxUse = left / (1ll << i);
mxUse = min(mxUse, m - a[i]);
// Re-check the XOR parity after the tentative addition; if broken,
// use one occurrence fewer.
if((mxUse + a[i])%2){
if(!((1ll << i) & x)) mxUse--;
}else{
if((1ll << i) & x) mxUse--;
}
left -= mxUse * (1ll << i);
}
// left exhausted exactly: size m works.
if(!left){
cout << m << "\n";
found = true;
break;
}
}
if(!found) cout << -1 << "\n";
}
}
H. Menorca's ants
Let's imagine that we have a subset of x ants where the maximum number of ants of the same type is y and we want to know the minimum number of throws needed to throw all the ants.
The m condition depends on x and the k condition depends on y. It can be proven that the answer will be max((x+ m - 1) / m, (y + k - 1) / k).
Proof:
(x + m - 1) / m = minimum number of throws needed without the k condition, and it doesn't depend on the order of the thrown ants.
(y + k - 1) / k = minimum number of throws needed without the m condition, and it depends on the order of the thrown ants.
We can always use the optimal order to minimize the maximum number of ants of the same type used and we will have enough moves.
The last thing we need is to find a subset of p ants with the maximum number of ants of the same type minimized. To do this we can just binary search on the answer (z) and check if the sum of min(z, a_{i}) over every i is \geq p.
There are other ways to do this, for example with sorting.
Complexity of the solution: \mathcal{O}(n \cdot \log A) where A is the maximum a_{i} among every i.
#include<bits/stdc++.h>
using namespace std;
#define endl "\n"
typedef long long ll;
const int MOD = 998244353;
// H. Menorca's ants — answer is max(ceil(p/m), ceil(mx/k)) where mx is the
// minimized largest multiplicity over all subsets of p ants. Instead of the
// editorial's binary search, this "water-levels" the sorted counts directly.
void solve(){
int n; cin >> n;
ll m, k; cin >> m >> k;
vector<ll> a(n);
for(auto &i : a) cin >> i;
ll p; cin >> p;
// Non-increasing order; sentinel 0 guarantees the loop terminates.
sort(a.begin(), a.end());
reverse(a.begin(), a.end());
a.push_back(0);
ll sum = 0;
for(ll x : a) sum += x;
// We must discard sum - p ants; lower the running cap `mx` level by level.
ll discard = sum - p;
ll mx = a[0];
for(int i = 1; i <= n; i++){
// i counts of size mx can each shed (mx - a[i]) before reaching the next
// level; if the remaining discard fits, spread it evenly and stop.
if(mx - a[i] >= discard / i) {mx = mx - discard / i; break;}
else{
discard -= i * (mx - a[i]);
mx = a[i];
}
}
cout << max((mx + k - 1) / k, (p + m - 1) / m) << endl;
}
signed main(){
    // Fast stdio, then run every independent test case.
    ios_base::sync_with_stdio(0);
    cin.tie(0);
    int cases;
    cin >> cases;
    for(; cases > 0; cases--) solve();
}
I. Fake bills
The main observation for this problem is that the state of the room is cyclic every 4 turns, because the cameras return to their initial position.
With that observation, you can store if each cell will be covered by a camera or not for each of the 4 states, so you can keep states of the form (t, r, c), where t is the time that has passed (denoting the state of the cell), r is the row and c is the column. To store this information, it is important to note that you can't just iterate over the cameras and then over the cells that each camera covers, because this has a complexity of O(m \cdot n), which is too slow. Instead, you should keep the minimum and maximum coordinate of any camera pointing in each direction for each row or column, and then determine if a cell is covered in a state using this information in O(n^2).
To do this, if a camera points up, you update the maximum of that column with the row of the camera. If it points down, you do similarly with the minimum of that column. If it points right, you update the minimum of the row with the column of the camera, similarly with the maximum if it points left. Then, a cell at (r, c) is covered if and only if:
- \max_r \geq c or,
- \min_r \leq c or,
- \max_c \geq r or,
- \min_c \leq r
Then, you do a BFS or DFS of the room, starting at (0, 0, 0) and going to adjacent cells with (t+1) \mod 4 as state which aren't covered by any cameras.
If you reach cell (n, n) at any point, a robber can reach the vault. Otherwise, it will be impossible for him.
Final complexity: O(n^2).
#include <bits/stdc++.h>
using namespace std;
// I. Fake bills — camera coverage is periodic with period 4, so we
// precompute, for each of the 4 phases, which cells are covered, then BFS
// over states (phase, row, col).
int main(){
ios_base::sync_with_stdio(0);
cin.tie(0);
int tt; cin >> tt;
while(tt--){
int n, m; cin >> n >> m;
// cam[r][c]: camera direction 0=U,1=L,2=D,3=R; 4 = no camera.
vector<vector<int>> cam(n, vector<int>(n, 4));
// rows[r] = {min col of a right-pointing cam, max col of a left-pointing cam};
// cols[c] = {min row of a down-pointing cam,  max row of an up-pointing cam}.
vector<pair<int, int>> rows(n, {1e9, -1});
vector<pair<int, int>> cols(n, {1e9, -1});
for(int i = 0; i < m; i++){
int r, c; cin >> r >> c; r--; c--;
char d; cin >> d;
if(d == 'U'){
cam[r][c] = 0;
cols[c].second = max(cols[c].second, r);
}else if(d == 'L'){
cam[r][c] = 1;
rows[r].second = max(rows[r].second, c);
}else if(d == 'D'){
cam[r][c] = 2;
cols[c].first = min(cols[c].first, r);
}else{
cam[r][c] = 3;
rows[r].first = min(rows[r].first, c);
}
}
// vis[t][r][c] = cell (r,c) is covered (or already visited) at phase t.
vector<vector<vector<bool>>> vis(4, vector<vector<bool>>(n, vector<bool>(n, false)));
for(int t = 0; t < 4; t++){
// Build next phase's row/col extremes while marking this phase's coverage.
vector<pair<int, int>> nwRows(n, {1e9, -1});
vector<pair<int, int>> nwCols(n, {1e9, -1});
for(int r = 0; r < n; r++){
for(int c = 0; c < n; c++){
if(cam[r][c] != 4){
// A camera's own cell is always covered; rotate it for next phase.
vis[t][r][c] = true;
cam[r][c] = (cam[r][c] + 1) % 4;
if(cam[r][c] == 0){
nwCols[c].second = max(nwCols[c].second, r);
}else if(cam[r][c] == 1){
nwRows[r].second = max(nwRows[r].second, c);
}else if(cam[r][c] == 2){
nwCols[c].first = min(nwCols[c].first, r);
}else{
nwRows[r].first = min(nwRows[r].first, c);
}
}else{
// Covered if some camera in this row/column looks across this cell.
if(rows[r].first < c || rows[r].second > c ||
cols[c].first < r || cols[c].second > r) vis[t][r][c] = true;
}
}
}
rows = nwRows;
cols = nwCols;
}
// BFS over (phase, r, c); vis doubles as the visited set.
queue<tuple<int, int, int>> q; q.push({0, 0, 0});
vis[0][0][0] = true;
bool found = false;
while(!q.empty()){
int t = get<0>(q.front()); int r = get<1>(q.front()); int c = get<2>(q.front()); q.pop();
if(r == n - 1 && c == n - 1){
found = true;
break;
}
// 4-directional moves (ai*aj != 0 excludes diagonals; 0,0 = waiting is
// also excluded here — presumably waiting in place is not allowed or not
// needed; the (0,0) pair is skipped by the same test).
for(int ai = -1; ai <= 1; ai++){
for(int aj = -1; aj <= 1; aj++){
if(aj * ai != 0) continue;
int i = ai + r; int j = aj + c;
if(i < 0 || j < 0 || i >= n || j >= n) continue;
if(!vis[(t+1)%4][i][j]){
vis[(t+1)%4][i][j] = true;
q.push({(t+1)%4, i, j});
}
}
}
}
cout << (found ? "YES" : "NO") << "\n";
}
}
J. Force Perturbation
This problem is solved using knapsack DP. Calculate, for each number of elements, the nearest sum to x that it is possible to make without using an element twice. Let that sum for a number of elements i be denoted by dp_i.
Then, the answer is \min\limits_{i=1}^n \lceil \frac{x - dp_i}{i} \rceil.
Final complexity: O(n^2 \cdot x).
#include<bits/stdc++.h>
using namespace std;
// J. Force Perturbation — knapsack over (count, sum): knapsack[sz][w] is true
// iff some sz distinct elements sum to exactly w. The answer minimizes
// ceil((x - best_sum) / sz) over all counts sz.
int main(){
ios_base::sync_with_stdio(0);
cin.tie(0);
int tt; cin >> tt;
while(tt--){
int n, x; cin >> n >> x;
vector<int> a(n);
for(auto &i : a) cin >> i;
vector<vector<bool>> knapsack(n+1, vector<bool>(x+1, false));
knapsack[0][0] = true;
for(int y : a){
// sz descends so each element is used at most once per subset size.
for(int sz = n; sz >= 1; sz--){
for(int w = 0; w + y <= x; w++){
if(!knapsack[sz-1][w]) continue;
knapsack[sz][w+y] = true;
}
}
}
int ans = -1;
for(int sz = 1; sz <= n; sz++){
// Scan sums downward: the first reachable sum is the closest to x,
// which minimizes the ceiling division for this sz.
for(int sum = x; sum >= 1; sum--){
if(knapsack[sz][sum]){
if(ans == -1) ans = (x-sum+sz-1) / sz;
else ans = min(ans, (x-sum+sz-1) / sz);
break;
}
}
}
cout << ans << "\n";
}
}
K. Óscar and his battle
Firstly, we sort the characters by their attack and the monsters by their defense. Then, we iterate over the characters. Every time we get to a new character i, we process all the monsters j such that d_j \le a_i. This is done with two pointers to keep a complexity of O(n \log n + m \log m), due to the initial sorting. When we process a monster, what we do is add e_j to the index c_j of a Segment Tree or Fenwick Tree.
Once we have processed all the monsters, to know how many coins we could win with a character i, we just have to query the sum of elements less or equal to b_i in our data structure. This is because, due to the initial sorting, we know that all the monsters in the data structure have a defense lower or equal to the attack of the current character, and then we only sum the coins given by monsters with an attack lower or equal to the defense of the current character, so we know that the character will be able to defeat all of them.
We keep the maximum of the answers for each character as the final answer to the problem.
Note that, due to the magnitude of the stats of the players and monsters, one should coordinate compress the values or use a sparse data structure. Briefly put, coordinate compressing x values means assigning a unique id between 1 and x to each different value, preserving their relative order.
Final complexity: O(n \log n + m \log m).
#include <bits/stdc++.h>
using namespace std;
struct segTree{
    // Point-add / range-sum segment tree over [0, sz), sz a power of two.
    std::vector<long long> sum;   // sum[x] = total of the segment of node x
    int sz;                       // number of leaves

    // Prepare an all-zero tree able to hold n positions.
    void init(int n){
        for(sz = 1; sz < n; ) sz *= 2;
        sum.assign(2 * sz, 0);
    }

    // Internal: add v at leaf i, recursing through node x covering [lx, rx).
    void add(int i, long long v, int x, int lx, int rx){
        if(rx - lx == 1){
            sum[x] += v;
            return;
        }
        int mid = lx + (rx - lx) / 2;
        if(i < mid) add(i, v, 2 * x + 1, lx, mid);
        else        add(i, v, 2 * x + 2, mid, rx);
        sum[x] = sum[2 * x + 1] + sum[2 * x + 2];
    }
    // Public: position i gains v.
    void add(int i, long long v){
        add(i, v, 0, 0, sz);
    }

    // Internal: sum of [l, r) restricted to node x's segment [lx, rx).
    long long calc(int l, int r, int x, int lx, int rx){
        if(l <= lx && rx <= r) return sum[x];   // fully inside the query
        if(r <= lx || rx <= l) return 0;        // disjoint from the query
        int mid = lx + (rx - lx) / 2;
        return calc(l, r, 2 * x + 1, lx, mid) + calc(l, r, 2 * x + 2, mid, rx);
    }
    // Public: sum over the half-open range [l, r).
    long long calc(int l, int r){
        return calc(l, r, 0, 0, sz);
    }
};
// K. Óscar and his battle — coordinate-compress all four stats together,
// sweep characters by attack, feed monsters (keyed by defense) into a
// segment tree indexed by monster attack, and query prefix sums.
int main(){
ios_base::sync_with_stdio(0);
cin.tie(0);
int tt; cin >> tt;
while(tt--){
int n, m; cin >> n >> m;
// all: (value, owner index, tag). Tags: 0 = player attack, 1 = player
// defense, 2 = monster defense, 3 = monster attack.
vector<tuple<int, int, int>> all;
vector<pair<int, int>> players(n);
for(int i = 0; i < n; i++){
int a, b; cin >> a >> b;
players[i] = {a, b};
all.push_back({a, i, 0});
all.push_back({b, i, 1});
}
// monsters[j] = (defense, attack, coins) — defense first for later sorting.
vector<tuple<int, int, int>> monsters(m);
for(int j = 0; j < m; j++){
int c, d, e; cin >> c >> d >> e;
monsters[j] = {d, c, e};
all.push_back({c, j, 3});
all.push_back({d, j, 2});
}
// Coordinate compression: equal values share one id, order preserved.
sort(all.begin(), all.end());
int cntId = -1;
for(int i = 0; i < (int)all.size(); i++){
if(i == 0 || get<0>(all[i]) != get<0>(all[i-1])) cntId++;
// tag > 1: monster fields (even tag = defense slot, odd = attack slot);
// otherwise player fields (even = attack, odd = defense).
if(get<2>(all[i]) > 1){
if(get<2>(all[i])%2==0) get<0>(monsters[get<1>(all[i])]) = cntId;
else get<1>(monsters[get<1>(all[i])]) = cntId;
}else{
if(get<2>(all[i])%2==0) players[get<1>(all[i])].first = cntId;
else players[get<1>(all[i])].second = cntId;
}
}
cntId++;
segTree st; st.init(cntId);
// Sweep: characters by attack; push monsters whose defense fits.
sort(players.begin(), players.end());
sort(monsters.begin(), monsters.end());
int ind = 0;
long long ans = 0;
for(pair<int, int> p : players){
while(ind < m && get<0>(monsters[ind]) <= p.first){
// Index by monster attack, value = coins.
st.add(get<1>(monsters[ind]), get<2>(monsters[ind]));
ind++;
}
// Coins from monsters with attack <= this character's defense.
ans = max(ans, st.calc(0, p.second + 1));
}
cout << ans << "\n";
}
}
L. Random intervals
First of all we need to check if the given intervals intersect, this can be easily done by sorting the intervals.
To solve this problem we need to calculate a dp_{[i] [j]} = number of ways to put i intervals on the first j spaces between existing intervals, O(n^{2}) states with O(n) transitions.
If we have already calculated dp_{[i] [j]}, we can make transitions of the following form: dp_{[i+k] [j+1]} = dp_{[i+k] [j+1]} + dp_{[i] [j]} \cdot W for 0 \leq k \leq n-i, where W is the number of ways to put k intervals on the space j+1.
To calculate the dp we need to know that the number of ways to put a intervals in a space of length b is \binom{b + 2 \cdot a - 1}{2 \cdot a}. You can get more info by searching for balls and boxes combinatorics on internet.
Then we need to erase the duplicates, this happens when we have intervals of length one because we can put the same interval either at the left or at the right. To do this we just calculate for each space dp2_{[i] [j]} = number of ways to put i intervals in that space with j intervals of length one at the left. There are other ways to do the same without an additional dp, with some casework.
We also need to precalculate factorials and inverse factorials.
Complexity of the solution: \mathcal{O}(n^3 + m).
#include<bits/stdc++.h>
using namespace std;
#define endl "\n"
typedef long long ll;
const int MOD = 998244353;
vector<ll> fact;
// Fast modular exponentiation: a^b (mod `mod`), iterative binary powering.
ll binpow(ll a, ll b, int mod = MOD){
    ll result = 1;
    while(b > 0){
        if(b & 1) result = (result * a) % mod;
        a = (a * a) % mod;
        b >>= 1;
    }
    return result;
}
// Modular inverse of x via Fermat's little theorem (mod must be prime).
ll inv(ll x, ll mod = MOD){
    return binpow(x % mod, mod - 2, mod);
}
// Binomial coefficient C(n, k) mod MOD; 0 when the pair is out of range.
ll C(int n, int k){
    if(k < 0 || k > n) return 0;
    return (fact[n] * inv(fact[k] * fact[n - k], MOD)) % MOD;
}
// Fill the global `fact` table with 0! .. n! modulo MOD.
void init_fac(int n){
    fact.assign(n + 1, 1);
    for(int i = 1; i <= n; i++){
        fact[i] = (fact[i - 1] * i) % MOD;
    }
}
// L. Random intervals — counts placements of n random intervals that avoid
// the given fixed intervals, divided by the total number of interval choices.
// ways[w] = number of ways to place w intervals in the gaps processed so far.
void solve(){
ll n, MAX; cin >> n >> MAX;
// Deduplicate and sort the fixed intervals.
set<pair<int, int>> prov;
for(int i = 0; i < n; i++){
int l, r; cin >> l >> r;
prov.insert({l, r});
}
vector<pair<int, int>> prov2;
for(pair<int, int> p : prov) prov2.push_back(p);
// Drop length-one intervals that touch a neighbour — they create no new gap.
vector<pair<int, int>> v;
for(int i = 0; i < (int)prov2.size(); i++){
if(prov2[i].first != prov2[i].second) {v.push_back(prov2[i]); continue;}
if((i > 0 && prov2[i-1].second == prov2[i].first) || (i < (int)prov2.size() - 1 && prov2[i+1].first == prov2[i].first)) continue;
v.push_back(prov2[i]);
}
int m = (int)v.size();
// lst = leftmost free coordinate of the current gap.
int lst = 1;
vector<ll> ways(n+1, 0);
ways[0] = 1;
for(int i = 0; i < m; i++){
int l = v[i].first; int r = v[i].second;
int space = l - lst + 1;
vector<ll> nwWays(n+1, 0);
for(int j = 0; j <= n; j++){
ll prob;
// If the previous fixed interval had length one (lst equals its left
// end), split the count to avoid double-counting length-one placements
// touching it on either side (balls-and-boxes formula in both cases).
if(i > 0 && lst == v[i-1].first) prob = C(space - 1 + 2 * j - 1, 2 * j) + C(space - 1 + 2 * j - 2, 2 * j - 1);
else prob = C(space + 2 * j - 1, 2 * j);
// Knapsack-style merge: j more intervals go into this gap.
for(int w = n-j; w >= 0; w--){
nwWays[w + j] += (ways[w] * prob) % MOD;
nwWays[w+j] %= MOD;
}
}
ways = nwWays;
lst = r;
}
// Final gap: from the last fixed interval to MAX.
int space = MAX - lst + 1;
vector<ll> nwWays(n+1, 0);
for(int j = 0; j <= n; j++){
ll prob;
if(lst == v[m-1].first) prob = C(space - 1 + 2 * j - 1, 2 * j) + C(space - 1 + 2 * j - 2, 2 * j - 1);
else prob = C(space + 2 * j - 1, 2 * j);
for(int w = n-j; w >= 0; w--){
nwWays[w + j] += (ways[w] * prob) % MOD;
nwWays[w + j] %= MOD;
}
}
// Only merge the final gap if it exists (last interval doesn't end at MAX).
if(v[m-1].first != MAX) ways = nwWays;
// Total equally-likely intervals on [1, MAX]: MAX*(MAX+1)/2, to the n-th
// power; the probability is favourable / total (modular division).
ll allPos = binpow(((MAX * (MAX-1)) / 2 + MAX) % MOD, n);
cout << (ways[n] * inv(allPos)) % MOD << "\n";
}
signed main(){
    ios_base::sync_with_stdio(0);
    cin.tie(0);
    // Factorials up to 1e7 are needed by the binomial coefficients.
    init_fac(1e7);
    int cases;
    cin >> cases;
    for(; cases > 0; cases--) solve();
}
M. The battle of Helm's Deep
The first thing to notice is that the towers are independent between them, the state of a tower doesn't affect another tower, just the final damage. So, this leads us to think that we can compute, for each tower, how much damage the inner walls would take if we assign some number of soldiers to it.
Additionally, an important observation is that the damage the inners walls take from a tower depends only on when the tower falls, so we can compute how many soldiers we need to assign to a tower i so that it falls in wave j. As there are a total of q waves over all towers, this means that we only need to compute this information O(q) times. So, for each tower, we will iterate over the waves that attack this tower and maintain the minimum number of soldiers needed for the tower to fall in that wave (special consideration for the case of when a tower doesn't fall at all). There are multiple ways to do this, some easier and some harder, the author does it in O(m + q \log q). However, an easier solution in O(m \cdot q) which also passes is for each number of soldiers, iterate over all the attacks on that tower in order, and calculate when it would fall. The code uses this approach, as it is easier to understand.
With this information, we can easily calculate dmg_{i,k}, denoting the damage the inner walls would take from the i-th tower if k soldiers are in it. This will be helpful later.
Once you have calculated the minimum number of soldiers needed for the tower to fall in some wave j, you can convert that to some "weights" of the form (soldiers, damage), where damage is q - j, and do knapsack DP with them in O(m \cdot q). The knapsack DP would have states dp_{i,k}, which denotes the minimum damage the inner walls can take if we consider the last i towers and we place k soldiers in total.
We can already know the minimum damage the inner walls can take, given by dp_{n,m}, but we now need to construct the lexicographically minimum such sequence for which the inner walls take that damage. To do this, we iterate from the first to last tower, maintaining two variables s and d, denoting the soldiers we have already used and the damage we have already taken. Then, for a tower i (1-indexed), we iterate over the number of soldiers we want to place in it, and we place the minimum number of soldiers k such that d + dmg_{i,k} + dp_{n-i,m-s-k} = dp_{n,m}.
Note: The code has a slight twist, instead of having dp_{i, j} denoting the minimum damage considering the last i towers if j soldiers are placed, it denotes the minimum damage placing j soldiers in towers from i to n. The former was described in the editorial because it seems easier to understand.
Final complexity: O(m \cdot (n+q)).
#include<bits/stdc++.h>
using namespace std;
const int INF = 2e9;
// Erase from `mp` every entry whose key is <= `power`, keeping the running
// aggregates consistent: `cnt` (total multiplicity of remaining entries)
// and `sum` (sum of key * multiplicity of remaining entries).
void update(long long power, std::map<int, int>& mp, long long& sum, int& cnt){
    while(!mp.empty()){
        auto [val, times] = *mp.begin();
        if(val > power) break;
        cnt -= times;
        sum -= (long long)val * times;
        mp.erase(mp.begin());
    }
}
// M. The battle of Helm's Deep — per tower, compute the minimum soldiers
// needed for it to survive each wave; turn those into (damage, soldiers)
// weights; knapsack them into minDamage; finally reconstruct the
// lexicographically smallest soldier assignment.
int main(){
ios_base::sync_with_stdio(0);
cin.tie(0);
int tt; cin >> tt;
while(tt--){
int n, m, q; cin >> n >> m >> q;
// queries[y] = list of (wave index, attack strength) hitting tower y.
vector<vector<pair<int, int>>> queries(n);
// stats[i] = (per-soldier power, hit points) of tower i.
vector<pair<int, int>> stats(n);
for(int i = 0; i < n; i++){
int a, b; cin >> a >> b;
stats[i] = {a, b};
}
for(int i = 0; i < q; i++){
int x, y; cin >> x >> y; y--;
queries[y].push_back({i, x});
}
// minDamage[i][k] = min inner-wall damage using towers i..n-1 with k
// soldiers; damage[i][k] = damage taken via tower i alone with k soldiers.
vector<vector<int>> minDamage(n+1, vector<int>(m + 1, 0));
vector<vector<int>> damage(n, vector<int>(m+1, 0));
for(int i = n - 1; i >= 0; i--){
minDamage[i] = minDamage[i+1];
if((int)queries[i].size() == 0) continue;
sort(queries[i].begin(), queries[i].end());
// ans[j] = minimum soldiers so the tower survives attack j (-1 = never).
vector<int> ans((int)queries[i].size(), -1);
// Pending (not yet absorbed) attacks: multiset `mp` with totals sum/cnt.
long long sum = 0;
int cnt = 0;
map<int, int> mp;
int curr = 0;
// Damage if the tower falls at the first unsurvived attack.
int curr_damage = q - 1 - queries[i][0].first;
for(int soldiers = 0; soldiers <= m; soldiers++){
if(curr >= (int)queries[i].size()){
damage[i][soldiers] = 0;
continue;
}
long long power = soldiers * (long long)stats[i].first;
// Discard pending attacks now fully covered by the garrison's power.
update(power, mp, sum, cnt);
long long damageToTower = sum - power * cnt;
if(power < queries[i][curr].second) damageToTower += queries[i][curr].second - power;
if(damageToTower < stats[i].second){
// The tower survives attack curr with this many soldiers; re-test
// the same soldier count against the next attack (soldiers--).
ans[curr] = soldiers;
mp[queries[i][curr].second]++;
sum += queries[i][curr].second;
cnt++;
curr++;
soldiers--;
curr_damage = 0;
if(curr < (int)queries[i].size()) curr_damage = q - 1 - queries[i][curr].first;
}else damage[i][soldiers] = curr_damage;
}
// weights: (inner-wall damage if the tower falls at that wave, soldiers
// required to delay the fall that far).
vector<pair<int, int>> weights = {{q - 1 - queries[i][0].first, 0}};
queries[i].push_back({q - 1, 0});
for(int j = 0; j < (int)queries[i].size() - 1; j++){
if(ans[j] == -1) break;
weights.push_back({q - 1 - queries[i][j+1].first, ans[j]});
}
// Knapsack merge of this tower's options into the suffix DP.
vector<int> nw(m + 1, INF);
for(pair<int, int> p : weights){
for(int w = m; w >= 0; w--){
if(w + p.second > m) continue;
nw[w + p.second] = min(nw[w + p.second], minDamage[i][w] + p.first);
}
}
// Extra soldiers never hurt: make the row non-increasing in damage.
for(int i = 1; i <= m; i++){
nw[i] = min(nw[i], nw[i-1]);
}
minDamage[i] = nw;
}
// Reconstruct the lexicographically smallest assignment: for each tower,
// take the fewest soldiers still compatible with the optimal total.
vector<int> p(n);
int left = m;
int mnDamage = minDamage[0][m];
int taken = 0;
for(int i = 0; i < n; i++){
for(int soldier = 0; soldier <= left; soldier++){
if(damage[i][soldier] + taken + minDamage[i+1][left - soldier] == mnDamage){
p[i] = soldier;
left -= soldier;
taken += damage[i][soldier];
break;
}
}
}
cout << mnDamage << endl;
for(int x : p) cout << x << " "; cout << "\n";
}
}
N. The Omer's orange tree
We will solve this problem by processing the queries offline and doing a sweepline. That is possible because \sum_{i=a}^b f(u, i) = \sum_{i=1}^b f(u, i) - \sum_{i=1}^{a-1} f(u, i). So, we will iterate over each j such that 1 \le j \le n and maintain a data structure such that we can calculate \sum_{i=1}^j f(u, i) quickly for any u. Additionally, we will store the answer of each query in an array ans, so that ans_k is the answer to the k-th query, which we will update over the sweepline.
The first thing we should do is flatten the tree into a one dimensional array using the Euler Tour Technique, so we can calculate the sum on the subtree of some node in O(\log n) by using a Fenwick Tree or Segment Tree.
Once we have computed the Euler Tour array, we will iterate over all j such that 1 \le j \le n. Firstly, we will need to update our data structure taking into account the new j, so we will iterate over all multiples of j and add 1 in the position in the Euler Tour array of the node with a weight equal to that multiple. Once we have done this, we can calculate \sum_{i=1}^j f(u, i) for any u in O(\log n) time by doing a range sum query on our data structure. Now, we will iterate over all the queries of the form (u, a, b) such that j + 1 = a and for each query k, substract \sum_{i=1}^j f(u, i) from ans_k. Similarly, we will iterate over all the queries such that j = b and for each query k, add \sum_{i=1}^j f(u, i) to ans_k.
To calculate the complexity of the solution one must take into account that, in total, we will visit \sum_{i=1}^n \frac{n}{i} multiples. That sum is known as the n-th harmonic number, which has a value of approximately n \ln n. Because we do a point update query on our data structure for each of those multiples, the complexity of this part is O(n \log^2 n).
Note that it can also be solved online with merge sort tree.
Final complexity: O(n \log^2 n).
#include<bits/stdc++.h>
using namespace std;
#define endl "\n"
typedef long long ll;
const int MOD = 998244353;
// Global Fenwick (binary indexed) tree state: `fenwick` is 1-indexed
// internally, `cntCurr` is the number of usable positions.
int cntCurr;
std::vector<int> fenwick;
// Lowest set bit of pos — the span of Fenwick node pos.
int lsb(int pos) {
    return pos & (-pos);
}
// Point update: add 1 at 0-based position pos.
void update(int pos) {
    for (pos++; pos <= cntCurr; pos += lsb(pos)) {
        fenwick[pos] += 1;
    }
}
// Prefix sum over internal positions 1..pos.
int query(int pos) {
    int total = 0;
    for (; pos > 0; pos -= lsb(pos)) {
        total += fenwick[pos];
    }
    return total;
}
// Sum over the 0-based inclusive range [l, r].
int calc(int l, int r){
    return l > 0 ? query(r + 1) - query(l) : query(r + 1);
}
// Euler tour: store entry/exit timestamps (drawn from the global counter
// cntCurr) for every node of the tree rooted at x; p is x's DFS parent.
void DFS(int x, vector<vector<int>>& adj, int p, vector<pair<int, int>>& times){
    times[x].first = cntCurr++;
    for(int nb : adj[x]){
        if(nb == p) continue;
        DFS(nb, adj, x, times);
    }
    times[x].second = cntCurr++;
}
// N. The Omer's orange tree — offline sweep over divisor index i = 1..n:
// queries [a, b] are answered as prefix(b) - prefix(a-1) using subtree-sum
// lookups on the Euler tour with a Fenwick tree.
void solve(){
int n, q; cin >> n >> q;
// which[w] = nodes whose weight equals w.
vector<vector<int>> which(n+1);
for(int i = 0; i < n; i++){
int w; cin >> w;
which[w].push_back(i);
}
vector<vector<int>> adj(n);
for(int i = 0; i < n - 1; i++){
int u, v; cin >> u >> v; u--; v--;
adj[u].push_back(v);
adj[v].push_back(u);
}
// Flatten the tree so a subtree is a contiguous timestamp range.
cntCurr = 0;
vector<pair<int, int>> times(n);
DFS(0, adj, -1, times);
fenwick.assign(cntCurr + 5, 0);
vector<int> ans(q);
// starts[a] / ends[b]: queries whose lower/upper bound triggers at that i.
vector<vector<pair<int, int>>> starts(n+1);
vector<vector<pair<int, int>>> ends(n+1);
for(int i = 0; i < q; i++){
int u, a, b; cin >> u >> a >> b; u--;
starts[a].push_back({u, i});
ends[b].push_back({u, i});
}
for(int i = 1; i <= n; i++){
// Subtract prefix(a-1): done before i's multiples are inserted.
for(pair<int, int> p : starts[i]){
ans[p.second] -= calc(times[p.first].first, times[p.first].second);
}
// Insert every node whose weight is a multiple of i (harmonic total).
for(int j = i; j <= n; j += i){
for(int v : which[j]){
update(times[v].first);
}
}
// Add prefix(b): done after i's multiples are inserted.
for(pair<int, int> p : ends[i]){
ans[p.second] += calc(times[p.first].first, times[p.first].second);
}
}
for(ll x : ans) cout << x << " "; cout << endl;
}
signed main(){
    // Fast stdio; then process the independent test cases one by one.
    ios_base::sync_with_stdio(0);
    cin.tie(0);
    int cases;
    cin >> cases;
    for(; cases > 0; cases--) solve();
}
O. Bea the maximizer
To solve this problem we will convert the arrays into a bipartite graph named G: nodes on the left will be positions in array a and nodes on the right will be positions in array b.
Let's define sz_{x} as the cardinality of the maximum bipartite matching if there is an edge between two nodes u and v if a_{u} + b_{v} is a submask of x.
First we will construct the answer bit per bit, in decreasing order of bits. When checking if we can set a certain bit i having a current solution x, we will check if sz_{x + 2^{i}} = n.
After doing this, we will know the maximum possible value, and to find the minimum maximum distance we will do a binary search of the distance and remove the edges of the graph between two nodes u and v if |u-v| is greater than the value being checked, then we will check if the size of the maximum bipartite matching is n.
Both Kuhn's algorithm and Hopcroft–Karp can get AC.
Complexity of the solution: \mathcal{O}(n^3 \cdot \log{A}) where A is the upper bound of the values in the arrays.
Note that, even though the code shared here in the editorial runs in 2200ms, we have a solution which runs in 700ms, it is just more complicated.
#include<bits/stdc++.h>
using namespace std;
#define endl "\n"
typedef long long ll;
const int MOD = 998244353;
const int N = 2000;
int n;
array<int, N> matched, has_edge, used;
vector<int> adj[N];
// Kuhn's augmenting-path step: try to match left vertex x, recursively
// re-matching previously matched right vertices. Returns true on success.
bool kuhn(int x){
    if(used[x]) return false;
    used[x] = 1;
    for(int v : adj[x]){
        // Take v if it is free, or if its current partner can move elsewhere.
        if(matched[v] != -1 && !kuhn(matched[v])) continue;
        matched[v] = x;
        has_edge[x] = 1;
        return true;
    }
    return false;
}
// Returns true iff the current bipartite graph `adj` (both sides of size n)
// admits a perfect matching. Seeds the matching greedily, then augments
// with Kuhn's algorithm from every left vertex left unmatched.
bool checkPerfectMatching(){
    matched.fill(-1);
    has_edge.fill(0);
    // Greedy warm start: hand each left vertex the first free neighbour.
    for(int u = 0; u < n; u++){
        for(int v : adj[u]){
            if(matched[v] != -1) continue;
            matched[v] = u;
            has_edge[u] = 1;
            break;
        }
    }
    // Augment from the left vertices the greedy pass could not serve.
    for(int u = 0; u < n; u++){
        if(!has_edge[u]){
            used.fill(0);
            kuhn(u);
        }
    }
    // Perfect iff every right vertex ended up matched.
    bool perfect = true;
    for(int v = 0; v < n && perfect; v++){
        if(matched[v] == -1) perfect = false;
    }
    return perfect;
}
void solve(){
cin >> n;
vector<int> a(n);
for(auto &i : a) cin >> i;
vector<int> b(n);
for(auto &i : b) cin >> i;
vector<pair<int, int>> edges;
for(int i = 0; i < n; i++){
for(int j = 0; j < n; j++){
edges.push_back({i, j});
}
}
int ans = 0;
for(int j = 30; j >= 0; j--){
for(int i = 0; i < n; i++) adj[i].clear();
vector<pair<int, int>> nw_edges;
for(pair<int, int> p : edges){
if((1 << j) & (a[p.first] + b[p.second])){
nw_edges.push_back(p);
adj[p.first].push_back(p.second);
}
}
bool gd = checkPerfectMatching();
if(gd){
ans += (1 << j);
edges = nw_edges;
}
}
int dist = n - 1;
int lo = 0;
int hi = n - 1;
while(lo <= hi){
int mid = (lo + hi) / 2;
for(int i = 0; i < n; i++) adj[i].clear();
for(pair<int, int> p : edges){
if(abs(p.first - p.second) <= mid) adj[p.first].push_back(p.second);
}
bool gd = checkPerfectMatching();
if(gd){
dist = mid;
hi = mid - 1;
}else lo = mid + 1;
}
cout << ans << " " << dist << endl;
}
// Entry point: reads the number of test cases and solves each in turn.
signed main(){
    ios_base::sync_with_stdio(0);
    cin.tie(0);
    int tt;
    cin >> tt;
    // int tt = 1;
    while(tt--) solve();
}
P. Ski resort
The first observation in this problem is that we can compress the graph into a graph of O(k) nodes with weighted edges, the nodes are the nodes from which there is an outgoing ski lift, which we will denote by special nodes, and the edges are the maximum number of seconds you can spend going from a to b by using exactly 1 ski lift. To calculate the weight of the edges, one can store, for each node, an array dist, where dist_{i, j} denotes the distance from node i to the j-th special node.
To compute this array, as the graph is acyclic, you can start a BFS from the nodes which have no outgoing edges. If you currently are in node u, you compute dist_u in the following way: if u is the special node number i, then dist_{u, i} = 0, if it is not a special node, we don't do this step. Then, for each outgoing edge from u to v, for each i, dist_{u, i} = \max(dist_{u, i}, 1 + dist_{v, i}).
After computing dist_u, for all edges from some v to u, you subtract 1 from the outdegree of v; if the outdegree of v becomes 0, you add it to the queue.
This algorithm ensures that the distance is always maximal, as you are considering all the possibilities and taking the maximum out of all of them, due to the order in which it is done.
Similar algorithms could work, for example using the topological order of the graph or doing a bottom-up DP. The important aspect is that all the children of a node are processed before it.
For the representation of the compressed graph, we will use a matrix A, A_{i, j} denoting the maximum distance from the i-th special node to the j-th special node, while using one of the outgoing ski lifts from the i-th special node.
Once we have calculated dist_{i, j}, we can calculate the matrix A by iterating over the ski lifts. Let the current ski lift go from a to b, then for all special nodes c reachable from b, A_{a,c} = \max(A_{a,c}, 1 + dist_{b, c}).
Now that we have calculated this matrix, it is easy to observe that we can easily get the distance from any special node to any other special node using k lifts by "merging" A with itself k-1 times. We use "merging" instead of exponentiating A to k, because the operation is not exactly the same as matrix multiplication, although the algorithm for computing it and the complexity are the same. Instead, if we "merge" two matrices A and B of size k \times k, in this problem we need the resulting matrix C to be defined in the following way: C_{i, j} = \max_{q} (A_{i, q} + B_{q, j}), taking only the indices q with A_{i, q} \neq -1 and B_{q, j} \neq -1 (and C_{i, j} = -1 if no such q exists, where -1 denotes "unreachable").
Even if it is not exactly matrix multiplication, we will denote A merged with itself k-1 times as A^k, for simplicity. So, we can calculate A^(2^p), for each p from 0 to \log_2 x, in O(k^3 \cdot \log x). With these matrices, we will do a greedy algorithm to determine the minimum number of ski lifts necessary. Note that we consider separately whether it is possible to spend x minutes using 0 or 1 ski lifts, but we won't discuss it as it is very easy.
We will iterate over the powers of 2 of the matrix that we previously calculated, going from p = \log_2 x to p = 0, while we keep a current matrix, the current ski lifts we have taken and the answer. Then, for each p, if merging our current matrix with A^(2^p) results in another matrix B such that the maximum element of B (plus taking into consideration the initial path to the ski lift and the final path from the ski lift, which is just small casework) is at least x, then we set the answer to be 2^p + \text{the current ski lifts we have taken}. Otherwise, we sum 2^p to the current ski lifts we have taken and set the current matrix to matrix B.
Final complexity: O(k^3 \log x).
#include <bits/stdc++.h>
using namespace std;
// "Merges" two k x k matrices in (max, +) fashion:
// res[i][j] = max over q of a[i][q] + b[q][j], where -1 encodes
// "unreachable" and is skipped; res[i][j] stays -1 if no q works.
vector<vector<long long>> mult(vector<vector<long long>>& a, vector<vector<long long>>& b){
    const int k = (int)a.size();
    vector<vector<long long>> res(k, vector<long long>(k, -1));
    for(int i = 0; i < k; i++){
        for(int q = 0; q < k; q++){
            if(a[i][q] == -1) continue; // whole (i, q) contribution is void
            for(int j = 0; j < k; j++){
                if(b[q][j] == -1) continue;
                res[i][j] = max(res[i][j], a[i][q] + b[q][j]);
            }
        }
    }
    return res;
}
// Returns true iff some pair of lift stations (i, j) satisfies
// dist[y][i] + mxLift[j] + 1 + dp[i][j] >= x, i.e. walking from y to
// station i, riding the lift chain described by `dp` to station j, then
// one more lift (+1) and the longest final descent mxLift[j] reaches the
// target time x. Entries equal to -1 mean "unreachable" and are skipped.
// `mx` and `revId` are unused; they are kept to preserve the signature.
bool isGood(vector<vector<long long>>& dp, int x, int y, vector<vector<int>>& dist, vector<int>& mx, vector<int>& mxLift, vector<int>& revId){
    const int k = (int)dp.size();
    for(int i = 0; i < k; i++){
        if(dist[y][i] == -1) continue; // station i not reachable from y
        for(int j = 0; j < k; j++){
            if(dp[i][j] == -1) continue;
            const long long total = (long long)dist[y][i] + mxLift[j] + 1 + dp[i][j];
            if(total >= x) return true;
        }
    }
    return false;
}
// Problem P driver: per test case, compresses the acyclic slope graph onto
// the lift stations, builds a (max,+) matrix between stations, and answers
// "minimum number of lifts to spend at least x seconds starting at y" by
// doubling the matrix and scanning powers of two greedily.
int main(){
ios_base::sync_with_stdio(0);
cin.tie(0);
int tt; cin >> tt;
while(tt--){
int n, m, k; cin >> n >> m >> k;
// adj: slope edges (acyclic per the editorial); radj: reversed edges;
// out: remaining outdegree, used to process nodes after all successors.
vector<vector<int>> adj(n);
vector<vector<int>> radj(n);
vector<int> out(n, 0);
for(int i = 0; i < m; i++){
int u, v; cin >> u >> v; u--; v--;
adj[u].push_back(v);
radj[v].push_back(u);
out[u]++;
}
// lifts[a]: drop-off nodes of the ski lifts leaving node a.
// Nodes with at least one outgoing lift ("stations") get a compact id in
// [0, cntLift); revId maps the compact id back to the original node.
vector<vector<int>> lifts(n);
vector<int> id(n, -1);
vector<int> revId;
int cntLift = 0;
for(int i = 0; i < k; i++){
int a, b; cin >> a >> b; a--; b--;
lifts[a].push_back(b);
if(id[a] == -1){
revId.push_back(a);
id[a] = cntLift; cntLift++;
}
}
// Reverse-topological BFS: start from sinks (no outgoing slopes); a node
// is finalized only once all of its successors have been processed.
queue<int> q;
for(int i = 0; i < n; i++){
if(!out[i]) q.push(i);
}
// dist[u][j]: longest walk (in edges) from u to the j-th station, -1 if
// unreachable. mx[u]: longest walk from u to anywhere (final descent).
vector<vector<int>> dist(n, vector<int>(cntLift, -1));
vector<int> mx(n, 0);
while(!q.empty()){
int x = q.front(); q.pop();
// Relax through every outgoing slope; successors are already final.
for(int node : adj[x]){
for(int j = 0; j < cntLift; j++){
if(dist[node][j] == -1) continue;
dist[x][j] = max(dist[x][j], dist[node][j] + 1);
}
mx[x] = max(mx[x], mx[node] + 1);
}
// A station is at distance 0 from itself.
if(id[x] != -1) dist[x][id[x]] = 0;
// Unlock predecessors whose last unprocessed successor was x.
for(int node : radj[x]){
out[node]--;
if(!out[node]) q.push(node);
}
}
// distLift[i][j]: longest walk from any drop-off of station i's lifts to
// station j (-1 if impossible) — i.e. one lift ride plus the walk after it.
// mxLift[i]: longest descent possible after riding one lift from station i.
vector<vector<long long>> distLift(cntLift, vector<long long>(cntLift, -1));
vector<int> mxLift(cntLift, 0);
for(int i = 0; i < n; i++){
if(id[i] == -1) continue;
for(int node : lifts[i]){
mxLift[id[i]] = max(mxLift[id[i]], mx[node]);
for(int j = 0; j < cntLift; j++){
distLift[id[i]][j] = max(distLift[id[i]][j], (long long)dist[node][j]);
}
}
}
int x, y; cin >> x >> y; y--;
// 0 lifts: the plain descent from y already lasts long enough.
if(mx[y] >= x){
cout << 0 << "\n"; continue;
}
// 1 lift: walk to station i, ride once (+1), then the longest descent.
bool found = false;
for(int i = 0; i < cntLift; i++){
if(dist[y][i] == -1) continue;
if(dist[y][i] + mxLift[i] + 1 >= x){
cout << 1 << "\n";
found = true;
break;
}
}
if(found) continue;
// dp[p] = distLift merged with itself 2^p - 1 times: best station-to-station
// times using exactly 2^p lifts (see mult(); -1 entries propagate as
// "unreachable", so the value-initialized zeros are always overwritten).
vector<vector<vector<long long>>> dp(30, vector<vector<long long>>(cntLift, vector<long long>(cntLift)));
dp[0] = distLift;
for(int pw = 1; pw < 30; pw++){
dp[pw] = mult(dp[pw-1], dp[pw-1]);
}
// Greedy scan, high power to low. Invariant: `curr` lifts (matrix currDist)
// are NOT enough. If curr + 2^pw lifts suffice, record the candidate answer;
// otherwise commit those lifts and keep the merged matrix.
// NOTE(review): x is reduced by (curr + 2^pw), presumably one second per
// lift ride, which mult()/distLift do not account for — confirm against
// the statement.
int ans = 0;
int curr = 0;
vector<vector<long long>> currDist;
for(int pw = 29; pw >= 0; pw--){
if((int)currDist.size() == 0){
if(isGood(dp[pw], x - (curr + (1 << pw)), y, dist, mx, mxLift, revId)) ans = curr + (1 << pw);
else{
currDist = dp[pw];
curr += (1 << pw);
}
}else{
vector<vector<long long>> nw = mult(currDist, dp[pw]);
if(isGood(nw, x - (curr + (1 << pw)), y, dist, mx, mxLift, revId)) ans = curr + (1 << pw);
else{
currDist = nw;
curr += (1 << pw);
}
}
}
// +1 for the final lift taken before the closing descent (the "+1" and
// mxLift term inside isGood), on top of the `ans` lifts between stations.
cout << ans + 1 << "\n";
}
}