A. The Oracle's Curse
Author: aditya1977
Observation
An alarm rings when a stock with a low starting price passes a stock with a high starting price. Since all stocks go up by exactly $$$1$$$ every day, a lower stock can only pass a higher stock if the higher one hits its maximum limit and stops growing. Once it passes, it stays ahead forever. Because the total number of alarms only goes up over time (it never goes back down), we can easily use Binary Search. In coding terms, when a smaller starting value overtakes a larger one, we call this an inversion.
Definition
Let $$$N$$$ be the number of stocks and $$$g$$$ be the target alarms. First, we sort all stocks by their starting prices $$$s_i$$$. Each stock is stored as a pair:
For any guessed day $$$D$$$, the new projected price $$$P_i$$$ of the $$$i$$$-th stock will be either its limit or its current grown value, whichever is smaller:
Steps and Logic
1. Guess the Day We use binary search to guess the answer $$$D$$$ between $$$0$$$ and $$$10^{10}$$$. Let's call our current guess $$$mid$$$.
2. Find the Prices For our guessed day $$$mid$$$, we calculate what all the new prices $$$P_i$$$ will be and update our array.
3. Count the Inversions We count the inversions in these new prices using Merge Sort.
While merging two sorted halves together, if a number on the right side is smaller than a number on the left side, it means it has passed all the remaining numbers on the left. We add them to our total like this:
4. Change the Guess If $$$\text{inversions} \geq g$$$, this day works! But maybe an earlier day works too, so we search the lower half (hi = mid). If we don't have enough alarms yet, we need more days, so we search the upper half (lo = mid).
Complexity Analysis
We need to find the exact day when the alarms hit our target $$$g$$$. Checking every single day one by one is way too slow. Instead, we guess the day using Binary Search and count the alarms using Merge Sort.
This gives us a time complexity of $$$O(N \log N \cdot \log(\text{maxDay}))$$$ — the binary search over days up to $$$10^{10}$$$ contributes the $$$\log(\text{maxDay})$$$ factor, and each check runs one $$$O(N \log N)$$$ merge sort:
#include <iostream>
#include <vector>
#include <algorithm>
using namespace std;
/* Merge step of the inversion-counting merge sort.
 * Merges the (already individually sorted) .second values of
 * arr[left..mid] and arr[mid+1..right] back into arr, adding to `count`
 * the number of cross pairs where a left-half value exceeds a
 * right-half value. The .first keys are left untouched.
 */
void merge(vector<pair<long long, long long>>& arr, int left, int mid, int right, long long& count) {
    vector<long long> lo_half, hi_half;
    lo_half.reserve(mid - left + 1);
    hi_half.reserve(right - mid);
    for (int p = left; p <= mid; ++p) lo_half.push_back(arr[p].second);
    for (int p = mid + 1; p <= right; ++p) hi_half.push_back(arr[p].second);
    size_t a = 0, b = 0;
    int out = left;
    while (a < lo_half.size() && b < hi_half.size()) {
        if (lo_half[a] <= hi_half[b]) {
            arr[out++].second = lo_half[a++];
        } else {
            // hi_half[b] jumps ahead of every element still waiting in the
            // left half, so each of those forms one inversion with it.
            count += (long long)(lo_half.size() - a);
            arr[out++].second = hi_half[b++];
        }
    }
    while (a < lo_half.size()) arr[out++].second = lo_half[a++];
    while (b < hi_half.size()) arr[out++].second = hi_half[b++];
}
/* Recursively sorts arr[left..right] by .second, accumulating the total
 * number of inversions of that value sequence into `count`.
 */
void mergeSort(vector<pair<long long, long long>>& arr, int left, int right, long long& count) {
    if (left >= right) return;  // zero or one element: nothing to do
    const int middle = left + (right - left) / 2;
    mergeSort(arr, left, middle, count);
    mergeSort(arr, middle + 1, right, count);
    merge(arr, left, middle, right, count);
}
/* Returns true when, after `days` days of growth, the number of alarms
 * (inversions among projected prices) is at least g.
 * arr[i] = {start price, price cap}. The caps are stashed in original_u,
 * temporarily replaced by the projected price min(cap, start + days) for
 * the inversion count, and restored before returning so arr is unchanged
 * for the caller.
 */
bool check(long long days, vector<pair<long long, long long>>& arr, vector<long long>& original_u, int n, long long g) {
    long long inversions = 0;
    for (int idx = 0; idx < n; idx++) {
        original_u[idx] = arr[idx].second;  // stash the ceiling
        // Projected price: grown value, clamped at the stock's limit.
        arr[idx].second = min(arr[idx].second, arr[idx].first + days);
    }
    mergeSort(arr, 0, n - 1, inversions);
    for (int idx = 0; idx < n; idx++) {
        arr[idx].second = original_u[idx];  // restore the ceilings
    }
    return inversions >= g;
}
// Driver for problem A: reads the stocks, then binary searches the first
// day on which the inversion count reaches g.
int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    int n;
    long long g;
    if (cin >> n >> g) {
        // s = starting prices, u = price limits (ceilings).
        vector<long long> s(n), u(n);
        for (int i = 0; i < n; i++) cin >> s[i];
        for (int i = 0; i < n; i++) cin >> u[i];
        vector<pair<long long, long long>> arr(n);
        for (int i = 0; i < n; i++) {
            arr[i] = {s[i], u[i]};
        }
        // Sort by starting price: inversions of the projected prices are
        // then exactly the "lower stock passed a higher stock" alarms.
        sort(arr.begin(), arr.end());
        vector<long long> original_u(n, 0);  // scratch buffer reused by check()
        // Invariant maintained below: check(lo) is false, check(hi) is true
        // (lo = 0 is never tested; hi = 1e10 is assumed large enough).
        long long lo = 0;
        long long hi = 10000000000LL;
        while (lo + 1 < hi) {
            long long mid = lo + (hi - lo) / 2;
            if (check(mid, arr, original_u, n, g)) {
                hi = mid;
            } else {
                lo = mid;
            }
        }
        // lo == 1e10 - 1 means even the maximum day never produced g alarms.
        if (lo == 10000000000LL - 1) {
            cout << -1 << "\n";
        } else {
            // NOTE(review): at this point hi (= lo + 1) is the smallest day
            // with >= g alarms; printing lo reports the last day BEFORE the
            // target is reached — confirm against the problem statement.
            cout << lo << "\n";
        }
    }
    return 0;
}
B. Array Construction
Author: Utsav_Kasvala
Because $$$x$$$ can be as large as $$$10^9$$$, an $$$O(x)$$$ solution is infeasible.
Key Observation
The actual values inside the array do not matter. Only the length of the current consecutive streak matters.
Since all $$$x$$$ values behave symmetrically, we only track the streak length of the last chosen value.
DP State Definition
Let $$$dp[k]$$$ represent the number of valid arrays of the current length where the last value appears exactly $$$k$$$ consecutive times.
Initial State
For arrays of length $$$1$$$:
(Choose any of the $$$x$$$ values)
Transition Logic
To extend an array from length $$$L$$$ to length $$$L+1$$$, we have two choices:
1. Choose a Different Value
There are $$$x-1$$$ choices.
This resets the streak length to $$$1$$$.
2. Choose the Same Value
Allowed only if current streak length $$$k \lt y$$$.
So:
Matrix Representation
We define a transition matrix $$$M$$$ of size $$$y \times y$$$:
Meaning:
- First row transitions to streak length $$$1$$$
- Subdiagonal shifts $$$k \rightarrow k+1$$$
Initial Vector
Final State
To compute arrays of length $$$x$$$:
Final answer:
Complexity Analysis
Matrix exponentiation on a $$$y \times y$$$ matrix costs $$$O(y^3 \log x)$$$. Given $$$y \leq 60$$$, this easily fits within limits.
Conclusion
Instead of tracking complete arrays, we only track the current streak length.
This converts the problem into a linear recurrence system solvable using matrix exponentiation, allowing efficient handling of very large $$$x$$$.
#include<bits/stdc++.h>
using namespace std;
//-----------------------------------------------------------------------------------------
#define hi_bro ios_base::sync_with_stdio(false); cin.tie(NULL); cout.tie(NULL);
#define test int T; cin>>T; for(int t=1;t<=T;t++)
#define int long long
#define ld long double
#define yes cout<<"YES"<<endl
#define no cout<<"NO"<<endl
#define f(i,x,n) for(int i = x; i < n; i++)
#define fo(i,x,n) for(int i=x;i>=n;i--)
#define pb push_back
#define pp pop_back()
#define fi first
#define se second
#define all(v) (v).begin(), (v).end()
#define array_le_le ll a[n]; f(i,0,n){cin>>a[i];}
//-----------------------------------------------------------------------------------------
const int mod=1e9+7;
/* Multiplies two square matrices of equal dimension modulo the global
 * `mod` and returns the product. Neither input is modified.
 */
vector<vector<long long>> multiply(vector<vector<long long> > &A,vector<vector<long long> > &B)
{
    const int n = A.size();
    vector<vector<long long> > prod(n, vector<long long>(n));
    for (int row = 0; row < n; ++row) {
        for (int col = 0; col < n; ++col) {
            long long acc = 0;
            // Accumulate with a reduction every step so the running sum
            // stays below mod and each product fits in long long.
            for (int t = 0; t < n; ++t)
                acc = (acc + (A[row][t] * B[t][col]) % mod) % mod;
            prod[row][col] = acc;
        }
    }
    return prod;
}
/* Computes M^expo (entries modulo the global `mod`) by binary
 * exponentiation and returns the result.
 * NOTE: M is squared in place, so the caller's matrix is clobbered
 * (the single caller in this file does not reuse it).
 * Fix: removed the spurious `expo %= mod;` from the loop — an exponent
 * must never be reduced modulo the prime. It happened to be a no-op here
 * only because the exponent starts below `mod`, but it is wrong in
 * general and would silently corrupt the result for larger exponents.
 */
vector<vector<long long> >power(vector<vector<long long> > &M, int expo)
{
    int b = M.size();
    // Start from the b x b identity matrix.
    vector<vector<long long> > ans(b, vector<long long>(b, 0));
    for (int i = 0; i < b; i++) {
        ans[i][i] = 1;
    }
    while (expo) {
        if (expo & 1) {
            ans = multiply(ans, M);  // this bit of the exponent is set
        }
        M = multiply(M, M);          // square for the next bit
        expo >>= 1;
    }
    return ans;
}
/* Reads x (array length) and y (maximum allowed streak), builds the
 * y-by-y streak-transition matrix, raises it to the (x-1)-th power and
 * sums the resulting dp vector: the number of valid arrays modulo 1e9+7.
 */
void solve()
{
    int x, y;
    cin >> x >> y;
    // dp over streak lengths 1..y; a length-1 array has x choices,
    // all with streak length 1.
    vector<int> start(y, 0), dp(y, 0);
    start[0] = x;
    // Row 0: picking a different value (x-1 ways) resets the streak to 1.
    // Subdiagonal: picking the same value extends streak k -> k+1.
    vector<vector<int>> M(y, vector<int>(y, 0)), Mx;
    for (int j = 0; j < y; j++) {
        M[0][j] = x - 1;
    }
    for (int k = 1; k < y; k++) {
        M[k][k - 1] = 1;
    }
    Mx = power(M, x - 1);  // x-1 transitions grow length 1 into length x
    for (int r = 0; r < y; r++) {
        for (int c = 0; c < y; c++) {
            dp[r] = (dp[r] + (Mx[r][c] * start[c]) % mod) % mod;
        }
    }
    int total = 0;
    for (int r = 0; r < y; r++) {
        total = (total + dp[r]) % mod;
    }
    cout << total << "\n";
}
// Entry point: fast-I/O setup (the hi_bro macro, expanded), one test case.
int32_t main()
{
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    cout.tie(NULL);
    solve();
    return 0;
}
C. Kings pawn endgame
Author: rky_cse
Problem Summary
You are analyzing a simplified chess endgame on an 8x8 board. The pieces are a White King, a White Pawn, and a Black King. * White's Goal: Win the game by safely promoting the pawn to the top row or capturing the Black King. * Black's Goal: Force a Draw by capturing the pawn, forcing a stalemate, or surviving indefinitely (triggering the 50-move rule).
Assuming both players play perfectly, you must determine if White can force a win or if Black can hold a draw.
The Core Insight: The State Space is Tiny
In many game theory problems, the best strategy is to first count how many possible board configurations (states) can exist. Let's look at this game: * White King: 8 × 8 = 64 squares * White Pawn: 8 × 8 = 64 squares * Black King: 8 × 8 = 64 squares * Turn: 2 options (White or Black)
The absolute maximum number of states is 64 × 64 × 64 × 2 = 524,288. Since 5.2 × 10⁵ is a very small number for modern computers, we don't need to write a complex chess AI. Instead, we can map out every single possible state, connect them based on legal moves, and solve the game by working backwards from the ending positions using a Reverse Breadth-First Search (BFS).
The Approach: Reverse BFS
Instead of simulating the game forward (which can get stuck in infinite loops), we figure out the outcomes of the very last moves of the game, and trace backwards to see which player can force those outcomes.
Here is the step-by-step logic:
1. State Representation (Pure Coordinates) To keep things incredibly simple and avoid confusing index math, we represent our states exactly as they appear on the board using a 7-Dimensional array: result[wx][wy][px][py][bx][by][turn]. Whenever we read a coordinate like px or bx, we immediately know exactly where the piece is.
2. Graph Construction We use 7 nested loops to generate every possible state. For each valid state, we look at all legal moves a piece can make. * We count how many valid moves leave this state and store it in an out_degree variable. * We also build a parents list. If State A can legally move to State B, we add State A to State B's list of parents. This allows us to travel backwards later.
3. Base Cases (Terminal States) While checking moves, if we find a move that immediately ends the game, we label it and add it to our BFS queue: * Instant Win: White moves the pawn to the 8th row safely, or the pawn captures the Black King. * Instant Draw: Black captures the pawn. * No Moves Left: If we finish checking a state and its out_degree is 0, the game is over. * If it is White's turn, it's a Stalemate (Draw). * If it is Black's turn, we check if Black is in check. If yes, it's Checkmate (White Wins). Otherwise, Stalemate (Draw).
4. Backward Propagation (The BFS) We pull resolved states out of our queue one by one. Let's say we pull out State V (which we know the outcome of). We look at all of its parent states U: * If it is White's turn at state U: White wants to win. If V is a "Win", White will gladly choose this move! So U becomes a "Win" and we add it to the queue. If V is a "Draw", White avoids it. We decrease the out_degree of U. If out_degree hits 0, White is trapped—every move leads to a draw, so U becomes a "Draw". * If it is Black's turn at state U: Black wants to draw. If V is a "Draw", Black will happily choose it. So U becomes a "Draw" and goes into the queue. If V is a "Win", Black avoids it, so we decrease the out_degree. If it hits 0, Black is doomed, and U becomes a "Win".
5. The 50-Move Rule (Cycles) What happens to states stuck in a loop that never reach the queue? In chess, if nobody makes progress, the 50-move rule activates and the game is a Draw. Therefore, any state that remains "UNKNOWN" at the end of our BFS is simply printed as a Draw!
Complexity
- Time Complexity: $$$\mathcal{O}(V + E)$$$ where $$$V \le 524,288$$$ (states) and $$$E \le 8 \times V$$$ (max moves per state).
- Space/Memory Complexity: $$$\mathcal{O}(V + E)$$$ to store the reverse graph and arrays.
#include <bits/stdc++.h>
using namespace std;
// Game-theoretic labels for every position. UNKNOWN is 0 on purpose:
// the global arrays below are zero-initialized, so all states start UNKNOWN.
const int UNKNOWN = 0;
const int WIN = 1;
const int DRAW = 2;
// One game position: White king, White pawn, Black king coordinates and
// whose turn it is (turn == 0 -> White to move, turn == 1 -> Black to move).
struct State
{
    int wx, wy, px, py, bx, by, turn;
};
// Dimensions: [White King X][White King Y][White Pawn X][White Pawn Y][Black King X][Black King Y][Turn]
vector<State> parents[8][8][8][8][8][8][2];  // reverse edges: predecessors of each state
int out_degree[8][8][8][8][8][8][2];         // unresolved legal moves remaining out of each state
int result[8][8][8][8][8][8][2];             // WIN / DRAW / UNKNOWN per state
// Helper functions for chess rules
/* True iff (x, y) lies on the 8x8 board (0-based coordinates). */
bool in_bounds(int x, int y)
{
    return 0 <= x && x < 8 && 0 <= y && y < 8;
}
/* True iff the squares (wx,wy) and (bx,by) coincide or touch, i.e. the
 * two kings would be adjacent (Chebyshev distance at most 1). */
bool kings_adjacent(int wx, int wy, int bx, int by)
{
    return max(abs(wx - bx), abs(wy - by)) <= 1;
}
/* True iff White attacks square (target_x, target_y): either the White
 * king at (wx, wy) is within one square of it, or the White pawn at
 * (px, py) attacks it diagonally (one rank up, one file to either side).
 */
bool attacked_by_white(int target_x, int target_y, int wx, int wy, int px, int py)
{
    // White king coverage (Chebyshev distance <= 1, inlined).
    if (abs(target_x - wx) <= 1 && abs(target_y - wy) <= 1)
        return true;
    // The pawn's two capture squares.
    if (target_y == py + 1 && abs(target_x - px) == 1)
        return true;
    return false;
}
// Driver for problem C: enumerates all ~5.2e5 positions, builds the reverse
// move graph, seeds the terminal positions, then runs a retrograde BFS to
// label every state WIN (for White) or DRAW.
int main()
{
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    // Starting position: White king, White pawn, Black king (0-based coords).
    int start_wx, start_wy, start_px, start_py, start_bx, start_by;
    cin >> start_wx >> start_wy >> start_px >> start_py >> start_bx >> start_by;
    queue<State> q;  // BFS queue of states whose outcome is already known
    // Step 1 & 2: Build the state graph by iterating through every possible coordinate
    for (int wx = 0; wx < 8; wx++)
    {
        for (int wy = 0; wy < 8; wy++)
        {
            for (int px = 0; px < 8; px++)
            {
                for (int py = 0; py < 8; py++)
                {
                    for (int bx = 0; bx < 8; bx++)
                    {
                        for (int by = 0; by < 8; by++)
                        {
                            for (int turn = 0; turn < 2; turn++)
                            {
                                // Filter out physically impossible board setups
                                if ((wx == px && wy == py) || (wx == bx && wy == by) || (px == bx && py == by))
                                    continue;
                                if (kings_adjacent(wx, wy, bx, by))
                                    continue;
                                if (py == 0 || py == 7)
                                    continue; // Pawns are never naturally on the 1st or 8th rank
                                State curr = {wx, wy, px, py, bx, by, turn};
                                int degree = 0;            // legal non-terminal moves out of curr
                                bool instant_win = false;  // a move ends the game in White's favor
                                bool instant_draw = false; // a move lets Black secure a draw
                                if (turn == 0)
                                { // --- WHITE'S TURN ---
                                    // 1. White King moves
                                    for (int dx = -1; dx <= 1; ++dx)
                                    {
                                        for (int dy = -1; dy <= 1; ++dy)
                                        {
                                            if (dx == 0 && dy == 0)
                                                continue;
                                            int nx = wx + dx, ny = wy + dy;
                                            // May not land on his own pawn or next to the enemy king.
                                            if (in_bounds(nx, ny) && !(nx == px && ny == py) && !kings_adjacent(nx, ny, bx, by))
                                            {
                                                parents[nx][ny][px][py][bx][by][1].push_back(curr);
                                                degree++;
                                            }
                                        }
                                    }
                                    // 2. White Pawn moves Forward 1 step
                                    int npy1 = py + 1;
                                    if (in_bounds(px, npy1) && !(px == bx && npy1 == by) && !(px == wx && npy1 == wy))
                                    {
                                        if (npy1 == 7)
                                        {
                                            // Promotion logic: Win if protected or Black can't immediately capture
                                            bool w_protects = kings_adjacent(wx, wy, px, npy1);
                                            bool b_captures = kings_adjacent(bx, by, px, npy1);
                                            if (w_protects || !b_captures)
                                                instant_win = true;
                                        }
                                        else
                                        {
                                            parents[wx][wy][px][npy1][bx][by][1].push_back(curr);
                                            degree++;
                                        }
                                    }
                                    // 3. White Pawn moves Forward 2 steps (only valid from row 1)
                                    if (py == 1)
                                    {
                                        int npy_block = 2, npy2 = 3;
                                        // Both the pass-through square and the landing square must be empty.
                                        bool blocked_1 = (px == wx && npy_block == wy) || (px == bx && npy_block == by);
                                        bool blocked_2 = (px == wx && npy2 == wy) || (px == bx && npy2 == by);
                                        if (!blocked_1 && !blocked_2)
                                        {
                                            parents[wx][wy][px][npy2][bx][by][1].push_back(curr);
                                            degree++;
                                        }
                                    }
                                    // 4. White Pawn diagonal capture (Immediate Win condition)
                                    if (by == py + 1 && abs(bx - px) == 1)
                                    {
                                        instant_win = true;
                                    }
                                }
                                else
                                { // --- BLACK'S TURN ---
                                    // 1. Black King moves
                                    for (int dx = -1; dx <= 1; ++dx)
                                    {
                                        for (int dy = -1; dy <= 1; ++dy)
                                        {
                                            if (dx == 0 && dy == 0)
                                                continue;
                                            int nx = bx + dx, ny = by + dy;
                                            if (in_bounds(nx, ny) && !(nx == wx && ny == wy) && !kings_adjacent(wx, wy, nx, ny))
                                            {
                                                // Black King cannot move into check
                                                if (!attacked_by_white(nx, ny, wx, wy, px, py))
                                                {
                                                    if (nx == px && ny == py)
                                                    {
                                                        instant_draw = true; // Capturing the pawn guarantees at least a draw
                                                    }
                                                    else
                                                    {
                                                        parents[wx][wy][px][py][nx][ny][0].push_back(curr);
                                                        degree++;
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                                out_degree[wx][wy][px][py][bx][by][turn] = degree;
                                // Step 3: Base Case Setup - Queue terminal states
                                if (turn == 0)
                                { // White
                                    if (instant_win)
                                    {
                                        result[wx][wy][px][py][bx][by][turn] = WIN;
                                        q.push(curr);
                                    }
                                    else if (degree == 0)
                                    {
                                        result[wx][wy][px][py][bx][by][turn] = DRAW;
                                        q.push(curr); // Stalemate
                                    }
                                }
                                else
                                { // Black
                                    if (instant_draw)
                                    {
                                        result[wx][wy][px][py][bx][by][turn] = DRAW;
                                        q.push(curr);
                                    }
                                    else if (degree == 0)
                                    {
                                        // Determine if no moves means Checkmate or Stalemate
                                        // (in check here means: attacked by the pawn diagonally)
                                        bool in_check = (by == py + 1 && abs(bx - px) == 1);
                                        result[wx][wy][px][py][bx][by][turn] = in_check ? WIN : DRAW;
                                        q.push(curr);
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    // Step 4: Backward Propagation (Reverse BFS)
    while (!q.empty())
    {
        State curr = q.front();
        q.pop();
        int res = result[curr.wx][curr.wy][curr.px][curr.py][curr.bx][curr.by][curr.turn];
        for (State p : parents[curr.wx][curr.wy][curr.px][curr.py][curr.bx][curr.by][curr.turn])
        {
            if (result[p.wx][p.wy][p.px][p.py][p.bx][p.by][p.turn] != UNKNOWN)
                continue; // Skip already resolved states
            if (p.turn == 0)
            { // Parent is White's turn
                if (res == WIN)
                {
                    result[p.wx][p.wy][p.px][p.py][p.bx][p.by][p.turn] = WIN;
                    q.push(p); // White found a guaranteed win!
                }
                else
                {
                    out_degree[p.wx][p.wy][p.px][p.py][p.bx][p.by][p.turn]--;
                    if (out_degree[p.wx][p.wy][p.px][p.py][p.bx][p.by][p.turn] == 0)
                    {
                        result[p.wx][p.wy][p.px][p.py][p.bx][p.by][p.turn] = DRAW;
                        q.push(p); // White is trapped into a draw
                    }
                }
            }
            else
            { // Parent is Black's turn
                if (res == DRAW)
                {
                    result[p.wx][p.wy][p.px][p.py][p.bx][p.by][p.turn] = DRAW;
                    q.push(p); // Black found a guaranteed draw!
                }
                else
                {
                    out_degree[p.wx][p.wy][p.px][p.py][p.bx][p.by][p.turn]--;
                    if (out_degree[p.wx][p.wy][p.px][p.py][p.bx][p.by][p.turn] == 0)
                    {
                        result[p.wx][p.wy][p.px][p.py][p.bx][p.by][p.turn] = WIN;
                        q.push(p); // Black is doomed to lose
                    }
                }
            }
        }
    }
    // Output Result (White always moves first in the queried position)
    if (result[start_wx][start_wy][start_px][start_py][start_bx][start_by][0] == WIN)
    {
        cout << "White Wins\n";
    }
    else
    {
        // Step 5: If the state is still UNKNOWN, it represents an infinite cycle (50-move rule).
        cout << "Draw\n";
    }
    return 0;
}
D. Mirror Substring Cost
Author: isani_alis, Utsav_Kasvala
Observation 1: Split Into Two Parts
The problem naturally divides into two tasks: computing array $$$A$$$, and computing insertion costs efficiently. We solve both parts separately.
Part 1: Computing Distinct Substring Counts
We need many prefix and suffix distinct substring counts. Recomputing each value independently would be too slow.
We use a dynamic suffix tree supporting:
- append_back(c) → add a character at the end
- pop_front() → remove the first character
- no_distinct() → current number of distinct substrings
Each operation works in amortized $$$O(1)$$$ time.
Prefix Values
For every $$$i=0$$$ to $$$mid$$$, insert $$$S[i]$$$ and store the current count.
So $$$A[i]=distinct(S[0..i])$$$.
Suffix Values
Initialize the structure using $$$S[mid+1..n-1]$$$.
Then repeatedly store current count and remove the first character.
So for all $$$i \gt mid$$$, $$$A[i]=distinct(S[i..n-1])$$$.
Observation 2: Coordinate Compression
Values inside $$$A$$$ may be large, while Fenwick Tree requires compact indices.
So we compress all values into ranks while preserving relative order.
Example:
$$$[15,7,15,20]\rightarrow[2,1,2,3]$$$
Part 2: Computing Costs Efficiently
We process $$$A$$$ from left to right.
For current compressed value $$$x$$$:
$$$less=query(x-1)$$$
$$$greater=inserted-query(x)$$$
Then:
$$$cost=\min(less,greater)$$$
Add this to the answer and insert $$$x$$$ into Fenwick Tree.
Each step takes $$$O(\log n)$$$.
Why Fenwick Tree Works
Fenwick Tree supports point updates and prefix sum queries.
Hence we can efficiently count:
- previous values smaller than current
- previous values greater than current
in logarithmic time.
Complexity Analysis
Building array $$$A$$$: $$$O(n)$$$
Coordinate compression: $$$O(n\log n)$$$
Fenwick processing: $$$O(n\log n)$$$
Total complexity: $$$O(n\log n)$$$
This easily fits $$$n\le2\times10^5$$$.
Conclusion
The solution combines a dynamic suffix tree for substring counting and a Fenwick Tree for online rank statistics.
By separating both tasks cleanly, the complete problem is solved efficiently in $$$O(n\log n)$$$.
#include <algorithm>
#include <bits/stdc++.h>
#include <numeric>
#include <queue>
#include <string>
#include <utility>
#define ll long long
#define pb push_back
#define fr(a,b) for(int i = a; i < b; i++)
#define rep(i,a,b) for(int i = a; i < b; i++)
#define pii pair<int,int>
#define MOD 998244353
#define inf (1LL<<60)
#define all(x) (x).begin(), (x).end()
#define prDouble(x) cout << fixed << setprecision(10) << x << endl;
#define triplet pair<ll,pair<ll,ll>>
#define goog(tno) cout << "Case #" << tno <<": "
#define fast_io ios_base::sync_with_stdio(false);cin.tie(NULL);cout.tie(0);
#define read(x) int x; cin >> x
#define arr(a,n) int a[n]; rep(i,0,n){cin>>a[i];}
#define vi vector<ll>
#define vii vector<vector<ll>>
#define vb vector<bool>
#define vc vector<char>
#define vp vector<pii>
#define vs vector<string>
#define mii unordered_map<ll,ll>
#define mib unordered_map<ll,bool>
#define F first
#define S second
using namespace std;
using lli = long long int;
using llu = unsigned long long int;
using ld = long double;
#define int long long
#define sz(x) x.size()
// One-time I/O speedup: detach C++ streams from C stdio and untie cin/cout
// (the fast_io macro, expanded for clarity).
void init_code(){
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    cout.tie(0);
}
// Lowers `a` to `b` when b is smaller; returns whether a changed.
template<class T> bool ckmin(T& a, const T& b) {
    if (b < a) {
        a = b;
        return true;
    }
    return false;
}
// Raises `a` to `b` when b is larger; returns whether a changed.
template<class T> bool ckmax(T& a, const T& b) {
    if (a < b) {
        a = b;
        return true;
    }
    return false;
}
// Stream-extracts every element of v in order (v must be pre-sized).
template<typename T>
istream& operator>>(istream& in, vector<T>& v) {
    for (size_t idx = 0; idx < v.size(); ++idx) in >> v[idx];
    return in;
}
// Stream-inserts the elements, each followed by a single space
// (note: trailing space after the final element, matching contest use).
template<typename T>
ostream& operator<<(ostream& out, const vector<T>& v) {
    for (size_t idx = 0; idx < v.size(); ++idx) out << v[idx] << " ";
    return out;
}
/* Suffix tree over a sliding window of characters (Ukkonen-style online
 * construction, extended with front deletion). Public interface:
 *   append_back(c)  - extend the window by one character
 *   pop_front()     - drop the oldest character
 *   no_distinct()   - number of distinct substrings of the current window
 * The author's comments claim amortized O(1) per operation (map edges add
 * a log-alphabet factor). NOTE(review): `int` is long long here via the
 * file-wide macro; `sz(x)` is the file's x.size() macro.
 */
struct SuffixTree {
private:
    string S;             // all text appended so far (front chars are removed logically, not physically)
    ll cnt = 0;           // current number of distinct substrings in the window
    int remmed_leaf = 0;  // how many entries of `leaves` have been deleted from the front
    // One tree node: edge label is S[l..r] (leaves extend to the end of S).
    struct Node {
        SuffixTree *ctx;            // back-pointer to the owning tree (for S/leaves access)
        int l, r, leaf_id = -1;     // edge label bounds; leaf_id != -1 marks a leaf
        Node *par, *suflink = NULL; // parent and Ukkonen suffix link
        map<char, Node*> nxt;       // children keyed by first edge character
        Node(SuffixTree *T, int L, int R, Node *P, bool LEAF) : ctx(T), l(L), r(R), par(P) {
            if(LEAF) leaf_id = sz(ctx->leaves);
        }
        bool is_leaf() { return leaf_id != -1; }
        // Edge length: leaves run to the end of S; the root has length 0.
        int size() { return is_leaf() ? sz(ctx->S) - l : (par ? r - l + 1 : 0); }
    };
    // Active point: node `u` and offset `pos` on the edge entering it.
    struct State {
        Node *u; int pos;
        State() {}
        // P == -666 is a sentinel meaning "end of u's edge".
        State(Node *U, int P = -666) : u(U), pos(P) { if(P == -666) pos = u->size() - 1; }
        // Step one character back toward the root along the current edge.
        void go_back() { if(pos-- == 0) u = u->par, pos = u->size() - 1; }
    } st;
    Node *root;
    vector<Node*> nodes, leaves;  // all nodes (owned, never freed); leaves in creation order
    bool rule3;                   // set when an extension stopped early (Ukkonen's rule 3)
    Node* make_node(int L, int R, Node *P, bool LEAF) {
        nodes.push_back(new Node(this, L, R, P, LEAF));
        return nodes.back();
    }
    // One Ukkonen extension from the active point with character c.
    // Returns a newly created internal node (needing a suffix link) or NULL.
    Node* walkdown(char c) {
        Node *v = st.u;
        if(st.pos == v->size() - 1) {
            if(v->is_leaf()) { v->r++; st.pos++; return NULL; }
            if(!v->nxt[c]) {
                // New leaf hanging directly off an existing node.
                v->nxt[c] = make_node(sz(S) - 1, sz(S) - 1, v, true); cnt++;
                leaves.push_back(v->nxt[c]);
            } else rule3 = true;  // c already present: stop this phase
            st.u = v->nxt[c]; st.pos = 0;
            return NULL;
        }
        Node *p = v->par;
        if(S[v->l + st.pos + 1] == c) { st.pos++; rule3 = true; return NULL; }
        // Split the edge at the active point and attach a new leaf for c.
        Node *mid = make_node(v->l, v->l + st.pos, p, false);
        p->nxt[S[v->l]] = mid; mid->nxt[S[v->l + st.pos + 1]] = v; v->par = mid; v->l += st.pos + 1;
        mid->nxt[c] = make_node(sz(S) - 1, sz(S) - 1, mid, true); cnt++;
        st.u = mid->nxt[c]; st.pos = 0; leaves.push_back(st.u);
        return mid;
    }
    // Moves the active point to the next-shorter suffix, following a suffix
    // link when available and otherwise re-walking down from an ancestor.
    void find_suflink(int l, bool add = true) {
        Node *v = st.u;
        if(st.pos == v->size() - 1 && v->suflink) { st = State(v->suflink); return; }
        Node *p = v->par;
        if(p->suflink) { l = sz(S) - st.pos - add - 1; st = State(p->suflink); }
        else st = State(root, -1);
        // Skip-count walk down to the right depth.
        while(l < sz(S) - add) {
            int rem = sz(S) - add - l, prem = st.u->size() - 1 - st.pos;
            if(rem <= prem) { st.pos += rem; break; }
            l += prem + 1; st.u = st.u->nxt[S[l - 1]]; st.pos = 0;
        }
    }
    // Removes the leaf for the oldest suffix (and any parent left childless),
    // keeping `cnt` in sync with the substrings that disappear.
    void delete_node(Node *n) {
        if(!n->par) return;
        if(st.u == n) {
            // The active point sits inside the node being deleted: relocate
            // it first, then shrink n into a shorter re-used leaf.
            int old_pos = st.pos; find_suflink(sz(leaves) + 1, false);
            cnt -= (n->size() - old_pos - 1);
            n->leaf_id = sz(leaves); n->suflink = NULL; leaves.push_back(n);
            n->l = sz(S) - old_pos - 1; return;
        }
        cnt -= n->size();
        Node *p = n->par; p->nxt.erase(S[n->l]);
        if(p->nxt.empty()) delete_node(p);
    }
    // Walks the pattern P from the root; returns the node whose edge contains
    // the end of P, or NULL if P is not a substring. (Unused by solve().)
    Node* traverse(string &P) {
        if(P.empty()) return root;
        Node *curr = root; int idx = 0;
        while(idx < sz(P)) {
            char c = P[idx];
            if(!curr->nxt.count(c)) return NULL;
            Node *nxt = curr->nxt[c];
            int len = nxt->size();
            for(int i = 0; i < len && idx < sz(P); i++, idx++)
                if(S[nxt->l + i] != P[idx]) return NULL;
            curr = nxt;
        }
        return curr;
    }
    // Number of leaves under u. (Debug helper, unused by solve().)
    int count_leaves(Node *u) {
        if(u->is_leaf()) return 1;
        int ans = 0;
        for(auto &p : u->nxt) ans += count_leaves(p.second);
        return ans;
    }
public:
    // Builds an (optionally empty) tree by appending the characters of s.
    SuffixTree(string s = "") {
        root = new Node(this, -1, -1, NULL, false);
        nodes.push_back(root);
        for(char c : s) append_back(c);
    }
    /* Adds char to sliding window.
    Time: Amortized O(1) */
    void append_back(char c) {
        S += c; rule3 = false;
        if(sz(S) == 1) { st = State(root, -1); walkdown(c); return; }
        // Every live leaf automatically grows by one substring.
        cnt += sz(leaves) - remmed_leaf;
        st = State(leaves.back()); st.go_back();
        Node *old_w = NULL;
        for(int l = sz(leaves); l < sz(S); l++) {
            find_suflink(l);
            Node *w = walkdown(c); st.go_back();
            if(old_w) old_w->suflink = st.u; old_w = w;  // wire pending suffix link
            if(rule3) { walkdown(c); return; }           // rule 3: phase ends early
        }
    }
    /* Removes oldest char from window.
    Time: Amortized O(1) */
    void pop_front() {
        if(remmed_leaf < sz(leaves)) delete_node(leaves[remmed_leaf++]);
    }
    /* Total distinct substrings in window.
    Time: O(1) */
    ll no_distinct() { return cnt; }
};
// Fenwick (binary indexed) tree over 1-based indices: point add and
// prefix/range sum, both O(log n). (`int` is long long file-wide.)
class BIT {
    vector<int> tree;  // tree[i] holds the sum of a block ending at i
public:
    BIT(int size) {
        tree.assign(size + 1, 0);
    }
    // Sum of values at indices [1, idx]; returns 0 for idx <= 0.
    int getSum(int idx) {
        int total = 0;
        while (idx > 0) {
            total += tree[idx];
            idx -= idx & (-idx);  // strip the lowest set bit
        }
        return total;
    }
    // Sum of values at indices [left, right].
    int getSumRange(int left, int right) {
        return getSum(right) - getSum(left - 1);
    }
    // Adds val at index idx (idx >= 1).
    void addValue(int idx, int val) {
        while (idx < (int)tree.size()) {
            tree[idx] += val;
            idx += idx & (-idx);  // jump to the next covering block
        }
    }
};
/*
*/
// Problem D driver: builds A[i] = distinct-substring counts (prefixes up to
// mid, suffixes after mid), compresses them to ranks, then sums
// min(#smaller-before, #greater-before) for each element via a Fenwick tree.
void solve(){
    //Part 1: build v = A using the sliding-window suffix trees.
    read(n); string s; cin>>s;
    SuffixTree st,st1;
    vector<int> v;
    // Prefix counts: A[i] = distinct(s[0..i]) for i = 0 .. mid.
    for(int i=0;i<=(n-1)/2;i++){
        st.append_back(s[i]);
        v.pb(st.no_distinct());
    }
    // Load the whole right half s[mid+1..n-1] into the second tree...
    for(int i=(n+1)/2;i<n;i++){
        st1.append_back(s[i]);
    }
    // ...then record suffix counts while popping from the front:
    // A[i] = distinct(s[i..n-1]) for i > mid.
    for(int i=(n+1)/2;i<n;i++){
        v.push_back(st1.no_distinct());
        st1.pop_front();
    }
    //cout<<v<<endl;
    //coordinate compression: map values to ranks 1..#distinct,
    //preserving relative order (Fenwick needs compact indices).
    set<int> ms;
    for(auto &i:v) ms.insert(i);
    map<int,int> mp; int cnt=1;
    for(auto &i:ms) mp[i]=cnt++;
    for(int i=0;i<v.size();i++) v[i]=mp[v[i]];
    //Part 2: online rank statistics over the compressed values.
    int maxi=*max_element(all(v)),mod=1e9+7;
    BIT b(maxi+1);
    int ans=0;
    for(auto &i:v){
        // l = previously inserted values smaller than i,
        // r = previously inserted values greater than i.
        int l=b.getSumRange(0,i-1),r=b.getSumRange(i+1,maxi);
        ans+=min(l,r)%mod;  // min(l, r) < n < mod, so this %mod is a no-op
        ans%=mod;
        b.addValue(i,1);
    }
    cout<<ans<<endl;
}
// Entry point: configure fast I/O, then run the single test case.
int32_t main() {
    init_code();
    solve();
    return 0;
}
E. Refresh at Your Own Risk
Author: ayushman18
Think backward. Use Expected Value DP from the last seat down to the first. Because rolling a $$$0$$$ keeps you at the same seat but changes the mode, the states depend on each other in a loop. Write out the equations and substitute them into each other to break the loop!
Let's work backward from seat $$$N-1$$$ down to $$$0$$$.
State Definition:
- $$$X_m(i)$$$: Expected clicks for Alice starting from seat $$$i$$$, on Alice's turn in mode $$$m$$$.
- $$$Y_m(i)$$$: Expected clicks for Alice starting from seat $$$i$$$, on Bob's turn in mode $$$m$$$.
Base Case: If $$$i \ge N$$$, the ticket is already booked! So $$$X_m(i)=0$$$ and $$$Y_m(i)=0$$$.
Transitions:
When Alice clicks in mode $$$m$$$, she rolls $$$k$$$ from $$$0$$$ to $$$A_m$$$. The probability of any roll is $$$p_A(m)=\frac{1}{A_m+1}$$$.
- If $$$k=0$$$: Progress stays at $$$i$$$, mode becomes $$$(m+1)\%3$$$, and it's Bob's turn.
- If $$$k \gt 0$$$: Progress moves to $$$i+k$$$, mode resets to $$$0$$$, and it's Bob's turn.
- We add $$$1$$$ because Alice made a click.
This gives us the equation for Alice's turn:
Bob's turns are similar, but we add $$$0$$$ because we only count Alice's clicks:
Solving the Loop:
At a fixed seat $$$i$$$, the sum parts only use future seats ($$$i+k$$$), which we already calculated! Let's call these constant parts $$$C_X(m)$$$ and $$$C_Y(m)$$$.
This gives a loop of 6 simple equations:
- $$$X_0=C_X(0)+p_A(0) \cdot Y_1$$$
- $$$Y_1=C_Y(1)+p_B(1) \cdot X_2$$$
- $$$X_2=C_X(2)+p_A(2) \cdot Y_0$$$
- $$$Y_0=C_Y(0)+p_B(0) \cdot X_1$$$
- $$$X_1=C_X(1)+p_A(1) \cdot Y_2$$$
- $$$Y_2=C_Y(2)+p_B(2) \cdot X_0$$$
If we substitute $$$Y_1$$$ and $$$X_2$$$ into the first equation, we can write $$$X_0$$$ entirely in terms of $$$Y_0$$$:
(Where $$$K_X$$$ and $$$P_X$$$ are constants from the substitution).
Similarly, we can write $$$Y_0$$$ entirely in terms of $$$X_0$$$:
Now we just have a system of two linear equations! Solve for $$$X_0$$$:
Once we have $$$X_0$$$, we can plug it back in to easily find $$$Y_0$$$ and all the other states for seat $$$i$$$.
Optimization:
Maintain a suffix sum array for $$$X_0$$$ and $$$Y_0$$$ as you iterate backward. This lets you calculate the $$$\sum$$$ parts in $$$O(1)$$$ time.
Complexity: Time complexity is $$$O(N)$$$. The final answer is simply $$$X_0(0)$$$.
#include <iostream>
#include <iomanip>
#include <vector>
using namespace std;
// Problem E: expected number of Alice's clicks to reach seat N, computed
// backward with the 6-state (mode x player) expectation DP; the cyclic
// dependency at a fixed seat is broken by the closed-form 2x2 solve below.
// Reads N, A[0..2], B[0..2] from stdin and prints X0(0).
void solve() {
    int N;
    cin >> N;
    vector<int> A(3), B(3);
    for(int i = 0; i < 3; ++i) cin >> A[i];
    for(int i = 0; i < 3; ++i) cin >> B[i];
    // Precalculate probabilities of failure (k=0)
    vector<double> pA(3), pB(3);
    for(int m = 0; m < 3; ++m) {
        pA[m] = 1.0 / (A[m] + 1.0);
        pB[m] = 1.0 / (B[m] + 1.0);
    }
    // Suffix sum arrays to achieve O(N) time complexity
    // Sized to N + 2005 to prevent out-of-bounds when i + k > N
    // (assumes A[m], B[m] <= ~2000 -- TODO confirm against the constraints)
    vector<double> sum_X0(N + 2005, 0.0);
    vector<double> sum_Y0(N + 2005, 0.0);
    // Compute expected values from N-1 down to 0
    for (int i = N - 1; i >= 0; --i) {
        // 1. Calculate Future Sums (SA and SB) in O(1) time
        // SA[m] = sum of Y0 over seats i+1 .. i+A[m] (successful rolls k >= 1)
        vector<double> SA(3, 0.0), SB(3, 0.0);
        for(int m = 0; m < 3; ++m) {
            SA[m] = sum_Y0[i + 1] - sum_Y0[i + A[m] + 1];
            SB[m] = sum_X0[i + 1] - sum_X0[i + B[m] + 1];
        }
        // 2. Compute the Constants (C)
        // Alice adds 1 for her click. Bob adds 0.
        double CX0 = 1.0 + pA[0] * SA[0];
        double CX1 = 1.0 + pA[1] * SA[1];
        double CX2 = 1.0 + pA[2] * SA[2];
        double CY0 = 0.0 + pB[0] * SB[0];
        double CY1 = 0.0 + pB[1] * SB[1];
        double CY2 = 0.0 + pB[2] * SB[2];
        // 3. Compute derived components for the 2x2 system
        // (substituting Y1 and X2 into X0's equation, and X1, Y2 into Y0's)
        double KX = CX0 + pA[0] * CY1 + pA[0] * pB[1] * CX2;
        double KY = CY0 + pB[0] * CX1 + pB[0] * pA[1] * CY2;
        double PX = pA[0] * pB[1] * pA[2];
        double PY = pB[0] * pA[1] * pB[2];
        // 4. Solve for the current state (X0 and Y0)
        double X0 = (KX + PX * KY) / (1.0 - PX * PY);
        double Y0 = KY + PY * X0;
        // 5. Update suffix sums for the next iteration (i-1)
        sum_X0[i] = sum_X0[i + 1] + X0;
        sum_Y0[i] = sum_Y0[i + 1] + Y0;
    }
    // The answer is the expected clicks for Alice starting at seat 0
    // sum_X0[0] - sum_X0[1] gives the exact value of X0 at i=0
    cout << fixed << setprecision(8) << sum_X0[0] - sum_X0[1] << "\n";
}
int main() {
    // Fast I/O
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    solve();  // single test case; prints the expected click count
    return 0;
}
F. The Pyramid Gaze
Author: aryan_lem
Instead of a tree, if this were an array, how would you compute $$$\sum_{l \le i \lt j \le r} A_i A_j$$$ efficiently for any subarray $$$[l,r]$$$?
When a value in the range $$$[l,r]$$$ changes, how can you efficiently update the value of $$$\sum_{l \le i \lt j \le r} A_i A_j$$$ without recomputing it from scratch?
Try simulating this idea on a tree.
The problem asks us to simulate the pharaoh's journey across paths in a tree, conditionally updating node values and computing a specific happiness score for each path. We can solve this efficiently using Euler Tour and Binary Lifting, along with a Segment Tree.
Mathematical Simplification: Let the path from $$$i$$$ to $$$i+1$$$ consist of nodes $$$v_1, v_2, \dots, v_k$$$. The happiness gained is:
This can be simplified as:
Thus, we only need the sum of values and the sum of squares on the path.
Tree Processing: We root the tree at node $$$1$$$ and run a DFS to compute depth, entry time $$$tin[u]$$$, and exit time $$$tout[u]$$$. Using binary lifting, we build $$$parent[u][j]$$$ for LCA and $$$dp[u][j]$$$, which stores the maximum height up to the $$$2^j$$$-th ancestor. This allows us to compute both LCA and maximum height on a path in $$$O(\log n)$$$.
Maintaining Path Sums: We use an Euler Tour with a difference array to encode node contributions. For each node $$$u$$$, we add:
and similarly for $$$A[u]^2$$$. A prefix sum up to $$$tin[x]$$$ gives the sum of values on the path from the root to node $$$x$$$. These values are maintained using a segment tree to support updates and queries. This effectively converts tree path queries into root-to-node prefix queries, which can then be combined using LCA.
Path Query: Using LCA, the sum on a path $$$u \to v$$$ is computed as:
The same formula applies for the sum of squares.
Simulation: For each $$$i$$$ from $$$1$$$ to $$$n-1$$$, we compute $$$lca(i, i+1)$$$ and find the maximum height on that path using binary lifting. If $$$H[i]$$$ is not the maximum, we increment $$$A[i]$$$ and update its contribution. Then we query the path sums $$$S$$$ and $$$S2$$$ and add $$$\frac{S^2 - S2}{2}$$$ to the answer.
Complexity: The preprocessing takes $$$O(n \log n)$$$, and each step takes $$$O(\log n)$$$, giving an overall complexity of $$$O(n \log n)$$$.
#include <bits/stdc++.h>
using namespace std;
#define int long long // 64-bit everywhere: sums of squares of path values can exceed 32-bit range
int n; // number of nodes
vector<int> h,a,depth; // h: heights, a: node values A, depth: depth from root (root = 1)
vector<vector<int>> adj, parent, dp; // adjacency list; binary-lifting ancestor table; dp[u][j]: max height among u and its first 2^j ancestors
vector<int> tin,tout,euler1,euler2; // Euler-tour entry/exit times; difference arrays for A and A^2 (entry +, exit -)
vector<int> segA,segA2; // segment trees over euler1/euler2; prefix sums give root-to-node path sums
int timer=0; // global Euler-tour clock
// ---------- DFS for LCA + max height ----------
// Rooted DFS that fills the binary-lifting tables:
// parent[u][j] = 2^j-th ancestor of u, dp[u][j] = max height among u and its first 2^j ancestors.
void dfs_lca(int u,int p){
    parent[u][0] = p;
    dp[u][0] = max(h[u], h[p]);
    for (int lvl = 1; lvl < 30; ++lvl) {
        int halfway = parent[u][lvl - 1];
        parent[u][lvl] = parent[halfway][lvl - 1];
        dp[u][lvl] = max(dp[u][lvl - 1], dp[halfway][lvl - 1]);
    }
    for (int nxt : adj[u]) {
        if (nxt == p) continue;
        depth[nxt] = depth[u] + 1;
        dfs_lca(nxt, u);
    }
}
// ---------- Euler Tour ----------
// Euler tour with difference encoding: deposit +a[u] (and +a[u]^2) at entry,
// -a[u] (and -a[u]^2) at exit, so a prefix sum of euler1/euler2 up to tin[x]
// equals the sum of values (squares) on the root->x path.
void dfs_euler(int u,int p){
    tin[u] = timer;
    euler1[timer] = a[u];
    euler2[timer] = a[u] * a[u];
    ++timer;
    for (int child : adj[u]) {
        if (child != p) dfs_euler(child, u);
    }
    tout[u] = timer;
    euler1[timer] = -a[u];
    euler2[timer] = -(a[u] * a[u]);
    ++timer;
}
// ---------- Segment Tree ----------
// Builds both sum segment trees (over euler1 and euler2) for the range [l, r].
void build(int node,int l,int r){
    if (l == r) {
        segA[node] = euler1[l];
        segA2[node] = euler2[l];
        return;
    }
    int m = l + (r - l) / 2;
    int lc = 2 * node, rc = 2 * node + 1;
    build(lc, l, m);
    build(rc, m + 1, r);
    segA[node] = segA[lc] + segA[rc];
    segA2[node] = segA2[lc] + segA2[rc];
}
// Point update: re-read euler1[idx]/euler2[idx] into the leaf, then
// recompute the sums on the way back up.
void update(int node,int l,int r,int idx){
    if (l == r) {
        segA[node] = euler1[l];
        segA2[node] = euler2[l];
        return;
    }
    int m = l + (r - l) / 2;
    if (idx <= m) {
        update(2 * node, l, m, idx);
    } else {
        update(2 * node + 1, m + 1, r, idx);
    }
    segA[node] = segA[2 * node] + segA[2 * node + 1];
    segA2[node] = segA2[2 * node] + segA2[2 * node + 1];
}
// Range sum of euler1 over [ql, qr]; `node` is responsible for [l, r].
int queryA(int node,int l,int r,int ql,int qr){
    if (qr < l || r < ql) return 0;            // disjoint
    if (ql <= l && r <= qr) return segA[node]; // fully covered
    int m = l + (r - l) / 2;
    int left_part = queryA(2 * node, l, m, ql, qr);
    int right_part = queryA(2 * node + 1, m + 1, r, ql, qr);
    return left_part + right_part;
}
// Range sum of euler2 (squares) over [ql, qr]; mirrors queryA.
int queryA2(int node,int l,int r,int ql,int qr){
    if (qr < l || r < ql) return 0;             // disjoint
    if (ql <= l && r <= qr) return segA2[node]; // fully covered
    int m = l + (r - l) / 2;
    int left_part = queryA2(2 * node, l, m, ql, qr);
    int right_part = queryA2(2 * node + 1, m + 1, r, ql, qr);
    return left_part + right_part;
}
// ---------- LCA ----------
// Standard binary-lifting LCA; requires parent[][] and depth[] filled by dfs_lca.
int lca(int u,int v){
    if (depth[u] < depth[v]) swap(u, v);
    // lift the deeper node until both are on the same level
    int gap = depth[u] - depth[v];
    for (int b = 0; gap; ++b, gap >>= 1) {
        if (gap & 1) u = parent[u][b];
    }
    if (u == v) return u;
    // descend the lifting levels, keeping the two nodes just below their LCA
    for (int b = 29; b >= 0; --b) {
        if (parent[u][b] != parent[v][b]) {
            u = parent[u][b];
            v = parent[v][b];
        }
    }
    return parent[u][0];
}
// ---------- max height on path ----------
// Maximum height on the vertical path from u up to v.
// NOTE: v must be an ancestor of u (callers pass the LCA).
int getMax(int u,int v){
    int best = h[u];
    int gap = depth[u] - depth[v];
    for (int b = 0; b < 30; ++b) {
        if ((gap >> b) & 1) {
            best = max(best, dp[u][b]);
            u = parent[u][b];
        }
    }
    return best;
}
// ---------- Main Logic ----------
// Walks the consecutive pairs (i, i+1): conditionally bumps a[i], then adds the
// pairwise-product happiness for the path. The division by 2 in (S^2 - S2)/2 is
// deferred to the very end (each per-step term is even, so this is exact).
int solve(){
int ans=0;
for(int i=1;i<n;i++){
int u=i,v=i+1;
int L=lca(u,v);
// max height over the full u..v path, split at the LCA into two vertical climbs
int mx=max(getMax(u,L),getMax(v,L));
// update
// bump a[i] when some node on the path is strictly taller than h[i];
// refresh both difference arrays and push the two affected leaves into the trees
if(h[i]<mx){
a[i]++;
euler1[tin[i]]++;
euler1[tout[i]]--;
euler2[tin[i]]=a[i]*a[i];
euler2[tout[i]]=-a[i]*a[i];
update(1,0,2*n-1,tin[i]);
update(1,0,2*n-1,tout[i]);
}
// path sum
// root-to-node prefix sums combined via LCA:
// path(u,v) = pref(u) + pref(v) - 2*pref(L) + value(L)
int S=queryA(1,0,2*n-1,0,tin[u])
+queryA(1,0,2*n-1,0,tin[v])
-2*queryA(1,0,2*n-1,0,tin[L])
+a[L];
int S2=queryA2(1,0,2*n-1,0,tin[u])
+queryA2(1,0,2*n-1,0,tin[v])
-2*queryA2(1,0,2*n-1,0,tin[L])
+a[L]*a[L];
// 2 * sum_{i<j} A_i A_j over the path equals S^2 - S2
ans += (S*S - S2);
}
return ans/2;
}
// ---------- Main ----------
int32_t main(){
ios::sync_with_stdio(false);
cin.tie(NULL);
cin>>n;
// 1-indexed node arrays; index 0 acts as the virtual parent of the root
h.resize(n+1);
a.resize(n+1);
depth.resize(n+1);
adj.resize(n+1);
// 30 lifting levels cover any depth up to 2^30
parent.assign(n+1,vector<int>(30));
dp.assign(n+1,vector<int>(30));
tin.resize(n+1);
tout.resize(n+1);
// Euler tour emits one entry and one exit event per node -> 2n slots
euler1.resize(2*n);
euler2.resize(2*n);
segA.resize(8*n);
segA2.resize(8*n);
for(int i=1;i<=n;i++) cin>>a[i];
for(int i=1;i<=n;i++) cin>>h[i];
for(int i=0;i<n-1;i++){
int u,v;
cin>>u>>v;
adj[u].push_back(v);
adj[v].push_back(u);
}
// NOTE(review): both DFS passes are recursive — may overflow the stack on
// degenerate (path-like) trees with large n; confirm the judge's stack limit.
dfs_lca(1,0);
dfs_euler(1,0);
build(1,0,2*n-1);
cout<<solve()<<"\n";
}
G. MST Partition
Author: sushmit2022
Sort the array. Optimal components will be contiguous segments.
Binary search the answer $$$mid$$$. Check if you can form $$$\le k$$$ segments with $$$\max - \min \le mid$$$ and one segment of size $$$\ge M$$$.
Because the graph is complete and edge weights are defined as $$$|a_i - a_j|$$$, directly working on the graph is infeasible. Instead, we reduce the problem using sorting and binary search.
The key idea is that the exact graph structure does not matter. Since edge weights depend only on differences, the cost of a component (its Minimum Spanning Tree) is simply $$$\max - \min$$$. To minimize this, each connected component will be a contiguous segment in the sorted array.
Instead of minimizing the cost directly, we binary search the answer. Let $$$mid$$$ denote the maximum allowed difference inside any component:
Now we need to check whether we can partition the array into at most $$$k$$$ segments, such that each segment satisfies the above condition and at least one segment has size $$$\ge M$$$.
After sorting, we greedily form segments to cover the array. We precompute:
$$$pre[i]$$$: the minimum number of segments needed to cover the prefix $$$[0 \dots i]$$$
$$$suf[i]$$$: the minimum number of segments needed to cover the suffix $$$[i \dots n-1]$$$
Now, for each potential starting index $$$i$$$ of the special segment (the one with size $$$\ge M$$$):
First, let $$$next$$$ be the first index such that $$$a[next] \gt a[i] + mid$$$. This is the furthest possible boundary our segment can reach while maintaining cost $$$\le mid$$$.
To ensure the size is at least $$$M$$$, the segment must end at or after index $$$i + M - 1$$$. For convenience, we define:
Also, to ensure we can still form $$$k$$$ components in total, the special segment cannot be too long. We restrict:
If $$$minn \le maxx$$$, then a valid segment starting at $$$i$$$ exists.
We then compute the total number of components:
If this value is $$$\le k$$$, then $$$mid$$$ is valid.
We binary search the minimum such $$$mid$$$.
The complexity is $$$O(n \log n \log A)$$$, where $$$O(n \log n)$$$ comes from sorting and the check function, and $$$O(\log A)$$$ from the binary search.
Thus, the problem reduces to a standard "minimize the maximum" pattern, solved with binary search plus a greedy feasibility check.
#include <bits/stdc++.h>
#include <ext/pb_ds/assoc_container.hpp>
#include <ext/pb_ds/tree_policy.hpp>
#include <algorithm>
using namespace std;
using namespace __gnu_pbds;
template <typename T>
using pbds = tree<T, null_type, less<T>, rb_tree_tag, tree_order_statistics_node_update>; // order-statistics set (template boilerplate; not used in this solution)
#define int long long
const int mod = 998244353; // common NTT-friendly prime (unused here)
#define INF 1e16
/* main code */
// Feasibility test for the binary search: with maximum in-segment spread `mid`,
// can the sorted array v be partitioned into at most k contiguous segments such
// that at least one segment has size >= m?
bool check(int mid,vector<int>&v,int n,int k,int m){
    // pre[j]: greedy segment count covering the prefix [0..j];
    // suf[j]: greedy segment count covering the suffix [j..n-1].
    vector<int> pre(n), suf(n);
    int seg = 1;
    int i = 0;
    while (i < n) {
        int nxt = upper_bound(v.begin(), v.end(), v[i] + mid) - v.begin();
        for (int j = i; j < nxt; ++j) pre[j] = seg;
        i = nxt;
        ++seg;
    }
    seg = 1;
    i = n - 1;
    while (i >= 0) {
        int nxt = lower_bound(v.begin(), v.end(), v[i] - mid) - v.begin();
        for (int j = nxt; j <= i; ++j) suf[j] = seg;
        i = nxt - 1;
        ++seg;
    }
    // Try every start index for the special (size >= m) segment.
    for (int st = 0; st < n; ++st) {
        int nxt = upper_bound(v.begin(), v.end(), v[st] + mid) - v.begin();
        int lo = st + m;                    // smallest allowed exclusive end
        int hi = min(nxt, st + n - k + 1);  // largest end leaving room for k segments
        if (lo <= hi) {
            int comp = 1;
            if (st > 0) comp += pre[st - 1];
            if (hi < n) comp += suf[hi];
            if (comp <= k) return true;
        }
    }
    return false;
}
// Reads one test case, sorts the values, and binary-searches the minimal
// feasible spread `mid` (printed as the answer; -1 if none found).
void solve(int tc){
    int n, k, m;
    cin >> n >> k >> m;
    vector<int> v(n);
    for (auto &x : v) cin >> x;
    sort(v.begin(), v.end());
    int lo = 0, hi = 1e18, best = -1;
    while (lo <= hi) {
        int mid = lo + (hi - lo) / 2;
        if (check(mid, v, n, k, m)) {
            best = mid;       // feasible: try to shrink the spread
            hi = mid - 1;
        } else {
            lo = mid + 1;     // infeasible: need a larger spread
        }
    }
    cout << best;
}
int32_t main()
{
// fast C++ stream I/O
ios_base::sync_with_stdio(false);
cin.tie(0);
cout.tie(0);
// freopen("intputf.in", "r", stdin);
// freopen("test1.in", "w", stdout);
int t=1;
cin>>t;
// one test case per call; answers separated by newlines
for (int i = 1; i <= t; i++)
{
solve(i);
cout<<endl;
}
return 0;
}
H. Grid Game
Author: sushmit2022
Can you assume that all extra moves (special power) are applied at the end?
After $$$k$$$ steps, compute your position from the repeated string. When can the remaining Manhattan distance be covered using at most $$$k$$$ extra moves?
We are given a movement string $$$S$$$ and can repeat it any number of times. Additionally, after each step, we may move one extra unit in any direction.
The goal is to find the minimum number of steps such that we can reach $$$(x,y,z)$$$.
First, observe that after one full traversal of the string $$$S$$$, we have a fixed displacement:
where:
If we repeat the string $$$k$$$ times, the displacement becomes:
Additionally, we may take a prefix of the string.
Now suppose we take $$$mid$$$ total steps. Then:
- We repeat the string $$$\lfloor mid / n \rfloor$$$ times
- Then take the first $$$mid \bmod n$$$ characters
We can compute the resulting position $$$(currX, currY, currZ)$$$ using prefix sums.
Now comes the key observation:
After $$$mid$$$ steps, we can make at most $$$mid$$$ extra unit moves.
Let the remaining distance to the target be:
We can reach the target if:
Thus, we binary search the minimum $$$mid$$$ such that this condition holds.
Complexity
#include<bits/stdc++.h>
using namespace std;
#define int long long
// One test case: binary-search the minimum step count `mid` such that after
// walking `mid` characters of the infinitely repeated string, the remaining
// Manhattan distance to (x, y, z) can be covered by at most `mid` extra unit moves.
void solve(int tc){
    int n;
    cin >> n;
    string s;
    cin >> s;
    int x, y, z;
    cin >> x >> y >> z;
    // direction codes: E=0, W=1, N=2, S=3, U=4, D=5
    map<char,int> dir = {{'E',0},{'W',1},{'N',2},{'S',3},{'U',4},{'D',5}};
    // pref[i][d]: occurrences of direction d within s[0..i]
    vector<vector<int>> pref(n, vector<int>(6));
    for (int i = 0; i < n; i++) {
        if (i > 0) pref[i] = pref[i-1];
        pref[i][dir[s[i]]] += 1;
    }
    // net displacement of one full pass over s
    int fullX = pref[n-1][dir['E']] - pref[n-1][dir['W']];
    int fullY = pref[n-1][dir['N']] - pref[n-1][dir['S']];
    int fullZ = pref[n-1][dir['U']] - pref[n-1][dir['D']];
    int best = -1;
    int lo = 0, hi = 1e16;
    while (lo <= hi) {
        int mid = (lo + hi) / 2ll;
        int cycles = mid / n, rest = mid % n;
        int cx = fullX * cycles;
        int cy = fullY * cycles;
        int cz = fullZ * cycles;
        if (rest) {
            // partial pass: first `rest` characters of s
            cx += pref[rest-1][dir['E']] - pref[rest-1][dir['W']];
            cy += pref[rest-1][dir['N']] - pref[rest-1][dir['S']];
            cz += pref[rest-1][dir['U']] - pref[rest-1][dir['D']];
        }
        int need = abs(x - cx) + abs(y - cy) + abs(z - cz);
        if (mid >= need) {
            best = mid;
            hi = mid - 1;
        } else {
            lo = mid + 1;
        }
    }
    cout << best << endl;
}
// Entry point: reads the number of test cases and dispatches to solve().
int32_t main(){
    // Fast I/O setup, consistent with every other main() in this file; safe
    // because only C++ streams are used for input and output.
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    int t = 0;
    cin >> t;
    for (int i = 1; i <= t; i++) {
        solve(i);
    }
    return 0;
}
I. Painting Party
Author: aditya1977
Key Observation
There are two major bottlenecks in this problem: the rotations and the overlapping paint operations.
- Handling Rotations: A 180° rotation is simply reversing the array. Instead of actually reversing the array, we can just keep track of a boolean flag indicating if the fence is currently flipped. If it is flipped, a paint query on the range $$$[a, b]$$$ mathematically maps to the absolute original coordinates $$$[L - b + 1, L - a + 1]$$$.
- Handling Overwrites (Reverse Processing): Later paint operations overwrite earlier ones. If we process the queries in reverse order (from the last query to the first), the first time we color an unpainted plank, that color is guaranteed to be its final color.
Solution
1. Forward Pass (Coordinate Resolution) We read all queries from start to finish. If we see a rotate operation, we toggle $$$\text{is\_flipped}$$$. If we see a paint operation on $$$[a, b]$$$, we check $$$\text{is\_flipped}$$$. If true, we map the range to its absolute coordinates:
We store these absolute coordinates so all queries share the same frame of reference.
2. Reverse Pass (Painting via DSU) We iterate through the stored queries from the last one down to the first. For a paint query on range $$$[l, r]$$$ with color $$$c$$$: We find the first unpainted plank using $$$pos = \text{find\_set}(l)$$$. While $$$pos \leq r$$$: - We color the plank at $$$pos$$$ with $$$c$$$. - We update its DSU parent to point to the next plank: $$$parent[pos] = pos + 1$$$. - We find the next available unpainted plank: $$$pos = \text{find\_set}(pos)$$$.
3. Final Output After processing all queries, the array contains the final colors in the original orientation. If the final state of $$$\text{is\_flipped}$$$ is true, the fence ends up reversed, so we must print the array backwards. Otherwise, we print it forwards.
Complexity Analysis
Simulating the painting and rotating operations directly would take $$$O(L \cdot Q)$$$ time, which will result in a Time Limit Exceeded (TLE) error. By using Disjoint Set Union (DSU) to jump over already painted planks and resolving rotations mathematically, we optimize the solution to:
where $$$\alpha(L)$$$ is the inverse Ackermann function, effectively making the time complexity near-linear.
#include <bits/stdc++.h>
using namespace std;
const int MAX = 1e5 + 5; // upper bound on fence length L
int parent[MAX], final_colors[MAX]; // DSU "next possibly-unpainted plank" pointers; resolved colors
int L, Q; // fence length and number of queries
// One stored query: type 1 = paint [a, b] with color c (coordinates already
// resolved to the original orientation), type 2 = rotate.
struct Query {
int type;
int a, b, c;
};
// DSU find with full path compression: returns the representative of x,
// i.e. the first plank >= x that has not been painted yet.
int find_set(int x) {
    int root = x;
    while (parent[root] != root) root = parent[root];
    // second pass: point every node on the walked path directly at the root
    while (parent[x] != root) {
        int nxt = parent[x];
        parent[x] = root;
        x = nxt;
    }
    return root;
}
int main() {
ios::sync_with_stdio(false);
cin.tie(nullptr);
cin >> L >> Q;
vector<Query> queries(Q);
bool is_flipped = false;
// Forward pass: resolve every paint range into the original (unflipped)
// coordinate frame so all stored queries share one frame of reference.
for (int i = 0; i < Q; i++) {
cin >> queries[i].type;
if (queries[i].type == 1) {
int a, b, c;
cin >> a >> b >> c;
if (!is_flipped) {
queries[i].a = a;
queries[i].b = b;
} else {
// [a, b] on the flipped fence maps to [L-b+1, L-a+1] on the original
queries[i].a = L - b + 1;
queries[i].b = L - a + 1;
}
queries[i].c = c;
} else {
is_flipped = !is_flipped;
}
}
// DSU over planks 1..L+1; the sentinel L+1 terminates the paint loop below
for (int i = 1; i <= L + 1; i++) parent[i] = i;
// Reverse pass: processing back-to-front, the first color a plank receives
// is its final color; already-painted planks are skipped via DSU jumps.
for (int i = Q - 1; i >= 0; i--) {
if (queries[i].type == 2) {
continue;
}
int l = queries[i].a;
int r = queries[i].b;
int c = queries[i].c;
for (int pos = find_set(l); pos <= r; pos = find_set(pos)) {
final_colors[pos] = c;
parent[pos] = pos + 1;
}
}
// Output in the fence's final orientation (reversed if an odd number of rotates)
if (!is_flipped) {
for (int i = 1; i <= L; i++) {
cout << final_colors[i] << (i == L ? "" : " ");
}
} else {
for (int i = L; i >= 1; i--) {
cout << final_colors[i] << (i == 1 ? "" : " ");
}
}
cout << "\n";
return 0;
}
J. The Sasuke Recovery Mission
Author: uday51
Since the paths strictly go up to the LCA, can we treat the queries offline during a single tree traversal?
The problem requires us to find the closest node with value $$$Z$$$ on the paths moving strictly upwards from $$$X$$$ to $$$LCA(X, Y)$$$ and from $$$Y$$$ to $$$LCA(X, Y)$$$. We can solve this optimally using Offline Queries paired with Binary Lifting.
Implementation Steps
Precompute the depths of all nodes and build a binary lifting table to quickly find the $$$LCA(X, Y)$$$ for any query. The highest valid point (minimum depth) either shinobi can reach is $$$\text{depth}[LCA]$$$.
Offline Queries: Instead of traversing the tree for every query, break each query into two independent tasks. For a query $$$(X, Y, Z)$$$, attach one task to node $$$X$$$ and another to node $$$Y$$$. Both tasks are searching for value $$$Z$$$ and cannot go higher than $$$\text{depth}[LCA]$$$.
DFS with State Reversion: Run a single Depth-First Search starting from the root (node 1). Maintain a $$$\text{last\_seen}$$$ array that tracks the deepest level at which each chakra signature has appeared on the current path from the root.
Processing: When arriving at node $$$u$$$, temporarily update $$$\text{last\_seen}[A_u]$$$ to $$$\text{depth}[u]$$$. Then, check all tasks attached to $$$u$$$. If $$$\text{last\_seen}[Z] \ge \text{depth}[LCA]$$$, the chakra signature $$$Z$$$ exists on their valid path, and the distance traveled is simply $$$\text{depth}[u] - \text{last\_seen}[Z]$$$. Revert the state of $$$\text{last\_seen}$$$ when backtracking up the tree.
#include <bits/stdc++.h>
using namespace std;
const int N_MAX = 100005; // max nodes (and max queries stored in ans_x/ans_y)
int up[N_MAX][20]; // binary-lifting table: up[u][j] = 2^j-th ancestor of u
int lvl[N_MAX]; // depth of each node (root has lvl 0)
int a[N_MAX]; // chakra signature of each node
vector<int> adj[N_MAX]; // adjacency list of the tree
// One offline task: query `id` searching for value `target_z`, not allowed to
// look above depth `limit` (= depth of the LCA); is_y marks the Y endpoint.
struct Query {
int id, target_z, limit, is_y;
};
vector<Query> tasks[N_MAX]; // tasks attached to their start node
int last_seen[N_MAX]; // deepest level where each value occurs on the current root path
int ans_x[N_MAX], ans_y[N_MAX]; // best distance found from X and from Y per query
// Rooted DFS that fills the binary-lifting table up[][]; children get their
// level assigned here (the root's lvl must be set before the call).
void dfs_lca(int u, int p) {
    up[u][0] = p;
    for (int j = 1; j < 20; j++) {
        int halfway = up[u][j - 1];
        up[u][j] = up[halfway][j - 1];
    }
    for (int nxt : adj[u]) {
        if (nxt == p) continue;
        lvl[nxt] = lvl[u] + 1;
        dfs_lca(nxt, u);
    }
}
int get_lca(int u, int v) {
if (lvl[u] < lvl[v]) swap(u, v);
int diff = lvl[u] - lvl[v];
for (int i = 19; i >= 0; i--) {
if ((diff >> i) & 1) u = up[u][i];
}
if (u == v) return u;
for (int i = 19; i >= 0; i--) {
if (up[u][i] != up[v][i]) {
u = up[u][i];
v = up[v][i];
}
}
return up[u][0];
}
void dfs_solve(int u, int p) {
int prev_depth = last_seen[a[u]];
last_seen[a[u]] = lvl[u];
for (auto q : tasks[u]) {
if (last_seen[q.target_z] >= q.limit) {
int dist = lvl[u] - last_seen[q.target_z];
if (q.is_y) ans_y[q.id] = dist;
else ans_x[q.id] = dist;
}
}
for (int v : adj[u]) {
if (v != p) dfs_solve(v, u);
}
last_seen[a[u]] = prev_depth;
}
int main() {
ios_base::sync_with_stdio(false);
cin.tie(NULL);
int n;
if (!(cin >> n)) return 0;
for (int i = 1; i <= n; i++) cin >> a[i];
for (int i = 0; i < n - 1; i++) {
int u, v;
cin >> u >> v;
adj[u].push_back(v);
adj[v].push_back(u);
}
lvl[1] = 0;
// root at 1 with itself as parent so binary lifting never leaves the tree
dfs_lca(1, 1);
int q;
cin >> q;
// Split each query into two upward searches (from x and from y), both capped
// at the LCA level, and attach them to their start nodes for the offline DFS.
for (int i = 0; i < q; i++) {
int x, y, z;
cin >> x >> y >> z;
int lca = get_lca(x, y);
ans_x[i] = 1e9;
ans_y[i] = 1e9;
tasks[x].push_back({i, z, lvl[lca], 0});
tasks[y].push_back({i, z, lvl[lca], 1});
}
// NOTE(review): initializes last_seen only for values 0..n — assumes chakra
// signatures a[i] (and query values z) are <= n; confirm against constraints.
for(int i = 0; i <= n; i++) last_seen[i] = -1;
dfs_solve(1, 1);
for (int i = 0; i < q; i++) {
int res = min(ans_x[i], ans_y[i]);
if (res == 1e9) cout << -1 << "\n";
else cout << res << "\n";
}
return 0;
}
- Time Complexity: $$$O((N + Q) \log N)$$$. Building the binary lifting table takes $$$O(N \log N)$$$. Processing $$$Q$$$ queries to compute the LCA for each takes $$$O(Q \log N)$$$. The final offline DFS visits every node and processes exactly $$$2Q$$$ tasks, running in $$$O(N + Q)$$$ time.
- Space Complexity: $$$O(N \log N + Q)$$$ for the $$$up$$$ table and to store the offline query tasks.








