I am getting MLE (Memory Limit Exceeded) on this problem and I can't figure out why. Any help would be appreciated: https://mirror.codeforces.com/problemset/problem/1775/D

My code:
#pragma GCC optimize("Ofast,unroll-loops")
#pragma GCC target("avx,avx2,fma")
#include <bits/stdc++.h>
using namespace std;

const int MAX = 3e5 + 2;
const int oo = 3e5;

// Bipartite graph: nodes 1..n are array indices, node d + oo represents the divisor d.
vector<int> a[MAX * 2];
int b[MAX], dist[MAX * 2], visited[MAX * 2], pre[MAX * 2], n, s, t, cnt;
vector<int> res;

void BFS(int u) {
    for (int i = 1; i <= oo * 2; i++) {
        dist[i] = -1;
        pre[i] = -1;
    }
    dist[u] = 0;
    queue<int> q;
    q.push(u);
    visited[u] = 1;
    while (!q.empty()) {
        int x = q.front();
        q.pop();
        for (auto i : a[x]) {
            if (visited[i] == 0) {
                visited[i] = 1;
                pre[i] = x;
                dist[i] = dist[x] + 1;
                q.push(i);
            }
        }
    }
}

void solve() {
    cin >> n;
    for (int i = 1; i <= n; i++) {
        cin >> b[i];
        // Connect index i to every divisor of b[i], found by trial division up to sqrt(b[i]).
        for (int j = 2; j <= int(sqrt(b[i])); j++) {
            if (b[i] % j == 0) {
                a[j + oo].push_back(i);
                a[i].push_back(j + oo);
                if (j * j != b[i]) {
                    a[b[i] / j + oo].push_back(i);
                    a[i].push_back(b[i] / j + oo);
                }
            }
        }
        if (b[i] != 1) {
            a[b[i] + oo].push_back(i);
            a[i].push_back(b[i] + oo);
        }
    }
    cin >> s >> t;
    BFS(s);
    if (visited[t] == 0) {
        cout << -1;
        return;
    }
    // Walk back along pre[]; every second node on the s -> t path is an index node.
    int k = t;
    cnt = 1;
    res.push_back(k);
    while (pre[k] != -1) {
        k = pre[k];
        cnt++;
        if (cnt % 2 == 1) res.push_back(k);
    }
    cout << res.size() << "\n";
    reverse(res.begin(), res.end());
    for (int i = 0; i < (int)res.size(); i++) cout << res[i] << " ";
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(0);
    cout.tie(0);
    solve();
}
I guess your code is creating a graph with too many edges: the number of edges out of node i equals the number of divisors of b[i]. For values up to 3*10^5 the worst case is 180 divisors (277200 = 2^4 * 3^2 * 5^2 * 7 * 11), and every edge is stored in two adjacency lists, so with n = 3*10^5 the lists can hold around 2 * 3*10^5 * 180 ≈ 1.1*10^8 ints. At 4 bytes each that is roughly 430 MB, well above the usual 256 MB limit, and the vectors' capacity overhead pushes the real usage even higher. The standard fix is to connect each index only to the distinct prime factors of b[i]: sharing a divisor greater than 1 is the same as sharing a prime factor, so reachability and shortest paths are unchanged, and any value up to 3*10^5 has at most 6 distinct prime factors (2*3*5*7*11*13 = 30030, and multiplying in 17 already overshoots), which leaves only O(n) edges.
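Here is a minimal sketch of that fix, assuming the same node layout as your code (prime p maps to node p + oo); buildSieve and addEdges are illustrative names, not something from your submission:

#include <bits/stdc++.h>
using namespace std;

const int MAX = 3e5 + 2;
const int oo = 3e5;
int spf[MAX];           // spf[x] = smallest prime factor of x
vector<int> a[MAX * 2]; // same bipartite adjacency lists as before

void buildSieve() {
    for (int i = 2; i < MAX; i++)
        if (spf[i] == 0)                       // i is prime
            for (int j = i; j < MAX; j += i)
                if (spf[j] == 0) spf[j] = i;
}

void addEdges(int i, int v) { // v = b[i]
    while (v > 1) {
        int p = spf[v];
        a[p + oo].push_back(i); // index <-> prime, both directions
        a[i].push_back(p + oo);
        while (v % p == 0) v /= p; // strip repeated occurrences of p
    }
}

int main() {
    buildSieve();
    addEdges(1, 277200); // example: node 1 gets edges to primes 2, 3, 5, 7, 11
}

With at most 6 distinct primes per value, the adjacency lists hold at most 2 * 6 * n ≈ 3.6*10^6 ints (a few tens of MB even with vector overhead), and your BFS and path reconstruction can stay exactly as they are.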