In this blog, I will share an approach that achieves fewer expected queries than the official editorial.
The standard solution uses exactly 33 queries, but we can do better in most cases.
The editorial performs three independent binary searches.
Each binary search takes at most 11 queries, totaling 33.
To optimize this, we can randomly shuffle all indices at the beginning.
By shuffling, the target positions are uniformly distributed, which often reduces the search range for the second and third binary searches.
Here is the probability distribution of total queries after randomization.
| Queries | Counts | Probability |
|---|---|---|
| <= 29 | 46891456 | 3.5% |
| >= 30 | 1286441544 | 96.5% |
| >= 31 | 1212227208 | 90.9% |
| >= 32 | 1027092744 | 77% |
| == 33 | 643170824 | 48.2% |
(There are a total of 1333333000 possible combinations)
As shown in the table, the probability of hitting the 33-query limit is only about 48.2%.
The expected number of queries is approximately 32.1064.
You can verify it with the following code:
#include<bits/stdc++.h>
#pragma GCC optimize(2)
using namespace std;

// Brute-force verification of the query-count distribution quoted above.
// Enumerates every ordered triple of hit positions (a < b < c) among the
// 2001 shuffled prefix lengths and tallies how many queries the three
// successive binary searches would need for each triple.
long long atLeast30, atLeast31, atLeast32, atLeast33; // triples needing >= 30 / 31 / 32 / 33 queries
long long combos;   // total number of triples examined
double querySum;    // running sum of per-triple query counts (for the average)

int main()
{
    ios::sync_with_stdio(0);
    cin.tie(0);
    cout.tie(0);
    for(long long a = 1; a <= 1999; a++)
    {
        for(long long b = a + 1; b <= 2000; b++)
        {
            for(long long c = b + 1; c <= 2001; c++)
            {
                // The first search always spans all 2001 candidates; the
                // second and third only span prefixes bounded by the
                // positions (b, c) found earlier, hence log2(b) / log2(c).
                long long queries = ceil(log2(b)) + ceil(log2(c)) + ceil(log2(2001));
                if(queries >= 30) atLeast30++;
                if(queries >= 31) atLeast31++;
                if(queries >= 32) atLeast32++;
                if(queries >= 33) atLeast33++;
                querySum += queries;
                combos++;
            }
        }
    }
    // Same two output lines as before: average + count, then the tallies.
    cout << querySum / combos << ' ' << combos << '\n';
    cout << atLeast30 << ' ' << atLeast31 << ' ' << atLeast32 << ' ' << atLeast33;
    return 0;
}
Below is the optimized implementation.
#include<bits/stdc++.h>
#pragma GCC optimize(2)
#define int long long
using namespace std;
int t , n , x , l , r , w[2005] , b , c , mid , ans , anss , ansss;
// Returns true exactly when x and y have different parities.
// (Kept as a %-based comparison so behavior matches the original for
// every input, including negatives where % can yield -1.)
inline bool chk(int x , int y)
{
    return x % 2 != y % 2;
}
// Interactive solver: shuffles the 2n+1 indices, then runs three successive
// binary searches over prefixes of the shuffled order to locate the three
// answer indices.  Each query is printed as "? m i1 ... im" (flushed via
// endl) and the judge's reply is read into x; chk(x, querySize) detecting a
// parity mismatch is treated as "a target lies inside this prefix", so the
// search moves left.  NOTE(review): the protocol semantics are taken from
// the surrounding blog text — confirm against the original problem statement.
signed main()
{
ios::sync_with_stdio(0);
ios_base::sync_with_stdio(0);
cin.tie(0);
cout.tie(0);
srand(time(0));
cin >> t;
while(t--)
{
cin >> n;
// w[1..2n+1] holds a random permutation of the indices.  Shuffling makes
// the three target positions uniformly distributed, which shortens the
// expected ranges of the second and third searches (the blog's whole point).
for(int i = 1 ; i <= n * 2 + 1 ; i++)
{
w[i] = i;
}
random_shuffle(w + 1 , w + n * 2 + 2);
// --- Search 1: smallest prefix length whose reply parity differs from the
// prefix size; `ans` ends as that prefix length.
l = 1;
r = n * 2 + 1;
while(l <= r)
{
mid = (l + r) / 2;
cout << "? " << mid;
for(int i = 1 ; i <= mid ; i++)
{
cout << ' ' << w[i];
}
cout << endl;
cin >> x;
if(chk(x , mid))
{
ans = mid;
r = mid - 1;
}
else
{
l = mid + 1;
}
}
c = ans;       // prefix length at which the first target was detected
ans = w[ans];  // translate shuffled position -> actual index
// --- Search 2: only positions before the first hit (range 1..c-1) need to
// be scanned.  Each query appends the already-found index `ans`, so the
// query size — and the parity check — uses mid + 1.
l = 1;
r = c - 1;
while(l <= r)
{
mid = (l + r) / 2;
cout << "? " << mid + 1;
for(int i = 1 ; i <= mid ; i++)
{
cout << ' ' << w[i];
}
cout << ' ' << ans << endl;
cin >> x;
if(chk(x , mid + 1))
{
anss = mid;
r = mid - 1;
}
else
{
l = mid + 1;
}
}
b = anss;        // prefix length of the second hit
anss = w[anss];  // actual index of the second target
// --- Search 3: positions before the second hit (range 1..b-1), appending
// both found indices; query size is therefore mid + 2.
l = 1;
r = b - 1;
while(l <= r)
{
mid = (l + r) / 2;
cout << "? " << mid + 2;
for(int i = 1 ; i <= mid ; i++)
{
cout << ' ' << w[i];
}
cout << ' ' << ans << ' ' << anss << endl;
cin >> x;
if(chk(x , mid + 2))
{
ansss = mid;
r = mid - 1;
}
else
{
l = mid + 1;
}
}
ansss = w[ansss];
// Report the three recovered indices.
cout << "! " << ans << ' ' << anss << ' ' << ansss << endl;
}
return 0;
}
(Note: For a more robust randomization, shuffle() with mt19937 is recommended over random_shuffle().)
As you can see, the optimization is so massive that you can barely notice it. But in a world of 33-query limits, 0.9 is a luxury I'm willing to flex.
Thanks for reading!









In this blog of more than 4000 bytes, code takes up more than half XD.