Algorithmic Contest Analysis: Range Queries, Grid Optimization, and Combinatorics
Contest Overview
Misreading the first problem caused a 20-minute delay. The second problem presented a psychological barrier despite being solvable for partial points. Skipping the third problem's statement cost easy points, while the fourth problem failed due to inefficient modular inverse preprocessing—using fast exponentiation in a loop instead of linear backward computation.
Problem 1: Haybale Feast
To find the minimum maximum spiciness in a subarray where the sum of flavors meets a threshold, we can combine two techniques. A Sparse Table efficiently handles Range Maximum Queries for the spiciness values. Coupling this with a two-pointer approach allows sliding the window to find, for each left endpoint, the shortest contiguous segment satisfying the flavor sum constraint in amortized O(n) overall, after the O(n log n) sparse-table preprocessing.
#include <iostream>
#include <vector>
#include <cmath>
#include <algorithm>
using namespace std;
using ll = long long;
int main() {
int n; ll m;
cin >> n >> m;
vector<ll> flavor(n + 1), spice(n + 1);
vector<vector<ll>> sparseTable(n + 1, vector<ll>(20));
for (int i = 1; i <= n; i++) {
cin >> flavor[i] >> spice[i];
sparseTable[i][0] = spice[i];
}
int maxLog = log2(n) + 1;
for (int j = 1; j < maxLog; j++)
for (int i = 1; i + (1 << j) - 1 <= n; i++)
sparseTable[i][j] = max(sparseTable[i][j - 1], sparseTable[i + (1 << (j - 1))][j - 1]);
auto queryMax = [&](int l, int r) {
int k = log2(r - l + 1);
return max(sparseTable[l][k], sparseTable[r - (1 << k) + 1][k]);
};
vector<ll> minSpice(n + 1, 0);
ll currentSum = 0;
int left = 1;
for (int right = 1; right <= n; right++) {
currentSum += flavor[right];
while (currentSum >= m) {
minSpice[left] = queryMax(left, right);
currentSum -= flavor[left];
left++;
}
}
ll ans = 1e18;
for (int i = 1; i <= n; i++) if (minSpice[i]) ans = min(ans, minSpice[i]);
cout << ans << endl;
return 0;
}Problem 2: Grid Maximization
When placing a point on an N x M grid to maximize Manhattan distance to K existing points, corners are intuitively optimal. If corners are occupied, the best alternative points are directly adjacent to occupied cells. To calculate distances efficiently, sort the coordinates of the K points. Prefix sums for the sorted axes allow computing the total Manhattan distance for any query point in O(log K) using binary search.
#include <iostream>
#include <vector>
#include <set>
#include <algorithm>
using namespace std;
using ll = long long;
// Grid dimensions (N rows, M columns) and number of existing points K.
int N, M, K;
// 1-indexed coordinates of the K points (each axis sorted independently after
// input) and prefix sums over the sorted axes, enabling O(log K) distance sums.
vector<ll> coordX, coordY, prefX, prefY;
// Exact set of occupied cells, used to reject candidate placements.
set<pair<int, int>> occupied;
// Total Manhattan distance from candidate (px, py) to all K points, or -1
// when the candidate is off-grid or already occupied.
// Precondition: coordX/coordY are sorted (1-indexed) with matching prefix sums.
ll computeDist(int px, int py) {
    const bool offGrid = px < 1 || px > N || py < 1 || py > M;
    if (offGrid || occupied.count({px, py})) return -1;
    // Index of the first coordinate >= the query on each axis.
    ll ix = lower_bound(coordX.begin() + 1, coordX.end(), px) - coordX.begin();
    ll iy = lower_bound(coordY.begin() + 1, coordY.end(), py) - coordY.begin();
    // Coordinates below the query contribute (count * q - sum);
    // coordinates at/above it contribute (sum - count * q).
    ll dx = (ix - 1) * px - prefX[ix - 1] + (prefX[K] - prefX[ix - 1]) - (K - ix + 1) * px;
    ll dy = (iy - 1) * py - prefY[iy - 1] + (prefY[K] - prefY[iy - 1]) - (K - iy + 1) * py;
    return dx + dy;
}
int main() {
cin >> N >> M >> K;
coordX.resize(K + 1); coordY.resize(K + 1);
prefX.resize(K + 1, 0); prefY.resize(K + 1, 0);
vector<pair<int, int>> points(K);
for (int i = 1; i <= K; i++) {
cin >> coordX[i] >> coordY[i];
points[i - 1] = {coordX[i], coordY[i]};
occupied.insert({coordX[i], coordY[i]});
}
sort(coordX.begin() + 1, coordX.end());
sort(coordY.begin() + 1, coordY.end());
for (int i = 1; i <= K; i++) {
prefX[i] = prefX[i - 1] + coordX[i];
prefY[i] = prefY[i - 1] + coordY[i];
}
ll maxDist = max({computeDist(1, 1), computeDist(1, M), computeDist(N, 1), computeDist(N, M)});
for (auto& p : points) {
int cx = p.first, cy = p.second;
maxDist = max({maxDist, computeDist(cx, 1), computeDist(cx, M), computeDist(1, cy), computeDist(N, cy),
computeDist(cx - 1, cy), computeDist(cx + 1, cy), computeDist(cx, cy - 1), computeDist(cx, cy + 1)});
}
cout << maxDist << endl;
return 0;
}Problem 3: Binary Search Tree Depths
Given a permutation inserted into a BST, calculating the total number of recursive insert calls is equivalent to finding the sum of node depths. A node's depth is determined by its predecessor and successor among the values inserted before it: the depth of a node is one plus the maximum depth of its closest smaller and closest larger previously inserted values. These neighbors can be found with a monotonic stack (nearest smaller value to the left, nearest larger value to the right, keyed by insertion time); crucially, the depths themselves must then be accumulated in insertion order, so that each node's neighbors already have their depths resolved.
#include <iostream>
#include <vector>
using namespace std;
using ll = long long;
int main() {
int n;
cin >> n;
vector<int> sequence(n + 1), pos(n + 1), depth(n + 1, 0), leftBound(n + 1), rightBound(n + 1);
for (int i = 1; i <= n; i++) {
cin >> sequence[i];
pos[sequence[i]] = i;
}
vector<int> stk; stk.push_back(0);
for (int i = 1; i <= n; i++) {
while (stk.back() != 0 && pos[stk.back()] > pos[i]) stk.pop_back();
leftBound[i] = stk.back();
stk.push_back(i);
}
stk.clear(); stk.push_back(0);
for (int i = n; i >= 1; i--) {
while (stk.back() != 0 && pos[stk.back()] > pos[i]) stk.pop_back();
rightBound[i] = stk.back();
stk.push_back(i);
}
ll totalOps = 0;
cout << 0 << "\n";
for (int i = 2; i <= n; i++) {
depth[i] = max(depth[leftBound[i]], depth[rightBound[i]]) + 1;
totalOps += depth[i];
cout << totalOps << "\n";
}
return 0;
}Problem 4: Modular Combinatorics Optimization
When evaluating combinatorial formulas under a modulus, precomputing factorials and inverse factorials is standard. However, computing the modular inverse for every factorial using fast exponentiation leads to Time Limit Exceeded due to the O(log MOD) cost per element. The correct approach calculates the inverse factorial of the largest value using Fermat's Little Theorem once, then iterates backwards to derive all preceding inverse factorials in O(1) per step.
#include <iostream>
#include <vector>
using namespace std;
using ll = long long;
// Prime modulus for all combinatorial arithmetic.
const ll MOD = 1e9 + 7;
// Upper bound (exclusive) on arguments accepted by fact/invFact.
const int MAXN = 2e7 + 5;
// fact[i] = i! mod MOD; invFact[i] = (i!)^(-1) mod MOD — filled by precompute().
vector<ll> fact(MAXN), invFact(MAXN);
// Binary exponentiation: base^exp mod MOD in O(log exp) multiplications.
// Assumes exp >= 0 (a negative exp yields 1, same as the original loop).
ll modPow(ll base, ll exp) {
    ll acc = 1;
    for (base %= MOD; exp > 0; exp >>= 1) {
        if (exp & 1) acc = acc * base % MOD;
        base = base * base % MOD;
    }
    return acc;
}
// Fill fact[] with a forward pass, then derive invFact[] from a single
// Fermat inverse of the largest factorial followed by a backward sweep:
// O(MAXN + log MOD) total, versus O(MAXN log MOD) when inverting each
// factorial with fast exponentiation (the TLE mentioned in the analysis).
void precompute() {
    fact[0] = 1;
    for (int i = 1; i < MAXN; ++i) {
        fact[i] = fact[i - 1] * i % MOD;
    }
    invFact[MAXN - 1] = modPow(fact[MAXN - 1], MOD - 2);
    for (int i = MAXN - 2; i >= 0; --i) {
        invFact[i] = invFact[i + 1] * (i + 1) % MOD;
    }
}
// C(n, r) mod MOD using the precomputed factorial tables.
// Preconditions: precompute() has run and n < MAXN.
// FIX: the original only rejected r > n, so a negative r (or negative n)
// indexed fact/invFact with a negative subscript — undefined behavior.
// All out-of-domain queries now return 0, the combinatorially correct value.
ll combinations(int n, int r) {
    if (n < 0 || r < 0 || r > n) return 0;
    return fact[n] * invFact[r] % MOD * invFact[n - r] % MOD;
}
int main() {
ios::sync_with_stdio(false);
cin.tie(nullptr);
precompute();
int t;
cin >> t;
while (t--) {
int n, k;
cin >> n >> k;
ll halfK = (k + 1) / 2;
ll res = (2 * combinations(n + halfK - 1, n) % MOD + combinations(n - 2 + halfK, n - 1) * ((k - 2 * halfK + MOD) % MOD) % MOD) % MOD;
cout << res << "\n";
}
return 0;
}