Fading Coder

One Final Commit for the Last Sprint

Home > Tech > Content

Inverted Dynamic Programming with Time Blocking for Graph Updates

Tech May 8 3

Consider processing graph updates in reverse. The state for each node represents the minimum cost required to become 'safe'. Each update modifies only a single node b_i, requiring an examination of its neighbors. A square root decomposition approach based on vertex degree allows for efficient handling of high-degree nodes.

We can partition nodes into heavy (degree > √m) and light sets. For heavy nodes, we maintain a sorted structure of current DP values from neighbors. However, this method incurs a logarithmic factor, which we aim to eliminate. Using time blocking—processing queries in batches of size B—enables a O(q√m) solution by precomputing for each node the top (B+1) neighbor DP values.

Within a time block, at most B nodes are updated. Thus, when evaluating a node, the optimal value must be among these precomputed candidates, ensuring correctness without sorting.

#include <bits/stdc++.h>
using namespace std;
using ll = long long;
const int MAXN = 100005;
const ll INF = 1e18;                  // sentinel for "not yet safe" dp values
const int MOD = 998244353;
// adj[u]: (neighbour, edge weight) pairs; graph is undirected.
vector<pair<int, int>> adj[MAXN];
// dp[u]: minimum cost for node u to become 'safe' (see write-up above).
ll dp[MAXN];
int val[MAXN], upd[MAXN];             // node weights; sequence of updated nodes
int n, m, q;

int main() {
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    cin >> n >> m >> q;
    for (int i = 1; i <= n; ++i) cin >> val[i];
    for (int i = 0; i < m; ++i) {
        int u, v, w;
        cin >> u >> v >> w;
        adj[u].emplace_back(v, w);
        adj[v].emplace_back(u, w);
    }
    for (int i = 1; i <= q; ++i) cin >> upd[i];
    fill(dp + 1, dp + n + 1, INF);
    // Time blocking: process updates in reverse, in batches of ~sqrt(m).
    int block = ceil(sqrt(m));
    for (int r = q; r > 0; r -= block) {
        // Before each batch, partition every high-degree node's adjacency so
        // its `block` smallest dp[v] + w candidates come first. Within one
        // batch at most `block` dp values change, so (per the write-up) a true
        // minimum is always among the first block+1 entries scanned below.
        for (int u = 1; u <= n; ++u) {
            if (adj[u].size() > block) {
                nth_element(adj[u].begin(), adj[u].begin() + block, adj[u].end(),
                    [](auto &a, auto &b) { return dp[a.first] + a.second < dp[b.first] + b.second; });
            }
        }
        for (int i = r; i > max(0, r - block); --i) {
            int u = upd[i];
            dp[u] = INF;
            int cnt = 0;
            // Scan only the first block+1 candidates (see note above); light
            // nodes (degree <= block) are scanned fully.
            for (auto &[v, w] : adj[u]) {
                dp[u] = min(dp[u], dp[v] + w);
                if (++cnt > block) break;
            }
        }
    }
    int ans = 0;
    // NOTE(review): nodes still at INF contribute INF % MOD (nonzero) to the
    // sum — presumably intended by the problem statement; confirm.
    for (int i = 1; i <= n; ++i) ans = (ans + (dp[i] % MOD) * val[i]) % MOD;
    cout << ans << '\n';
    return 0;
}

For interactive graph reconstruction, assign each edge a bitmask mask_e indicating the experiments in which it appears. For an edge, its connectivity state is its mask; for a non-edge, the state is the intersection of masks along any path. By ensuring each mask has Hamming weight limit/2, the intersection of any two masks has size less than limit/2. The tree can be reconstructed by peeling leaves: a leaf node appears exactly limit/2 times in its own component across experiments.

Complexities: naive O(n²·limit) is feasible; optimization using leaf properties reduces it to O(n·limit²).

#include "tree.h"
#include <bits/stdc++.h>
#include <ext/pb_ds/assoc_container.hpp>
using namespace std;
using vi = vector<int>;
using vvi = vector<vi>;
using __gnu_pbds::gp_hash_table;

// Reconstructs the hidden tree on n vertices from 2m connectivity experiments.
// Each edge gets a distinct bitmask with popcount m out of 2m bits; any two
// distinct such masks intersect in fewer than m bits, which distinguishes tree
// edges from non-edges. Returns the recovered edge list.
vector<pair<int, int>> solve(int n) {
    int m = (n <= 2000) ? 7 : 5;   // bits per mask half; enough masks for n-1 edges
    int total = 2 * m;
    // Enumerate bitmasks of popcount m in increasing order (Gosper's hack).
    int cur = (1 << m) - 1;
    vi edge_mask(n - 1);
    for (int i = 0; i < n - 1; ++i) {
        edge_mask[i] = cur;
        int low = cur & -cur;
        int nxt = cur + low;
        cur = ((nxt ^ cur) >> (__builtin_ctz(low) + 2)) | nxt;
    }
    // Per experiment: component index of every vertex, the raw component
    // lists, plus XOR-of-members and size per component (the XOR cheaply
    // identifies the lone member of a singleton).
    // Bug fix: comp_id/comp_sz were declared `vi`, making the per-experiment
    // .resize(...) calls below ill-formed (resize on an int); they are 2-D.
    vvi comp(total, vi(n));
    vector<vvi> comp_list(total);
    vvi comp_id(total), comp_sz(total);
    vi deg(n, 0), order, pos(n);
    for (int exp = 0; exp < total; ++exp) {
        vi query_vec(n - 1);
        for (int i = 0; i < n - 1; ++i) query_vec[i] = (edge_mask[i] >> exp) & 1;
        comp_list[exp] = query(query_vec);
        comp_sz[exp].resize(comp_list[exp].size());
        comp_id[exp].resize(comp_list[exp].size(), 0);
        for (size_t idx = 0; idx < comp_list[exp].size(); ++idx) {
            auto &c = comp_list[exp][idx];
            comp_sz[exp][idx] = c.size();
            for (int v : c) {
                comp[exp][v] = idx;
                comp_id[exp][idx] ^= v;
            }
            // A vertex isolated in m experiments is a leaf; queue it for peeling.
            if (comp_sz[exp][idx] == 1) {
                int node = comp_id[exp][idx];
                if (++deg[node] == m) order.push_back(node);
            }
        }
    }
    // Leaf-peeling: removing u may leave a neighbour as the sole remaining
    // member of some component, cascading further peels. `order` grows while
    // we iterate (BFS-style); pos[u] records peel time.
    for (int it = 0; it < (int)order.size(); ++it) {
        int u = order[it];
        for (int exp = 0; exp < total; ++exp) {
            int cid = comp[exp][u];
            comp_id[exp][cid] ^= u;
            if (--comp_sz[exp][cid] == 1) {
                int cand = comp_id[exp][cid];
                if (++deg[cand] == m) order.push_back(cand);
            }
        }
        pos[u] = it;
    }
    vector<pair<int, int>> edges;
    gp_hash_table<int, gp_hash_table<int, bool>> seen;
    // (u, v) is a genuine tree edge iff their component labels differ in
    // exactly m of the 2m experiments; `seen` deduplicates candidate pairs.
    auto valid_edge = [&](int u, int v) -> bool {
        if (seen.find(u) != seen.end() && seen[u].find(v) != seen[u].end()) return false;
        seen[u][v] = true;
        int cnt = 0;
        for (int exp = 0; exp < total; ++exp) cnt += (comp[exp][u] != comp[exp][v]);
        return cnt == m;
    };
    for (int exp = 0; exp < total; ++exp) {
        // The member peeled last acts as the root of each component.
        // (Renamed from `comp`, which shadowed the outer 2-D array.)
        for (auto &members : comp_list[exp]) {
            int root = *max_element(members.begin(), members.end(), [&](int x, int y) { return pos[x] < pos[y]; });
            for (int v : members) if (v != root && valid_edge(v, root)) edges.emplace_back(v, root);
        }
    }
    return edges;
}

A problem about splitting equivalence classes on a tree: an equivalence class contains edges whose coverage by non-tree edges is identical. Splitting occurs when a class has elements both inside and outside a query interval. By tracking the minimum predecessor and maximum successor within the interval, we can identify which classes to split.

This is managed online with balanced trees for set operations, achieving O(n log n) per query.

#include <bits/stdc++.h>
using namespace std;
// Process-wide RNG seeded from the monotonic clock; supplies treap priorities.
std::mt19937_64 rng(std::chrono::steady_clock::now().time_since_epoch().count());
using ll = long long;
// Treap node: a randomized BST keyed by `key`, augmented with subtree size
// and a parent pointer so any node can locate its root (see findRoot).
struct Node {
    int key = 0;
    int size = 1;                 // number of nodes in this subtree
    unsigned long priority = 0;   // random heap priority
    Node *left = nullptr, *right = nullptr, *parent = nullptr;
    Node(int k) : key(k), priority(rng()) {}
};
int getSize(Node *x) { return x ? x->size : 0; }
// Recompute x's subtree size from its children and re-attach their parent
// pointers. Safe to call with null.
void update(Node *x) {
    if (x == nullptr) return;
    int total = 1;
    if (x->left)  { total += x->left->size;  x->left->parent = x; }
    if (x->right) { total += x->right->size; x->right->parent = x; }
    x->size = total;
}
// Merge two treaps where every key in `a` precedes every key in `b`.
// The node with the smaller priority becomes the root (min-heap on priority).
Node* merge(Node *a, Node *b) {
    if (!a) return b;
    if (!b) return a;
    if (a->priority < b->priority) {
        a->right = merge(a->right, b);
        update(a);
        return a;
    }
    b->left = merge(a, b->left);
    update(b);
    return b;
}
// Split treap `root` by key: `a` receives keys <= k, `b` receives keys > k.
// The recursion writes directly into a->right / b->left, re-linking subtrees
// in place; update() fixes sizes and child parent pointers on unwind. The two
// result roots keep their old parent pointers — callers must clear them.
void split(Node *root, int k, Node *&a, Node *&b) {
    if (!root) { a = b = nullptr; return; }
    if (root->key <= k) {
        a = root;
        split(root->right, k, a->right, b);
        update(a);
    } else {
        b = root;
        split(root->left, k, a, b->left);
        update(b);
    }
}
// Walk parent pointers up to the treap's root; returns null for null input.
Node* findRoot(Node *x) {
    if (x == nullptr) return nullptr;
    while (x->parent != nullptr) x = x->parent;
    return x;
}
const int MAXN = 250005;
int N, M;
// prevIdx/nextIdx: doubly-linked list over edge indices 1..N-1 — presumably
// used to detach interval parts during splits (see solveCase); confirm
// against the original problem.
int prevIdx[MAXN], nextIdx[MAXN];
// compID: DSU that skips over fully-covered edges; edgeCnt[i]: how many
// active query intervals currently cover edge i.
int compID[MAXN], edgeCnt[MAXN];
// sets[i]: treap node representing edge i inside its equivalence class.
Node* sets[MAXN];
int find(int x) { return compID[x] == x ? x : compID[x] = find(compID[x]); }
// One test case: N tree vertices (edges indexed 1..N-1), M interval queries;
// prints one value per query. Equivalence classes of edges (identical
// coverage) live in treaps; a query splits every class that has members both
// inside and outside its edge range.
void solveCase() {
    cin >> N >> M;
    // Single-vertex tree: every answer is 0.
    if (N == 1) { while (M--) { int l, r; cin >> l >> r; cout << "0\n"; } return; }
    for (int i = 1; i < N; ++i) {
        prevIdx[i] = i - 1;
        nextIdx[i] = i + 1;
        compID[i] = i;
        edgeCnt[i] = 0;
        sets[i] = new Node(i);   // NOTE(review): nodes are never freed between cases
    }
    prevIdx[1] = 1; nextIdx[N-1] = N-1;   // clamp list ends to themselves
    ll totalPairs = (ll)(N-2) * (N-1) / 2;
    int compCnt = N-1, activeCnt = 0;
    for (int q = 1; q <= M; ++q) {
        // Convert the vertex interval to an edge range [L, R] (edge i joins
        // vertices i and i+1, hence --R).
        int L, R; cin >> L >> R; if (L > R) swap(L, R); --R;
        if (L <= R) {
            // Raise coverage on each edge in [L, R]; once an edge is covered
            // twice the DSU skips it (compID[cur] = cur+1).
            for (int cur = find(L); cur <= R; cur = find(cur+1)) {
                if (edgeCnt[cur] == 0) { --compCnt; ++activeCnt; }
                else if (edgeCnt[cur] == 1) { --activeCnt; compID[cur] = cur+1; }
                ++edgeCnt[cur];
            }
            // Collect classes straddling the interval: the treap root's key is
            // one member; if it lies outside [L, R] while sets[i] (inside) is
            // a member too, the class must be split.
            vector<pair<int, Node*>> toSplit;
            for (int i = L; i <= R; i = find(i+1)) {
                Node *root = findRoot(sets[i]);
                if (root && (root->key < L || root->key > R)) {
                    toSplit.emplace_back(i, root);
                }
            }
            for (auto &[idx, root] : toSplit) {
                // Remove the whole class's pair count, split it into the part
                // inside [L, R] (mid) and the part outside (left + right),
                // then add back both parts' counts.
                totalPairs -= (ll)root->size * (root->size - 1) / 2;
                Node *left, *mid, *right;
                split(root, L-1, left, mid);
                split(mid, R, mid, right);
                if (left) left->parent = nullptr;
                if (mid) mid->parent = nullptr;
                if (right) right->parent = nullptr;
                if (mid) {
                    // Patch the linked list to detach the interval part.
                    // NOTE(review): nextIdx[prevIdx[R]] as the outside
                    // successor looks suspicious — verify against the
                    // intended list invariant.
                    int l1 = prevIdx[idx];
                    int r1 = nextIdx[prevIdx[R]];
                    prevIdx[idx] = idx;
                    nextIdx[r1] = r1;
                    nextIdx[l1] = r1;
                    prevIdx[r1] = l1;
                    Node *newRoot = merge(left, right);
                    totalPairs += (ll)getSize(newRoot) * (getSize(newRoot)-1) / 2;
                    totalPairs += (ll)getSize(mid) * (getSize(mid)-1) / 2;
                } else {
                    Node *newRoot = merge(left, right);
                    totalPairs += (ll)getSize(newRoot) * (getSize(newRoot)-1) / 2;
                }
            }
        }
        // Answer formula from the write-up; compCnt counts never-covered
        // classes, activeCnt those covered exactly once. TODO(review): confirm
        // the (q + N - 1 - compCnt) term against the original derivation.
        cout << totalPairs + (ll)compCnt * (q + N - 1 - compCnt) + activeCnt << '\n';
    }
}
// Entry point: fast I/O setup, then one solveCase() per test case.
int main() {
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int T;
    cin >> T;
    for (; T > 0; --T) solveCase();
    return 0;
}

Given constraints on local medians, a sequence must be zig-zag and its odd/even subsequences must be unimodal. This leads to a DP on a transformed sequence.

Construct a new sequence by concatenating reversed odd positions with even positions. This creates a bitonic sequence. After rotating to place the maximum at the front, it becomes unimodal. Then use interval DP with additional conditions to ensure the zig-zag property, counting valid permutations.

#include <bits/stdc++.h>
using namespace std;
// Counting modulus.
const int MOD = 1e9+7;
// x = (x + v) mod MOD, assuming both operands already lie in [0, MOD).
inline void add(int &x, int v) {
    x += v;
    if (x >= MOD) x -= MOD;
}
// Counts permutations satisfying the local-median constraints; p[i] == -1
// marks an unconstrained position. Uses the write-up's transform: odd
// positions followed by reversed even positions form a bitonic sequence,
// handled as an interval DP over the doubled array (rotation via i % n).
int main() {
    int T; cin >> T;
    while (T--) {
        int n; cin >> n;
        vector<int> p(n+1);
        for (int i = 1; i <= n; ++i) cin >> p[i];
        vector<int> seq;
        for (int i = 1; i <= n; i += 2) seq.push_back(p[i]);
        int split = seq.size();   // boundary between the odd and even halves
        for (int i = n - (n%2); i >= 1; i -= 2) seq.push_back(p[i]);
        // Bug fix: the original also built pos[seq[i-1]] = i here; seq entries
        // can be -1, so that indexed pos[-1] (out of bounds), and pos was
        // never read afterwards — the dead array is removed entirely.
        int ans = 0;
        for (int par = 0; par < 2; ++par) {
            // dp[l][r]: ways to place values 1..len into the doubled-sequence
            // window [l, r], smaller values placed first at either end.
            vector<vector<int>> dp(2*n, vector<int>(2*n, 0));
            for (int i = 0; i < 2*n; ++i) {
                int val = seq[i % n];
                // Base case: value 1 at a lone position; the transformed
                // side's parity must match `par`.
                if (val == -1 || val == 1) dp[i][i] = ((i % n < split) ^ par) ? 1 : 0;
                else dp[i][i] = 0;
            }
            for (int len = 2; len <= n; ++len) {
                for (int l = 0, r = len-1; r < 2*n; ++l, ++r) {
                    dp[l][r] = 0;
                    // Value `len` may go at the window's left end...
                    int leftVal = seq[l % n];
                    if ((leftVal == -1 || leftVal == len) && ((l % n < split) ^ par)) {
                        add(dp[l][r], dp[l+1][r]);
                        if (len == n && ((l % n < split) ^ par)) add(ans, dp[l+1][r]);
                    }
                    // ...or at its right end.
                    int rightVal = seq[r % n];
                    if ((rightVal == -1 || rightVal == len) && ((r % n < split) ^ par)) {
                        add(dp[l][r], dp[l][r-1]);
                    }
                }
            }
        }
        cout << ans << '\n';
    }
    return 0;
}

Placing a tree on a plane by recursively partitioning subtrees using angular sorting ensures non‑intersecting convex hulls. For ternary trees, a divide‑and‑conquer along a weighted centroid chain using global balanced binary trees reduces complexity to O(n log n).

The process fixes the root and a leaf on the convex hull, then recursively partitions remaining points.

#include <bits/stdc++.h>
using namespace std;
// An input point; id is the 1-based index it had in the input.
struct Point { int x, y, id; };
vector<Point> pts;
vector<vector<int>> tree;            // adjacency lists, vertices 1..n
vector<int> subtreeSize, answer;     // answer[point id] = tree vertex placed there

// Fill subtreeSize[u] for the subtree rooted at u (parent p excluded).
void computeSize(int u, int p) {
    subtreeSize[u] = 1;
    for (int child : tree[u]) {
        if (child == p) continue;
        computeSize(child, u);
        subtreeSize[u] += subtreeSize[child];
    }
}

void assign(int l, int r, int u, int p) {
    int minIdx = l;
    for (int i = l+1; i <= r; ++i)
        if (pts[i].y < pts[minIdx].y) minIdx = i;
    swap(pts[l], pts[minIdx]);
    answer[pts[l].id] = u;
    int pivotX = pts[l].x, pivotY = pts[l].y;
    sort(pts.begin()+l+1, pts.begin()+r+1, [&](const Point &a, const Point &b) {
        return (a.y - pivotY) * (b.x - pivotX) < (a.x - pivotX) * (b.y - pivotY);
    });
    int cur = l+1;
    for (int v : tree[u]) if (v != p) {
        nth_element(pts.begin()+cur, pts.begin()+cur+subtreeSize[v], pts.begin()+r+1, [&](const Point &a, const Point &b) {
            return (a.y - pivotY) * (b.x - pivotX) < (a.x - pivotX) * (b.y - pivotY);
        });
        assign(cur, cur+subtreeSize[v]-1, v, u);
        cur += subtreeSize[v];
    }
}

// Reads the tree and the point set, then maps vertices onto points so that
// edges drawn as segments do not intersect.
int main() {
    int n;
    cin >> n;
    tree.assign(n + 1, {});
    for (int e = 1; e < n; ++e) {
        int a, b;
        cin >> a >> b;
        tree[a].push_back(b);
        tree[b].push_back(a);
    }
    pts.assign(n, {});
    for (int i = 0; i < n; ++i) {
        cin >> pts[i].x >> pts[i].y;
        pts[i].id = i + 1;
    }
    subtreeSize.assign(n + 1, 0);
    answer.assign(n + 1, 0);
    computeSize(1, 0);
    assign(0, n - 1, 1, 0);
    for (int i = 1; i <= n; ++i) cout << answer[i] << " ";
    cout << '\n';
    return 0;
}

Given an initial grid and patterns, the goal is to match patterns by converting cells to wildcards. Reverse the process: operations replace a matching rectangle with wildcards. Simulate by repeatedly moving required characters into existing wildcard regions.

Complexity is manageable due to small grid size (≤20).

#include <bits/stdc++.h>
using namespace std;
const int MAX = 22, LIM = 400005;
int rows, cols, patterns;
// Working grid (mutated by the simulation) and the encoded input grid.
int grid[MAX][MAX], target[MAX][MAX];
// A pattern's h x w footprint is anchored at the grid's top-left corner.
// NOTE(review): some call sites index `cell` with a single subscript as if it
// were a per-symbol count; only cell[i][j] (the pattern's symbols) exists.
struct Pattern { int h, w, cell[MAX][MAX]; } pat[15];
// freq[s]: copies of encoded symbol s currently on the grid (0 = wildcard).
int freq[65];
// Recorded operations, emitted in reverse at the end (the construction
// simulates the inverse process).
struct Action { int op, r, c; } ans[LIM];
int ansCnt = 0;
// Walk the cell at (r1,c1) to (r2,c2) one adjacent swap at a time, recording
// each step's inverse op for the reversed output (4=up, 3=down, 2=left, 1=right).
void move(int r1, int c1, int r2, int c2) {
    while (r1 != r2 || c1 != c2) {
        int op, nr = r1, nc = c1;
        if (r1 > r2)      { op = 4; --nr; }
        else if (r1 < r2) { op = 3; ++nr; }
        else if (c1 > c2) { op = 2; --nc; }
        else              { op = 1; ++nc; }
        swap(grid[r1][c1], grid[nr][nc]);
        ans[ansCnt++] = {op, r1, c1};
        r1 = nr;
        c1 = nc;
    }
}
// Record that pattern `id` was applied, then blank its top-left footprint to
// wildcards (0), keeping the symbol frequency table in sync.
void applyPattern(int id) {
    ans[ansCnt++] = {5 + id, 1, 1};
    for (int r = 1; r <= pat[id].h; ++r) {
        for (int c = 1; c <= pat[id].w; ++c) {
            --freq[grid[r][c]];
            ++freq[0];
            grid[r][c] = 0;   // wildcard
        }
    }
}
// Decide whether pattern `id` can currently be matched: (a) it must share at
// least one concrete symbol with the grid (pattern 0, the final/identity
// case, is exempt) and (b) every symbol the grid lacks must be coverable by
// available wildcards.
// Bug fix: the original indexed pat[id].cell with a single subscript and
// compared the resulting row pointer against an int — ill-formed. The intent
// was a per-symbol requirement count, computed here explicitly.
bool canMatch(int id) {
    int need[65] = {0};   // need[v]: copies of symbol v the pattern requires
    for (int i = 1; i <= pat[id].h; ++i)
        for (int j = 1; j <= pat[id].w; ++j)
            ++need[pat[id].cell[i][j]];
    bool hasCommon = false;
    int needWild = 0;
    for (int v = 1; v <= 62; ++v) {
        if (need[v] && freq[v]) hasCommon = true;
        if (need[v] > freq[v]) needWild += need[v] - freq[v];
    }
    return (hasCommon || id == 0) && needWild <= freq[0];
}
// Bring a matching symbol (or a wildcard if that symbol is exhausted) to
// every cell of pattern `id`'s footprint.
// Bug fix: the original's inner `break` exited only the column loop, so the
// outer row loop kept scanning and could move a SECOND matching cell onto the
// same target, displacing the one just placed. moveFirst stops after the
// first match.
void fillPattern(int id) {
    // Move the first grid cell holding `want` to (di, dj), then stop.
    auto moveFirst = [&](int want, int di, int dj) {
        for (int r = 1; r <= rows; ++r)
            for (int c = 1; c <= cols; ++c)
                if (grid[r][c] == want) { move(r, c, di, dj); return; }
    };
    for (int i = 1; i <= pat[id].h; ++i)
        for (int j = 1; j <= pat[id].w; ++j) {
            int val = pat[id].cell[i][j];
            if (freq[val]) {
                --freq[val];
                moveFirst(val, i, j);
            } else {
                --freq[0];
                moveFirst(0, i, j);
            }
        }
}
int main() {
    cin >> rows >> cols >> patterns;
    for (int i = 1; i <= rows; ++i)
        for (int j = 1; j <= cols; ++j) {
            char ch; cin >> ch;
            // Encoding: 'a'-'z' -> 1..26, 'A'-'Z' -> 27..52, '0'-'9' -> 53..62.
            target[i][j] = isdigit(ch) ? ch-'0'+53 : islower(ch) ? ch-'a'+1 : ch-'A'+27;
            ++freq[target[i][j]];
        }
    for (int id = 1; id <= patterns; ++id) {
        cin >> pat[id].h >> pat[id].w;
        for (int i = 1; i <= pat[id].h; ++i)
            for (int j = 1; j <= pat[id].w; ++j) {
                char ch; cin >> ch;
                pat[id].cell[i][j] = isdigit(ch) ? ch-'0'+53 : islower(ch) ? ch-'a'+1 : ch-'A'+27;
            }
    }
    // Simulate the reversed process starting from the target grid.
    memcpy(grid, target, sizeof grid);
    while (true) {
        // Greedily pick any currently matchable pattern.
        int chosen = -1;
        for (int id = 1; id <= patterns; ++id) if (canMatch(id)) { chosen = id; break; }
        if (chosen == -1) break;
        fillPattern(chosen);
        applyPattern(chosen);
        // Drain remaining copies of each symbol the pattern can absorb by
        // moving one into the footprint and re-applying the pattern.
        for (int v = 1; v <= 62; ++v) {
            // NOTE(review): pat[chosen].cell[v] decays to a row pointer here,
            // which is always truthy — the loop condition effectively tests
            // only freq[v]. Likely meant a per-pattern symbol count; confirm.
            while (freq[v] && pat[chosen].cell[v]) {
                for (int i = 1; i <= rows; ++i)
                    for (int j = 1; j <= cols; ++j)
                        if (grid[i][j] == v) {
                            for (int r = 1; r <= pat[chosen].h; ++r)
                                for (int c = 1; c <= pat[chosen].w; ++c)
                                    if (pat[chosen].cell[r][c] == v) {
                                        move(i, j, r, c);
                                        applyPattern(chosen);
                                        break;
                                    }
                            break;
                        }
            }
        }
    }
    // Pattern 0 models "the whole grid is wildcards": feasibility check, then
    // output the recorded operations in reverse order.
    if (!canMatch(0)) { cout << "-1\n"; return 0; }
    fillPattern(0);
    cout << ansCnt << '\n';
    for (int i = ansCnt-1; i >= 0; --i) cout << ans[i].op << ' ' << ans[i].r << ' ' << ans[i].c << '\n';
    return 0;
}

For local deletion operations on an array, only O(log n) rounds occur. Build a layered structure storing array states after each round. Queries affect only endpoints; recursively propagate through layers.

Precomputing predecessor/successor yields O(n log n).

Given a DAG of moves in a game, Hoof and Brain play. Nodes with no outgoing edges are removed via topological sort. Nodes with a single outgoing edge are merged with their successor, as they become equivalent. After contraction, if both tokens are in the same contracted node, Brain wins; otherwise Hoof can move indefinitely.

Complexity O(m log n) with union‑find and priority queues.

#include <bits/stdc++.h>
#include <ext/pb_ds/assoc_container.hpp>
using namespace std;
using __gnu_pbds::gp_hash_table;
const int MAXN = 100005;
// incoming[v]: predecessors of v (hash table used as a set via bool values).
gp_hash_table<int, bool> incoming[MAXN];
// outDeg[u]: current out-degree; parent[]: DSU over contracted nodes;
// singleEdge[u]: XOR of u's current out-neighbours — when outDeg[u] == 1 it
// equals the unique successor.
int outDeg[MAXN], parent[MAXN], singleEdge[MAXN];
// DSU find with path compression.
int find(int x) { return parent[x] == x ? x : parent[x] = find(parent[x]); }
// Orders the merge queue by in-degree (smallest incoming set merged first,
// small-to-large); note queued sizes may become stale.
struct Compare {
    bool operator()(int a, int b) { return incoming[a].size() > incoming[b].size(); }
};
int main() {
    ios::sync_with_stdio(false); cin.tie(nullptr);
    int n, m; cin >> n >> m;
    for (int i = 0; i < m; ++i) {
        int u, v; cin >> u >> v;
        ++outDeg[u];
        incoming[v][u] = true;
        singleEdge[u] ^= v;   // XOR-aggregate of u's out-neighbours
    }
    // Phase 1: repeatedly strip nodes with no outgoing moves (dead ends);
    // edges into them are deleted.
    queue<int> zeroOut;
    for (int i = 1; i <= n; ++i) {
        parent[i] = i;
        if (outDeg[i] == 0) zeroOut.push(i);
    }
    while (!zeroOut.empty()) {
        int u = zeroOut.front(); zeroOut.pop();
        for (auto &[v, _] : incoming[u]) {
            singleEdge[v] ^= u;
            if (--outDeg[v] == 0) zeroOut.push(v);
        }
        incoming[u].clear();
    }
    // Phase 2: contract forced moves. A node with exactly one outgoing edge
    // is equivalent to its unique successor, so merge it in (DSU union),
    // redirecting its incoming edges and deduplicating parallels.
    priority_queue<int, vector<int>, Compare> pq;
    for (int i = 1; i <= n; ++i) if (outDeg[i] == 1) pq.push(i);
    while (!pq.empty()) {
        int u = pq.top(); pq.pop();
        // NOTE(review): assumes outDeg[u] is still 1 when popped — verify a
        // stale queue entry cannot reach here with a different degree.
        int v = singleEdge[u];
        if (u == v) continue;   // self-loop after earlier contractions
        parent[u] = v;
        for (auto &[x, _] : incoming[u]) {
            singleEdge[x] ^= u;
            if (incoming[v].find(x) != incoming[v].end()) {
                // x already reached v: the redirected edge is a duplicate.
                if (--outDeg[x] == 1) pq.push(x);
            } else {
                incoming[v][x] = true;
                singleEdge[x] ^= v;
            }
        }
        incoming[u].clear();
    }
    // Queries: Brain wins iff a token has no moves left or both tokens ended
    // up in the same contracted node; otherwise Hoof can evade forever.
    int q; cin >> q;
    while (q--) {
        int a, b; cin >> a >> b;
        if (outDeg[a] == 0 || outDeg[b] == 0 || find(a) == find(b)) cout << 'B';
        else cout << 'H';
    }
    cout << '\n';
    return 0;
}

Given three independent graphs, the maximum‑weight independent set on their Cartesian product is equivalent to the lexicographically largest independent set because weights differ exponentially. Processing nodes in descending total coordinate sum yields the set of P‑positions in the impartial game defined by directed edges from higher to lower sums. By the Sprague‑Grundy theorem, the SG function of the product is the XOR of the SG functions of each graph.

Since sparse DAGs have bounded SG values (O(√m)), we can compute convolution by iterating over possible SG values.

#include <bits/stdc++.h>
using namespace std;
const int MOD = 998244353, BASE = 716070898;
// Sums BASE^a * BASE^b * BASE^c over node triples (a, b, c) with
// sg0(a) ^ sg1(b) ^ sg2(c) == 0, i.e. the weighted P-positions of the
// product game (per the write-up, the exponential weights make this the
// maximum-weight independent set answer).
int main() {
    ios::sync_with_stdio(false); cin.tie(nullptr);
    int n; cin >> n;
    vector<int> powW(n+1); powW[0] = 1;
    for (int i = 1; i <= n; ++i) powW[i] = 1LL * powW[i-1] * BASE % MOD;
    // sg[t][u]: Grundy value of node u in graph t; cnt[t][g]: sum of node
    // weights with Grundy value g.
    vector<int> sg[3], cnt[3];
    for (int t = 0; t < 3; ++t) {
        int m; cin >> m;
        vector<vector<int>> adj(n+1);
        for (int i = 0; i < m; ++i) {
            int u, v; cin >> u >> v;
            if (u > v) swap(u, v);   // orient edges from lower to higher index
            adj[u].push_back(v);
        }
        // NOTE(review): the fixed bound 512 assumes every SG value stays
        // below 512 (the write-up's O(sqrt(m)) claim) — confirm against the
        // problem constraints.
        sg[t].resize(n+1); cnt[t].resize(512);
        vector<bool> seen(512);
        // Grundy values in decreasing index order (reverse topological, since
        // edges point to higher indices): sg[u] = mex of successors' values.
        for (int u = n; u >= 1; --u) {
            for (int v : adj[u]) seen[sg[t][v]] = true;
            while (seen[sg[t][u]]) ++sg[t][u];
            for (int v : adj[u]) seen[sg[t][v]] = false;
            cnt[t][sg[t][u]] = (cnt[t][sg[t][u]] + powW[u]) % MOD;
        }
    }
    // Brute-force XOR convolution over the 512 possible SG values: for each
    // (i, j) the third value must be exactly i ^ j to XOR to zero.
    int ans = 0;
    for (int i = 0; i < 512; ++i)
        for (int j = 0; j < 512; ++j)
            ans = (ans + 1LL * cnt[0][i] * cnt[1][j] % MOD * cnt[2][i^j]) % MOD;
    cout << ans << '\n';
    return 0;
}

Minimizing the range of interval averages can be modeled as a path on a triangular grid. Each layer corresponds to intervals of fixed length. For each layer, only the predecessor and successor of the global average are candidates. Start by always choosing the left candidate, then transition to the right candidate in increasing order of value, maintaining the path and current range using a multiset.

#include <bits/stdc++.h>
using namespace std;
using ll = long long;
using lll = __int128;
// Minimizes the range (max - min) over one chosen length-len window average
// per layer, plus the global average, via the left/right candidate sweep
// described in the write-up.
int main() {
    ios::sync_with_stdio(false); cin.tie(nullptr);
    int n; cin >> n;
    vector<ll> prefix(n+1);
    for (int i = 1; i <= n; ++i) { int x; cin >> x; prefix[i] = prefix[i-1] + x; }
    // split[len]: number of length-len windows whose average is <= the global
    // average (equivalently, the first window start whose average exceeds it).
    vector<int> split(n);
    for (int len = 1; len < n; ++len) {
        int lo = 0, hi = n - len + 1;
        while (lo < hi) {
            int mid = (lo + hi) / 2;
            // Cross-multiplied comparison in 128 bits to stay exact.
            if ((lll)(prefix[mid+len] - prefix[mid]) * n <= (lll)prefix[n] * len) lo = mid + 1;
            else hi = mid;
        }
        split[len] = lo;
    }
    // Currently chosen candidate per layer plus the global average.
    multiset<double> values;
    values.insert((double)prefix[n] / n);
    vector<pair<double, int>> events;   // (left candidate value, layer): pending flips
    vector<double> leftVal(n), rightVal(n);
    for (int len = 1; len < n; ++len) {
        int pos = split[len];
        bool hasLeft = pos > 0;           // predecessor window exists
        bool hasRight = pos + len <= n;   // successor window exists
        // Bug fix: the original computed both values unconditionally — at
        // pos == 0 it read prefix[-1] and at pos == n-len+1 it read
        // prefix[n+1], both out of bounds. Each is now guarded.
        if (hasLeft) leftVal[len] = (double)(prefix[pos+len-1] - prefix[pos-1]) / len;
        if (hasRight) rightVal[len] = (double)(prefix[pos+len] - prefix[pos]) / len;
        if (hasLeft && hasRight) {
            // Start on the left candidate; schedule a flip to the right one.
            values.insert(leftVal[len]);
            events.emplace_back(leftVal[len], len);
        } else if (hasLeft) {
            values.insert(leftVal[len]);   // no successor: left is the only choice
        } else {
            values.insert(rightVal[len]);  // no predecessor: right is the only choice
        }
    }
    // Sweep: flip layers left -> right in increasing candidate value,
    // tracking the minimum range seen.
    sort(events.begin(), events.end());
    double answer = *values.rbegin() - *values.begin();
    for (auto &[val, idx] : events) {
        values.erase(values.find(leftVal[idx]));
        values.insert(rightVal[idx]);
        answer = min(answer, *values.rbegin() - *values.begin());
    }
    cout << fixed << setprecision(10) << answer << '\n';
    return 0;
}

Related Articles

Understanding Strong and Weak References in Java

Strong References Strong references are the most prevalent type of object referencing in Java. When an object has a strong reference pointing to it, the garbage collector will not reclaim its memory. F...

Comprehensive Guide to SSTI Explained with Payload Bypass Techniques

Introduction Server-Side Template Injection (SSTI) is a vulnerability in web applications where user input is improperly handled within the template engine and executed on the server. This exploit can r...

Implement Image Upload Functionality for Django Integrated TinyMCE Editor

Django’s Admin panel is highly user-friendly, and pairing it with TinyMCE, an effective rich text editor, simplifies content management significantly. Combining the two is particularly useful for bloggi...

Leave a Comment

Anonymous

◎Feel free to join the discussion and share your thoughts.