:heavy_check_mark: Global Minimum Cut of Dynamic Star Augmented Graph (graph/flow/global-minimum-cut-of-dynamic-star-augmented-graph.hpp)

You are given a weighted undirected graph $G$ with $N$ vertices.

Let $H$ be the graph obtained by adding a new vertex $N$ and, for each vertex $i$, an edge of weight $a_i$ between vertex $i$ and vertex $N$.

This structure efficiently processes queries of the following form: change the weight of the edge between vertex $x_i$ and vertex $N$ in $H$ to $y_i$, then report the weight of the global minimum cut of $H$.
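For example, if $G$ consists of two vertices joined by an edge of weight $5$ and the star edges have weights $a_0 = 3$ and $a_1 = 4$, the cuts of $H$ isolate $\{0\}$ (weight $5 + 3 = 8$), $\{1\}$ (weight $5 + 4 = 9$), or $\{N\}$ (weight $3 + 4 = 7$), so the global minimum cut of $H$ has weight $7$.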

Constructor

GlobalMinimumCutofDynamicStarAugmentedGraph< T >(int n, const Edges<T> &es)

Initializes $H$ from the graph $G$ with $n$ vertices and edge set es. Every star edge between a vertex and $N$ initially has weight $0$.

Constraints

Each edge in es satisfies $0 \le \mathrm{from}, \mathrm{to} < n$, $\mathrm{from} \neq \mathrm{to}$, and $0 \le \mathrm{cost}$.

Complexity

update

T update(int v, T cost)

Changes the weight of the edge between vertex $v$ and vertex $N$ to cost, and returns the weight of the global minimum cut of $H$ after the change.
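A minimal usage sketch, assuming this header is available at the path below and is included after a typical competitive-programming prelude (the library code itself relies on using namespace std;); it reproduces the worked example above:

#include <bits/stdc++.h>
using namespace std;
#include "graph/flow/global-minimum-cut-of-dynamic-star-augmented-graph.hpp"

int main() {
  // G: vertices 0 and 1 joined by an edge of weight 5.
  Edges<long long> es;
  es.emplace_back(0, 1, 5);
  // H adds vertex N = 2; every star edge (i, N) starts with weight 0.
  GlobalMinimumCutofDynamicStarAugmentedGraph<long long> g(2, es);
  cout << g.update(0, 3) << "\n";  // 3: the cut isolating N has weight 3 + 0
  cout << g.update(1, 4) << "\n";  // 7: the cut isolating N has weight 3 + 4
}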

Constraints

$0 \le v < n$

Complexity

Depends on

Verified with

Code

#include "../../graph/tree/heavy-light-decomposition.hpp"
#include "../../structure/class/range-add-range-min.hpp"
#include "../../structure/segment-tree/lazy-segment-tree.hpp"
#include "../others/extreme-vertex-set.hpp"

template <typename T>
struct GlobalMinimumCutofDynamicStarAugmentedGraph {
 private:
  int n{};
  HeavyLightDecomposition<T> hld;
  vector<T> cur;

  LazySegmentTree<RangeAddRangeMin<T> > seg;

 public:
  GlobalMinimumCutofDynamicStarAugmentedGraph() = default;

  explicit GlobalMinimumCutofDynamicStarAugmentedGraph(int n,
                                                       const Edges<T> &es)
      : n(n),
        hld(extreme_vertex_set(n, es)),
        cur(n),
        seg(RangeAddRangeMin<T>(), 2 * n - 1) {
    // Root the extreme-vertex-set tree at its topmost node (index 2n - 2).
    hld.build((int)hld.size() - 1);
    // Each tree edge (parent -> child) stores the cut value of the vertex set
    // represented by the child; store it at the child's HLD position.
    vector<T> vs(2 * n - 1);
    for (int i = 0; i < 2 * n - 1; i++) {
      for (auto &e : hld[i]) {
        vs[hld.in[e.to]] = e.cost;
      }
    }
    seg.build(vs);
  }

  T update(int v, T cost) {
    assert(0 <= v and v < n);
    // Vertex v lies in exactly the vertex sets on the path from leaf v to the
    // root, so add the weight change to every position on that path (the root
    // position accumulates the weight of the cut that isolates N).
    hld.add(v, (int)hld.size() - 1,
            [&](int l, int r) { seg.apply(l, r, cost - cur[v]); });
    cur[v] = cost;
    // The global minimum cut of H is the minimum of all maintained cut values.
    return seg.all_prod();
  }
};
#line 2 "graph/tree/heavy-light-decomposition.hpp"

#line 2 "graph/graph-template.hpp"

template <typename T = int>
struct Edge {
  int from, to;
  T cost;
  int idx;

  Edge() = default;

  Edge(int from, int to, T cost = 1, int idx = -1)
      : from(from), to(to), cost(cost), idx(idx) {}

  operator int() const { return to; }
};

template <typename T = int>
struct Graph {
  vector<vector<Edge<T> > > g;
  int es;

  Graph() = default;

  explicit Graph(int n) : g(n), es(0) {}

  size_t size() const { return g.size(); }

  void add_directed_edge(int from, int to, T cost = 1) {
    g[from].emplace_back(from, to, cost, es++);
  }

  void add_edge(int from, int to, T cost = 1) {
    g[from].emplace_back(from, to, cost, es);
    g[to].emplace_back(to, from, cost, es++);
  }

  void read(int M, int padding = -1, bool weighted = false,
            bool directed = false) {
    for (int i = 0; i < M; i++) {
      int a, b;
      cin >> a >> b;
      a += padding;
      b += padding;
      T c = T(1);
      if (weighted) cin >> c;
      if (directed)
        add_directed_edge(a, b, c);
      else
        add_edge(a, b, c);
    }
  }

  inline vector<Edge<T> > &operator[](const int &k) { return g[k]; }

  inline const vector<Edge<T> > &operator[](const int &k) const { return g[k]; }
};

template <typename T = int>
using Edges = vector<Edge<T> >;
#line 4 "graph/tree/heavy-light-decomposition.hpp"

/**
 * @brief Heavy-Light-Decomposition (HL decomposition)
 * @see https://smijake3.hatenablog.com/entry/2019/09/15/200200
 */
template <typename T = int>
struct HeavyLightDecomposition : Graph<T> {
 public:
  using Graph<T>::Graph;
  using Graph<T>::g;
  vector<int> sz, in, out, head, rev, par, dep;

  void build(int root = 0) {
    sz.assign(g.size(), 0);
    in.assign(g.size(), 0);
    out.assign(g.size(), 0);
    head.assign(g.size(), 0);
    rev.assign(g.size(), 0);
    par.assign(g.size(), 0);
    dep.assign(g.size(), 0);
    dfs_sz(root, -1, 0);
    int t = 0;
    head[root] = root;
    dfs_hld(root, -1, t);
  }

  /* k: 0-indexed */
  int la(int v, int k) {
    while (1) {
      int u = head[v];
      if (in[v] - k >= in[u]) return rev[in[v] - k];
      k -= in[v] - in[u] + 1;
      v = par[u];
    }
  }

  int lca(int u, int v) const {
    for (;; v = par[head[v]]) {
      if (in[u] > in[v]) swap(u, v);
      if (head[u] == head[v]) return u;
    }
  }

  int dist(int u, int v) const { return dep[u] + dep[v] - 2 * dep[lca(u, v)]; }

  template <typename E, typename Q, typename F, typename S>
  E query(int u, int v, const E &ti, const Q &q, const F &f, const S &s,
          bool edge = false) {
    E l = ti, r = ti;
    for (;; v = par[head[v]]) {
      if (in[u] > in[v]) swap(u, v), swap(l, r);
      if (head[u] == head[v]) break;
      l = f(q(in[head[v]], in[v] + 1), l);
    }
    return s(f(q(in[u] + edge, in[v] + 1), l), r);
  }

  template <typename E, typename Q, typename F>
  E query(int u, int v, const E &ti, const Q &q, const F &f,
          bool edge = false) {
    return query(u, v, ti, q, f, f, edge);
  }

  template <typename Q>
  void add(int u, int v, const Q &q, bool edge = false) {
    for (;; v = par[head[v]]) {
      if (in[u] > in[v]) swap(u, v);
      if (head[u] == head[v]) break;
      q(in[head[v]], in[v] + 1);
    }
    q(in[u] + edge, in[v] + 1);
  }

  /* {parent, child} */
  vector<pair<int, int> > compress(vector<int> &remark) {
    auto cmp = [&](int a, int b) { return in[a] < in[b]; };
    sort(begin(remark), end(remark), cmp);
    remark.erase(unique(begin(remark), end(remark)), end(remark));
    int K = (int)remark.size();
    for (int k = 1; k < K; k++)
      remark.emplace_back(lca(remark[k - 1], remark[k]));
    sort(begin(remark), end(remark), cmp);
    remark.erase(unique(begin(remark), end(remark)), end(remark));
    vector<pair<int, int> > es;
    stack<int> st;
    for (auto &k : remark) {
      while (!st.empty() && out[st.top()] <= in[k]) st.pop();
      if (!st.empty()) es.emplace_back(st.top(), k);
      st.emplace(k);
    }
    return es;
  }

  explicit HeavyLightDecomposition(const Graph<T> &g) : Graph<T>(g) {}

 private:
  void dfs_sz(int idx, int p, int d) {
    dep[idx] = d;
    par[idx] = p;
    sz[idx] = 1;
    if (g[idx].size() && g[idx][0] == p) swap(g[idx][0], g[idx].back());
    for (auto &to : g[idx]) {
      if (to == p) continue;
      dfs_sz(to, idx, d + 1);
      sz[idx] += sz[to];
      if (sz[g[idx][0]] < sz[to]) swap(g[idx][0], to);
    }
  }

  void dfs_hld(int idx, int p, int &times) {
    in[idx] = times++;
    rev[in[idx]] = idx;
    for (auto &to : g[idx]) {
      if (to == p) continue;
      head[to] = (g[idx][0] == to ? head[idx] : to);
      dfs_hld(to, idx, times);
    }
    out[idx] = times;
  }
};
#line 1 "structure/class/range-add-range-min.hpp"
template <typename T>
struct RangeAddRangeMin {
  using S = T;
  using F = T;
  static constexpr S op(const S &a, const S &b) { return min(a, b); }
  static constexpr S e() { return numeric_limits<T>::max(); }
  static constexpr F mapping(const S &x, const F &f) { return x + f; }
  static constexpr F composition(const F &f, const F &g) { return f + g; }
  static constexpr F id() { return {0}; }
};
#line 2 "structure/class/acted-monoid.hpp"

template <typename S2, typename Op, typename E, typename F2, typename Mapping,
          typename Composition, typename Id>
struct LambdaActedMonoid {
  using S = S2;
  using F = F2;

  S op(const S &a, const S &b) const { return _op(a, b); }

  S e() const { return _e(); }

  S mapping(const S &x, const F &f) const { return _mapping(x, f); }

  F composition(const F &f, const F &g) const { return _composition(f, g); }

  F id() const { return _id(); }

  LambdaActedMonoid(Op _op, E _e, Mapping _mapping, Composition _composition,
                    Id _id)
      : _op(_op),
        _e(_e),
        _mapping(_mapping),
        _composition(_composition),
        _id(_id) {}

 private:
  Op _op;

  E _e;

  Mapping _mapping;

  Composition _composition;

  Id _id;
};

template <typename Op, typename E, typename Mapping, typename Composition,
          typename Id>
LambdaActedMonoid(Op _op, E _e, Mapping _mapping, Composition _composition,
                  Id _id)
    -> LambdaActedMonoid<decltype(_e()), Op, E, decltype(_id()), Mapping,
                         Composition, Id>;

/*
struct ActedMonoid {
  using S = ?;
  using F = ?;
  static constexpr S op(const S& a, const S& b) {}
  static constexpr S e() {}
  static constexpr S mapping(const S &x, const F &f) {}
  static constexpr F composition(const F &f, const F &g) {}
  static constexpr F id() {}
};
*/
#line 2 "structure/segment-tree/lazy-segment-tree.hpp"

template <typename ActedMonoid>
struct LazySegmentTree {
  using S = typename ActedMonoid::S;
  using F = typename ActedMonoid::F;

 private:
  ActedMonoid m;

  int n{}, sz{}, height{};

  vector<S> data;

  vector<F> lazy;

  inline void update(int k) {
    data[k] = m.op(data[2 * k + 0], data[2 * k + 1]);
  }

  inline void all_apply(int k, const F &x) {
    data[k] = m.mapping(data[k], x);
    if (k < sz) lazy[k] = m.composition(lazy[k], x);
  }

  inline void propagate(int k) {
    if (lazy[k] != m.id()) {
      all_apply(2 * k + 0, lazy[k]);
      all_apply(2 * k + 1, lazy[k]);
      lazy[k] = m.id();
    }
  }

 public:
  LazySegmentTree() = default;

  explicit LazySegmentTree(ActedMonoid m, int n) : m(m), n(n) {
    sz = 1;
    height = 0;
    while (sz < n) sz <<= 1, height++;
    data.assign(2 * sz, m.e());
    lazy.assign(2 * sz, m.id());
  }

  explicit LazySegmentTree(ActedMonoid m, const vector<S> &v)
      : LazySegmentTree(m, v.size()) {
    build(v);
  }

  void build(const vector<S> &v) {
    assert(n == (int)v.size());
    for (int k = 0; k < n; k++) data[k + sz] = v[k];
    for (int k = sz - 1; k > 0; k--) update(k);
  }

  void set(int k, const S &x) {
    k += sz;
    for (int i = height; i > 0; i--) propagate(k >> i);
    data[k] = x;
    for (int i = 1; i <= height; i++) update(k >> i);
  }

  S get(int k) {
    k += sz;
    for (int i = height; i > 0; i--) propagate(k >> i);
    return data[k];
  }

  S operator[](int k) { return get(k); }

  S prod(int l, int r) {
    if (l >= r) return m.e();
    l += sz;
    r += sz;
    for (int i = height; i > 0; i--) {
      if (((l >> i) << i) != l) propagate(l >> i);
      if (((r >> i) << i) != r) propagate((r - 1) >> i);
    }
    S L = m.e(), R = m.e();
    for (; l < r; l >>= 1, r >>= 1) {
      if (l & 1) L = m.op(L, data[l++]);
      if (r & 1) R = m.op(data[--r], R);
    }
    return m.op(L, R);
  }

  S all_prod() const { return data[1]; }

  void apply(int k, const F &f) {
    k += sz;
    for (int i = height; i > 0; i--) propagate(k >> i);
    data[k] = m.mapping(data[k], f);
    for (int i = 1; i <= height; i++) update(k >> i);
  }

  void apply(int l, int r, const F &f) {
    if (l >= r) return;
    l += sz;
    r += sz;
    for (int i = height; i > 0; i--) {
      if (((l >> i) << i) != l) propagate(l >> i);
      if (((r >> i) << i) != r) propagate((r - 1) >> i);
    }
    {
      int l2 = l, r2 = r;
      for (; l < r; l >>= 1, r >>= 1) {
        if (l & 1) all_apply(l++, f);
        if (r & 1) all_apply(--r, f);
      }
      l = l2, r = r2;
    }
    for (int i = 1; i <= height; i++) {
      if (((l >> i) << i) != l) update(l >> i);
      if (((r >> i) << i) != r) update((r - 1) >> i);
    }
  }

  template <typename C>
  int find_first(int l, const C &check) {
    if (l >= n) return n;
    l += sz;
    for (int i = height; i > 0; i--) propagate(l >> i);
    S sum = m.e();
    do {
      while ((l & 1) == 0) l >>= 1;
      if (check(m.op(sum, data[l]))) {
        while (l < sz) {
          propagate(l);
          l <<= 1;
          auto nxt = m.op(sum, data[l]);
          if (not check(nxt)) {
            sum = nxt;
            l++;
          }
        }
        return l + 1 - sz;
      }
      sum = m.op(sum, data[l++]);
    } while ((l & -l) != l);
    return n;
  }

  template <typename C>
  int find_last(int r, const C &check) {
    if (r <= 0) return -1;
    r += sz;
    for (int i = height; i > 0; i--) propagate((r - 1) >> i);
    S sum = m.e();
    do {
      r--;
      while (r > 1 and (r & 1)) r >>= 1;
      if (check(m.op(data[r], sum))) {
        while (r < sz) {
          propagate(r);
          r = (r << 1) + 1;
          auto nxt = m.op(data[r], sum);
          if (not check(nxt)) {
            sum = nxt;
            r--;
          }
        }
        return r - sz;
      }
      sum = m.op(data[r], sum);
    } while ((r & -r) != r);
    return -1;
  }
};
#line 2 "graph/others/extreme-vertex-set.hpp"

/**
 * Builds a rooted tree on 2n - 1 nodes: nodes 0..n-1 are the input vertices,
 * and node n + k is created in phase k by merging the last two remaining
 * components x, y of that phase's ordering. The directed edge from the new
 * node to each of x and y stores the total weight of input edges leaving that
 * component.
 */
template <typename T>
Graph<T> extreme_vertex_set(int n, const Edges<T> &es) {
  for (auto &e : es) {
    assert(0 <= e.from and e.from < n);
    assert(0 <= e.to and e.to < n);
    assert(e.from != e.to);
    assert(0 <= e.cost);
  }
  using pi = pair<int, T>;
  Graph<T> res(2 * n - 1);
  vector<int> uf(n);
  vector<T> cur(2 * n - 1);
  iota(uf.begin(), uf.end(), 0);
  vector<bool> leaf(2 * n - 1);
  for (int i = 0; i < n; i++) {
    leaf[i] = true;
  }
  using qi = pair<T, int>;
  priority_queue<qi, vector<qi>, greater<> > que;
  for (int phase = 0; phase < n - 1; phase++) {
    Graph<T> g(2 * n - 1);
    vector<T> cost(2 * n - 1);
    for (auto e : es) {
      e.from = uf[e.from];
      e.to = uf[e.to];
      if (e.from != e.to) {
        cost[e.from] += e.cost;
        cost[e.to] += e.cost;
        g.add_edge(e.from, e.to, e.cost);
      }
    }
    for (int i = 0; i < 2 * n - 1; i++) {
      if (leaf[i]) {
        cur[i] = cost[i];
        que.emplace(cost[i], i);
      }
    }
    int x = -1, y = -1;
    while (not que.empty()) {
      auto [c, v] = que.top();
      que.pop();
      if (cur[v] == -1) {
        continue;
      }
      cur[v] = -1;
      y = x;
      x = v;
      for (auto &e : g[v]) {
        if (cur[e.to] != -1) {
          cur[e.to] -= e.cost;
          que.emplace(cur[e.to], e.to);
        }
      }
    }
    int z = n + phase;
    res.add_directed_edge(z, x, cost[x]);
    res.add_directed_edge(z, y, cost[y]);
    for (int i = 0; i < n; i++) {
      if (uf[i] == x or uf[i] == y) {
        uf[i] = z;
      }
    }
    leaf[x] = false;
    leaf[y] = false;
    leaf[z] = true;
  }
  return res;
}
#line 5 "graph/flow/global-minimum-cut-of-dynamic-star-augmented-graph.hpp"

template <typename T>
struct GlobalMinimumCutofDynamicStarAugmentedGraph {
 private:
  int n{};
  HeavyLightDecomposition<T> hld;
  vector<T> cur;

  LazySegmentTree<RangeAddRangeMin<T> > seg;

 public:
  GlobalMinimumCutofDynamicStarAugmentedGraph() = default;

  explicit GlobalMinimumCutofDynamicStarAugmentedGraph(int n,
                                                       const Edges<T> &es)
      : n(n),
        hld(extreme_vertex_set(n, es)),
        cur(n),
        seg(RangeAddRangeMin<T>(), 2 * n - 1) {
    hld.build((int)hld.size() - 1);
    vector<T> vs(2 * n - 1);
    for (int i = 0; i < 2 * n - 1; i++) {
      for (auto &e : hld[i]) {
        vs[hld.in[e.to]] = e.cost;
      }
    }
    seg.build(vs);
  }

  T update(int v, T cost) {
    assert(0 <= v and v < n);
    hld.add(v, (int)hld.size() - 1,
            [&](int l, int r) { seg.apply(l, r, cost - cur[v]); });
    cur[v] = cost;
    return seg.all_prod();
  }
};