IOI 2016 - Aliens
On an m×m grid, n points of interest must be covered by at most k axis-aligned square photos whose diagonals lie on the main diagonal. The cost of a side-s photo is s². Overlapping areas between consecutive photos must be subtracted to avoid double-counting. Minimise the total cost.
Source-first archive entry
This page is built from the copied files in competitive_programming/ioi/2016/aliens. Edit
competitive_programming/ioi/2016/aliens/solution.tex to update the written solution and
competitive_programming/ioi/2016/aliens/solution.cpp to update the implementation.
The website does not replace those files with hand-maintained HTML. It reads the copied source tree during the build and exposes the exact files below.
Problem Statement
Rendered from the "Problem Summary" section inside solution.tex because this entry has no separate statement file.
On an $m \times m$ grid, $n$ points of interest must be covered by at most $k$ axis-aligned square photos whose diagonals lie on the main diagonal. The cost of a side-$s$ photo is $s^2$. Overlapping areas between consecutive photos must be subtracted to avoid double-counting. Minimise the total cost.
Editorial
Rendered from the copied solution.tex file. The original TeX source remains
available below.
Solution
Reduction to 1D Intervals
Each point $(r_i, c_i)$ induces a constraint: a square starting at position $a \le \min(r_i, c_i)$ with side $s \ge \max(r_i, c_i) - a + 1$. Define $l_i = \min(r_i, c_i)$ and $u_i = \max(r_i, c_i) + 1$. After removing dominated intervals ($[l_j, u_j) \subseteq [l_i, u_i)$) and sorting, we obtain intervals with $l_1 < l_2 < \cdots$ and $u_1 < u_2 < \cdots$.
DP with Lambda Optimisation (WQS Binary Search)
Instead of constraining the number of photos to $\le k$, we add a penalty $\lambda$ per photo: \[ f(\lambda) = \min_{\text{partitions}} \left(\sum_j \text{cost}(j) + \lambda \cdot (\text{number of photos})\right). \] Binary search on $\lambda$ to find the value where the optimal count equals $k$.
Convex Hull Trick
Define $\text{ov}[i] = \max(0, u_{i-1} - l_i)$ for $i \ge 1$ and $\text{ov}[0] = 0$. The DP is: \[ \text{dp}[i] = \min_{0 \le p < i} \left\{\text{dp}[p] + (u_{i-1} - l_p)^2 - \text{ov}[p]^2 + \lambda\right\}. \] Expanding $(u_{i-1} - l_p)^2 = u_{i-1}^2 - 2 u_{i-1} l_p + l_p^2$, this becomes: \[ \text{dp}[i] = u_{i-1}^2 + \lambda + \min_p \left\{(\text{dp}[p] + l_p^2 - \text{ov}[p]^2) - 2 u_{i-1} l_p\right\}. \] This has the form $\min_p (m_p \cdot x + b_p)$ with $m_p = -2l_p$ and $b_p = \text{dp}[p] + l_p^2 - \text{ov}[p]^2$, solvable with the convex hull trick in $O(n)$ per $\lambda$ evaluation.
C++ Implementation
#include <bits/stdc++.h>
using namespace std;
typedef long long ll;
typedef __int128 lll;

// Half-open interval [l, u) on the main diagonal induced by one point:
// a photo occupying [a, a+s) covers the point iff a <= l and a + s >= u.
struct Point { int l, u; };

// DP transition encoded as the line y = m*x + b; cnt records how many
// photos the originating DP state used (propagated for the WQS search).
struct Line {
    ll m, b;
    int cnt;
    // const-qualified (fix: was non-const, inconsistent with solution.cpp).
    ll eval(ll x) const { return m * x + b; }
};

// Monotone convex hull trick: lines arrive with strictly decreasing
// slopes and queries come with non-decreasing x, so a deque suffices.
struct CHT {
    deque<Line> hull;
    // l2 never attains the lower envelope once l1 and l3 are present.
    // Cross-multiplied intersection comparison; __int128 avoids overflow.
    bool bad(Line l1, Line l2, Line l3) {
        return (lll)(l3.b - l1.b) * (l1.m - l2.m) <=
               (lll)(l2.b - l1.b) * (l1.m - l3.m);
    }
    void add(Line l) {
        while (hull.size() >= 2 &&
               bad(hull[hull.size()-2], hull[hull.size()-1], l))
            hull.pop_back();
        hull.push_back(l);
    }
    // Minimum line value at x plus the photo count of the minimizing line.
    pair<ll,int> query(ll x) {
        while (hull.size() >= 2 && hull[0].eval(x) >= hull[1].eval(x))
            hull.pop_front();
        return {hull[0].eval(x), hull[0].cnt};
    }
};

// Dominated-free intervals, l and u both strictly increasing after filtering.
vector<Point> pts;

// Lagrangian-relaxed DP: every photo costs an extra `lambda`.
// Returns {minimum penalized cost, number of photos in that optimum}.
// dp[i] = min over p of dp[p] + (u_{i-1} - l_p)^2 - ov[p]^2 + lambda,
// evaluated as a lower envelope of lines queried at x = u_{i-1}.
pair<ll,int> solve(ll lambda) {
    int sz = pts.size();
    vector<ll> dp(sz + 1);
    vector<int> cnt(sz + 1);
    // ov[i]: overlap of a photo ending at interval i-1 with interval i.
    vector<ll> ov(sz, 0);
    for (int i = 1; i < sz; i++)
        ov[i] = max(0, pts[i-1].u - pts[i].l);
    dp[0] = 0; cnt[0] = 0;
    CHT cht;
    cht.add({-2LL * pts[0].l,
             (ll)pts[0].l * pts[0].l, 0});
    for (int i = 1; i <= sz; i++) {
        ll x = pts[i-1].u;
        auto [val, c] = cht.query(x);
        dp[i] = val + x * x + lambda;
        cnt[i] = c + 1;
        if (i < sz) {
            ll M = -2LL * pts[i].l;
            ll B = dp[i] + (ll)pts[i].l * pts[i].l
                 - (ll)ov[i] * ov[i];
            cht.add({M, B, cnt[i]});
        }
    }
    return {dp[sz], cnt[sz]};
}

// Minimum total cost of covering all n points with at most k photos.
long long take_photos(int n, int m, int k, int r[], int c[]) {
    vector<Point> raw(n);
    for (int i = 0; i < n; i++) {
        raw[i].l = min(r[i], c[i]);
        raw[i].u = max(r[i], c[i]) + 1;
    }
    // Sort by l; on ties keep the widest interval first so the narrower
    // duplicates are filtered by the containment test below.
    sort(raw.begin(), raw.end(), [](const Point &a, const Point &b) {
        return a.l < b.l || (a.l == b.l && a.u > b.u);
    });
    // Drop intervals contained in an already-kept one. Since kept l's are
    // strictly increasing, p is redundant exactly when p.u <= pts.back().u.
    // BUG FIX: the previous version also popped kept intervals whenever
    // pts.back().u <= p.u; a kept interval with a strictly smaller l is NOT
    // contained in p, so popping it silently discarded constraints and
    // produced wrong (too small) answers.
    pts.clear();
    for (auto &p : raw) {
        if (pts.empty() || p.u > pts.back().u)
            pts.push_back(p);
    }
    // WQS / Aliens binary search on the per-photo penalty lambda; the
    // relaxed optimum's photo count is non-increasing in lambda.
    ll lo = 0, hi = (ll)m * m;
    ll ans = 0;
    while (lo <= hi) {
        ll mid = (lo + hi) / 2;
        auto [val, cnt] = solve(mid);
        if (cnt <= k) {
            ans = val - (ll)k * mid;  // undo the Lagrangian penalty
            hi = mid - 1;
        } else {
            lo = mid + 1;
        }
    }
    return ans;
}
Complexity Analysis
Time: $O(n \log n + n \log C)$ where $C = m^2$ is the binary-search range for $\lambda$. Each DP evaluation is $O(n)$ with the convex hull trick.
Space: $O(n)$.
The lambda trick (WQS binary search / "alien trick") converts the $k$-constrained problem into an unconstrained Lagrangian relaxation, binary-searching for the right penalty.
Code
Exact copied C++ implementation from solution.cpp.
#include <bits/stdc++.h>
using namespace std;
typedef long long ll;
typedef __int128 lll;
/*
 * IOI 2016 - Aliens
 *
 * Cover N points with at most K diagonal-aligned squares, minimize total area.
 *
 * 1) Transform (r,c) -> interval [min(r,c), max(r,c)+1]
 * 2) Remove dominated intervals
 * 3) Alien's trick (Lagrangian relaxation) on number of squares
 * 4) For fixed penalty lambda, DP + Convex Hull Trick in O(n)
 *
 * dp[i] = min over 0<=p<i of { dp[p] + (U[i-1] - L[p])^2 - OV[p]^2 + lambda }
 * where OV[p] = max(0, U[p-1] - L[p]) for p>=1, OV[0]=0
 *
 * CHT form: for transition from p, define line with
 *   slope = -2*L[p], intercept = dp[p] + L[p]^2 - OV[p]^2
 * query at x = U[i-1], then dp[i] = result + x^2 + lambda
 */
struct Point { ll l, u; };

// DP transition line y = m*x + b; cnt = photo count of the source DP state.
struct Line {
    ll m, b;
    int cnt;
    ll eval(ll x) const { return m * x + b; }
};

// Monotone CHT (lines added with decreasing slopes, queries with increasing x)
struct CHT {
    vector<Line> hull;
    int ptr;  // query pointer; valid because query x is non-decreasing
    CHT() : ptr(0) {}
    bool bad(const Line &l1, const Line &l2, const Line &l3) {
        // l2 is redundant if intersection of l1,l3 is <= intersection of l1,l2
        // (l3.b - l1.b) * (l1.m - l2.m) <= (l2.b - l1.b) * (l1.m - l3.m)
        return (lll)(l3.b - l1.b) * (l1.m - l2.m) <=
               (lll)(l2.b - l1.b) * (l1.m - l3.m);
    }
    void addLine(Line l) {
        while ((int)hull.size() >= 2 &&
               bad(hull[hull.size()-2], hull[hull.size()-1], l))
            hull.pop_back();
        hull.push_back(l);
    }
    pair<ll, int> query(ll x) {
        // pointer-based query for increasing x; clamp in case lines were
        // popped after the pointer advanced
        if (ptr >= (int)hull.size()) ptr = (int)hull.size() - 1;
        while (ptr + 1 < (int)hull.size() &&
               hull[ptr+1].eval(x) <= hull[ptr].eval(x))
            ptr++;
        return {hull[ptr].eval(x), hull[ptr].cnt};
    }
};

// Dominated-free intervals (l and u strictly increasing), set by take_photos.
static vector<Point> pts;

// solve(lambda) returns {dp_value, count_of_squares}
// dp_value already includes count * lambda
pair<ll, int> solve(ll lambda) {
    int sz = (int)pts.size();
    // OV[i] = max(0, U[i-1] - L[i]) for i>=1, OV[0] = 0
    vector<ll> ov(sz + 1, 0);
    for (int i = 1; i < sz; i++) {
        ll overlap = pts[i-1].u - pts[i].l;
        ov[i] = max(0LL, overlap);
    }
    vector<ll> dp(sz + 1, 0);
    vector<int> cnt(sz + 1, 0);
    CHT cht;
    // Add line for p=0: slope = -2*L[0], intercept = dp[0] + L[0]^2 - OV[0]^2
    // OV[0] = 0, dp[0] = 0
    cht.addLine({-2 * pts[0].l, pts[0].l * pts[0].l, 0});
    for (int i = 1; i <= sz; i++) {
        ll x = pts[i-1].u;
        auto [val, c] = cht.query(x);
        dp[i] = val + x * x + lambda;
        cnt[i] = c + 1;
        if (i < sz) {
            ll M = -2 * pts[i].l;
            ll B = dp[i] + pts[i].l * pts[i].l - ov[i] * ov[i];
            cht.addLine({M, B, cnt[i]});
        }
    }
    return {dp[sz], cnt[sz]};
}

// IOI grader entry point: minimum total photo cost for N points on an
// M x M grid using at most K photos.
long long take_photos(int N, int M, int K, int r[], int c[]) {
    // Step 1: transform to intervals
    vector<Point> raw(N);
    for (int i = 0; i < N; i++) {
        ll lo = min(r[i], c[i]);
        ll hi = max(r[i], c[i]) + 1;
        raw[i] = {lo, hi};
    }
    // Step 2: sort by left endpoint, break ties by larger right endpoint first
    sort(raw.begin(), raw.end(), [](const Point &a, const Point &b) {
        return a.l < b.l || (a.l == b.l && a.u > b.u);
    });
    // Remove dominated intervals: after sorting, keep only intervals where
    // u is strictly increasing (since l is non-decreasing)
    pts.clear();
    for (auto &p : raw) {
        // Pop a previous interval only if it is truly contained in p
        // (requires BOTH u <= p.u and l >= p.l).
        while (!pts.empty() && pts.back().u <= p.u && pts.back().l >= p.l)
            pts.pop_back();
        if (pts.empty() || p.u > pts.back().u)
            pts.push_back(p);
    }
    int sz = (int)pts.size();
    if (K >= sz) {
        // The photo budget is not binding: one square per interval is
        // optimal; cost = sum of (u-l)^2 minus squared pairwise overlaps.
        ll ans = 0;
        for (int i = 0; i < sz; i++) {
            ll side = pts[i].u - pts[i].l;
            ans += side * side;
            if (i > 0) {
                ll overlap = max(0LL, pts[i-1].u - pts[i].l);
                ans -= overlap * overlap;
            }
        }
        return ans;
    }
    // Step 3: Alien's trick - binary search on lambda.
    // Higher lambda penalizes more squares, so the optimal count is
    // non-increasing in lambda; find the smallest lambda with count <= K.
    ll lo = 0, hi = (ll)M * M;
    while (lo <= hi) {
        ll mid = lo + (hi - lo) / 2;
        int used = solve(mid).second;
        if (used <= K)
            hi = mid - 1;  // feasible, try a smaller penalty
        else
            lo = mid + 1;  // too many squares, increase the penalty
    }
    // lo is now the smallest lambda achieving count <= K. By convexity of
    // the optimal cost in the number of squares, the K-constrained optimum
    // is f(K) = g(lo) - K * lo, where g(lambda) is the relaxed optimum
    // (which already includes count * lambda).
    return solve(lo).first - (ll)K * lo;
}
// Standalone driver: reads "N M K" then N lines of "r c" from stdin and
// prints the minimum total cost.
int main() {
    int N, M, K;
    // Check scanf results: the old code used N/M/K uninitialized (UB) on
    // malformed or truncated input.
    if (scanf("%d %d %d", &N, &M, &K) != 3) return 1;
    vector<int> r(N), c(N);
    for (int i = 0; i < N; i++)
        if (scanf("%d %d", &r[i], &c[i]) != 2) return 1;
    printf("%lld\n", take_photos(N, M, K, r.data(), c.data()));
    return 0;
}
Source Files
Exact copied source-of-truth files. Edit solution.tex for the write-up and solution.cpp for the implementation.
\documentclass[11pt,a4paper]{article}
\usepackage[utf8]{inputenc}
\usepackage[margin=1in]{geometry}
\usepackage{amsmath,amssymb,amsthm}
\usepackage{listings}
\usepackage{xcolor}
\usepackage{hyperref}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}[theorem]{Lemma}
\lstset{
language=C++,
basicstyle=\ttfamily\small,
keywordstyle=\color{blue}\bfseries,
commentstyle=\color{gray},
stringstyle=\color{red},
numbers=left,
numberstyle=\tiny\color{gray},
breaklines=true,
frame=single,
tabsize=4,
showstringspaces=false
}
\title{IOI 2016 -- Aliens}
\author{}
\date{}
\begin{document}
\maketitle
\section{Problem Summary}
On an $m \times m$ grid, $n$ points of interest must be covered by at most $k$ axis-aligned square photos whose diagonals lie on the main diagonal. The cost of a side-$s$ photo is $s^2$. Overlapping areas between consecutive photos must be subtracted to avoid double-counting. Minimise the total cost.
\section{Solution}
\subsection{Reduction to 1D Intervals}
Each point $(r_i, c_i)$ induces a constraint: a square starting at position $a \le \min(r_i, c_i)$ with side $s \ge \max(r_i, c_i) - a + 1$. Define $l_i = \min(r_i, c_i)$ and $u_i = \max(r_i, c_i) + 1$. After removing dominated intervals ($[l_j, u_j) \subseteq [l_i, u_i)$) and sorting, we obtain intervals with $l_1 < l_2 < \cdots$ and $u_1 < u_2 < \cdots$.
\subsection{DP with Lambda Optimisation (WQS Binary Search)}
Instead of constraining the number of photos to $\le k$, we add a penalty $\lambda$ per photo:
\[
f(\lambda) = \min_{\text{partitions}} \left(\sum_j \text{cost}(j) + \lambda \cdot (\text{number of photos})\right).
\]
Binary search on $\lambda$ to find the value where the optimal count equals $k$.
\subsection{Convex Hull Trick}
Define $\text{ov}[i] = \max(0, u_{i-1} - l_i)$ for $i \ge 1$ and $\text{ov}[0] = 0$. The DP is:
\[
\text{dp}[i] = \min_{0 \le p < i} \left\{\text{dp}[p] + (u_{i-1} - l_p)^2 - \text{ov}[p]^2 + \lambda\right\}.
\]
Expanding $(u_{i-1} - l_p)^2 = u_{i-1}^2 - 2 u_{i-1} l_p + l_p^2$, this becomes:
\[
\text{dp}[i] = u_{i-1}^2 + \lambda + \min_p \left\{(\text{dp}[p] + l_p^2 - \text{ov}[p]^2) - 2 u_{i-1} l_p\right\}.
\]
This has the form $\min_p (m_p \cdot x + b_p)$ with $m_p = -2l_p$ and $b_p = \text{dp}[p] + l_p^2 - \text{ov}[p]^2$, solvable with the convex hull trick in $O(n)$ per $\lambda$ evaluation.
\section{C++ Implementation}
\begin{lstlisting}
#include <bits/stdc++.h>
using namespace std;
typedef long long ll;
typedef __int128 lll;

// Half-open interval [l, u) on the main diagonal induced by one point:
// a photo occupying [a, a+s) covers the point iff a <= l and a + s >= u.
struct Point { int l, u; };

// DP transition encoded as the line y = m*x + b; cnt records how many
// photos the originating DP state used (propagated for the WQS search).
struct Line {
    ll m, b;
    int cnt;
    ll eval(ll x) const { return m * x + b; }
};

// Monotone convex hull trick: lines arrive with strictly decreasing
// slopes and queries come with non-decreasing x, so a deque suffices.
struct CHT {
    deque<Line> hull;
    // l2 never attains the lower envelope once l1 and l3 are present.
    // Cross-multiplied intersection comparison; __int128 avoids overflow.
    bool bad(Line l1, Line l2, Line l3) {
        return (lll)(l3.b - l1.b) * (l1.m - l2.m) <=
               (lll)(l2.b - l1.b) * (l1.m - l3.m);
    }
    void add(Line l) {
        while (hull.size() >= 2 &&
               bad(hull[hull.size()-2], hull[hull.size()-1], l))
            hull.pop_back();
        hull.push_back(l);
    }
    // Minimum line value at x plus the photo count of the minimizing line.
    pair<ll,int> query(ll x) {
        while (hull.size() >= 2 && hull[0].eval(x) >= hull[1].eval(x))
            hull.pop_front();
        return {hull[0].eval(x), hull[0].cnt};
    }
};

// Dominated-free intervals, l and u both strictly increasing after filtering.
vector<Point> pts;

// Lagrangian-relaxed DP: every photo costs an extra `lambda`.
// Returns {minimum penalized cost, number of photos in that optimum}.
pair<ll,int> solve(ll lambda) {
    int sz = pts.size();
    vector<ll> dp(sz + 1);
    vector<int> cnt(sz + 1);
    // ov[i]: overlap of a photo ending at interval i-1 with interval i.
    vector<ll> ov(sz, 0);
    for (int i = 1; i < sz; i++)
        ov[i] = max(0, pts[i-1].u - pts[i].l);
    dp[0] = 0; cnt[0] = 0;
    CHT cht;
    cht.add({-2LL * pts[0].l,
             (ll)pts[0].l * pts[0].l, 0});
    for (int i = 1; i <= sz; i++) {
        ll x = pts[i-1].u;
        auto [val, c] = cht.query(x);
        dp[i] = val + x * x + lambda;
        cnt[i] = c + 1;
        if (i < sz) {
            ll M = -2LL * pts[i].l;
            ll B = dp[i] + (ll)pts[i].l * pts[i].l
                 - (ll)ov[i] * ov[i];
            cht.add({M, B, cnt[i]});
        }
    }
    return {dp[sz], cnt[sz]};
}

// Minimum total cost of covering all n points with at most k photos.
long long take_photos(int n, int m, int k, int r[], int c[]) {
    vector<Point> raw(n);
    for (int i = 0; i < n; i++) {
        raw[i].l = min(r[i], c[i]);
        raw[i].u = max(r[i], c[i]) + 1;
    }
    // Sort by l; on ties keep the widest interval first so the narrower
    // duplicates are filtered by the containment test below.
    sort(raw.begin(), raw.end(), [](const Point &a, const Point &b) {
        return a.l < b.l || (a.l == b.l && a.u > b.u);
    });
    // Drop intervals contained in an already-kept one. Since kept l's are
    // strictly increasing, p is redundant exactly when p.u <= pts.back().u.
    // BUG FIX: the previous version also popped kept intervals whenever
    // pts.back().u <= p.u; a kept interval with a strictly smaller l is NOT
    // contained in p, so popping it silently discarded constraints.
    pts.clear();
    for (auto &p : raw) {
        if (pts.empty() || p.u > pts.back().u)
            pts.push_back(p);
    }
    // WQS / Aliens binary search on the per-photo penalty lambda.
    ll lo = 0, hi = (ll)m * m;
    ll ans = 0;
    while (lo <= hi) {
        ll mid = (lo + hi) / 2;
        auto [val, cnt] = solve(mid);
        if (cnt <= k) {
            ans = val - (ll)k * mid;  // undo the Lagrangian penalty
            hi = mid - 1;
        } else {
            lo = mid + 1;
        }
    }
    return ans;
}
\end{lstlisting}
\section{Complexity Analysis}
\begin{itemize}
\item \textbf{Time}: $O(n \log n + n \log C)$ where $C = m^2$ is the binary-search range for $\lambda$. Each DP evaluation is $O(n)$ with the convex hull trick.
\item \textbf{Space}: $O(n)$.
\end{itemize}
The lambda trick (WQS binary search / ``alien trick'') converts the $k$-constrained problem into an unconstrained Lagrangian relaxation, binary-searching for the right penalty.
\end{document}
#include <bits/stdc++.h>
using namespace std;
typedef long long ll;
typedef __int128 lll;
/*
 * IOI 2016 - Aliens
 *
 * Cover N points with at most K diagonal-aligned squares, minimize total area.
 *
 * 1) Transform (r,c) -> interval [min(r,c), max(r,c)+1]
 * 2) Remove dominated intervals
 * 3) Alien's trick (Lagrangian relaxation) on number of squares
 * 4) For fixed penalty lambda, DP + Convex Hull Trick in O(n)
 *
 * dp[i] = min over 0<=p<i of { dp[p] + (U[i-1] - L[p])^2 - OV[p]^2 + lambda }
 * where OV[p] = max(0, U[p-1] - L[p]) for p>=1, OV[0]=0
 *
 * CHT form: for transition from p, define line with
 *   slope = -2*L[p], intercept = dp[p] + L[p]^2 - OV[p]^2
 * query at x = U[i-1], then dp[i] = result + x^2 + lambda
 */
struct Point { ll l, u; };

// DP transition line y = m*x + b; cnt = photo count of the source DP state.
struct Line {
    ll m, b;
    int cnt;
    ll eval(ll x) const { return m * x + b; }
};

// Monotone CHT (lines added with decreasing slopes, queries with increasing x)
struct CHT {
    vector<Line> hull;
    int ptr;  // query pointer; valid because query x is non-decreasing
    CHT() : ptr(0) {}
    bool bad(const Line &l1, const Line &l2, const Line &l3) {
        // l2 is redundant if intersection of l1,l3 is <= intersection of l1,l2
        // (l3.b - l1.b) * (l1.m - l2.m) <= (l2.b - l1.b) * (l1.m - l3.m)
        return (lll)(l3.b - l1.b) * (l1.m - l2.m) <=
               (lll)(l2.b - l1.b) * (l1.m - l3.m);
    }
    void addLine(Line l) {
        while ((int)hull.size() >= 2 &&
               bad(hull[hull.size()-2], hull[hull.size()-1], l))
            hull.pop_back();
        hull.push_back(l);
    }
    pair<ll, int> query(ll x) {
        // pointer-based query for increasing x; clamp in case lines were
        // popped after the pointer advanced
        if (ptr >= (int)hull.size()) ptr = (int)hull.size() - 1;
        while (ptr + 1 < (int)hull.size() &&
               hull[ptr+1].eval(x) <= hull[ptr].eval(x))
            ptr++;
        return {hull[ptr].eval(x), hull[ptr].cnt};
    }
};

// Dominated-free intervals (l and u strictly increasing), set by take_photos.
static vector<Point> pts;

// solve(lambda) returns {dp_value, count_of_squares}
// dp_value already includes count * lambda
pair<ll, int> solve(ll lambda) {
    int sz = (int)pts.size();
    // OV[i] = max(0, U[i-1] - L[i]) for i>=1, OV[0] = 0
    vector<ll> ov(sz + 1, 0);
    for (int i = 1; i < sz; i++) {
        ll overlap = pts[i-1].u - pts[i].l;
        ov[i] = max(0LL, overlap);
    }
    vector<ll> dp(sz + 1, 0);
    vector<int> cnt(sz + 1, 0);
    CHT cht;
    // Add line for p=0: slope = -2*L[0], intercept = dp[0] + L[0]^2 - OV[0]^2
    // OV[0] = 0, dp[0] = 0
    cht.addLine({-2 * pts[0].l, pts[0].l * pts[0].l, 0});
    for (int i = 1; i <= sz; i++) {
        ll x = pts[i-1].u;
        auto [val, c] = cht.query(x);
        dp[i] = val + x * x + lambda;
        cnt[i] = c + 1;
        if (i < sz) {
            ll M = -2 * pts[i].l;
            ll B = dp[i] + pts[i].l * pts[i].l - ov[i] * ov[i];
            cht.addLine({M, B, cnt[i]});
        }
    }
    return {dp[sz], cnt[sz]};
}

// IOI grader entry point: minimum total photo cost for N points on an
// M x M grid using at most K photos.
long long take_photos(int N, int M, int K, int r[], int c[]) {
    // Step 1: transform to intervals
    vector<Point> raw(N);
    for (int i = 0; i < N; i++) {
        ll lo = min(r[i], c[i]);
        ll hi = max(r[i], c[i]) + 1;
        raw[i] = {lo, hi};
    }
    // Step 2: sort by left endpoint, break ties by larger right endpoint first
    sort(raw.begin(), raw.end(), [](const Point &a, const Point &b) {
        return a.l < b.l || (a.l == b.l && a.u > b.u);
    });
    // Remove dominated intervals: after sorting, keep only intervals where
    // u is strictly increasing (since l is non-decreasing)
    pts.clear();
    for (auto &p : raw) {
        // Pop a previous interval only if it is truly contained in p
        // (requires BOTH u <= p.u and l >= p.l).
        while (!pts.empty() && pts.back().u <= p.u && pts.back().l >= p.l)
            pts.pop_back();
        if (pts.empty() || p.u > pts.back().u)
            pts.push_back(p);
    }
    int sz = (int)pts.size();
    if (K >= sz) {
        // The photo budget is not binding: one square per interval is
        // optimal; cost = sum of (u-l)^2 minus squared pairwise overlaps.
        ll ans = 0;
        for (int i = 0; i < sz; i++) {
            ll side = pts[i].u - pts[i].l;
            ans += side * side;
            if (i > 0) {
                ll overlap = max(0LL, pts[i-1].u - pts[i].l);
                ans -= overlap * overlap;
            }
        }
        return ans;
    }
    // Step 3: Alien's trick - binary search on lambda.
    // Higher lambda penalizes more squares, so the optimal count is
    // non-increasing in lambda; find the smallest lambda with count <= K.
    ll lo = 0, hi = (ll)M * M;
    while (lo <= hi) {
        ll mid = lo + (hi - lo) / 2;
        int used = solve(mid).second;
        if (used <= K)
            hi = mid - 1;  // feasible, try a smaller penalty
        else
            lo = mid + 1;  // too many squares, increase the penalty
    }
    // lo is now the smallest lambda achieving count <= K. By convexity of
    // the optimal cost in the number of squares, the K-constrained optimum
    // is f(K) = g(lo) - K * lo, where g(lambda) is the relaxed optimum
    // (which already includes count * lambda).
    return solve(lo).first - (ll)K * lo;
}
// Standalone driver: reads "N M K" then N lines of "r c" from stdin and
// prints the minimum total cost.
int main() {
    int N, M, K;
    // Check scanf results: the old code used N/M/K uninitialized (UB) on
    // malformed or truncated input.
    if (scanf("%d %d %d", &N, &M, &K) != 3) return 1;
    vector<int> r(N), c(N);
    for (int i = 0; i < N; i++)
        if (scanf("%d %d", &r[i], &c[i]) != 2) return 1;
    printf("%lld\n", take_photos(N, M, K, r.data(), c.data()));
    return 0;
}