\documentclass[a4paper]{book}
\usepackage[a4paper, top=3cm,bottom=3cm, left=3cm, right=3cm%
%, paperheight=23cm
]{geometry}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{mathtools}
\usepackage{tikz}
\usetikzlibrary{positioning,calc,shapes}
\usepackage{todonotes}
\usepackage{hyperref}
\usepackage[backend=bibtex]{biblatex}
\bibliography{bibtex}

\usepackage{lipsum}

\title{Advanced Data Structures}
\author{Martin Hafskjold Thoresen}
\date{\today}

\newcommand{\topics}[1]{Topics: \textit{#1}}
\newcommand{\code}[2]{\textsc{#1}$(#2)$}

\newcommand{\RMQ}{$\textsc{RMQ}$}
\newcommand{\LCA}{$\textsc{LCA}$}
\newcommand{\LA}{$\textsc{LA}$}


\newenvironment{example}[0]{{\parindent=0em \textbf{Example:}}}{\vspace{1em}}
\newenvironment{definition}[1]{{\parindent=0em \textbf{Definition} --- \emph{#1}:\\}}{\vspace{1em}}

\DeclareMathOperator*{\argmin}{arg\,min} % thin space, limits underneath in displays
\DeclarePairedDelimiter{\ceil}{\lceil}{\rceil}
\DeclarePairedDelimiter{\floor}{\lfloor}{\rfloor}

% Expectation symbol
\DeclareMathOperator*{\E}{\mathbb{E}}

\begin{document}
\maketitle

\chapter*{Introduction}
This book is a collection of notes from the course \textit{Advanced Data Structures} at ETH Z\"urich, in the spring of 2017.
The chapters are arranged in the same way as the lectures, and some chapters cover material from two lectures.

\tableofcontents

\chapter{Hashing}
\topics{Universal hashing, tabulation hashing, hash tables, chaining, linear probing, cuckoo hashing.}

\section{Motivation}
Operations we need: \code{Query}{x}, \code{Insert}{x}, \code{Delete}{x},
where $x \in [\mathcal{U}]$.
$\mathcal{U}$ is called the \emph{Universe}.
We already know how to do this using Balanced Binary Search Trees (BBSTs), with $O(n)$ space and $O(\log n)$ time on all operations.
We would like to have $O(1)$ time, while still having $O(n)$ space.

\section{The Hash Function}

A \emph{Hash Function} is a function $h: [\mathcal{U}] \rightarrow [m]$ where $\mathcal{U} \gg m$.
$m$ is the table size.
Ideally, $h$ would be a totally random function, meaning there is no correlation between its input $x$ and $h(x)$.
However, encoding a totally random function takes $O(u \log m)$ bits of space.

This is a problem since the universe size $u = |\mathcal{U}|$ is large.
Therefore we settle on a family $\mathcal{H}$ of hash functions of small size which is \emph{Universal}.
Universality means that the probability of two non-equal keys having the same hash value is $O(1/m)$:
$$\forall x \neq y\quad \Pr_{h \in \mathcal{H}}[h(x) = h(y)] = O(1/m)$$

\begin{example}
    $h(x) = ((a x) \mod p) \mod m$, where $0 < a < p$ is chosen at random and $p > m$ is prime, is a universal hash function.
\end{example}

\begin{definition}{k-independent}
    A family of hash functions $\mathcal{H}$ is $k$-independent if, for all distinct $x_1, \dots, x_k$ and all $t_1, \dots, t_k$,
    $$\Pr_{h \in \mathcal{H}}[\bigwedge\limits_{i} h(x_i) = t_i] = O(1/m^k)$$
\end{definition}

\begin{example}
    $((ax + b) \mod p) \mod m$, with random $0 < a < p$ and $0 \leq b < p$, is 2-independent.
\end{example}

\begin{example}
    A polynomial with $k$ random coefficients (degree $k - 1$),
    $((\sum\limits_{i=0}^{k-1} a_i x^i)\mod p) \mod m$, is $k$-independent.
\end{example}
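To make the constructions above concrete, here is a minimal sketch of sampling from the 2-independent family (the Mersenne prime and the function name are our own choices; $p$ must exceed the largest key):

\begin{verbatim}
import random

def make_hash(m, p=(1 << 61) - 1):
    """Sample h(x) = ((a*x + b) mod p) mod m from a 2-independent
    family; p is a prime larger than the universe."""
    a = random.randrange(1, p)   # a != 0
    b = random.randrange(0, p)
    return lambda x: ((a * x + b) % p) % m
\end{verbatim}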

\section{Simple Tabulation Hashing}

Tabulation hashing is a hashing scheme.
We view $x$ as a vector of bit blocks $x_1, \dots, x_c$,
and use a totally random hash table on \emph{each} block.
Then we \texttt{xor} the blocks together to get the final value:
$$h(x) = T_1(x_1) \oplus T_2(x_2) \oplus \dots \oplus T_c(x_c)$$
Since a block is $(\log u)/c$ bits, the universe for one block is of size $u^{1/c}$.
One table is of size $u^{1/c}$ (we assume the hash values are machine words, as in~\cite{Patrascu:2011:PST:1993636.1993638}),
so the total space used is $O(cu^{1/c})$.
The time spent is $O(c)$, since each lookup is $O(1)$.
Simple tabulation hashing is 3-independent, but not 4-independent.
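As an illustration, here is a sketch of simple tabulation over 32-bit keys with $c = 4$ byte-sized blocks (the block width and 64-bit hash values are our own assumptions):

\begin{verbatim}
import random

C, BLOCK_BITS = 4, 8    # 32-bit keys as c = 4 blocks of 8 bits each
T = [[random.getrandbits(64) for _ in range(1 << BLOCK_BITS)]
     for _ in range(C)]  # one totally random table per block

def tabulation_hash(x):
    """h(x) = T_1(x_1) xor T_2(x_2) xor ... xor T_c(x_c)."""
    h = 0
    for i in range(C):
        block = (x >> (i * BLOCK_BITS)) & ((1 << BLOCK_BITS) - 1)
        h ^= T[i][block]
    return h
\end{verbatim}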


\section{Chaining}
Because of the birthday paradox, hashing collisions in the table are very probable\footnote{unless we know all keys up front, and $n\leq m$}.
Therefore we need a scheme to handle collisions, the simplest of which is \emph{chaining}.
Each bucket in the hash table is a linked list of the values mapping to that bucket.

What can we say about the length of the chain?
Let $C_t$ be the length of chain $t$.
Then $\E[C_t] = \sum\limits_i \Pr[h(x_i) = t]$.
If we have universal hashing, we know that $\Pr[h(x_i) = t] = O(1/m)$,
so $\E[C_t] = O(1)$ for $m = \Omega(n)$.

Since we need to traverse the list for all operations, the cost is quadratic in $C_t$,
so we are interested in $\E[C_t^2]$:
$$ \E[C_t^2] = \frac{1}{m} \sum_{s \in [m]} \E[C_s^2] = \frac{1}{m} \sum_{i, j} \Pr[h(x_i) = h(x_j)] = \frac{n}{m} + \frac{1}{m} \sum_{i \neq j} \Pr[h(x_i) = h(x_j)] $$
If we have universal hashing this is $\frac{n}{m} + \frac{1}{m}\, n^2\, O(\frac{1}{m}) = O(n/m + n^2/m^2) = O(1)$ for $m = \Omega(n)$.
With a totally random hash function, the longest chain is $O(\frac{\log n}{\log\log n})$ with probability $1 - 1/n^c$ for any $c$.
This also holds for simple tabulation hashing.
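A chained hash table is short enough to sketch in full; \texttt{h} can be sampled from any of the families above, and the class name is our own:

\begin{verbatim}
class ChainedTable:
    """Hash table with chaining; h maps keys into [0, m)."""
    def __init__(self, m, h):
        self.h = h
        self.buckets = [[] for _ in range(m)]

    def insert(self, x):
        bucket = self.buckets[self.h(x)]
        if x not in bucket:
            bucket.append(x)

    def query(self, x):
        return x in self.buckets[self.h(x)]

    def delete(self, x):
        bucket = self.buckets[self.h(x)]
        if x in bucket:
            bucket.remove(x)
\end{verbatim}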


\section{Perfect Hashing (FKS hashing)}

Idea: resolve collisions with another layer of hash tables.

On collision in bucket tables, rebuild the table using a different hash function.
When $n$ gets too large, double $m$ and start over, in order to keep the number of elements in the bucket tables small.
If a bucket table is too large, double its size and rehash.

Use size $\Theta(C_t^2)$ for the bucket tables.
Then the expected number of colliding pairs in a bucket table is at most $1/2$,
so by Markov's inequality, $\Pr[\text{no collisions in bucket } t] \geq 1/2$.
This makes the expected number of rebuilds of a bucket table $O(1)$ before getting zero collisions.

$\E[\text{space}] = \Theta(m + \sum\limits_t C_t^2) = \Theta(m + n^2/m) = \Theta(n)$ for $m=\Theta(n)$.

Results: $O(1)$ deterministic query, $O(n)$ expected construction time (also w.h.p.), and $O(n)$ space.
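A static variant of the scheme can be sketched as follows, reusing a hash sampler like \texttt{make\_hash} from earlier (the retry loops realize the expected $O(1)$ rebuilds; all names are our own):

\begin{verbatim}
def fks_build(keys, make_hash):
    """Static FKS: an outer table of size m, plus a collision-free
    inner table of size Theta(C_t^2) per bucket."""
    m = max(1, len(keys))
    h = make_hash(m)
    buckets = [[] for _ in range(m)]
    for x in keys:
        buckets[h(x)].append(x)
    inner = []
    for b in buckets:
        size = max(1, len(b) ** 2)
        while True:                     # expected O(1) tries by Markov
            g = make_hash(size)
            if len({g(x) for x in b}) == len(b):   # collision-free?
                break
        slots = [None] * size
        for x in b:
            slots[g(x)] = x
        inner.append((g, slots))
    return h, inner

def fks_query(table, x):
    h, inner = table
    g, slots = inner[h(x)]
    return slots[g(x)] == x
\end{verbatim}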


\section{Linear Probing}

Idea: store values directly in the table. On collision, look for next available spot.
On deletion, replace the element with a ``tombstone'', so that searches do not stop too early.
Great for cache performance.
The main problem of linear probing is the increasing lengths of the ``runs'',
that is, intervals of non-empty cells.

We require $m \geq (1+\epsilon) n$ (not just $m=\Omega(n)$), in order to have available cells in the table.
Space is naturally $O(m)$.
With a totally random hash function, or with tabulation hashing, the expected time for operations is $O(1/\epsilon^2)$.
With an $O(\log n)$-wise or 5-wise independent hash function, constant expected time is also achieved.
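A sketch of the probe loops, with tombstones as described (the sentinel objects are our own device; the load guarantee $m \geq (1+\epsilon)n$ ensures the loops terminate):

\begin{verbatim}
EMPTY, TOMBSTONE = None, object()

def insert(T, h, x):
    i = h(x)
    while T[i] not in (EMPTY, TOMBSTONE) and T[i] != x:
        i = (i + 1) % len(T)        # walk to the end of the run
    T[i] = x

def query(T, h, x):
    i = h(x)
    while T[i] is not EMPTY:        # tombstones do not stop the search
        if T[i] == x:
            return True
        i = (i + 1) % len(T)
    return False

def delete(T, h, x):
    i = h(x)
    while T[i] is not EMPTY:
        if T[i] == x:
            T[i] = TOMBSTONE        # keep the run intact for searches
            return
        i = (i + 1) % len(T)
\end{verbatim}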


\section{Cuckoo Hashing}
Idea: have two hash tables with different hashing functions.
On collision, swap the colliding values, and try to insert the swapped value in the other table.
Deletes are simple.
If we get a cycle (swap $a$ with $b$, swap $b$ with $c$, and $c$ hashes to $a$ again), we rebuild the tables.
The table sizes are $m \geq (1+\epsilon)n$, so the space used is $O((2+\epsilon)n)$.

Any value $x$ is either in $A[h_A(x)]$ or $B[h_B(x)] \implies O(1)$ deterministic query.
If the hash functions are fully random or $\log n$-wise independent, we get $O(1)$ expected update time and $O(1/n)$ probability of failure.
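A sketch of the query and the eviction loop (the kick bound and the rebuild signal are our own simplifications):

\begin{verbatim}
def query(A, B, hA, hB, x):
    """x can only live in A[hA(x)] or B[hB(x)]."""
    return A[hA(x)] == x or B[hB(x)] == x

def insert(A, B, hA, hB, x, max_kicks=32):
    """Return False if a cycle is suspected, in which case the
    caller rebuilds both tables with fresh hash functions."""
    for _ in range(max_kicks):
        i = hA(x)
        if A[i] is None:
            A[i] = x
            return True
        A[i], x = x, A[i]            # evict; the evicted key stays in x
        A, B, hA, hB = B, A, hB, hA  # try the other table next
    return False
\end{verbatim}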



\chapter{Static Tree Queries}

\topics{Range Minimum Query (\code{RMQ}{i, j}), Lowest Common Ancestor (\code{LCA}{x, y}), Level Ancestor (\code{LA}{x, n}), Range Queries, Range Trees.}

We would like $O(1)$ query time on selected operations, and still only $O(n)$ space;
if we allow $O(n^2)$ space this is trivial, as we can simply precompute all queries and store them.

\section{Range Minimum Query}
We would like to retrieve the index of the minimum element in an array between indices $i$ and $j$.
Our goal is to get $O(n)$ time and space preprocessing, and constant time query.
Note that this is easy to do in $O(\log n)$ time using Range Trees ---
store minimum in internal nodes, and traverse from $i$ to $j$ in $O(\log n)$ time.

It turns out that \LCA{} and \RMQ{} are equivalent.

\subsection{Reduction from \RMQ{} to \LCA{}}

\todo{Add tree from lecture notes}
Build a \emph{Cartesian Tree}:
Walk through the array, while keeping track of the right spine of the tree.
When inserting a new element, if it is larger than every element on the spine, insert it at the end of the spine.
Otherwise, there is an edge from $v$ to $w$ on the spine where the new element $a$ belongs in between.
Make $a$ the right child of $v$, and make $w$ the \emph{left} child of $a$.
This step looks like it could take linear time, but
every node we pass on the right spine leaves the spine for good,
so the work amortizes to constant time per insertion,
making the algorithm linear.
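A sketch of the right-spine construction (the array-index representation is our own choice):

\begin{verbatim}
def cartesian_tree(A):
    """Min-Cartesian tree of A as parent/left/right index arrays."""
    n = len(A)
    parent, left, right = [-1] * n, [-1] * n, [-1] * n
    spine = []                       # right spine, values increasing
    for i in range(n):
        last = -1
        while spine and A[spine[-1]] > A[i]:
            last = spine.pop()       # popped nodes pay for the search
        if last != -1:               # w becomes the left child of a
            left[i] = last
            parent[last] = i
        if spine:                    # a becomes the right child of v
            right[spine[-1]] = i
            parent[i] = spine[-1]
        spine.append(i)
    return parent, left, right
\end{verbatim}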

Note that the simpler divide-and-conquer algorithm runs in $O(n \log n)$.

We see that \code{LCA}{i, j} in the Cartesian tree is the same as \code{RMQ}{i, j} in $A$.

\subsection{Reduction from \LCA{} to \RMQ{}}

Traverse the tree in-order, and write out the \emph{depth} of each node to $A$.
Now \code{RMQ}{i, j} = \code{LCA}{i, j}.
Naturally, since this scheme should work for any tree, we cannot write out the node values,
as the tree may not be a Cartesian tree.

Note that if we go from \RMQ{} to \LCA{} and back again, we end up with different numbers
in the array. However, since we are not interested in the actual minimum, but only the index of the minimum,
the two arrays act exactly the same.
A consequence of this is \emph{universe reduction}: we have mapped the universe $\mathcal{U}$ down to $[n-1]$.

\section{Constant time \LCA{} (\RMQ{})}
\label{sec:constant-lca}

\subsubsection{Step 1: Reduction}
We start by reducing the problem to $\pm 1\,\textsc{RMQ}$, in which adjacent elements of the array differ by exactly 1.
Walk an \emph{Euler Tour} of the tree:
\todo{Add figure}
visit every edge twice, and each time we traverse an edge, write down the node we leave.
Each node stores a pointer to the first visit of that node in the array, and each array element stores a pointer to its node in the tree.
\RMQ{} and \LCA{} are still equivalent.

We need this later on (step 4).
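A sketch of the tour (a node representation with a \texttt{children} list is assumed):

\begin{verbatim}
def euler_tour(root):
    """Depth array for +-1 RMQ, plus each node's first occurrence."""
    depths, nodes, first = [], [], {}
    def visit(v, d):
        first.setdefault(id(v), len(depths))
        depths.append(d)
        nodes.append(v)
        for c in v.children:
            visit(c, d + 1)
            depths.append(d)         # write v again on the way back up
            nodes.append(v)
    visit(root, 0)
    return depths, nodes, first
\end{verbatim}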

\subsubsection{Step 2: Precomputation}
Get $O(1)$ time and $O(n \log n)$ space \RMQ{}.
Precompute and store all queries from any starting point where the interval length is a power of 2.
Since there are $n$ starting points, and $\log n$ powers of 2 to choose from, there are $n \log n$ such queries.
The key observation is that any arbitrary interval is the union of two such intervals.
For instance, $A[4..13] = A[4..11] \cup A[6..13]$, two overlapping intervals of length 8.
The double counting does not matter, since $\min$ is an idempotent operation.
The two covering intervals are computed trivially from $k = \floor{\log(j - i + 1)}$.
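A sketch of the precomputation and query, storing argmin indices as the section suggests:

\begin{verbatim}
def build_sparse(A):
    """st[k][i] = index of the minimum of A[i .. i + 2^k - 1]."""
    n = len(A)
    st = [list(range(n))]
    k = 1
    while (1 << k) <= n:
        prev, half = st[k - 1], 1 << (k - 1)
        st.append([min(prev[i], prev[i + half], key=A.__getitem__)
                   for i in range(n - (1 << k) + 1)])
        k += 1
    return st

def rmq(A, st, i, j):
    """Index of the minimum of A[i..j], via two covering intervals."""
    k = (j - i + 1).bit_length() - 1
    return min(st[k][i], st[k][j - (1 << k) + 1], key=A.__getitem__)
\end{verbatim}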

\subsubsection{Step 3: Indirection}
Make a two-layer structure: divide the numbers in $A$ into groups of size $\frac{1}{2} \log n$, which make up the bottom layer.
The top layer consists of the minimum element of each block. Since there are $n/(\frac{1}{2} \log n) = 2n/\log n$ such blocks,
there are equally many items in the top layer.

A query in this structure consists of (potentially) three parts:
a query in the bottom block containing $i$,
a query in the top layer over all blocks completely covered by the interval $[i, j]$,
and a query in the bottom block containing $j$.
We need all three queries to be $O(1)$.

The gain from this is that the top layer only stores $O(n/\log n)$ elements, so we can afford Step 2 on it, since the $\log$ factors cancel.
We get $O(1)$ query and $O(n)$ space for the top structure.

\subsubsection{Step 4: Lookup Tables}
We use lookup tables for the bottom groups.
The groups are of size $n' = 1/2 \log n$.
\RMQ{} queries in these groups are invariant under value ``shifts'' of the group:
if we add $a$ to all elements in the group, the queries are still the same.
Shift each group by its first element, such that all groups start with 0.
Now every group is completely defined by the difference of adjacent elements,
so the blocks can be encoded as a bitstring of the same length as a block, where 0 is decreasing and 1 is increasing:
$[3,4,5,4] \rightarrow [0,1,2,1] \rightarrow [-, 1, 1, 0]$.
There are $2^{n'} = \sqrt{n}$ possible such blocks,
${(1/2 \log n)}^2$ possible queries, and each answer requires $\log \log n$ bits,
so storing a lookup table for all possible blocks, over all possible queries with all possible
answers, takes $\sqrt{n}\ {(1/2 \log n)}^2\ \log \log n = o(n)$ bits.
Now each bottom block can simply store a pointer into the table, and we get $O(1)$ query for the bottom groups.
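Computing a block's signature is a short loop; a sketch:

\begin{verbatim}
def block_signature(block):
    """Encode each adjacent step as increasing (1) or decreasing (0);
    blocks that are shifts of each other get equal signatures."""
    sig = 0
    for a, b in zip(block, block[1:]):
        sig = (sig << 1) | (b > a)
    return sig      # the block's index into the shared lookup table
\end{verbatim}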


\section{Constant Time \LA{}}
Level Ancestor queries take a node $x$ and a level $n$, and the goal is to find the $n$th ancestor of $x$ in the tree.
The simplest way to do this is for each node to store its parent, making the query $O(n)$.
We want $O(1)$.

\subsubsection{Step 1: Jump Pointers}
Instead of each node storing only its parent, each node can store its $2^k$th ancestor, for every $k$.
Each node has $O(\log n)$ such ancestors, making the space requirement $O(n \log n)$.
Query time is $O(\log n)$, since each jump at least halves the remaining distance.
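A sketch of building and following jump pointers (a parent-index array representation is assumed):

\begin{verbatim}
def build_jumps(parent):
    """up[k][v] = the 2^k-th ancestor of v, or -1 above the root."""
    n = len(parent)
    up = [parent[:]]
    for k in range(1, max(1, n.bit_length())):
        prev = up[k - 1]
        up.append([prev[prev[v]] if prev[v] != -1 else -1
                   for v in range(n)])
    return up

def level_ancestor(up, x, n):
    """Follow one jump pointer per set bit of n: O(log n) jumps."""
    k = 0
    while n and x != -1:
        if n & 1:
            x = up[k][x]
        n >>= 1
        k += 1
    return x
\end{verbatim}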

\subsubsection{Step 2: Long-path Decomposition}
Decompose the tree into a set of paths.
Find the longest path in the tree from the root, and store the nodes in an array.
Each node stores a pointer to its path's array and its index within that array.
Recurse on the subtrees that are hanging from the path.

With this scheme, a query is done as follows:
if $n$ is less than the node's index in its path, we jump directly to the target node within the array.
Otherwise, we jump to the first node of our path, subtract the distance travelled from $n$, and repeat.
We visit at most $O(\sqrt{n})$ such paths: each path we enter is strictly longer than the one before (they are the \emph{longest} paths), and $1 + 2 + \dots + k \leq n$ gives $k = O(\sqrt{n})$.
We end up using $O(n)$ space, and $O(\sqrt{n})$ query time.

\subsubsection{Step 3: Ladder Decomposition}
Extend each path upwards so that it is twice its original length.
Now the arrays overlap, but each node still only stores its original array and index.
This doubles the space of Step 2, but we are still linear.
The improvement of this step is that a node of height $h$ now has at least $h$ of its ancestors inside its ladder,
since its path descends at least $h$ below it, and the ladder extends upwards by the length of the path.

\subsubsection{Step 4: Step 1 + Step 3}
We combine the jump pointers and the ladder decomposition.
Jump pointers are great for long jumps, and ladders are great for short jumps.
For a query \code{LA}{x, k}, we first follow a jump pointer $2^{\floor{\log k}}$ steps up, where $k/2 \leq 2^{\floor{\log k}} \leq k$.
The node we land on has height at least $k/2$, and hence its ladder contains at least $k/2$ of its ancestors (by Step 3).
The remaining at most $k/2$ steps therefore stay within that ladder, so one ladder jump finishes the query.
Hence, we get $O(1)$ query, but still $O(n \log n)$ space (and preprocessing).

\subsubsection{Step 5: Only Leaves Store Jump Pointers}
Since every node has constant-time access to a leaf (the bottom node of its path is a leaf, by the maximality of long paths),
only leaves need to store the jump pointers.
In other words, we make all queries start at leaves.

\subsubsection{Step 6: Leaf Trimming}
We define a \emph{maximally deep node} as a deepest node that still has $\geq 1/4 \log n$ descendants.
Split the tree into two layers at the maximally deep nodes.
The number of leaves in the top part is now $O(n/\log n)$,
since each leaf of the top tree is a maximally deep node with $\geq 1/4 \log n$ descendants of its own, and these descendant sets are disjoint.
If we now use Step 5 on the top, we get $O(n)$ space.

\subsubsection{Step 7: Lookup Table}
For the bottom trees, we use lookup tables.
The trees are of size $n' \leq 1/4 \log n$. The number of rooted trees on $n'$ nodes is bounded by
$2^{2n'} \leq \sqrt{n}$, by encoding an Euler tour as a string of $\pm1$, as in Section~\ref{sec:constant-lca}.
There are ${(n')}^2 = O(\log^2 n)$ possible queries (if the level $n$ reaches above the bottom tree, we continue in the top structure),
and an answer takes $O(\log \log n)$ bits,
so a lookup table for all possible trees, with all possible queries takes only
$\sqrt{n}\ O(\log^2 n)\ O(\log \log n) = o(n)$ bits.

We end up with $O(1)$ time queries, using $O(n)$ space!



\chapter{Strings}


String search, suffix trees, suffix arrays.

\chapter{Temporal Structures}

Partial persistence, full persistence, functional persistence,
partial retroactivity, full retroactivity.

\chapter{Connectivity in Dynamic Graphs}

Dynamic connectivity on trees, Euler tour trees.


\chapter{Lower Bounds}

Dynamic partial sums, dynamic connectivity.


\chapter{Integer Structures}

van Emde Boas, x-fast trees, y-fast trees, fusion trees.

\chapter{Succinct Structures}
Rank, Select

\chapter{Concurrency}

Locks, Lock-free structures, lists, priority queues.

\printbibliography%

\end{document}