% Copyright (C) 2017 Koz Ross <koz.ross@retro-freedom.nz>
% Copyright (C) 2006 BenduKiwi
% This work is licensed under the Creative Commons Attribution-ShareAlike 4.0
% International License. To view a copy of this license, visit
% http://creativecommons.org/licenses/by-sa/4.0/ or send a letter to Creative
% Commons, PO Box 1866, Mountain View, CA 94042, USA.
% BenduKiwi's attribution applies to the image 'cthulhu.jpg', per
% the requirements of the CC-BY-SA license.
\documentclass[style=fyma]{powerdot}
\usepackage{enumitem}
\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amsthm}
\usepackage[font={itshape},begintext={``},endtext={''}]{quoting}
\usepackage{algpseudocode}
\usepackage[normalem]{ulem}
\usepackage{xcolor}
\usepackage{booktabs}
\usepackage{bm}
\newcommand\redsout{\bgroup\markoverwith{\textcolor{red}{\rule[0.5ex]{2pt}{0.4pt}}}\ULon}
\newtheorem*{definition}{Definition}
\newtheorem*{lemma}{Lemma}
\newtheorem*{theorem}{Theorem}
\newtheorem*{observation}{Observation}
\pdsetup{theslide=,
palette=gray}
\begin{document}
\begin{slide}[toc=]{}
\begin{center}
{\LARGE \textbf{Uses and limitations of asymptotic analysis} \par}
{\Large Or: How to assume like a computer scientist \par}
\vspace{1cm}
\includegraphics[scale=0.75]{logo.eps}
\vspace{1cm}
\linebreak
Koz Ross
\medskip
April 13, 2017
\end{center}
\end{slide}
\begin{slide}[toc=]{Overview}
\tableofcontents[content=sections]
\end{slide}
\section{Recap and formalisms}
\begin{slide}{Where we last left our intrepid heroes\ldots}
\vspace{1cm}
\begin{itemize}
\item Algorithms are recipes for a computer\pause{}
\item We want them to be efficient (time and space)\pause{}
\item We must have a way to compare algorithms, and to explain their
performance\pause{}
\item Implement-and-measure is {\em not\/} a good approach\pause{}
\item A model of a computer (RAM)\pause{}
\item Worst-case, asymptotic analysis
\end{itemize}
\end{slide}
\begin{slide}{Example from last week}
\begin{algorithmic}
\Function{$\mathsf{Max}$}{$\mathsf{arr}$}
\State{}$\mathsf{x} \gets \mathsf{arr}[1]$
\For{$\mathsf{i} \in 2, 3, \ldots,$
\Call{$\mathsf{length}$}{$\mathsf{arr}$}}
\If{$\mathsf{x} < \mathsf{arr}[\mathsf{i}]$}
\State{}$\mathsf{x} \gets \mathsf{arr}[\mathsf{i}]$
\EndIf{}
\EndFor{}
\State{}\Return{}$\mathsf{x}$
\EndFunction{}
\end{algorithmic}\pause{}
\medskip
$T_{\mathsf{Max}}(n) \text{ is } O(n)$\pause{}
\medskip
$S_{\mathsf{Max}}(n) \text{ is } O(1)$
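\pause{}
\medskip
A rough sketch of where the time bound comes from (writing $n$ for the length
of $\mathsf{arr}$; the constants $c_1, c_2$ are illustrative, standing for the
fixed costs of the setup and of one loop iteration): the loop body runs at
most $n - 1$ times, so
$$T_{\mathsf{Max}}(n) \leq c_1 + c_2 \cdot (n - 1)$$
For space, we only ever store $\mathsf{x}$ and $\mathsf{i}$, hence the
constant bound.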
\end{slide}
\begin{slide}[toc=]{What people see when they look at a formal definition of $O$}
\begin{center}
\includegraphics[scale=0.4]{cthulhu.eps}\pause{}
\medskip
Don't worry --- we'll go {\bf slow}.
\end{center}
\end{slide}
\begin{slide}{Definition of $O$ (at last!)}
\vspace{1cm}
\begin{definition}
Let $f, g$ be functions.\pause{} If there exist $n_0, c > 0$\pause{} such
that for all $n > n_0$, we have\pause{}
$$f(n) \leq c \cdot g(n)$$\pause{}
then we say that {\em $f$ is $O(g)$}.
\end{definition}\pause{}
\medskip
We write $O(n)$ as a kind of shorthand --- otherwise, we would have to write
$O(f \text{ such that } f(n) = n)$, which is far too long.
\begin{slide}{Examples of use}
\vspace{1cm}
\begin{lemma}
$6n-1$ is $O(n)$.
\end{lemma}\pause{}
\begin{proof}
Let $n_0 = 1, c = 7$. \pause{}We observe that, for all $n > 1$, we have
$$6n - 1 \leq 7n$$\pause{}
Thus, by definition, $6n-1$ is $O(n)$.
\end{proof}
\end{slide}
\begin{slide}[toc=]{Examples of use}
\begin{lemma}
$n^2$ is {\em not\/} $O(n)$.
\end{lemma}\pause{}
\begin{proof}
Suppose, for the sake of a contradiction, that $n^2$ is $O(n)$.\pause{} By
definition, there exist some $n_0, c > 0$ such that for all
$n > n_0$, $n^2 \leq c \cdot n$.\pause{} Consider $n =
\max\{n_0 + 1, c + 1\}$.\pause{} There are two cases:\pause{}
\medskip
{\bf Case 1:} $n = n_0 + 1$\pause{}
By substitution, we have ${(n_0 + 1)}^2 \leq c \cdot (n_0 + 1)$,
which simplifies to $n_0 + 1 \leq c$.\pause{} However, as $n_0 + 1 =
\max\{n_0 + 1, c + 1\}$, we have $c + 1 \leq n_0 + 1$, and thus
$c < n_0 + 1$ --- a contradiction.\pause{}
\medskip
{\bf Case 2:} $n = c + 1$\pause{}
By substitution, we have ${(c + 1)}^2 \leq c \cdot (c + 1)$, which
simplifies to $c + 1 \leq c$, which is a contradiction.\pause{}
\medskip
As both cases lead to contradictions, no such $n_0, c$ can exist. Thus,
$n^2$ is not $O(n)$.
\end{proof}
\end{slide}
\section{The usual suspects}
\begin{slide}{The `good' ones}
\vspace{1cm}
\pause{}
$\bm{O(1)}$
{\em Constant\/}: input size does not affect complexity.\pause{}
\bigskip
$\bm{O(\log(n))}$
{\em Logarithmic\/}: when input size doubles, complexity goes up by a
fixed amount.\pause{} We don't care about the base of the logarithm here,
because we can change to any base by multiplying by a constant.\pause{}
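Specifically, the change-of-base identity gives, for any bases $a, b > 1$:
$$\log_a(n) = \frac{1}{\log_b(a)} \cdot \log_b(n)$$
and $\frac{1}{\log_b(a)}$ is just a constant.\pause{}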
\bigskip
$\bm{O(n)}$
{\em Linear\/}: when input size doubles, complexity also doubles.
\end{slide}
\begin{slide}{The `acceptable' ones}
\vspace{1cm}
\pause{}
$\bm{O(n \log(n))}$
{\em Linearithmic\/}: when input size doubles, complexity slightly more than
doubles.\pause{} Its `wordy' name is rarely used outside of a
textbook.\pause{}
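\medskip
A quick sanity check of the doubling claim (taking the logarithm to base 2,
so that $\log(2) = 1$):
$$2n \cdot \log(2n) = 2n \cdot (\log(n) + 1) = 2 \cdot n\log(n) + 2n$$
so doubling the input doubles the $n\log(n)$ term, plus an extra linear
term.\pause{}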
\bigskip
$\bm{O(n^2)}$
{\em Quadratic\/}: when input size doubles, complexity {\em
quadruples}.\pause{}
\bigskip
$\bm{O(n^3)}$
{\em Cubic\/}: when input size doubles, complexity increases by a factor of
eight.
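\pause{}
\medskip
Both doubling claims are one line of algebra each:
$${(2n)}^2 = 4 \cdot n^2 \qquad \text{and} \qquad {(2n)}^3 = 8 \cdot n^3$$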
\end{slide}
\begin{slide}{The `{\em un\/}acceptable' ones}
\vspace{1cm}
\pause{}
$\bm{O(2^n)}$
{\em Exponential\/}: when input size goes up by 1, complexity doubles.\pause{}
If your algorithm has exponential time or space complexity, it may as well not
exist.\pause{}
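\medskip
Again, the doubling claim is one line of algebra:
$$2^{n+1} = 2 \cdot 2^n$$
\pause{}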
\bigskip
Only gets {\em worse\/} from here --- but we usually don't bother at this
point.
\end{slide}
\section{Limitations}
\begin{slide}{Limitations of worst-case analysis}
\vspace{1cm}
Worst-case analysis is {\em pessimistic\/} --- this can be a good thing,
because it stops us from having unrealistic expectations in the face of
unknown inputs.\pause{} However, it is possible for it to be {\em too\/}
pessimistic:\pause{}
\begin{itemize}
\item If worst cases are rare or improbable, an algorithm might look much
worse than it actually is\pause{}
\item We may focus on the wrong places (exactly what we sought to {\em
avoid\/})\pause{}
\item Predictions won't match reality (and we won't know why)\pause{}
\end{itemize}
Is it really sensible to {\em always\/} be {\em uncompromisingly\/} pessimistic?
\end{slide}
\begin{slide}{Limitations of RAM}
\vspace{1cm}
\begin{quoting}
RAM describes computers, and if we design algorithms around it, their actual
performance should reflect what RAM says it should be.
\end{quoting}\pause{}
\medskip
That statement was certainly true in the 1950s, but things have changed {\em
considerably\/} since then:\pause{}
\begin{itemize}
\item Processing units are no longer singular\pause{}
\item Memory is no longer uniform, or uniformly addressable\pause{}
\item Processing units haven't been sequential since the mid-1970s\pause{}
\end{itemize}
Is it really sensible to analyze (and {\em design\/}) algorithms based on
such an old view of our machines?
\end{slide}
\begin{slide}{Limitations of asymptotic analysis}
The assumptions of asymptotic analysis are self-consistent, but are they
consistent with {\em reality\/}?\pause{}
\begin{itemize}
\item There {\em is\/} a limit on how big inputs can get\pause{}
\item $2n$ and $2^{10}n$ are both $O(n)$, but are {\em very\/} different in
practice, especially for space (see the example after this list)\pause{}
\item Constants can, and {\em do}, dominate algorithm performance\pause{}
\end{itemize}
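For example, at $n = 10^6$:
$$2n = 2 \times 10^6 \qquad 2^{10} \cdot n = 1024 \times 10^6 > 10^9$$
\pause{}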
By clever `asymptotic abuse', we can easily come up with algorithms that
`look good', but are worthless in practice:
\begin{quoting}
Our algorithms have theoretical interest only; the constant factors involved
in the execution times preclude practicality.
\end{quoting}\pause{}
This seems to fly in the face of why we're doing this in the first place. But
where should we draw the line?
\end{slide}
\begin{slide}{Should we bother with it, then?}
\vspace{1cm}
\begin{itemize}
\item Asymptotic worst-case analysis based on RAM underpins many results,
and would be worth knowing for that reason alone\pause{}
\item Alternatives to it exist --- but to understand them, you need a
foundation (guess what {\em that\/} is?)\pause{}
\item In many cases ({\em taking its assumptions into account\/}), it still
works very well (much like classical mechanics)\pause{}
\item `The beginning, but not the end'\pause{}
\item ``If you don't have an implementation, you don't have an
algorithm''\pause{}
\end{itemize}
\begin{quoting}
Trust, but verify.
\end{quoting}
\end{slide}
\section{Questions}
\end{document}