% LaTeX source for Misprints and Errors in the 3rd edn of Bayesian Statistics: An Introduction

\documentclass[oneside]{book}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsbsy}
\usepackage{makeidx}
\usepackage{epsf}
\usepackage{verbatim}

\setcounter{secnumdepth}{1}

\setcounter{tocdepth}{1}

% Set up environment for exercises at ends of chapters
\newcounter{qno}
\newcommand{\startqs}{\setcounter{qno}{0}\vspace{-1.5\baselineskip}}
\newcommand{\nextq}
    {\vspace{1.5\baselineskip}\noindent\addtocounter{qno}{1}\arabic{qno}.\quad}

% Allow for blank lines
\newcommand{\blankline}{\vspace{\baselineskip}\noindent}

% Define digitwidth and dotwidth (TeXbook p. 241)
\newdimen\digitwidth
\setbox0=\hbox{\rm0}
\digitwidth=\wd0
\newdimen\dotwidth
\setbox0=\hbox{\rm.}
\dotwidth=\wd0

% Notation for vectors, matrices, estimates, random variables and sample means
\newcommand{\vect}{\boldsymbol}
\newcommand{\matr}{\boldsymbol}
\newcommand{\est}{\widehat}
\newcommand{\random}{\widetilde}
\newcommand{\mean}{\overline}
\newcommand{\tr}{\text{${}^{\text{T}}$}}

% Notation for dots in subscripts
\newcommand {\bdot}{\hbox{\Huge .}}
\newcommand {\dotdot}{{\hbox{\Huge .}\kern-0.1667em\hbox{\Huge .}}}
\newcommand {\onedot}{1\kern-0.1667em\bdot}
\newcommand {\twodot}{2\kern-0.1667em\bdot}
\newcommand {\idot}{i\kern-0.1667em\bdot}
\newcommand {\jdot}{j\kern-0.1667em\bdot}
\newcommand {\mdot}{m\kern-0.1667em\bdot}
\newcommand {\dotj}{\kern-0.1667em\bdot\kern-0.1667em j}

% Define sech, arc sin and arc cos
\newcommand{\sech}{\operatorname{sech}}
\renewcommand{\arcsin}{\operatorname{arc\,sin}}
\renewcommand{\arccos}{\operatorname{arc\,cos}}

% Define Probability, Expectation, Variance, Covariance, Median, Mode
\renewcommand{\Pr}{\mbox{$\mathsf P$}}
\newcommand{\E}{\mbox{$\mathsf E$}}
\newcommand{\Var}{\mbox{$\mathcal V$}}
\newcommand{\Cov}{\mbox{$\mathcal C$}}
\newcommand{\median}{\mbox{median\,}}
\newcommand{\mode}{\mbox{mode\,}}

% Define notation for evidence
\newcommand{\Ev}{\mbox{Ev}}

% Define small common fractions for use in display formulae
\newcommand{\half}{\mbox{$\frac{1}{2}$}}
\newcommand{\smallhalf}{\mbox{\small$\frac{1}{2}$}}
\newcommand{\quarter}{\mbox{$\frac{1}{4}$}}
\newcommand{\third}{\mbox{$\frac{1}{3}$}}
\newcommand{\twothirds}{\mbox{$\frac{2}{3}$}}
\newcommand{\ninth}{\mbox{$\frac{1}{9}$}}

% Alternative notation for fractions (TeXbook, exercise 11.6)
\newcommand{\slopefrac}[2]{\leavevmode\kern.1em
\raise .5ex\hbox{\the\scriptfont0 #1}\kern-.1em
/\kern-.15em\lower .25ex\hbox{\the\scriptfont0 #2}}

% Notation for beta function
\newcommand{\Betafn}{\mbox{B}}

% Define names of distributions
\newcommand{\N}{\mbox{N}}              % A.1
\newcommand{\G}{\mbox{G}}              % A.4
\newcommand{\Ex}{\mbox{E}}             % A.4
\renewcommand{\t}{\mbox{t}}            % A.8
\newcommand{\Be}{\mbox{Be}}            % A.10
\newcommand{\B}{\mbox{B}}              % A.11
\renewcommand{\P}{\mbox{P}}            % A.12
\newcommand{\NB}{\mbox{NB}}            % A.13
\renewcommand{\H}{\mbox{H}}            % A.14
\newcommand{\U}{\mbox{U}}              % A.15
\newcommand{\UD}{\mbox{UD}}            % A.15
\newcommand{\Pa}{\mbox{Pa}}            % A.16
\newcommand{\Pabb}{\mbox{Pabb}}        % A.16
\newcommand{\M}{\mbox{M}}              % A.17
\newcommand{\BF}{\mbox{BF}}            % A.18
\newcommand{\F}{\mbox{F}}              % A.19
\newcommand{\z}{\mbox{z}}              % A.20
\newcommand{\C}{\mbox{C}}              % A.21

% Define some common bold symbols
\newcommand{\bbeta}{\mbox{$\boldsymbol\beta$}}
\newcommand{\beeta}{\mbox{$\boldsymbol\eta$}}
\newcommand{\btheta}{\mbox{$\boldsymbol\theta$}}
\newcommand{\bkappa}{\mbox{$\boldsymbol\kappa$}}
\newcommand{\blambda}{\mbox{$\boldsymbol\lambda$}}
\newcommand{\bmu}{\mbox{$\boldsymbol\mu$}}
\newcommand{\btau}{\mbox{$\boldsymbol\tau$}}
\newcommand{\bzero}{\mbox{$\boldsymbol0$}}

% Further bold symbols for use in connection with hierarchical models
\newcommand {\bpiem}{\mbox{\boldmath $\pi^{EM}$}}
\newcommand {\bhtheta}{\mbox{\boldmath $\est\theta$}}
\newcommand {\bhthetao}{\mbox{\boldmath $\est\theta^{\mbox{\scriptsize\it0}}$}}
\newcommand {\bhthetajs}{\mbox{\boldmath $\est\theta^{JS}$}}
\newcommand {\bhthetajsplus}{\mbox{\boldmath $\est\theta^{JS^{{}_+}}$}}
\newcommand {\bhthetaem}{\mbox{\boldmath $\est\theta^{EM}$}}
\newcommand {\bhthetab}{\mbox{\boldmath $\est\theta^{B}$}}
\newcommand {\bhthetaeb}{\mbox{\boldmath $\est\theta^{EB}$}}
\newcommand {\thetabar}{\mbox{$\mean\theta$}}
\newcommand {\bphi}{\mbox{\boldmath $\phi$}}
\newcommand {\BPhi}{\mbox{\boldmath $\Phi$}}
\newcommand {\bpsi}{\mbox{\boldmath $\psi$}}
\newcommand {\BPsi}{\mbox{\boldmath $\Psi$}}
\newcommand {\BSigma}{\mbox{\boldmath $\Sigma$}}

% Define transpose for matrix theory
\newcommand{\transpose}{\mbox{${}^{\text{T}}$}}

% Define differentials with roman d and thin space before
\renewcommand{\d}{\mbox{d}}
\newcommand{\dF}{\,\mbox{\d$F$}}
\newcommand{\dt}{\,\mbox{\d$t$}}
\newcommand{\du}{\,\mbox{\d$u$}}
\newcommand{\dU}{\,\mbox{\d$U$}}
\newcommand{\dx}{\,\mbox{\d$x$}}
\newcommand{\dbx}{\,\text{\d$\vect x$}}
\newcommand{\dy}{\,\text{\d$y$}}
\newcommand{\dby}{\,\text{\d$\vect y$}}
\newcommand{\dz}{\,\mbox{\d$z$}}
\newcommand{\dgamma}{\,\mbox{\d$\gamma$}}
\newcommand{\dzeta}{\,\mbox{\d$\zeta$}}
\newcommand{\deta}{\,\mbox{\d$\eta$}}
\newcommand{\dtheta}{\,\mbox{\d$\theta$}}
\newcommand{\dbtheta}{\,\mbox{\d$\boldsymbol\theta$}}
\newcommand{\dkappa}{\,\mbox{\d$\kappa$}}
\newcommand{\dlambda}{\,\mbox{\d$\lambda$}}
\newcommand{\dLambda}{\,\mbox{\d$\Lambda$}}
\newcommand{\dmu}{\,\mbox{\d$\mu$}}
\newcommand{\dbmu}{\,\mbox{\d$\bmu$}}
\newcommand{\drho}{\,\mbox{\d$\rho$}}
\newcommand{\dpi}{\,\mbox{\d$\pi$}}
\newcommand{\dxi}{\,\mbox{\d$\xi$}}
\newcommand{\dphi}{\,\mbox{\d$\phi$}}
\newcommand{\dpsi}{\,\mbox{\d$\psi$}}
\newcommand{\domega}{\,\mbox{\d$\omega$}}

% Hyp for hypothesis
\newcommand{\Hyp}{\mbox{H}}

% Blackboard bold Z for the integers
\newcommand{\Z}{\mbox{$\mathbb Z$}}

% Script X for a set of possible observations
\newcommand{\X}{\mbox{$\mathcal X$}}

% EM, GEM, E-step and M-step for the EM algorithm
\newcommand{\EM}{\mbox{\textit{EM}\ }}
\newcommand{\GEM}{\mbox{\textit{GEM}\ }}
\newcommand{\Estep}{\mbox{\textit{E}-step\ }}
\newcommand{\Mstep}{\mbox{\textit{M}-step\ }}

% Omit the word Chapter at the start of chapters
\renewcommand{\chaptername}{}

% Set up for reference list
\newcommand{\hi}{\par\noindent\hangindent=1em}

\begin{document}

\pagestyle{empty}
\begin{center}
  {\Huge{\textbf{Bayesian Statistics:}}}\\
  \vspace{10 mm}
  {\LARGE{\textbf{An Introduction}}}\\
  \vspace{20 mm}
  {\Large{\textbf{PETER M. LEE}}}\\
  \vspace{3 mm}
  {\Large{\textbf{\mbox{\noindent
    Formerly Provost of Wentworth College,}}}}\\ 
  {\Large{\textbf{\mbox{\noindent
    University of York, UK}}}}\\
  \vspace{15 mm}
  {\Large{\textbf{Third Edition}}}\\
  \vspace{55 mm}
  A member of the Hodder Headline Group \\
  LONDON \\
  Distributed in the United States of America by \\
  Oxford University Press Inc., New York
\end{center}
\vfill

\mainmatter

\appendix
\pagestyle{headings}

\setcounter{chapter}{4}

\allowdisplaybreaks

\chapter{Misprints and Errors in Third Edition}

\nextq Page viii, title for section 5.3.  For ``equal (Behrens'' read 
``unequal (Behrens''.

\nextq Page xiii (footnote to Preface).  For ``Bellhouse (2003) and 
Dale (1991)'' read ``Bellhouse (2004) and Dale (1999)''. 

\nextq Page 10, line 14.  For ``is taken as $\half$, which is scarcely in 
accord with a presumption of innocence.'' read ``happens to equal $\Pr(E)$,
which will only rarely be the case.''

\nextq Page 19, line 16 from bottom.  For ``function $\Be(k+1,
n-k+1)$'' read ``function $\Betafn(k+1, n-k+1)$''.

\nextq Page 34, line 7 from bottom.  For
\[ p(x)  =  (2\pi\phi)^{\frac{1}{2}} \exp \{- \half(x - \theta)^2/\phi\}.  \]
read
\[ p(x)  =  (2\pi\phi)^{-\frac{1}{2}} \exp \{- \half(x - \theta)^2/\phi\}. \]

\nextq Page 35, line 7.  For
\[ p(\theta|x) = p(\theta) p(x|\theta) \]
read
\[ p(\theta|x) \propto p(\theta) p(x|\theta) \]

\nextq Page 50, line 12.  For
\[  p(\phi)=p(\psi) \,|\!\dpsi/\dphi| \propto \d\,\log \psi/\dphi \]
read
\[  p(\phi)=p(\psi) \,|\!\dpsi/\dphi| \propto \d\,\log \phi/\dphi \]

\nextq Page 51, lines 15--16.  For ``the variance was in fact unknown,
but treat it as if the variance were known'' read ``the mean was in fact
unknown, but treat it \textit{as if} the mean were known''.

\nextq Page 60, line 11 from bottom.  For ``with $\theta$ known'' read 
``with $\phi$ known''.

\nextq Page 60, last line.  For
\[ p(x|\pi) = \binom{n}{x} (1 - \pi)^n\exp [- x \log {\pi/(1-\pi)}].     \]
read
\[ p(x|\pi) = \binom{n}{x} (1 - \pi)^n\exp [x \log {\pi/(1-\pi)}].       \]

\nextq Page 63, line 15.  For
\[ p(\theta|\vect x) =   p(\theta,\phi|\vect x) \dphi    \]
read
\[ p(\theta|\vect x) = \int p(\theta,\phi|\vect x) \dphi \]

\nextq Page 79, line 5.  For ``$\pi =$'' read ``$p(\pi) =$''.

\nextq Page 83, line 7.  For ``$-\frac{\d}{\dtheta^2}1$'' read
``$-\frac{\d^2}{\dtheta^2}1$''.

\nextq Page 83, line 12 from bottom.  For ``where $\vect x$ is any 
one of the $x_i$'' read ``where $x$ is any one of the $x_i$''.

\nextq Page 87, line 11 from bottom.  For
\[    p(\lambda|x)       \propto \lambda^{(\nu + 2 T)/2 - 1} 
                                 \exp \{- \half(S_0 + 2 n)\lambda\} \]
read
\[    p(\lambda|\vect x) \propto \lambda^{(\nu + 2 T)/2 - 1} 
                                 \exp \{- \half(S_0 + 2 n)\lambda\} \]

\nextq Page 90, lines 12 and 5 from bottom.  For
\[ \propto y^{-\gamma-1}I_{\{\xi,\infty\}}(y) \]
read
\[ \propto y^{-\gamma-1}I_{(\xi,\infty)}(y) \]

\nextq Page 98, line 10 from bottom.  For
\[ \int_{da}^{(d+1)a} \theta/( \theta \log_{\text{e}} 10)  =
   \log_{\text{e}}(1+d^{-1})/\log_{\text{e}}10=\log_{10}(1+d^{-1}).  \]
read
\[ \int_{da}^{(d+1)a} \dtheta/( \theta \log_{\text{e}} 10)  =
   \log_{\text{e}}(1+d^{-1})/\log_{\text{e}}10=\log_{10}(1+d^{-1}).  \]
   
\nextq Page 100, line 14.  For ``for large $z$'' read ``for large $\kappa$''.
   
\nextq Page 100, line 15.  For ``$(2\pi)^{-1/2}\exp(z)$'' 
read ``$(2\pi\kappa)^{-1/2}\exp(\kappa)$''.

\nextq Page 105, line 6 from bottom. For ``$S \sim \chi_n^2$'' read ``$S \sim
\phi\chi_n^2$''.

\nextq Page 106, line 6 from bottom.  For
\[ L(\theta) = \text{constant}-\log\{1+(x_i-\theta)^2\} \]
read
\[ L(\theta) = \text{constant}-\sum\log\{1+(x_i-\theta)^2\} \]

\nextq Page 108, line 18.  Element in top right corner of matrix on 
right hand side should be $-n(\mean x-\theta)/\phi^2$ in agreement 
with the element in the bottom left corner.

\nextq Page 110, line 14.  For ``$\Be(k+1/n,\,k-x+1/n)$'' read  
``$\Be(x+1/n,\,k-x+1/n)$''.

\nextq Page 112, line 8.  For ``Since $M-\theta-\half$ is'' read
``Since $M-(\theta-\half)$ is''.

\nextq Page 122, line 7 from bottom.  For $\frac{p_0}{1-p_1}$ read 
$\frac{p_0}{1-p_0}$.

\nextq Page 126, line 15.  For
\[ p_1(\vect x) = \pi_1 \int \rho_1(\theta) p(\vect x|\theta) \dtheta   \]
read
\[ p_1(\vect x) = \int \rho_1(\theta) p(\vect x|\theta) \dtheta         \]

\nextq Page 143, line 11.  For
\[ t=\frac{\delta-(\overline x-\overline y)}{s(m^{-1}+n^{-1})} \]
read
\[ t=\frac{\delta-(\overline x-\overline y)}{s(m^{-1}+n^{-1})^{1/2}} \]

\nextq Page 167, line 13 from bottom.  For
\[ p(\alpha, \beta, \phi\,|\,\vect x,\vect y)\propto
   p(\alpha, \beta, \phi)\,p(\vect y\,|\,\vect x, \phi, \alpha, \beta, \phi) \]
read
\[ p(\alpha, \beta, \phi\,|\,\vect x,\vect y)\propto
   p(\alpha, \beta, \phi)\,p(\vect y\,|\,\vect x, \alpha, \beta, \phi) \]
   
\nextq Page 176, line 2 from bottom.  For ``$F_{i-1,}$'' read
``$F_{i-1,\nu}$''.

\nextq Page 183, line 1. On line 1 ``for all $i$'' should read ``for all $j$''
and vice-versa on line 3. 

\nextq Page 183, line 4 from bottom.  For
\[ p(\lambda,\btau,\bbeta,\bkappa,\phi\,|\,\vect x) 
   \propto \phi^{-N/2-1}
   \exp[-\half[S_t(\btau)+S_b(\bbeta)+S_{tb}(\bkappa)+S_e]/\phi]  \]
read
\[ p(\lambda,\btau,\bbeta,\bkappa,\phi\,|\,\vect x) 
   \propto \phi^{-(N+1)/2-1}
   \exp[-\half[S_t(\btau)+S_b(\bbeta)+S_{tb}(\bkappa)+S_e]/\phi]  \]

\nextq Page 185, line 5 from bottom.  For
$\boldsymbol\beta=(\alpha,\beta)^{\text{T}}$ 
read
$\boldsymbol\eta=(\alpha,\beta)^{\text{T}}$.

\nextq Page 188, line 14 from bottom.  For
\[ \frac{\nu(\alpha-\mean y)^2}{s^s} \qquad\text{read}\qquad
    \frac{\nu(\alpha-\mean y)^2}{s^2}  \]

\nextq Page 189, question 6. For ``special form $p(\rho) \propto
(1-\rho^2)^k$'' read ``special form $p(\rho) \propto (1-\rho^2)^{k/2}$''.

\nextq Page 222, line 16 from bottom. For ``$\mu \sim (\lambda,\psi)$'' read
``$\mu \sim \N(\lambda, \psi)$''.

\nextq Page 224, line 3.  For ``$\mu_{ij}$'' read ``$\theta_{ij}$''.

\nextq Page 226, line 1. For ``estimates well not'' read ``estimates will
not''.

\nextq Page 229, line 9. For
\[ r=16,\quad\mean{Y}=0.2564,\quad\mean{X}=7.221,\quad S=18.96 \]
read
\[ r=18,\quad\mean{Y}=0.2654,\quad\mean{X}=7.221,\quad S=18.96 \]

\nextq Page 229, line 13 from bottom.  For ``We suppose that the 
$i$th player had $m_i$ hits and was at bat $T_i$ times'' read
``We suppose that the $i$th player had $T_i$ hits and was at bat 
$m_i$ times''.

\nextq Page 249, lines 2 and 3.  For $p(\vect y\,|\,\eta^{(t+1)},\vect y)\dby$ 
read $p(\vect y\,|\,\eta^{(t+1)},\vect x)\dby$ (twice).

\nextq Page 255, line 9 from bottom. For 
\[ \psi \sim \chi_{\nu}^2\quad\text{and}\quad
   \theta\,|\,\psi\sim\N(\mu,\psi/s) \]
read
\[ \psi \sim \chi_{\nu}^2\quad\text{and}\quad
   \theta\,|\,\psi\sim\N(\mu,s^2\psi) \]
   
\nextq Page 255, bottom line should read ``the original data $\vect x$
   augmented by a single scalar $z$ (as in the linkage example).  
   The al\-gorithm''
   
\nextq Page 256, line 20 from bottom.  For ``marginal distribution of
$z$'' read ``marginal distribution of $y$''.

\nextq Page 264, line 5 from bottom.  For
\[ \Var_i=\left\{1/\psi+n_i/\psi\right\}^{-1}\quad\text{and}\quad 
\est\theta_i=\Var_i\left\{\mu/\psi+n_ix_{\idot}/\psi\right\}. \]
read
\[ \Var_i=\left\{1/\psi+n_i/\phi\right\}^{-1}\quad\text{and}\quad 
   \est\theta_i=\Var_i\left\{\mu/\psi+n_ix_{\idot}/\phi\right\}. \]

\nextq Page 271, line 18 from bottom.  For
\[ \alpha(\bphi|\btheta) = \min\left\{
   \frac{\exp[-\frac{1}{2}(\bphi  -\bmu)\tr\BSigma^{-1}(\bphi  -\bmu)]}
   {\exp[-\frac{1}{2}(\btheta-\bmu)\tr\BSigma^{-1}(\btheta-\bmu)]}
   \right\}. \]
read
\[ \alpha(\bphi|\btheta) = \min\left\{
   \frac{\exp[-\frac{1}{2}(\bphi  -\bmu)\tr\BSigma^{-1}(\bphi  -\bmu)]}
   {\exp[-\frac{1}{2}(\btheta-\bmu)\tr\BSigma^{-1}(\btheta-\bmu)]}
   \,,\,1\right\}. \]
   
\nextq Page 282. In question 9, for ``Section 15'' read ``Section 9.4''.

\nextq Page 285, line 7 from bottom. For 
\[ F(x) = \Pr(X\leqslant \half)=\half. \] 
read 
\[ F(x) = \Pr(X\leqslant m)=\half. \]

\nextq Page 289, line 2.  For ``$p(X)$'' read ``$p(Y)$''.

\nextq Page 289, line 6 from bottom.  For 
``$Y\sim S^{\frac{1}{2}}\chi_{\nu}^{-2}$'' read
``$Y\sim S^{\frac{1}{2}}\chi_{\nu}^{-1}$''.

\nextq Pages 332--333.  The program for Behrens' distribution (due
to Jacob Colvin) previously given in place of this program is no
longer recommended.  It can, however, still be found in the file
\begin{verbatim}
    http://www-users.york.ac.uk/~pml1/bayes/rprogs/hdr_colvin.txt
\end{verbatim}

\nextq Page 337.  In references, change entry to
\hi
Bellhouse, D.~R., `The Reverend Thomas Bayes, FRS: A biography to
celebrate the tercentenary of his birth' (with discussion),
\textit{Statistical Science}, \textbf{19} (2004), 3--43.

\nextq Page 339.  In references, change entry to 
\hi
Dale, A., \textit{A History of Inverse Probability from Thomas Bayes to
     Karl Pearson}, Berlin: Springer-Verlag (1999) [1st edn (1991)].
     
\nextq Page 343.  In reference, change entry to
\hi
Goldstein, M., and Wooff, D., \textit{Bayes Linear Statistics: Theory
and Methods}, Chichester: Wiley (2007).

\vspace{1.5\baselineskip}\noindent
\textit{Revised 13 December 2011.}
\end{document}

%