\documentclass[12pt,titlepage]{article}

\usepackage{amsmath}
\usepackage{mathrsfs}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{mathtools}
\usepackage{graphicx}
\usepackage{color}
\usepackage{ucs}
\usepackage[utf8x]{inputenc}
\usepackage{xparse}
\usepackage{hyperref}

%----Macros----------
%
% Unresolved issues:
%
%  \righttoleftarrow
%  \lefttorightarrow
%
%  \color{} with HTML colorspec
%  \bgcolor
%  \array with options (without options, it's equivalent to the matrix environment)

% Of the standard HTML named colors, white, black, red, green, blue and yellow
% are predefined in the color package. Here are the rest.
\definecolor{aqua}{rgb}{0, 1.0, 1.0}
\definecolor{fuschia}{rgb}{1.0, 0, 1.0}
\definecolor{gray}{rgb}{0.502, 0.502, 0.502}
\definecolor{lime}{rgb}{0, 1.0, 0}
\definecolor{maroon}{rgb}{0.502, 0, 0}
\definecolor{navy}{rgb}{0, 0, 0.502}
\definecolor{olive}{rgb}{0.502, 0.502, 0}
\definecolor{purple}{rgb}{0.502, 0, 0.502}
\definecolor{silver}{rgb}{0.753, 0.753, 0.753}
\definecolor{teal}{rgb}{0, 0.502, 0.502}

% Because of conflicts, \space and \mathop are converted to
% \itexspace and \operatorname during preprocessing.

% itex: \space{ht}{dp}{wd}
%
% Height and baseline depth measurements are in units of tenths of an ex while
% the width is measured in tenths of an em.
\makeatletter
\newdimen\itex@wd%
\newdimen\itex@dp%
\newdimen\itex@thd%
% \itexspace{ht}{dp}{wd}: an empty box of the given dimensions
% (tenths of an ex for height/depth, tenths of an em for width).
\def\itexspace#1#2#3{\itex@wd=#3em%
\itex@wd=0.1\itex@wd%
\itex@dp=#2ex%
\itex@dp=0.1\itex@dp%
\itex@thd=#1ex%
\itex@thd=0.1\itex@thd%
\advance\itex@thd\the\itex@dp%
\makebox[\the\itex@wd]{\rule[-\the\itex@dp]{0cm}{\the\itex@thd}}}
\makeatother

% \tensor and \multiscript
\makeatletter
\newif\if@sup
\newtoks\@sups
\def\append@sup#1{\edef\act{\noexpand\@sups={\the\@sups #1}}\act}%
\def\reset@sup{\@supfalse\@sups={}}%
\def\mk@scripts#1#2{\if #2/ \if@sup ^{\the\@sups}\fi \else%
\ifx #1_ \if@sup ^{\the\@sups}\reset@sup \fi {}_{#2}%
\else \append@sup#2 \@suptrue \fi%
\expandafter\mk@scripts\fi}
\def\tensor#1#2{\reset@sup#1\mk@scripts#2_/}
\def\multiscripts#1#2#3{\reset@sup{}\mk@scripts#1_/#2%
\reset@sup\mk@scripts#3_/}
\makeatother

% \slash: negative-kerned slash through its argument (Feynman-slash style)
\makeatletter
\newbox\slashbox \setbox\slashbox=\hbox{$/$}
\def\itex@pslash#1{\setbox\@tempboxa=\hbox{$#1$}
  \@tempdima=0.5\wd\slashbox \advance\@tempdima 0.5\wd\@tempboxa
  \copy\slashbox \kern-\@tempdima \box\@tempboxa}
\def\slash{\protect\itex@pslash}
\makeatother

% math-mode versions of \rlap, etc
% from Alexander Perlis, "A complement to \smash, \llap, and lap"
% http://math.arizona.edu/~aprl/publications/mathclap/
\def\clap#1{\hbox to 0pt{\hss#1\hss}}
\def\mathllap{\mathpalette\mathllapinternal}
\def\mathrlap{\mathpalette\mathrlapinternal}
\def\mathclap{\mathpalette\mathclapinternal}
\def\mathllapinternal#1#2{\llap{$\mathsurround=0pt#1{#2}$}}
\def\mathrlapinternal#1#2{\rlap{$\mathsurround=0pt#1{#2}$}}
\def\mathclapinternal#1#2{\clap{$\mathsurround=0pt#1{#2}$}}

% Renames \root as \oldroot and redefines \root/\sqrt in terms of it,
% so that itex's \root{n}{x} and \sqrt[n]{x} both work.
\let\oldroot\root
\def\root#1#2{\oldroot #1 \of{#2}}
\renewcommand{\sqrt}[2][]{\oldroot #1 \of{#2}}

% Manually declare the txfonts symbolsC font
\DeclareSymbolFont{symbolsC}{U}{txsyc}{m}{n}
\SetSymbolFont{symbolsC}{bold}{U}{txsyc}{bx}{n}
\DeclareFontSubstitution{U}{txsyc}{m}{n}

% Manually declare the stmaryrd font
\DeclareSymbolFont{stmry}{U}{stmry}{m}{n}
\SetSymbolFont{stmry}{bold}{U}{stmry}{b}{n}

% Manually declare the MnSymbolE font
\DeclareFontFamily{OMX}{MnSymbolE}{}
\DeclareSymbolFont{mnomx}{OMX}{MnSymbolE}{m}{n}
\SetSymbolFont{mnomx}{bold}{OMX}{MnSymbolE}{b}{n}
\DeclareFontShape{OMX}{MnSymbolE}{m}{n}{
    <-6>  MnSymbolE5
   <6-7>  MnSymbolE6
   <7-8>  MnSymbolE7
   <8-9>  MnSymbolE8
   <9-10> MnSymbolE9
  <10-12> MnSymbolE10
  <12->   MnSymbolE12}{}

% Declare specific arrows from txfonts without loading the full package
\makeatletter
% Undefine #1 first so \DeclareMathSymbol does not complain about redefinition.
\def\re@DeclareMathSymbol#1#2#3#4{%
  \let#1=\undefined
  \DeclareMathSymbol{#1}{#2}{#3}{#4}}
\re@DeclareMathSymbol{\neArrow}{\mathrel}{symbolsC}{116}
\re@DeclareMathSymbol{\neArr}{\mathrel}{symbolsC}{116}
\re@DeclareMathSymbol{\seArrow}{\mathrel}{symbolsC}{117}
\re@DeclareMathSymbol{\seArr}{\mathrel}{symbolsC}{117}
\re@DeclareMathSymbol{\nwArrow}{\mathrel}{symbolsC}{118}
\re@DeclareMathSymbol{\nwArr}{\mathrel}{symbolsC}{118}
\re@DeclareMathSymbol{\swArrow}{\mathrel}{symbolsC}{119}
\re@DeclareMathSymbol{\swArr}{\mathrel}{symbolsC}{119}
\re@DeclareMathSymbol{\nequiv}{\mathrel}{symbolsC}{46}
\re@DeclareMathSymbol{\Perp}{\mathrel}{symbolsC}{121}
\re@DeclareMathSymbol{\Vbar}{\mathrel}{symbolsC}{121}
\re@DeclareMathSymbol{\sslash}{\mathrel}{stmry}{12}
\re@DeclareMathSymbol{\bigsqcap}{\mathop}{stmry}{"64}
\re@DeclareMathSymbol{\biginterleave}{\mathop}{stmry}{"6}
\re@DeclareMathSymbol{\invamp}{\mathrel}{symbolsC}{77}
\re@DeclareMathSymbol{\parr}{\mathrel}{symbolsC}{77}
\makeatother

% \llangle, \rrangle, \lmoustache and \rmoustache from MnSymbolE
\makeatletter
% Undefine #1 (if it exists) before declaring it as a math delimiter.
\def\Decl@Mn@Delim#1#2#3#4{%
  \if\relax\noexpand#1%
    \let#1\undefined
  \fi
  \DeclareMathDelimiter{#1}{#2}{#3}{#4}{#3}{#4}}
\def\Decl@Mn@Open#1#2#3{\Decl@Mn@Delim{#1}{\mathopen}{#2}{#3}}
\def\Decl@Mn@Close#1#2#3{\Decl@Mn@Delim{#1}{\mathclose}{#2}{#3}}
\Decl@Mn@Open{\llangle}{mnomx}{'164}
\Decl@Mn@Close{\rrangle}{mnomx}{'171}
\Decl@Mn@Open{\lmoustache}{mnomx}{'245}
\Decl@Mn@Close{\rmoustache}{mnomx}{'244}
\makeatother

%
% Widecheck: a wide check accent built by vertically mirroring \widehat.
\makeatletter
\DeclareRobustCommand\widecheck[1]{{\mathpalette\@widecheck{#1}}}
\def\@widecheck#1#2{%
    \setbox\z@\hbox{\m@th$#1#2$}%
    \setbox\tw@\hbox{\m@th$#1%
       \widehat{%
          \vrule\@width\z@\@height\ht\z@
          \vrule\@height\z@\@width\wd\z@}$}%
    \dp\tw@-\ht\z@
    \@tempdima\ht\z@ \advance\@tempdima2\ht\tw@ \divide\@tempdima\thr@@
    \setbox\tw@\hbox{%
       \raise\@tempdima\hbox{\scalebox{1}[-1]{\lower\@tempdima\box\tw@}}}%
    {\ooalign{\box\tw@ \cr \box\z@}}}
\makeatother

% \mathraisebox{voffset}[height][depth]{something}
\makeatletter
\NewDocumentCommand\mathraisebox{moom}{%
  \IfNoValueTF{#2}{\def\@temp##1##2{\raisebox{#1}{$\m@th##1##2$}}}{%
    \IfNoValueTF{#3}{\def\@temp##1##2{\raisebox{#1}[#2]{$\m@th##1##2$}}%
    }{\def\@temp##1##2{\raisebox{#1}[#2][#3]{$\m@th##1##2$}}}}%
  \mathpalette\@temp{#4}}
% NOTE(review): the original closed this group with a second \makeatletter;
% \makeatother is what was intended (the following udots block reopens @).
\makeatother

% udots (taken from yhmath)
\makeatletter
\def\udots{\mathinner{\mkern2mu\raise\p@\hbox{.}
  \mkern2mu\raise4\p@\hbox{.}\mkern1mu
  \raise7\p@\vbox{\kern7\p@\hbox{.}}\mkern1mu}}
\makeatother

%% Fix array
\newcommand{\itexarray}[1]{\begin{matrix}#1\end{matrix}}

%% \itexnum is a noop
\newcommand{\itexnum}[1]{#1}

%% Renaming existing commands
\newcommand{\underoverset}[3]{\underset{#1}{\overset{#2}{#3}}}
\newcommand{\widevec}{\overrightarrow}
\newcommand{\darr}{\downarrow}
\newcommand{\nearr}{\nearrow}
\newcommand{\nwarr}{\nwarrow}
\newcommand{\searr}{\searrow}
\newcommand{\swarr}{\swarrow}
\newcommand{\curvearrowbotright}{\curvearrowright}
\newcommand{\uparr}{\uparrow}
\newcommand{\downuparrow}{\updownarrow}
\newcommand{\duparr}{\updownarrow}
\newcommand{\updarr}{\updownarrow}
\newcommand{\gt}{>}
\newcommand{\lt}{<}
\newcommand{\map}{\mapsto}
\newcommand{\embedsin}{\hookrightarrow}
\newcommand{\Alpha}{A}
\newcommand{\Beta}{B}
\newcommand{\Zeta}{Z}
\newcommand{\Eta}{H}
\newcommand{\Iota}{I}
\newcommand{\Kappa}{K}
\newcommand{\Mu}{M}
\newcommand{\Nu}{N}
\newcommand{\Rho}{P}
\newcommand{\Tau}{T}
\newcommand{\Upsi}{\Upsilon}
\newcommand{\omicron}{o}
\newcommand{\lang}{\langle}
\newcommand{\rang}{\rangle}
\newcommand{\Union}{\bigcup}
\newcommand{\Intersection}{\bigcap}
\newcommand{\Oplus}{\bigoplus}
\newcommand{\Otimes}{\bigotimes}
\newcommand{\Wedge}{\bigwedge}
\newcommand{\Vee}{\bigvee}
\newcommand{\coproduct}{\coprod}
\newcommand{\product}{\prod}
\newcommand{\closure}{\overline}
\newcommand{\integral}{\int}
\newcommand{\doubleintegral}{\iint}
\newcommand{\tripleintegral}{\iiint}
\newcommand{\quadrupleintegral}{\iiiint}
\newcommand{\conint}{\oint}
\newcommand{\contourintegral}{\oint}
\newcommand{\infinity}{\infty}
\newcommand{\bottom}{\bot}
\newcommand{\minusb}{\boxminus}
\newcommand{\plusb}{\boxplus}
\newcommand{\timesb}{\boxtimes}
\newcommand{\intersection}{\cap}
\newcommand{\union}{\cup}
\newcommand{\Del}{\nabla}
\newcommand{\odash}{\circleddash}
\newcommand{\negspace}{\!}
\newcommand{\widebar}{\overline}
\newcommand{\textsize}{\normalsize}
\renewcommand{\scriptsize}{\scriptstyle}
\newcommand{\scriptscriptsize}{\scriptscriptstyle}
\newcommand{\mathfr}{\mathfrak}
\newcommand{\statusline}[2]{#2}
\newcommand{\tooltip}[2]{#2}
\newcommand{\toggle}[2]{#2}

% Theorem Environments
\theoremstyle{plain}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}{Lemma}
\newtheorem{prop}{Proposition}
\newtheorem{cor}{Corollary}
\newtheorem*{utheorem}{Theorem}
\newtheorem*{ulemma}{Lemma}
\newtheorem*{uprop}{Proposition}
\newtheorem*{ucor}{Corollary}
\theoremstyle{definition}
\newtheorem{defn}{Definition}
\newtheorem{example}{Example}
\newtheorem*{udefn}{Definition}
\newtheorem*{uexample}{Example}
\theoremstyle{remark}
\newtheorem{remark}{Remark}
\newtheorem{note}{Note}
\newtheorem*{uremark}{Remark}
\newtheorem*{unote}{Note}

%-------------------------------------------------------------------
\begin{document}
%-------------------------------------------------------------------

\section*{statistical significance}

\hypertarget{context}{}\subsubsection*{{Context}}\label{context}

\hypertarget{measure_and_probability_theory}{}\paragraph*{{Measure and probability
theory}}\label{measure_and_probability_theory}

[[!include measure theory - contents]]

\hypertarget{contents}{}\section*{{Contents}}\label{contents}

\noindent\hyperlink{idea}{Idea}\dotfill \pageref*{idea} \linebreak
\noindent\hyperlink{origin_of_significance_threshold}{Origin of significance threshold}\dotfill \pageref*{origin_of_significance_threshold} \linebreak
\noindent\hyperlink{ParticlePhysics}{In particle physics}\dotfill \pageref*{ParticlePhysics} \linebreak
\noindent\hyperlink{in_other_sciences}{In other sciences}\dotfill \pageref*{in_other_sciences} \linebreak
\noindent\hyperlink{PossibleMisuse}{Possible misuse}\dotfill \pageref*{PossibleMisuse} \linebreak
\noindent\hyperlink{Bayes}{Frequentist versus Bayesian statistics}\dotfill \pageref*{Bayes} \linebreak
\noindent\hyperlink{references}{References}\dotfill \pageref*{references} \linebreak

\hypertarget{idea}{}\subsection*{{Idea}}\label{idea}

The \emph{statistical significance} of a [[measurement]] outcome $x$ of some [[variable]] $X$ is a measure of how unlikely it is that this outcome would be observed if $X$ were a [[random variable]] under given default assumptions, the latter often called the ``standard model'' or the ``[[null hypothesis]]'' and denoted $H_0$.

Hence

\begin{enumerate}%
\item a low statistical significance of the observed value $x$ \emph{suggests} that the default assumptions, hence the ``standard model'' or the ``null hypothesis'', are correct, whence $x$ is then also called a \emph{[[null result]]};

\item a high statistical significance of the observed value $x$ \emph{suggests} that the default assumptions, hence the ``standard model'' or the ``null hypothesis'', might be inaccurate and hence ``to be rejected'', or, rather, to be replaced by an improved assumption/model/hypothesis $H_1$ under which the observation $x$ becomes implied with higher probability.
\end{enumerate}

More precisely, in general statistical significance is given as the [[probability]] $P(X \geq x\vert H_0)$ that a [[random variable]] $X$ with values in the [[real numbers]] takes a value in the [[interval]] $w$ of values equal to or greater than the observed value $x$. This probability is known as the \emph{[[p-value]]}. The lower this probability, the higher the statistical significance of observing $x$.

\begin{quote}%
graphics grabbed from \hyperlink{Sinervo02}{Sinervo 02}
\end{quote}

In the special, but, by the [[central limit theorem]], generic situation that the [[probability distribution]] $p(X\vert H_0)$ of $X$ under hypothesis $H_0$ is a [[normal distribution]], it follows that the [[probability]] $P(X \geq x\vert H_0)$ is a [[monotone decreasing function]] of $x$, so that the value of $x$ itself is a measure of its statistical significance -- the larger $x$, the larger its statistical significance, in this case. It is natural then to express the magnitude $x \in \mathbb{R}$ in [[physical unit|units]] of [[standard deviations]] $\sigma$ of the given [[normal distribution]] $p(X\vert H_0)$.

This expression of statistical significances in terms of [[standard deviations]] is common in [[particle physics]] (e.g. \hyperlink{Sinervo02}{Sinervo 02}), see \hyperlink{ParticlePhysics}{below}.

As with much of [[statistics]], the concept of statistical significance is mostly used at the interface between [[theory (physics)|theory]] and [[experiment]], hence is part of the process of \emph{[[coordination]]}, and as such subject to subtleties of real world activity that are not manifest in its clean mathematical definition. Often policy making and/or financial decisions depend on estimating and interpreting statistical significances. This has led to some lively debate about their use and misuse, see \hyperlink{PossibleMisuse}{below} for more.
As a general rule of life, for a mathematical result to work well in applications, you need to understand what it says and what it does not say.

There has also been considerable debate between [[Bayesianism|Bayesian]] and frequentist statisticians as to cogency of the use of statistical significance, see \hyperlink{Bayes}{below}.

\hypertarget{origin_of_significance_threshold}{}\subsection*{{Origin of significance threshold}}\label{origin_of_significance_threshold}

The threshold $p\lt 0.05$ seems to date back to (\hyperlink{Fisher26}{Fisher 1926})

\begin{quote}%
… it is convenient to draw the line at about the level at which we can say: “Either there is something in the treatment, or a coincidence has occurred such as does not occur more than once in twenty trials.”… If one in twenty does not seem high enough odds, we may, if we prefer it, draw the line at one in fifty (the 2 per cent point), or one in a hundred (the 1 per cent point). Personally, the writer prefers to set a low standard of significance at the 5 per cent point, and ignore entirely all results which fail to reach this level. A scientific fact should be regarded as experimentally established only if a properly designed experiment rarely fails to give this level of significance.
\end{quote}

This last sentence—implicitly about experimental replication—is often conveniently forgotten by researchers with only a rough working knowledge of statistics, whereby a \emph{single} $p$-value under $0.05$ is seen as a discovery, rather than merely evidence.

\hypertarget{ParticlePhysics}{}\subsection*{{In particle physics}}\label{ParticlePhysics}

In [[particle physics]], it has become customary to require statistical significance levels of $5 \sigma$ (5 [[standard deviations]]) in order to claim that a given observation is a real effect (e.g. \hyperlink{Sinervo02}{Sinervo 02, Section 5.3}, \hyperlink{Dorigo15}{Dorigo 15, Section 3}).
This corresponds to a probability ([[p-value]]) of about
\begin{displaymath}
  5 \sigma
  \;\leftrightarrow\;
  p \;=\; 2.8 \cdot 10^{-7}
  \;=\; 0.00000028
\end{displaymath}
that the observation is a random fluctuation under the null hypothesis. Notice that this is rather more stringent than the p-value of $p = 0.05$, corresponding to $\lt 2 \sigma$, which has been much used and much criticized in other areas of science, see \hyperlink{PossibleMisuse}{below}.

Of course the $5 \sigma$-criterion for detection is a convention like any other. It has been argued in \hyperlink{Lyons13b}{Lyons 13b}, \hyperlink{Dorigo15}{Dorigo 15} that different detection-thresholds should be used for different experiments. These suggestions are subjective and not generally agreed on either:

% (table graphic omitted in this rendering)
\begin{quote}%
table taken from \hyperlink{Lyons13b}{Lyons 13b, p. 4}
\end{quote}

% (table graphic omitted in this rendering)
\begin{quote}%
table taken from \hyperlink{Dorigo15}{Dorigo 15, p. 16}
\end{quote}

Regarding effects of current interest, notice that

\begin{enumerate}%
\item the significance $4 \sigma$ for anomalies in the [[anomalous magnetic moment]] $(g_\mu-2)$ of the [[muon]] required by \hyperlink{Lyons13b}{Lyons 13b, p. 4} has been reached by summer 2018, see \href{anomalous+magnetic+moment#Anomalies}{there};

\item the significance $3 \sigma$ for [[flavour anomalies]] in [[B meson]] decays required both by \hyperlink{Lyons13b}{Lyons 13b, p. 4} and by \hyperlink{Dorigo15}{Dorigo 15, p. 16} has also been much exceeded by summer 2018, see \href{https://ncatlab.org/nlab/show/flavour+anomaly#Idea}{there}.
\end{enumerate}

\hypertarget{in_other_sciences}{}\subsection*{{In other sciences}}\label{in_other_sciences}

In pharmacology, for instance, hypothesis testing at some significance level is used in drug trials.
Where those taking drug $A$ fare on average better than those on drug $B$, one might calculate the likelihood that such an effect would be found by chance given that the two drugs were in fact equally effective.

\hypertarget{PossibleMisuse}{}\subsection*{{Possible misuse}}\label{PossibleMisuse}

There have been a number of criticisms of the uses to which $p$-values have been put in scientific practice (e.g., \hyperlink{ZilMcCl08}{ZilMcCl 08}, \hyperlink{GSRCPGA16}{GSRCPGA 16}). The American Statistical Association has published a statement on $p$-values (\hyperlink{WassLaz16}{WassLaz 16}), claiming that

\begin{quote}%
the widespread use of `statistical significance' (generally interpreted as `$p \leq 0.05$') as a license for making a claim of a scientific finding (or implied truth) leads to considerable distortion of the scientific process.
\end{quote}

\hypertarget{Bayes}{}\subsection*{{Frequentist versus Bayesian statistics}}\label{Bayes}

On a more fundamental level, [[Bayesianism|Bayesian]] statisticians have taken issue with the frequentist practice of hypothesis testing (see, e.g., \hyperlink{Jaynes03}{Jaynes 03, sec 9.1.1}, \hyperlink{DAgostini03}{D'Agostini 03}), arguing that the value that should be sought is the probability of a proposed hypothesis conditional on the data, rather than the probability of the data under some null hypothesis. The frequentist concept of probability cannot allow for such a thing as a hypothesis having a probability.
A useful comparison of the frequentist and Bayesian approaches as employed in particle physics, and a call for their reconciliation, is in (\hyperlink{Lyons13a}{Lyons 13a}):

\begin{quote}%
for physics analyses at the CERN’s LHC, the aim is, at least for determining parameters and setting upper limits in searches for various new phenomena, to use both approaches; similar answers would strengthen confidence in the results, while differences suggest the need to understand them in terms of the somewhat different questions that the two approaches are asking. It thus seems that the old war between the two methodologies is subsiding, and that they can hopefully live together in fruitful cooperation.
\end{quote}

\hypertarget{references}{}\subsection*{{References}}\label{references}

One early discussion is in

\begin{itemize}%
\item RA Fisher, \emph{The Arrangement of Field Experiments}, Journal of the Ministry of Agriculture of Great Britain \textbf{33} (1926) pp 503–513.
\end{itemize}

Review includes

\begin{itemize}%
\item Pekka K. Sinervo, \emph{Signal Significance in Particle Physics}, in Proceedings of \emph{Advanced Statistical Techniques in Particle Physics} Durham, UK, March 18-22, 2002 (\href{https://arxiv.org/abs/hep-ex/0208005}{arXiv:hep-ex/0208005}, \href{http://inspirehep.net/record/601052}{spire:601052})

\item [[Edwin Jaynes]], \emph{Probability theory: The logic of science}, Cambridge University Press, 2003.
\item [[Louis Lyons]], \emph{Bayes and Frequentism: a Particle Physicist's perspective}, Journal of Contemporary Physics Volume 54, 2013 - Issue 1 (\href{https://arxiv.org/abs/1301.1273}{arXiv:1301.1273}, \href{https://doi.org/10.1080/00107514.2012.756312}{doi:10.1080/00107514.2012.756312})

\item [[Louis Lyons]], \emph{Discovering the Significance of 5 sigma} (\href{https://arxiv.org/abs/1310.1284}{arXiv:1310.1284})

\item [[Tommaso Dorigo]], \emph{Extraordinary claims: the $0.000029\%$ solution}, EPJ Web of Conferences 95, 02003 (2015) (\href{https://doi.org/10.1051/epjconf/20159502003}{doi:10.1051/epjconf/20159502003})

\item Roger John Barlow, \emph{Practical Statistics for Particle Physics} (\href{https://arxiv.org/abs/1905.12362}{arXiv:1905.12362})
\end{itemize}

See also

\begin{itemize}%
\item Wikipedia, \emph{\href{https://en.wikipedia.org/wiki/Statistical_significance}{Statistical significance}}

\item Leland Wilkinson and Task Force on Statistical Inference, Statistical methods in psychology journals: guidelines and explanations, American Psychologist 54 (1999), 594--604, \href{http://www.apa.org/pubs/journals/releases/amp-54-8-594.pdf}{pdf}
\end{itemize}

Cautionary remarks on misuse of the concept include the following

\begin{itemize}%
\item Stephen Ziliak and Deirdre McCloskey, \emph{The Cult of Statistical Significance}, Michigan University Press, 2008, \href{https://www.press.umich.edu/186351/cult_of_statistical_significance}{book}.

\item Giulio D'Agostini, \emph{Bayesian reasoning in data analysis: A critical introduction}, World Scientific Publishing, 2003.

\item $n$Cafe: \href{http://golem.ph.utexas.edu/category/2010/09/fetishizing_pvalues.html}{Fetishizing p-Values} by Tom Leinster

\item Greenland, Senn, Rothman, Carlin, Poole, Goodman, Altman, \emph{Statistical tests, P values, confidence intervals, and power: a guide to misinterpretations}, Eur J Epidemiol. 2016 Apr;31(4):337-50.
(\href{https://doi.org/10.1007/s10654-016-0149-3}{doi:10.1007/s10654-016-0149-3})

\item Ronald Wasserstein, Nicole Lazar, \emph{The ASA's Statement on $p$-Values: Context, Process, and Purpose}, The American Statistician 70(2), 2016, pp. 129-133 (\href{https://doi.org/10.1080/00031305.2016.1154108}{doi:10.1080/00031305.2016.1154108})
\end{itemize}

[[!redirects statistical significances]]

[[!redirects p-value]]

[[!redirects p-values]]

\end{document}