\documentclass[12pt,titlepage]{article}
\usepackage{amsmath}
\usepackage{mathrsfs}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{mathtools}
\usepackage{graphicx}
\usepackage{color}
\usepackage{ucs}
\usepackage[utf8x]{inputenc}
\usepackage{xparse}
\usepackage{hyperref}

%----Macros----------
%
% Unresolved issues:
%
%  \righttoleftarrow
%  \lefttorightarrow
%
%  \color{} with HTML colorspec
%  \bgcolor
%  \array with options (without options, it's equivalent to the matrix environment)

% Of the standard HTML named colors, white, black, red, green, blue and yellow
% are predefined in the color package. Here are the rest.
\definecolor{aqua}{rgb}{0, 1.0, 1.0}
\definecolor{fuschia}{rgb}{1.0, 0, 1.0}
\definecolor{gray}{rgb}{0.502, 0.502, 0.502}
\definecolor{lime}{rgb}{0, 1.0, 0}
\definecolor{maroon}{rgb}{0.502, 0, 0}
\definecolor{navy}{rgb}{0, 0, 0.502}
\definecolor{olive}{rgb}{0.502, 0.502, 0}
\definecolor{purple}{rgb}{0.502, 0, 0.502}
\definecolor{silver}{rgb}{0.753, 0.753, 0.753}
\definecolor{teal}{rgb}{0, 0.502, 0.502}

% Because of conflicts, \space and \mathop are converted to
% \itexspace and \operatorname during preprocessing.

% itex: \space{ht}{dp}{wd}
%
% Height and baseline depth measurements are in units of tenths of an ex while
% the width is measured in tenths of an em.
\makeatletter
\newdimen\itex@wd%
\newdimen\itex@dp%
\newdimen\itex@thd%
\def\itexspace#1#2#3{\itex@wd=#3em%
\itex@wd=0.1\itex@wd%
\itex@dp=#2ex%
\itex@dp=0.1\itex@dp%
\itex@thd=#1ex%
\itex@thd=0.1\itex@thd%
\advance\itex@thd\the\itex@dp%
\makebox[\the\itex@wd]{\rule[-\the\itex@dp]{0cm}{\the\itex@thd}}}
\makeatother

% \tensor and \multiscript
\makeatletter
\newif\if@sup
\newtoks\@sups
\def\append@sup#1{\edef\act{\noexpand\@sups={\the\@sups #1}}\act}%
\def\reset@sup{\@supfalse\@sups={}}%
\def\mk@scripts#1#2{\if #2/ \if@sup ^{\the\@sups}\fi \else%
\ifx #1_ \if@sup ^{\the\@sups}\reset@sup \fi {}_{#2}%
\else \append@sup#2 \@suptrue \fi%
\expandafter\mk@scripts\fi}
\def\tensor#1#2{\reset@sup#1\mk@scripts#2_/}
\def\multiscripts#1#2#3{\reset@sup{}\mk@scripts#1_/#2%
\reset@sup\mk@scripts#3_/}
\makeatother

% \slash
\makeatletter
\newbox\slashbox \setbox\slashbox=\hbox{$/$}
\def\itex@pslash#1{\setbox\@tempboxa=\hbox{$#1$}
\@tempdima=0.5\wd\slashbox \advance\@tempdima 0.5\wd\@tempboxa
\copy\slashbox \kern-\@tempdima \box\@tempboxa}
\def\slash{\protect\itex@pslash}
\makeatother

% math-mode versions of \rlap, etc
% from Alexander Perlis, "A complement to \smash, \llap, and lap"
% http://math.arizona.edu/~aprl/publications/mathclap/
\def\clap#1{\hbox to 0pt{\hss#1\hss}}
\def\mathllap{\mathpalette\mathllapinternal}
\def\mathrlap{\mathpalette\mathrlapinternal}
\def\mathclap{\mathpalette\mathclapinternal}
\def\mathllapinternal#1#2{\llap{$\mathsurround=0pt#1{#2}$}}
\def\mathrlapinternal#1#2{\rlap{$\mathsurround=0pt#1{#2}$}}
\def\mathclapinternal#1#2{\clap{$\mathsurround=0pt#1{#2}$}}

% Renames \sqrt as \oldsqrt and redefine root to result in \sqrt[#1]{#2}
\let\oldroot\root
\def\root#1#2{\oldroot #1 \of{#2}}
\renewcommand{\sqrt}[2][]{\oldroot #1 \of{#2}}

% Manually declare the txfonts symbolsC font
\DeclareSymbolFont{symbolsC}{U}{txsyc}{m}{n}
\SetSymbolFont{symbolsC}{bold}{U}{txsyc}{bx}{n}
\DeclareFontSubstitution{U}{txsyc}{m}{n}

% Manually declare the stmaryrd font
\DeclareSymbolFont{stmry}{U}{stmry}{m}{n}
\SetSymbolFont{stmry}{bold}{U}{stmry}{b}{n}

% Manually declare the MnSymbolE font
\DeclareFontFamily{OMX}{MnSymbolE}{}
\DeclareSymbolFont{mnomx}{OMX}{MnSymbolE}{m}{n}
\SetSymbolFont{mnomx}{bold}{OMX}{MnSymbolE}{b}{n}
\DeclareFontShape{OMX}{MnSymbolE}{m}{n}{
  <-6> MnSymbolE5 <6-7> MnSymbolE6 <7-8> MnSymbolE7 <8-9> MnSymbolE8
  <9-10> MnSymbolE9 <10-12> MnSymbolE10 <12-> MnSymbolE12}{}

% Declare specific arrows from txfonts without loading the full package
\makeatletter
\def\re@DeclareMathSymbol#1#2#3#4{%
\let#1=\undefined
\DeclareMathSymbol{#1}{#2}{#3}{#4}}
\re@DeclareMathSymbol{\neArrow}{\mathrel}{symbolsC}{116}
\re@DeclareMathSymbol{\neArr}{\mathrel}{symbolsC}{116}
\re@DeclareMathSymbol{\seArrow}{\mathrel}{symbolsC}{117}
\re@DeclareMathSymbol{\seArr}{\mathrel}{symbolsC}{117}
\re@DeclareMathSymbol{\nwArrow}{\mathrel}{symbolsC}{118}
\re@DeclareMathSymbol{\nwArr}{\mathrel}{symbolsC}{118}
\re@DeclareMathSymbol{\swArrow}{\mathrel}{symbolsC}{119}
\re@DeclareMathSymbol{\swArr}{\mathrel}{symbolsC}{119}
\re@DeclareMathSymbol{\nequiv}{\mathrel}{symbolsC}{46}
\re@DeclareMathSymbol{\Perp}{\mathrel}{symbolsC}{121}
\re@DeclareMathSymbol{\Vbar}{\mathrel}{symbolsC}{121}
\re@DeclareMathSymbol{\sslash}{\mathrel}{stmry}{12}
\re@DeclareMathSymbol{\bigsqcap}{\mathop}{stmry}{"64}
\re@DeclareMathSymbol{\biginterleave}{\mathop}{stmry}{"6}
\re@DeclareMathSymbol{\invamp}{\mathrel}{symbolsC}{77}
\re@DeclareMathSymbol{\parr}{\mathrel}{symbolsC}{77}
\makeatother

% \llangle, \rrangle, \lmoustache and \rmoustache from MnSymbolE
\makeatletter
\def\Decl@Mn@Delim#1#2#3#4{%
\if\relax\noexpand#1%
\let#1\undefined
\fi
\DeclareMathDelimiter{#1}{#2}{#3}{#4}{#3}{#4}}
\def\Decl@Mn@Open#1#2#3{\Decl@Mn@Delim{#1}{\mathopen}{#2}{#3}}
\def\Decl@Mn@Close#1#2#3{\Decl@Mn@Delim{#1}{\mathclose}{#2}{#3}}
\Decl@Mn@Open{\llangle}{mnomx}{'164}
\Decl@Mn@Close{\rrangle}{mnomx}{'171}
\Decl@Mn@Open{\lmoustache}{mnomx}{'245}
\Decl@Mn@Close{\rmoustache}{mnomx}{'244}
\makeatother

% Widecheck
\makeatletter
\DeclareRobustCommand\widecheck[1]{{\mathpalette\@widecheck{#1}}}
\def\@widecheck#1#2{%
\setbox\z@\hbox{\m@th$#1#2$}%
\setbox\tw@\hbox{\m@th$#1%
\widehat{%
\vrule\@width\z@\@height\ht\z@
\vrule\@height\z@\@width\wd\z@}$}%
\dp\tw@-\ht\z@
\@tempdima\ht\z@ \advance\@tempdima2\ht\tw@ \divide\@tempdima\thr@@
\setbox\tw@\hbox{%
\raise\@tempdima\hbox{\scalebox{1}[-1]{\lower\@tempdima\box \tw@}}}%
{\ooalign{\box\tw@ \cr \box\z@}}}
\makeatother

% \mathraisebox{voffset}[height][depth]{something}
\makeatletter
\NewDocumentCommand\mathraisebox{moom}{%
\IfNoValueTF{#2}{\def\@temp##1##2{\raisebox{#1}{$\m@th##1##2$}}}{%
\IfNoValueTF{#3}{\def\@temp##1##2{\raisebox{#1}[#2]{$\m@th##1##2$}}%
}{\def\@temp##1##2{\raisebox{#1}[#2][#3]{$\m@th##1##2$}}}}%
\mathpalette\@temp{#4}}
\makeatletter

% udots (taken from yhmath)
\makeatletter
\def\udots{\mathinner{\mkern2mu\raise\p@\hbox{.}
\mkern2mu\raise4\p@\hbox{.}\mkern1mu
\raise7\p@\vbox{\kern7\p@\hbox{.}}\mkern1mu}}
\makeatother

%% Fix array
\newcommand{\itexarray}[1]{\begin{matrix}#1\end{matrix}}
%% \itexnum is a noop
\newcommand{\itexnum}[1]{#1}

%% Renaming existing commands
\newcommand{\underoverset}[3]{\underset{#1}{\overset{#2}{#3}}}
\newcommand{\widevec}{\overrightarrow}
\newcommand{\darr}{\downarrow}
\newcommand{\nearr}{\nearrow}
\newcommand{\nwarr}{\nwarrow}
\newcommand{\searr}{\searrow}
\newcommand{\swarr}{\swarrow}
\newcommand{\curvearrowbotright}{\curvearrowright}
\newcommand{\uparr}{\uparrow}
\newcommand{\downuparrow}{\updownarrow}
\newcommand{\duparr}{\updownarrow}
\newcommand{\updarr}{\updownarrow}
\newcommand{\gt}{>}
\newcommand{\lt}{<}
\newcommand{\map}{\mapsto}
\newcommand{\embedsin}{\hookrightarrow}
\newcommand{\Alpha}{A}
\newcommand{\Beta}{B}
\newcommand{\Zeta}{Z}
\newcommand{\Eta}{H}
\newcommand{\Iota}{I}
\newcommand{\Kappa}{K}
\newcommand{\Mu}{M}
\newcommand{\Nu}{N}
\newcommand{\Rho}{P}
\newcommand{\Tau}{T}
\newcommand{\Upsi}{\Upsilon}
\newcommand{\omicron}{o}
\newcommand{\lang}{\langle}
\newcommand{\rang}{\rangle}
\newcommand{\Union}{\bigcup}
\newcommand{\Intersection}{\bigcap}
\newcommand{\Oplus}{\bigoplus}
\newcommand{\Otimes}{\bigotimes}
\newcommand{\Wedge}{\bigwedge}
\newcommand{\Vee}{\bigvee}
\newcommand{\coproduct}{\coprod}
\newcommand{\product}{\prod}
\newcommand{\closure}{\overline}
\newcommand{\integral}{\int}
\newcommand{\doubleintegral}{\iint}
\newcommand{\tripleintegral}{\iiint}
\newcommand{\quadrupleintegral}{\iiiint}
\newcommand{\conint}{\oint}
\newcommand{\contourintegral}{\oint}
\newcommand{\infinity}{\infty}
\newcommand{\bottom}{\bot}
\newcommand{\minusb}{\boxminus}
\newcommand{\plusb}{\boxplus}
\newcommand{\timesb}{\boxtimes}
\newcommand{\intersection}{\cap}
\newcommand{\union}{\cup}
\newcommand{\Del}{\nabla}
\newcommand{\odash}{\circleddash}
\newcommand{\negspace}{\!}
\newcommand{\widebar}{\overline}
\newcommand{\textsize}{\normalsize}
\renewcommand{\scriptsize}{\scriptstyle}
\newcommand{\scriptscriptsize}{\scriptscriptstyle}
\newcommand{\mathfr}{\mathfrak}
\newcommand{\statusline}[2]{#2}
\newcommand{\tooltip}[2]{#2}
\newcommand{\toggle}[2]{#2}

% Theorem Environments
\theoremstyle{plain}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}{Lemma}
\newtheorem{prop}{Proposition}
\newtheorem{cor}{Corollary}
\newtheorem*{utheorem}{Theorem}
\newtheorem*{ulemma}{Lemma}
\newtheorem*{uprop}{Proposition}
\newtheorem*{ucor}{Corollary}
\theoremstyle{definition}
\newtheorem{defn}{Definition}
\newtheorem{example}{Example}
\newtheorem*{udefn}{Definition}
\newtheorem*{uexample}{Example}
\theoremstyle{remark}
\newtheorem{remark}{Remark}
\newtheorem{note}{Note}
\newtheorem*{uremark}{Remark}
\newtheorem*{unote}{Note}

%-------------------------------------------------------------------
\begin{document}
%-------------------------------------------------------------------

\section*{cut rule}

\hypertarget{context}{}\subsubsection*{{Context}}\label{context}

\hypertarget{deduction_and_induction}{}\paragraph*{{Deduction and Induction}}\label{deduction_and_induction}

[[!include deduction and induction - contents]]

\hypertarget{type_theory}{}\paragraph*{{Type theory}}\label{type_theory}

[[!include type theory - contents]]

\hypertarget{foundations}{}\paragraph*{{Foundations}}\label{foundations}

[[!include foundations - contents]]

\hypertarget{contents}{}\section*{{Contents}}\label{contents}

\noindent\hyperlink{idea}{Idea}\dotfill \pageref*{idea} \linebreak
\noindent\hyperlink{cut_elimination}{Cut elimination}\dotfill \pageref*{cut_elimination} \linebreak
\noindent\hyperlink{connection_to_identities}{Connection to identities}\dotfill \pageref*{connection_to_identities} \linebreak
\noindent\hyperlink{examples_of_elimination_steps}{Examples of elimination steps}\dotfill \pageref*{examples_of_elimination_steps} \linebreak
\noindent\hyperlink{alternative_forms_of_the_cut_rule}{Alternative forms of the cut rule}\dotfill \pageref*{alternative_forms_of_the_cut_rule} \linebreak
\noindent\hyperlink{references}{References}\dotfill \pageref*{references} \linebreak

\hypertarget{idea}{}\subsection*{{Idea}}\label{idea}

The \emph{cut rule} in [[sequent calculus]] ([[formal logic]]) is the rule that from [[sequents]] of the form
\begin{displaymath}
\Gamma \vdash A , \Delta
\end{displaymath}
and
\begin{displaymath}
\Pi, A \vdash \Lambda
\end{displaymath}
the new sequent
\begin{displaymath}
\Pi, \Gamma \vdash \Lambda, \Delta
\end{displaymath}
may be [[deduction|deduced]]. This is often written in the form
\begin{displaymath}
\frac{\Gamma \vdash A, \Delta \; \; \; \Pi, A \vdash \Lambda}{\Pi, \Gamma \vdash \Lambda, \Delta} \; cut.
\end{displaymath}
In the [[categorical semantics]] where each [[sequent]] here is interpreted as a [[morphism]] in a [[category]], the cut rule asserts the existence of [[composition]] of morphisms.

\hypertarget{cut_elimination}{}\subsection*{{Cut elimination}}\label{cut_elimination}

\begin{quote}%
``A logic without cut elimination is like a car without an engine'' -- Jean-Yves Girard (in \href{http://iml.univ-mrs.fr/~girard/Synsem.pdf.gz}{Linear logic})
\end{quote}

The \emph{cut-elimination theorem} (``[[Gerhard Gentzen]]'s [[Gentzen's Hauptsatz|Hauptsatz]]'') asserts that every [[judgement]] which has a [[proof]] using the cut rule also has a proof not using it (a ``cut-free proof''). While Gentzen's original theorem was for the particular sequent calculi he was considering, the analogous result holds for many other sequent calculi and is generally seen as desirable. (That said, there are some useful sequent calculi in which it fails.)

Intuitively, the problem in deciding whether a formula $B$ follows from a formula $A$, i.e., in deriving $A \vdash B$, is that there could be very complicated intermediate steps: in typical mathematical arguments one puts together steps $A \vdash C$ and $C \vdash B$, where $C$ is potentially a complicated or large formula. For an automated theorem prover, the search space for such $C$ is potentially infinite. By establishing a cut-elimination theorem for a formal system, one circumvents this problem: it is quite typical that cut-free proofs build up complex sequents from less complex sequents (cf. [[subformula property]]), so that one can decide whether a sequent is derivable by following an inductive procedure.

Cut-elimination is also a key step in deciding whether two proofs of a sequent are the ``same'' in some suitable sense. In [[type theory]], for instance, the issue is not merely whether $A \vdash B$ is provable or whether the function type $A \multimap B$ is inhabited (has a proof or a term witnessing that fact), but also the nature of the space of such proofs. Since any proof has a trivial cut-free formulation in a system where all provable sequents of the original system are simply postulated as axioms, a cut-elimination result worthy of the name will not merely replace a proof with one which is cut-free, but with a cut-free proof which is \emph{equivalent} to the original. This idea is used for instance in proving [[coherence theorems]].

Cut-elimination may also be used to give an independent proof-theoretic motivation for the definition of a category and for other basic category-theoretic notions, e.g.\ [[adjunction]] (see \hyperlink{Dos99}{Do\v{s}en 99}).
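To make the decidability point concrete, here is a minimal programmatic sketch of backward, cut-free proof search in the purely conjunctive fragment (written in Haskell; the names \texttt{Formula}, \texttt{provable} and \texttt{picks} are ad hoc choices for illustration, not taken from any existing library). Because the premises of each rule involve only subformulas of its conclusion, the recursion terminates and derivability is decidable for this fragment.

\begin{verbatim}
-- Illustrative sketch only (all names are ad hoc): backward, cut-free
-- proof search for two-sided sequents in the fragment with atoms and
-- conjunction. Each backward rule application uses only subformulas of
-- its conclusion, so the recursion terminates.
data Formula = Var String | Formula :/\: Formula
  deriving (Eq, Show)

-- provable gamma delta decides the two-sided sequent  gamma |- delta
-- (for this fragment).
-- For example: provable [Var "p" :/\: Var "q"] [Var "q" :/\: Var "p"] == True
provable :: [Formula] -> [Formula] -> Bool
provable gamma delta =
     -- identity axiom on any formula shared by both sides
     any (`elem` delta) gamma
     -- /\R: split a conjunction on the right into two premises
  || or [ provable gamma (a : rest) && provable gamma (b : rest)
        | (a :/\: b, rest) <- picks delta ]
     -- /\L: replace a conjunction on the left by its two conjuncts
  || or [ provable (a : b : rest) delta
        | (a :/\: b, rest) <- picks gamma ]

-- All ways of selecting one formula from a list, returning the rest.
picks :: [a] -> [(a, [a])]
picks []     = []
picks (x:xs) = (x, xs) : [ (y, x:ys) | (y, ys) <- picks xs ]
\end{verbatim}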
\hypertarget{connection_to_identities}{}\subsection*{{Connection to identities}}\label{connection_to_identities}

In the analogy between composition and the cut rule, the analogue of [[identity morphisms]] (or nullary compositions) is the identity rule
\begin{displaymath}
A \vdash A
\end{displaymath}

Typically, a cut-elimination algorithm goes hand-in-hand with an algorithm which eliminates the identity rule, or rather which pushes back identities as far as possible, down to identities for basic propositional variables (so for example, $p \wedge q \vdash p \wedge q$ may be proved using $p \vdash p$ and $q \vdash q$, in addition to the rules for $\wedge$, but $p \vdash p$ itself must be adopted as an axiom). In fact, there is a sense in which elimination of cuts is seen as \emph{dual} to elimination of identities, analogous to the sense in which [[beta reduction]] is seen as dual to [[eta expansion]]. Very typically, a normalization scheme on terms first applies eta expansions as far as they will go, and then applies beta reductions as far as they will go, so as at last to reach a normal form. The same goes for rewrite systems on sequent deductions, which first eliminate identities, then eliminate cuts.

\hypertarget{examples_of_elimination_steps}{}\subsection*{{Examples of elimination steps}}\label{examples_of_elimination_steps}

The conversion
\begin{displaymath}
\frac{\displaystyle \frac{\Gamma, A \vdash B, \Delta}{\Gamma \vdash A \multimap B,\Delta} \;\;\; \frac{\Pi_1 \vdash A,\Lambda_1 \;\;\; \Pi_2,B \vdash \Lambda_2}{\Pi_2,\Pi_1, A\multimap B \vdash \Lambda_2,\Lambda_1}}{\Pi_2,\Pi_1, \Gamma \vdash \Lambda_2,\Lambda_1, \Delta} \quad\to\quad \frac{\displaystyle \frac{\Gamma, A \vdash B, \Delta \;\;\; \Pi_1 \vdash A,\Lambda_1}{\Pi_1,\Gamma \vdash \Lambda_1,B,\Delta} \;\;\; \Pi_2,B \vdash \Lambda_2}{\Pi_2,\Pi_1, \Gamma \vdash \Lambda_2, \Lambda_1,\Delta}
\end{displaymath}
replaces a single cut on the formula $A \multimap B$ with a pair of cuts on the formulas $A$ and $B$, in the process eliminating the use of the logical rules ${\multimap}R$ and ${\multimap}L$. Although this step replaces one cut by two, the cuts have in effect been pushed up the proof tree, to formulas of lower complexity. Cuts are finally eliminated when they have been pushed all the way up to identity axioms on propositional variables, by applying conversions of the type
\begin{displaymath}
\frac{\displaystyle \frac{}{x \vdash x}\; axiom \;\;\;\; \frac{}{x \vdash x}\; axiom}{x \vdash x} \; cut \quad\to\quad \frac{}{x \vdash x}\; axiom.
\end{displaymath}

Likewise, the conversion
\begin{displaymath}
A \wedge B \vdash A \wedge B \quad\to\quad \frac { \displaystyle \frac { A \vdash A } { A \wedge B \vdash A } \;\;\; \frac { B \vdash B } { A \wedge B \vdash B} } { A \wedge B \vdash A \wedge B }
\end{displaymath}
reconstructs the identity on $A \wedge B$ from identities on $A$ and on $B$, by first applying the ${\wedge}R$ rule followed by the two ${\wedge}L$ rules (reading the derivation on the right bottom-up).

(Compare these two conversions arising from cut- and identity-elimination to the lambda calculus conversions $(\lambda x.t_1)\, t_2 \to t_1[t_2/x]$ and $t \to \langle\pi_1 t, \pi_2 t\rangle$, i.e., a $\beta$ reduction and an $\eta$ expansion respectively.)
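These conversions can also be phrased as a small rewrite system on proof trees. The following sketch (again in Haskell; the names \texttt{Proof}, \texttt{reduceCut} and \texttt{expandId} are hypothetical, chosen only for illustration) restricts attention to the conjunction fragment: \texttt{reduceCut} performs the $\wedge$-analogue of the cut-pushing conversion displayed above together with the axiom conversion, while \texttt{expandId} rebuilds the identity on $A \wedge B$ from identities on $A$ and on $B$, exactly as in the last displayed derivation.

\begin{verbatim}
-- Illustrative sketch only (names are ad hoc): proof trees for the
-- conjunction fragment, with the two kinds of conversion discussed above.
data Formula = Var String | Formula :/\: Formula
  deriving (Eq, Show)

data Proof
  = Axiom Formula            -- A |- A
  | AndR Proof Proof         -- G |- A,D  and  G |- B,D  give  G |- A/\B, D
  | AndL1 Proof              -- G, A |- D  gives  G, A/\B |- D
  | AndL2 Proof              -- G, B |- D  gives  G, A/\B |- D
  | Cut Formula Proof Proof  -- cut on the named formula
  deriving Show

-- Cut-elimination steps: a principal cut on A/\B becomes a cut on one
-- conjunct (a strictly smaller cut formula); a cut against an axiom
-- disappears, leaving the other premise.
reduceCut :: Proof -> Proof
reduceCut (Cut (a :/\: _) (AndR pa _) (AndL1 q)) = Cut a pa q
reduceCut (Cut (_ :/\: b) (AndR _ pb) (AndL2 q)) = Cut b pb q
reduceCut (Cut _ (Axiom _) q)                    = q
reduceCut (Cut _ p (Axiom _))                    = p
reduceCut p = p  -- commutative cases omitted from this sketch

-- Identity expansion: the identity on A/\B is rebuilt from identities
-- on A and on B, as in the last displayed conversion.
expandId :: Formula -> Proof
expandId (a :/\: b) = AndR (AndL1 (expandId a)) (AndL2 (expandId b))
expandId v          = Axiom v
\end{verbatim}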
\hypertarget{alternative_forms_of_the_cut_rule}{}\subsection*{{Alternative forms of the cut rule}}\label{alternative_forms_of_the_cut_rule}

In [[linear logic]] (for instance), one sometimes sees sequents written in one-sided form:
\begin{displaymath}
\; \vdash \Gamma.
\end{displaymath}
Here the [[negation]] operator is used to mediate between classical two-sided sequents and one-sided sequents, according to a scheme where a sequent $\Gamma, A \vdash \Delta$ is associated with a sequent $\Gamma \vdash \Delta, \neg A$ (each being derivable from the other). Thus one can contemplate sequents where all formulae have been pushed to the right of the entailment symbol $\vdash$. For such one-sided sequents, say in multiplicative linear logic, the cut rule may be expressed in the form
\begin{displaymath}
\frac{\vdash \Gamma, \neg A \;\;\; \vdash \Delta, A}{\vdash \Gamma, \Delta} \; cut
\end{displaymath}
and this rule is `dual' to one which introduces an identity:
\begin{displaymath}
\frac{}{\vdash \neg A, A} \; identity.
\end{displaymath}

Categorically, the cut rule in this form corresponds to the arrow $\neg A \otimes A \to \bot$ that implements an evaluation, and the identity rule corresponds to an arrow $\top \to \neg A \parr A = A \multimap A$ that names an identity morphism. These two arrows are de Morgan dual to one another.

\hypertarget{references}{}\subsection*{{References}}\label{references}

\begin{itemize}%
\item Wikipedia, \emph{\href{http://en.wikipedia.org/wiki/Cut-elimination_theorem}{Cut-elimination theorem}}
\item \hypertarget{Dos99}{}Kosta Do\v{s}en, \emph{Cut Elimination in Categories}, Dordrecht: Springer Netherlands, 1999.
\end{itemize}

[[!redirects cut-elimination]]
[[!redirects cut elimination]]
[[!redirects cut-elimination theorem]]
[[!redirects cut elimination theorem]]
[[!redirects cut-admissibility]]
[[!redirects cut admissibility]]
[[!redirects Gentzen's Hauptsatz]]
[[!redirects Gentzen Hauptsatz]]

\end{document}