\documentclass[12pt,titlepage]{article} \usepackage{amsmath} \usepackage{mathrsfs} \usepackage{amsfonts} \usepackage{amssymb} \usepackage{amsthm} \usepackage{mathtools} \usepackage{graphicx} \usepackage{color} \usepackage{ucs} \usepackage[utf8x]{inputenc} \usepackage{xparse} \usepackage{hyperref} %----Macros---------- % % Unresolved issues: % % \righttoleftarrow % \lefttorightarrow % % \color{} with HTML colorspec % \bgcolor % \array with options (without options, it's equivalent to the matrix environment) % Of the standard HTML named colors, white, black, red, green, blue and yellow % are predefined in the color package. Here are the rest. \definecolor{aqua}{rgb}{0, 1.0, 1.0} \definecolor{fuschia}{rgb}{1.0, 0, 1.0} \definecolor{gray}{rgb}{0.502, 0.502, 0.502} \definecolor{lime}{rgb}{0, 1.0, 0} \definecolor{maroon}{rgb}{0.502, 0, 0} \definecolor{navy}{rgb}{0, 0, 0.502} \definecolor{olive}{rgb}{0.502, 0.502, 0} \definecolor{purple}{rgb}{0.502, 0, 0.502} \definecolor{silver}{rgb}{0.753, 0.753, 0.753} \definecolor{teal}{rgb}{0, 0.502, 0.502} % Because of conflicts, \space and \mathop are converted to % \itexspace and \operatorname during preprocessing. % itex: \space{ht}{dp}{wd} % % Height and baseline depth measurements are in units of tenths of an ex while % the width is measured in tenths of an em. \makeatletter \newdimen\itex@wd% \newdimen\itex@dp% \newdimen\itex@thd% \def\itexspace#1#2#3{\itex@wd=#3em% \itex@wd=0.1\itex@wd% \itex@dp=#2ex% \itex@dp=0.1\itex@dp% \itex@thd=#1ex% \itex@thd=0.1\itex@thd% \advance\itex@thd\the\itex@dp% \makebox[\the\itex@wd]{\rule[-\the\itex@dp]{0cm}{\the\itex@thd}}} \makeatother % \tensor and \multiscript \makeatletter \newif\if@sup \newtoks\@sups \def\append@sup#1{\edef\act{\noexpand\@sups={\the\@sups #1}}\act}% \def\reset@sup{\@supfalse\@sups={}}% \def\mk@scripts#1#2{\if #2/ \if@sup ^{\the\@sups}\fi \else% \ifx #1_ \if@sup ^{\the\@sups}\reset@sup \fi {}_{#2}% \else \append@sup#2 \@suptrue \fi% \expandafter\mk@scripts\fi} \def\tensor#1#2{\reset@sup#1\mk@scripts#2_/} \def\multiscripts#1#2#3{\reset@sup{}\mk@scripts#1_/#2% \reset@sup\mk@scripts#3_/} \makeatother % \slash \makeatletter \newbox\slashbox \setbox\slashbox=\hbox{$/$} \def\itex@pslash#1{\setbox\@tempboxa=\hbox{$#1$} \@tempdima=0.5\wd\slashbox \advance\@tempdima 0.5\wd\@tempboxa \copy\slashbox \kern-\@tempdima \box\@tempboxa} \def\slash{\protect\itex@pslash} \makeatother % math-mode versions of \rlap, etc % from Alexander Perlis, "A complement to \smash, \llap, and lap" % http://math.arizona.edu/~aprl/publications/mathclap/ \def\clap#1{\hbox to 0pt{\hss#1\hss}} \def\mathllap{\mathpalette\mathllapinternal} \def\mathrlap{\mathpalette\mathrlapinternal} \def\mathclap{\mathpalette\mathclapinternal} \def\mathllapinternal#1#2{\llap{$\mathsurround=0pt#1{#2}$}} \def\mathrlapinternal#1#2{\rlap{$\mathsurround=0pt#1{#2}$}} \def\mathclapinternal#1#2{\clap{$\mathsurround=0pt#1{#2}$}} % Renames \sqrt as \oldsqrt and redefine root to result in \sqrt[#1]{#2} \let\oldroot\root \def\root#1#2{\oldroot #1 \of{#2}} \renewcommand{\sqrt}[2][]{\oldroot #1 \of{#2}} % Manually declare the txfonts symbolsC font \DeclareSymbolFont{symbolsC}{U}{txsyc}{m}{n} \SetSymbolFont{symbolsC}{bold}{U}{txsyc}{bx}{n} \DeclareFontSubstitution{U}{txsyc}{m}{n} % Manually declare the stmaryrd font \DeclareSymbolFont{stmry}{U}{stmry}{m}{n} \SetSymbolFont{stmry}{bold}{U}{stmry}{b}{n} % Manually declare the MnSymbolE font \DeclareFontFamily{OMX}{MnSymbolE}{} \DeclareSymbolFont{mnomx}{OMX}{MnSymbolE}{m}{n} 
\SetSymbolFont{mnomx}{bold}{OMX}{MnSymbolE}{b}{n} \DeclareFontShape{OMX}{MnSymbolE}{m}{n}{ <-6> MnSymbolE5 <6-7> MnSymbolE6 <7-8> MnSymbolE7 <8-9> MnSymbolE8 <9-10> MnSymbolE9 <10-12> MnSymbolE10 <12-> MnSymbolE12}{} % Declare specific arrows from txfonts without loading the full package \makeatletter \def\re@DeclareMathSymbol#1#2#3#4{% \let#1=\undefined \DeclareMathSymbol{#1}{#2}{#3}{#4}} \re@DeclareMathSymbol{\neArrow}{\mathrel}{symbolsC}{116} \re@DeclareMathSymbol{\neArr}{\mathrel}{symbolsC}{116} \re@DeclareMathSymbol{\seArrow}{\mathrel}{symbolsC}{117} \re@DeclareMathSymbol{\seArr}{\mathrel}{symbolsC}{117} \re@DeclareMathSymbol{\nwArrow}{\mathrel}{symbolsC}{118} \re@DeclareMathSymbol{\nwArr}{\mathrel}{symbolsC}{118} \re@DeclareMathSymbol{\swArrow}{\mathrel}{symbolsC}{119} \re@DeclareMathSymbol{\swArr}{\mathrel}{symbolsC}{119} \re@DeclareMathSymbol{\nequiv}{\mathrel}{symbolsC}{46} \re@DeclareMathSymbol{\Perp}{\mathrel}{symbolsC}{121} \re@DeclareMathSymbol{\Vbar}{\mathrel}{symbolsC}{121} \re@DeclareMathSymbol{\sslash}{\mathrel}{stmry}{12} \re@DeclareMathSymbol{\bigsqcap}{\mathop}{stmry}{"64} \re@DeclareMathSymbol{\biginterleave}{\mathop}{stmry}{"6} \re@DeclareMathSymbol{\invamp}{\mathrel}{symbolsC}{77} \re@DeclareMathSymbol{\parr}{\mathrel}{symbolsC}{77} \makeatother % \llangle, \rrangle, \lmoustache and \rmoustache from MnSymbolE \makeatletter \def\Decl@Mn@Delim#1#2#3#4{% \if\relax\noexpand#1% \let#1\undefined \fi \DeclareMathDelimiter{#1}{#2}{#3}{#4}{#3}{#4}} \def\Decl@Mn@Open#1#2#3{\Decl@Mn@Delim{#1}{\mathopen}{#2}{#3}} \def\Decl@Mn@Close#1#2#3{\Decl@Mn@Delim{#1}{\mathclose}{#2}{#3}} \Decl@Mn@Open{\llangle}{mnomx}{'164} \Decl@Mn@Close{\rrangle}{mnomx}{'171} \Decl@Mn@Open{\lmoustache}{mnomx}{'245} \Decl@Mn@Close{\rmoustache}{mnomx}{'244} \makeatother % Widecheck \makeatletter \DeclareRobustCommand\widecheck[1]{{\mathpalette\@widecheck{#1}}} \def\@widecheck#1#2{% \setbox\z@\hbox{\m@th$#1#2$}% \setbox\tw@\hbox{\m@th$#1% \widehat{% \vrule\@width\z@\@height\ht\z@ \vrule\@height\z@\@width\wd\z@}$}% \dp\tw@-\ht\z@ \@tempdima\ht\z@ \advance\@tempdima2\ht\tw@ \divide\@tempdima\thr@@ \setbox\tw@\hbox{% \raise\@tempdima\hbox{\scalebox{1}[-1]{\lower\@tempdima\box \tw@}}}% {\ooalign{\box\tw@ \cr \box\z@}}} \makeatother % \mathraisebox{voffset}[height][depth]{something} \makeatletter \NewDocumentCommand\mathraisebox{moom}{% \IfNoValueTF{#2}{\def\@temp##1##2{\raisebox{#1}{$\m@th##1##2$}}}{% \IfNoValueTF{#3}{\def\@temp##1##2{\raisebox{#1}[#2]{$\m@th##1##2$}}% }{\def\@temp##1##2{\raisebox{#1}[#2][#3]{$\m@th##1##2$}}}}% \mathpalette\@temp{#4}} \makeatletter % udots (taken from yhmath) \makeatletter \def\udots{\mathinner{\mkern2mu\raise\p@\hbox{.} \mkern2mu\raise4\p@\hbox{.}\mkern1mu \raise7\p@\vbox{\kern7\p@\hbox{.}}\mkern1mu}} \makeatother %% Fix array \newcommand{\itexarray}[1]{\begin{matrix}#1\end{matrix}} %% \itexnum is a noop \newcommand{\itexnum}[1]{#1} %% Renaming existing commands \newcommand{\underoverset}[3]{\underset{#1}{\overset{#2}{#3}}} \newcommand{\widevec}{\overrightarrow} \newcommand{\darr}{\downarrow} \newcommand{\nearr}{\nearrow} \newcommand{\nwarr}{\nwarrow} \newcommand{\searr}{\searrow} \newcommand{\swarr}{\swarrow} \newcommand{\curvearrowbotright}{\curvearrowright} \newcommand{\uparr}{\uparrow} \newcommand{\downuparrow}{\updownarrow} \newcommand{\duparr}{\updownarrow} \newcommand{\updarr}{\updownarrow} \newcommand{\gt}{>} \newcommand{\lt}{<} \newcommand{\map}{\mapsto} \newcommand{\embedsin}{\hookrightarrow} \newcommand{\Alpha}{A} \newcommand{\Beta}{B} \newcommand{\Zeta}{Z} 
\newcommand{\Eta}{H} \newcommand{\Iota}{I} \newcommand{\Kappa}{K} \newcommand{\Mu}{M} \newcommand{\Nu}{N} \newcommand{\Rho}{P} \newcommand{\Tau}{T} \newcommand{\Upsi}{\Upsilon} \newcommand{\omicron}{o} \newcommand{\lang}{\langle} \newcommand{\rang}{\rangle} \newcommand{\Union}{\bigcup} \newcommand{\Intersection}{\bigcap} \newcommand{\Oplus}{\bigoplus} \newcommand{\Otimes}{\bigotimes} \newcommand{\Wedge}{\bigwedge} \newcommand{\Vee}{\bigvee} \newcommand{\coproduct}{\coprod} \newcommand{\product}{\prod} \newcommand{\closure}{\overline} \newcommand{\integral}{\int} \newcommand{\doubleintegral}{\iint} \newcommand{\tripleintegral}{\iiint} \newcommand{\quadrupleintegral}{\iiiint} \newcommand{\conint}{\oint} \newcommand{\contourintegral}{\oint} \newcommand{\infinity}{\infty} \newcommand{\bottom}{\bot} \newcommand{\minusb}{\boxminus} \newcommand{\plusb}{\boxplus} \newcommand{\timesb}{\boxtimes} \newcommand{\intersection}{\cap} \newcommand{\union}{\cup} \newcommand{\Del}{\nabla} \newcommand{\odash}{\circleddash} \newcommand{\negspace}{\!} \newcommand{\widebar}{\overline} \newcommand{\textsize}{\normalsize} \renewcommand{\scriptsize}{\scriptstyle} \newcommand{\scriptscriptsize}{\scriptscriptstyle} \newcommand{\mathfr}{\mathfrak} \newcommand{\statusline}[2]{#2} \newcommand{\tooltip}[2]{#2} \newcommand{\toggle}[2]{#2} % Theorem Environments \theoremstyle{plain} \newtheorem{theorem}{Theorem} \newtheorem{lemma}{Lemma} \newtheorem{prop}{Proposition} \newtheorem{cor}{Corollary} \newtheorem*{utheorem}{Theorem} \newtheorem*{ulemma}{Lemma} \newtheorem*{uprop}{Proposition} \newtheorem*{ucor}{Corollary} \theoremstyle{definition} \newtheorem{defn}{Definition} \newtheorem{example}{Example} \newtheorem*{udefn}{Definition} \newtheorem*{uexample}{Example} \theoremstyle{remark} \newtheorem{remark}{Remark} \newtheorem{note}{Note} \newtheorem*{uremark}{Remark} \newtheorem*{unote}{Note} %------------------------------------------------------------------- \begin{document} %------------------------------------------------------------------- \section*{sequent calculus} \hypertarget{context}{}\subsubsection*{{Context}}\label{context} \hypertarget{type_theory}{}\paragraph*{{Type theory}}\label{type_theory} [[!include type theory - contents]] \hypertarget{sequent_calculus}{}\section*{{Sequent calculus}}\label{sequent_calculus} \noindent\hyperlink{idea}{Idea}\dotfill \pageref*{idea} \linebreak \noindent\hyperlink{definitions}{Definitions}\dotfill \pageref*{definitions} \linebreak \noindent\hyperlink{rules_of_proof}{Rules of proof}\dotfill \pageref*{rules_of_proof} \linebreak \noindent\hyperlink{subformulaprop}{Cut-free proofs}\dotfill \pageref*{subformulaprop} \linebreak \noindent\hyperlink{related_entries}{Related entries}\dotfill \pageref*{related_entries} \linebreak \noindent\hyperlink{references}{References}\dotfill \pageref*{references} \linebreak \hypertarget{idea}{}\subsection*{{Idea}}\label{idea} A sequent calculus is a [[deductive system]] for [[predicate logic]] (or something similar) in which the basic [[judgment]] is a [[sequent]] \begin{displaymath} \phi_1, \phi_2, \ldots \vdash \psi_1, \psi_2, \ldots , \end{displaymath} expressing the idea that some statement $\psi_i$ must be true if every statement $\phi_i$ is true. The original sequent calculi were developed by [[Gerhard Gentzen]] in 1933 building on anticipatory ideas by [[Paul Hertz]]. 
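For example, the sequent \begin{displaymath} \alpha, \alpha \to \beta \vdash \beta \end{displaymath} expresses that $\beta$ is true whenever $\alpha$ and $\alpha \to \beta$ are both true, while the (classical) sequent \begin{displaymath} \vdash \alpha, \neg{\alpha} \end{displaymath} expresses that at least one of $\alpha$ and $\neg{\alpha}$ is true; commas on the left of $\vdash$ are thus read conjunctively, and commas on the right are read disjunctively.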
A central property of sequent calculi, which distinguishes them from systems of [[natural deduction]], is that they allow an easier analysis of proofs: the \hyperlink{subformulaprop}{subformula property} that they enjoy allows easy [[induction]] over proof-steps. The reason is roughly that, using the language of [[natural deduction]], in sequent calculus ``every rule is an [[introduction rule]]'', which introduces a term on \emph{either} side of a [[sequent]], with no [[elimination rules]]. This means that, working backward, every ``un-application'' of such a rule makes the sequent necessarily simpler. \hypertarget{definitions}{}\subsection*{{Definitions}}\label{definitions} We will start with an arbitrary [[signature (in logic)|signature]] $\Sigma$; the simplest case (for [[propositional logic]]) uses the empty signature (with no [[types]]). We will assume unlimited [[variables]] for [[propositions]] and unlimited variables for each type. \begin{remark} \label{}\hypertarget{}{} We will be defining several terms below; but keep in mind that these are definitions \emph{for sequent calculus}, since these terms may also be used more generally. \end{remark} \begin{defn} \label{}\hypertarget{}{} A \textbf{[[context]]} over $\Sigma$ is a (finite) [[list]] $\vec{x}\colon \vec{T}$ or \begin{displaymath} x_1\colon T_1, x_2\colon T_2, \ldots , \end{displaymath} where the $x_i$ are [[variables]] and the $T_i$ are (formulas for) [[types]] over $\Sigma$. \end{defn} For the empty signature, the only context is empty; if $\Sigma$ consists of a single type, then a context is effectively a list of variables. \begin{defn} \label{}\hypertarget{}{} Given a context $\Gamma$ over $\Sigma$, a \textbf{cedent} in $\Gamma$ over $\Sigma$ is simply a [[list]] $\vec{\phi}$ or \begin{displaymath} \phi_1, \phi_2, \ldots \end{displaymath} of (formulas for) [[propositions]] in $\Gamma$ over $\Sigma$. \end{defn} (Recall that a proposition in the context $\Gamma$ allows the variables introduced by $\Gamma$, but no others, to be free variables. Such a proposition in context is also called a [[predicate]].) \begin{defn} \label{}\hypertarget{}{} A \textbf{sequent} over $\Sigma$ consists of a context $\Gamma$ over $\Sigma$ and two cedents in $\Gamma$, called the \textbf{[[antecedent]]} and the \textbf{[[succedent]]}. This is written \begin{displaymath} \vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \vec{\psi} , \end{displaymath} where $\vec{x}\colon \vec{T}$ is the context $\Gamma$, with the antecedent on the left and the succedent on the right. \end{defn} \begin{remark} \label{}\hypertarget{}{} If $\Sigma$ has a single type (a so-called `untyped' signature) and one tacitly assumes that the single type is [[inhabited type|inhabited]], then one may (up to equivalence in a certain sense) reconstruct the context of any sequent from the cedents, by examining which variables appear; this is even possible if $\Sigma$ has more than one type (all assumed inhabited), although more difficult if certain abuses of notation are used. Then the context can be ignored. This is not really the right way to do things, but (especially for untyped signatures) it is traditional. Confusingly, the cedents are sometimes also called `contexts'; then one speaks of the \textbf{left context} (the antecedent) and the \textbf{right context} (the succedent). This is most easily done when ignoring what we have been calling the context; I'm not even sure what term would then be used for that context (maybe the \textbf{type context}?).
See also the \href{sequent#Gentzen2MartinLof}{interpretation of sequents as hypothetical judgements} (which I hope to write soon), which shows how the type context, antecedent, and succedent are all used to form the context of a related [[hypothetical judgement]]. One also sometimes uses `antecedent' or `succedent' for an \emph{individual} proposition on the relevant side of the sequent; see also the use of `consequent' in \hyperlink{minimaletc}{minimal sequents} below. \end{remark} \begin{defn} \label{}\hypertarget{}{} A \textbf{[[theory]]} over $\Sigma$ is an arbitrary [[subset|set]] of sequents over $\Sigma$, called the \textbf{[[axioms]]} of the theory. \end{defn} We will explain below how the sequent calculus may be used to prove [[theorems]] from the axioms; each theorem will also be a sequent. We may also consider particular kinds of sequents or theories, by form or content. \begin{defn} \label{minimaletc}\hypertarget{minimaletc}{} A \textbf{closed sequent} is one in which the context is empty; a \textbf{closed theory} is one in which every sequent is closed. An \textbf{intuitionistic sequent} is one in which the succedent consists of at most one proposition; an \textbf{intuitionistic theory} is one in which every sequent is intuitionistic. A \textbf{minimal sequent} is one in which the succedent consists of exactly one proposition (then called the \textbf{[[consequent]]}); a \textbf{minimal theory} is one in which every sequent is minimal. A \textbf{dual-intuitionistic sequent} is one in which the antecedent consists of at most one proposition; a \textbf{dual-intuitionistic theory} is one in which every sequent is dual-intuitionistic. A \textbf{dual-minimal sequent} is one in which the antecedent consists of exactly one proposition; a \textbf{dual-minimal theory} is one in which every sequent is dual-minimal. A \textbf{classical sequent} is simply a sequent, emphasising that we are not restricting to (dual)-intuitionistic or (dual)-minimal sequents; similarly, a \textbf{classical theory} is any theory. \end{defn} These (except the first) are so-called because of their relationship to [[intuitionistic logic]], [[minimal logic]], [[classical logic]], etc. Any of these [[logics]] may be presented using any sort of sequent, but Gentzen's original sequent calculi presented each logic using only corresponding sequents. \begin{defn} \label{}\hypertarget{}{} A \textbf{regular sequent} is one in which every proposition is given by a formula in [[regular logic]]; a \textbf{regular theory} is one in which every sequent is regular. A \textbf{coherent sequent} is one in which every proposition is given by a formula in [[coherent logic]]; a \textbf{coherent theory} is one in which every sequent is coherent. \end{defn} Obviously, this list could be continued. \hypertarget{rules_of_proof}{}\subsection*{{Rules of proof}}\label{rules_of_proof} The basic building blocks of a proof in sequent calculus are \textbf{[[rule of inference|inference rules]]} or \textbf{rewrite rules}, which are rules for inferring the validity of certain sequents from other sequents. One writes \begin{displaymath} \frac{\vec{\sigma}}{\tau} \end{displaymath} where $\vec{\sigma}$ is a (possibly empty) [[list]] of sequents and $\tau$ is a single sequent, to indicate that from the validity of the sequents in $\vec{\sigma}$ that of $\tau$ may be inferred, or that $\vec{\sigma}$ may be \emph{rewritten} to $\tau$.
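In particular, the list $\vec{\sigma}$ of premises may be empty; a rule with no premises, such as the identity rule below, is written with nothing above the line and asserts the validity of $\tau$ outright: \begin{displaymath} \frac{}{\alpha \vdash_{\vec{x}\colon \vec{T}} \alpha} \,. \end{displaymath}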
A \textbf{[[deduction|derivation]]} or \textbf{proof tree} is a compound of rewrite rules, such as \begin{displaymath} \frac{\frac{\sigma_1 \;\; \sigma_2}{\sigma_3} \;\; \sigma_4 }{\sigma_5} \,. \end{displaymath} We will give the rules in several classes. \begin{defn} \label{}\hypertarget{}{} The \textbf{[[identity rule]]} is \begin{displaymath} \frac{}{\alpha \vdash_{\vec{x}\colon \vec{T}} \alpha} \,, \end{displaymath} stating that, in any context $\Gamma$, any proposition in $\Gamma$ follows from itself, always. The \textbf{[[substitution rule]]} is \begin{displaymath} \frac{ \vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \vec{\psi} }{ \vec{\phi}[\vec{s}/\vec{x}] \vdash_{\vec{y}\colon \vec{U}} \vec{\psi}[\vec{s}/\vec{x}] } \,, \end{displaymath} or \begin{displaymath} \frac{ \vec{\phi} \vdash_{\Gamma} \vec{\psi} }{ a^*\vec{\phi} \vdash_{\Delta} a^*\vec{\psi} } \,, \end{displaymath} where $a$ is any [[interpretation]] of $\Gamma$ in $\Delta$. Explicitly, such an interpretation is a list $\vec{s}$ of [[terms]] (of the same length as the list which is the context $\Gamma$), where each term $s_i$ is a term over $\Sigma$ of type $T_i$ in the context $\Delta$. Then $a^*\phi$, or $\phi[\vec{s}/\vec{x}]$, is obtained from $\phi$ by substituting each $s_i$ for the corresponding $x_i$, and $a^*\vec{\phi}$ (and $a^*\vec{\psi}$) are obtained by applying this substitution to every proposition in the list. Of course, this rule is vacuous if $\Sigma$ has no terms. The \textbf{[[cut rule]]} is \begin{displaymath} \frac{ (\vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \vec{\psi}, \alpha) \;\;\; (\alpha, \vec{\chi} \vdash_{\vec{x}\colon \vec{T}} \vec{\omega}) }{ \vec{\phi}, \vec{\chi} \vdash_{\vec{x}\colon \vec{T}} \vec{\psi}, \vec{\omega} }\,; \end{displaymath} the proposition $\alpha$ has been `cut'. The \textbf{[[exchange rules]]} are \begin{displaymath} \frac{ \vec{\phi}, \alpha, \beta, \vec{\psi} \vdash_{\vec{x}\colon \vec{T}} \vec{\chi}, \vec{\omega} }{ \vec{\phi}, \beta, \alpha, \vec{\psi} \vdash_{\vec{x}\colon \vec{T}} \vec{\chi}, \vec{\omega} } \, \end{displaymath} and \begin{displaymath} \frac{ \vec{\phi}, \vec{\psi} \vdash_{\vec{x}\colon \vec{T}} \vec{\chi}, \alpha, \beta, \vec{\omega} }{ \vec{\phi}, \vec{\psi} \vdash_{\vec{x}\colon \vec{T}} \vec{\chi}, \beta, \alpha, \vec{\omega} } \,; \end{displaymath} more generally, we may apply any [[permutation]] of the antecedent and succedent (because every permutation is a [[composite]] of [[transposition]]s). The \textbf{[[weakening rules]]} are \begin{displaymath} \frac{ \vec{\phi}, \vec{\psi} \vdash_{\vec{x}\colon \vec{T}} \vec{\chi} }{ \vec{\phi}, \alpha, \vec{\psi} \vdash_{\vec{x}\colon \vec{T}} \vec{\chi} } \, \end{displaymath} and \begin{displaymath} \frac{ \vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \vec{\psi}, \vec{\chi} }{ \vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \vec{\psi}, \alpha, \vec{\chi} } \,; \end{displaymath} more generally, we may insert any sequence of propositions anywhere in the antecedent and succedent.
The \textbf{[[contraction rules]]} are \begin{displaymath} \frac{ \vec{\phi}, \alpha, \alpha, \vec{\psi} \vdash_{\vec{x}\colon \vec{T}} \vec{\chi} }{ \vec{\phi}, \alpha, \vec{\psi} \vdash_{\vec{x}\colon \vec{T}} \vec{\chi} } \end{displaymath} and \begin{displaymath} \frac{ \vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \vec{\psi}, \alpha, \alpha, \vec{\chi} }{ \vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \vec{\psi}, \alpha, \vec{\chi} } \,; \end{displaymath} in the absence of the exchange rule, it is important that we can remove duplications only one proposition at a time (as shown here) and only if they are adjacent (as shown here). \end{defn} Some or all of these rules may be dropped in a [[substructural logic]]. However, even so, the first three rules can generally be proved (except for the identity rule for atomic propositions); see [[cut elimination]]. \begin{defn} \label{}\hypertarget{}{} The \textbf{[[truth]] rule} is \begin{displaymath} \frac{ }{ \vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \vec{\psi}, \top, \vec{\chi} } \,; \end{displaymath} that is, any sequent is valid if the necessarily true statement is in the succedent. [[de Morgan duality|Dually]], the \textbf{[[falsehood]] rule} is \begin{displaymath} \frac{ }{ \vec{\phi}, \bot, \vec{\psi} \vdash_{\vec{x}\colon \vec{T}} \vec{\chi} } \,; \end{displaymath} that is, any sequent is valid if the necessarily false statement is in the antecedent. The \textbf{binary [[conjunction]] rules} are \begin{displaymath} \frac{ \vec{\phi}, \alpha, \vec{\psi} \vdash_{\vec{x}\colon \vec{T}} \vec{\chi} }{ \vec{\phi}, \alpha \wedge \beta, \vec{\psi} \vdash_{\vec{x}\colon \vec{T}} \vec{\chi} } \, \end{displaymath} and \begin{displaymath} \frac{ \vec{\phi}, \beta, \vec{\psi} \vdash_{\vec{x}\colon \vec{T}} \vec{\chi} }{ \vec{\phi}, \alpha \wedge \beta, \vec{\psi} \vdash_{\vec{x}\colon \vec{T}} \vec{\chi} } \, \end{displaymath} on the left and \begin{displaymath} \frac{ (\vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \vec{\psi}, \alpha, \vec{\chi}) \;\;\; (\vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \vec{\psi}, \beta, \vec{\chi}) }{ \vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \vec{\psi}, \alpha \wedge \beta, \vec{\chi} } \, \end{displaymath} on the right. Dually, the \textbf{binary [[disjunction]] rules} are \begin{displaymath} \frac{ (\vec{\phi}, \alpha, \vec{\psi} \vdash_{\vec{x}\colon \vec{T}} \vec{\chi}) \;\;\; (\vec{\phi}, \beta, \vec{\psi} \vdash_{\vec{x}\colon \vec{T}} \vec{\chi}) }{ \vec{\phi}, \alpha \vee \beta, \vec{\psi} \vdash_{\vec{x}\colon \vec{T}} \vec{\chi} } \, \end{displaymath} on the left and \begin{displaymath} \frac{ \vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \vec{\psi}, \alpha, \vec{\chi} }{ \vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \vec{\psi}, \alpha \vee \beta, \vec{\chi} } \, \end{displaymath} and \begin{displaymath} \frac{ \vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \vec{\psi}, \beta, \vec{\chi} }{ \vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \vec{\psi}, \alpha \vee \beta, \vec{\chi} } \, \end{displaymath} on the right. The \textbf{[[negation]] rules} are \begin{displaymath} \frac{ \vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \alpha, \vec{\psi} }{ \vec{\phi}, \neg{\alpha} \vdash_{\vec{x}\colon \vec{T}} \vec{\psi} } \, \end{displaymath} and \begin{displaymath} \frac{ \vec{\phi}, \alpha \vdash_{\vec{x}\colon \vec{T}} \vec{\psi} }{ \vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \neg{\alpha}, \vec{\psi} } \,; \end{displaymath} in other words, a proposition may be moved to the other side if it is replaced by its negation. 
The \textbf{[[conditional]] rules} are \begin{displaymath} \frac{ (\vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \alpha) \;\;\; (\beta \vdash_{\vec{x}\colon \vec{T}} \vec{\psi}) }{ \vec{\phi}, \alpha \to \beta \vdash_{\vec{x}\colon \vec{T}} \vec{\psi} } \, \end{displaymath} and \begin{displaymath} \frac{ \vec{\phi}, \alpha \vdash_{\vec{x}\colon \vec{T}} \beta, \vec{\psi} }{ \vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \alpha \to \beta, \vec{\psi} } \,. \end{displaymath} Dually, the \textbf{[[relative complement]] rules} are \begin{displaymath} \frac{ \vec{\phi}, \alpha \vdash_{\vec{x}\colon \vec{T}} \beta, \vec{\psi} }{ \vec{\phi}, \alpha \setminus \beta \vdash_{\vec{x}\colon \vec{T}} \vec{\psi} } \, \end{displaymath} and \begin{displaymath} \frac{ (\vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \alpha) \;\;\; (\beta \vdash_{\vec{x}\colon \vec{T}} \vec{\psi}) }{ \vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \alpha \setminus \beta, \vec{\psi} } \,. \end{displaymath} The \textbf{[[equality]] rules} are \begin{displaymath} \frac{ (\vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \alpha[s/y]) \;\;\; (\alpha[t/y] \vdash_{\vec{x}\colon \vec{T}} \vec{\psi}) }{ \vec{\phi}, (s = t) \vdash_{\vec{x}\colon \vec{T}} \vec{\psi} } \,, \end{displaymath} where $s$ and $t$ are any [[terms]] of the same [[type]] $U$ in the context $\vec{x}\colon \vec{T}$ and $\alpha$ is a proposition in the context $(\vec{x}\colon \vec{T}, y\colon U)$, and \begin{displaymath} \frac{ }{ \vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \vec{\psi}, (s = s) } \,, \end{displaymath} where $s$ is any [[term]] of any type in the context $\vec{x}\colon \vec{T}$. One could (but rarely does) introduce dual [[apartness]] rules. The \textbf{[[universal quantification]] rules} are \begin{displaymath} \frac{ \vec{\phi}, \alpha[s/y], \vec{\psi} \vdash_{\vec{x}\colon \vec{T}} \vec{\chi} }{ \vec{\phi}, \forall(y\colon U) \alpha, \vec{\psi} \vdash_{\vec{x}\colon \vec{T}} \vec{\chi} } \,, \end{displaymath} where $s$ is a term of type $U$ in the context $\vec{x}\colon \vec{T}$ and $\alpha$ is a proposition in the [[extended context]] $(\vec{x}\colon \vec{T}, y\colon U)$, and \begin{displaymath} \frac{ \vec{\phi}[\hat{y}] \vdash_{\vec{x}\colon \vec{T}, y\colon U} \vec{\psi}[\hat{y}], \alpha, \vec{\chi}[\hat{y}] }{ \vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \vec{\psi}, \forall(y\colon U) \alpha, \vec{\chi} } \,, \end{displaymath} where $\phi[\hat{y}]$ is the proposition in the context $(\vec{x}\colon \vec{T}, y\colon U)$ produced by [[weakening rule|weakening]] (in the type context) the proposition $\phi$ in the context $\vec{x}\colon \vec{T}$ (and similarly for a list of propositions). Dually, the \textbf{[[existential quantification]] rules} are \begin{displaymath} \frac{ \vec{\phi}[\hat{y}], \alpha, \vec{\psi}[\hat{y}] \vdash_{\vec{x}\colon \vec{T}, y\colon U} \vec{\chi}[\hat{y}] }{ \vec{\phi}, \exists(y\colon U) \alpha, \vec{\psi} \vdash_{\vec{x}\colon \vec{T}} \vec{\chi} } \, \end{displaymath} and \begin{displaymath} \frac{ \vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \vec{\psi}, \alpha[s/y], \vec{\chi} }{ \vec{\phi} \vdash_{\vec{x}\colon \vec{T}} \vec{\psi}, \exists(y\colon U) \alpha, \vec{\chi} } \,. \end{displaymath} \end{defn} We have written the rules \emph{additively}, which works best when using (dual)-intuitionistic or (dual)-minimal sequents, but it is also possible to write them in a slightly different \emph{multiplicative} manner.
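Before turning to such variations, here is a small example of how the rules combine into a derivation. Using only the identity rule and the binary conjunction rules (and suppressing the context $\vec{x}\colon \vec{T}$ for readability), the sequent $\alpha \wedge \beta \vdash \beta \wedge \alpha$ is derived as \begin{displaymath} \frac{ \frac{ \frac{}{\beta \vdash \beta} }{ \alpha \wedge \beta \vdash \beta } \;\;\; \frac{ \frac{}{\alpha \vdash \alpha} }{ \alpha \wedge \beta \vdash \alpha } }{ \alpha \wedge \beta \vdash \beta \wedge \alpha } \,. \end{displaymath} Similarly, the law of [[excluded middle]] is derivable using classical sequents: starting from the identity rule $\alpha \vdash \alpha$, the second negation rule gives $\vdash \neg{\alpha}, \alpha$, the exchange rule gives $\vdash \alpha, \neg{\alpha}$, two applications of the right disjunction rules give $\vdash \alpha \vee \neg{\alpha}, \alpha \vee \neg{\alpha}$, and the contraction rule gives $\vdash \alpha \vee \neg{\alpha}$. Since this derivation passes through sequents with two propositions in the succedent, it is not available if only intuitionistic sequents are used.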
Writing the rules additively rather than multiplicatively makes a difference if some of the [[weakening rule|weakening]] and [[contraction rule|contraction]] rules are abandoned; [[linear logic]] uses both forms, but for different connectives. Similarly, we have written the rules to apply as much as possible without the [[exchange rule]], but the remaining asymmetries (in the rules for $\neg$, $\to$, $\setminus$, and $=$) mean that these diverge into left and right operations in [[noncommutative logic]]. (Compare the difference between [[left duals]] and [[right duals]] in a non-[[symmetric monoidal category|symmetric]] [[monoidal category]].) Gentzen originally did not include $\top$ or $\bot$, but if $\top$ is defined as $\alpha \to \alpha$ for any atomic $\alpha$ and $\bot$ is defined as $\neg{\top}$, then their rules can be proved. Similarly, he did not use $\setminus$; we may define $\alpha \setminus \beta$ to mean $\alpha \wedge \neg{\beta}$. It would also be possible to leave out $\to$, defining $\alpha \to \beta$ as $\neg{\alpha} \vee \beta$. With or without these optional operations and rules, the resulting logic is [[classical logic|classical]]. If we use only those rules that can be stated using intuitionistic sequents, then the result is [[intuitionistic logic]]; this is again true with or without $\top$, $\bot$, or $\setminus$. However, if we leave out $\to$, then we cannot reconstruct it; the definition of $\to$ using $\neg$ and $\vee$ is not valid intuitionistically. On the other hand, if we include $\bot$ (and optionally $\top$) but leave out $\neg$ and $\setminus$, then we get intuitionistic logic using all (classical) sequents. (Conversely, we could get classical logic using only intuitionistic sequents and adding the law of [[excluded middle]] as an axiom.) If we use only those rules that can be stated using minimal sequents (so necessarily leaving out $\setminus$), then the result is still intuitionistic logic; but if we also leave out $\bot$, then the result is [[minimal logic]]. Dual results hold for dual-intuitionistic and dual-minimal sequents. \hypertarget{subformulaprop}{}\subsection*{{Cut-free proofs}}\label{subformulaprop} The [[cut rule]] expresses the composition of proofs. Gentzen's main result (\hyperlink{Gentzen}{Gentzen, Hauptsatz}) is that any derivation that uses the cut rule can be transformed into one that doesn't -- the \emph{[[cut-elimination theorem]]}. This yields a [[normalization]] algorithm for proofs, which provided much of the inspiration behind [[Lambek]]'s approach to categorical logic. Similarly, any derivation that uses the identity rule can be transformed into one that uses it only for atomic propositions (those provided by the signature $\Sigma$ and equality). The most important property of cut-free proofs is that every formula occurring anywhere in a proof is a subformula of a formula contained in the conclusion of the proof (the [[subformula property]]). This makes induction over proof-trees much more straightforward than with [[natural deduction]] or other systems. See [[cut elimination]]. \hypertarget{related_entries}{}\subsection*{{Related entries}}\label{related_entries} \begin{itemize}% \item [[natural deduction]] \item [[cut rule]] \item [[Paul Hertz]] \end{itemize} \hypertarget{references}{}\subsection*{{References}}\label{references} For the basic definition of bi-minimal sequents see for instance def. D1.1.5 of \begin{itemize}% \item [[Peter Johnstone]], \emph{[[Sketches of an Elephant]]} \end{itemize} and section D1.3 for sequent calculus.
Sequent calculus was introduced by Gerhard Gentzen in his 1933 thesis \begin{itemize}% \item [[Gerhard Gentzen]], \emph{Untersuchungen \"u{}ber das logische Schlie\ss{}en I}, \emph{Mathematische Zeitschrift} 39 (1), Springer-Verlag 1935, \url{http://dx.doi.org/10.1007/BF01201353}. \end{itemize} as a formal system for studying the properties of proof systems presented in the style of [[natural deduction]] (also introduced by Gentzen). It has since been widely adopted over natural deduction in [[proof theory]]. The article \begin{itemize}% \item Wikipedia, \emph{\href{http://en.wikipedia.org/wiki/Sequent_calculus}{Sequent calculus}} \end{itemize} provides a good summary. So does \begin{itemize}% \item \href{http://sakharov.net/sequent.html}{Sequent Calculus Primer} \end{itemize} A sequent calculus for [[type theory]] is discussed in \begin{itemize}% \item St\'e{}phane Lengrand, Roy Dyckhoff, James McKinna, \emph{A Sequent Calculus for Type Theory} (\href{http://hal.inria.fr/hal-00150286/}{inria}, \href{http://138.251.206.45/~rd/publications/42070441.pdf}{pdf}) \end{itemize} [[!redirects sequent calculus]] [[!redirects sequent calculi]] [[!redirects sequent calculuses]] \end{document}