\documentclass[12pt,titlepage]{article} \usepackage{amsmath} \usepackage{mathrsfs} \usepackage{amsfonts} \usepackage{amssymb} \usepackage{amsthm} \usepackage{mathtools} \usepackage{graphicx} \usepackage{color} \usepackage{ucs} \usepackage[utf8x]{inputenc} \usepackage{xparse} \usepackage{hyperref} %----Macros---------- % % Unresolved issues: % % \righttoleftarrow % \lefttorightarrow % % \color{} with HTML colorspec % \bgcolor % \array with options (without options, it's equivalent to the matrix environment) % Of the standard HTML named colors, white, black, red, green, blue and yellow % are predefined in the color package. Here are the rest. \definecolor{aqua}{rgb}{0, 1.0, 1.0} \definecolor{fuschia}{rgb}{1.0, 0, 1.0} \definecolor{gray}{rgb}{0.502, 0.502, 0.502} \definecolor{lime}{rgb}{0, 1.0, 0} \definecolor{maroon}{rgb}{0.502, 0, 0} \definecolor{navy}{rgb}{0, 0, 0.502} \definecolor{olive}{rgb}{0.502, 0.502, 0} \definecolor{purple}{rgb}{0.502, 0, 0.502} \definecolor{silver}{rgb}{0.753, 0.753, 0.753} \definecolor{teal}{rgb}{0, 0.502, 0.502} % Because of conflicts, \space and \mathop are converted to % \itexspace and \operatorname during preprocessing. % itex: \space{ht}{dp}{wd} % % Height and baseline depth measurements are in units of tenths of an ex while % the width is measured in tenths of an em. \makeatletter \newdimen\itex@wd% \newdimen\itex@dp% \newdimen\itex@thd% \def\itexspace#1#2#3{\itex@wd=#3em% \itex@wd=0.1\itex@wd% \itex@dp=#2ex% \itex@dp=0.1\itex@dp% \itex@thd=#1ex% \itex@thd=0.1\itex@thd% \advance\itex@thd\the\itex@dp% \makebox[\the\itex@wd]{\rule[-\the\itex@dp]{0cm}{\the\itex@thd}}} \makeatother % \tensor and \multiscript \makeatletter \newif\if@sup \newtoks\@sups \def\append@sup#1{\edef\act{\noexpand\@sups={\the\@sups #1}}\act}% \def\reset@sup{\@supfalse\@sups={}}% \def\mk@scripts#1#2{\if #2/ \if@sup ^{\the\@sups}\fi \else% \ifx #1_ \if@sup ^{\the\@sups}\reset@sup \fi {}_{#2}% \else \append@sup#2 \@suptrue \fi% \expandafter\mk@scripts\fi} \def\tensor#1#2{\reset@sup#1\mk@scripts#2_/} \def\multiscripts#1#2#3{\reset@sup{}\mk@scripts#1_/#2% \reset@sup\mk@scripts#3_/} \makeatother % \slash \makeatletter \newbox\slashbox \setbox\slashbox=\hbox{$/$} \def\itex@pslash#1{\setbox\@tempboxa=\hbox{$#1$} \@tempdima=0.5\wd\slashbox \advance\@tempdima 0.5\wd\@tempboxa \copy\slashbox \kern-\@tempdima \box\@tempboxa} \def\slash{\protect\itex@pslash} \makeatother % math-mode versions of \rlap, etc % from Alexander Perlis, "A complement to \smash, \llap, and lap" % http://math.arizona.edu/~aprl/publications/mathclap/ \def\clap#1{\hbox to 0pt{\hss#1\hss}} \def\mathllap{\mathpalette\mathllapinternal} \def\mathrlap{\mathpalette\mathrlapinternal} \def\mathclap{\mathpalette\mathclapinternal} \def\mathllapinternal#1#2{\llap{$\mathsurround=0pt#1{#2}$}} \def\mathrlapinternal#1#2{\rlap{$\mathsurround=0pt#1{#2}$}} \def\mathclapinternal#1#2{\clap{$\mathsurround=0pt#1{#2}$}} % Renames \sqrt as \oldsqrt and redefine root to result in \sqrt[#1]{#2} \let\oldroot\root \def\root#1#2{\oldroot #1 \of{#2}} \renewcommand{\sqrt}[2][]{\oldroot #1 \of{#2}} % Manually declare the txfonts symbolsC font \DeclareSymbolFont{symbolsC}{U}{txsyc}{m}{n} \SetSymbolFont{symbolsC}{bold}{U}{txsyc}{bx}{n} \DeclareFontSubstitution{U}{txsyc}{m}{n} % Manually declare the stmaryrd font \DeclareSymbolFont{stmry}{U}{stmry}{m}{n} \SetSymbolFont{stmry}{bold}{U}{stmry}{b}{n} % Manually declare the MnSymbolE font \DeclareFontFamily{OMX}{MnSymbolE}{} \DeclareSymbolFont{mnomx}{OMX}{MnSymbolE}{m}{n} 
\SetSymbolFont{mnomx}{bold}{OMX}{MnSymbolE}{b}{n} \DeclareFontShape{OMX}{MnSymbolE}{m}{n}{ <-6> MnSymbolE5 <6-7> MnSymbolE6 <7-8> MnSymbolE7 <8-9> MnSymbolE8 <9-10> MnSymbolE9 <10-12> MnSymbolE10 <12-> MnSymbolE12}{} % Declare specific arrows from txfonts without loading the full package \makeatletter \def\re@DeclareMathSymbol#1#2#3#4{% \let#1=\undefined \DeclareMathSymbol{#1}{#2}{#3}{#4}} \re@DeclareMathSymbol{\neArrow}{\mathrel}{symbolsC}{116} \re@DeclareMathSymbol{\neArr}{\mathrel}{symbolsC}{116} \re@DeclareMathSymbol{\seArrow}{\mathrel}{symbolsC}{117} \re@DeclareMathSymbol{\seArr}{\mathrel}{symbolsC}{117} \re@DeclareMathSymbol{\nwArrow}{\mathrel}{symbolsC}{118} \re@DeclareMathSymbol{\nwArr}{\mathrel}{symbolsC}{118} \re@DeclareMathSymbol{\swArrow}{\mathrel}{symbolsC}{119} \re@DeclareMathSymbol{\swArr}{\mathrel}{symbolsC}{119} \re@DeclareMathSymbol{\nequiv}{\mathrel}{symbolsC}{46} \re@DeclareMathSymbol{\Perp}{\mathrel}{symbolsC}{121} \re@DeclareMathSymbol{\Vbar}{\mathrel}{symbolsC}{121} \re@DeclareMathSymbol{\sslash}{\mathrel}{stmry}{12} \re@DeclareMathSymbol{\bigsqcap}{\mathop}{stmry}{"64} \re@DeclareMathSymbol{\biginterleave}{\mathop}{stmry}{"6} \re@DeclareMathSymbol{\invamp}{\mathrel}{symbolsC}{77} \re@DeclareMathSymbol{\parr}{\mathrel}{symbolsC}{77} \makeatother % \llangle, \rrangle, \lmoustache and \rmoustache from MnSymbolE \makeatletter \def\Decl@Mn@Delim#1#2#3#4{% \if\relax\noexpand#1% \let#1\undefined \fi \DeclareMathDelimiter{#1}{#2}{#3}{#4}{#3}{#4}} \def\Decl@Mn@Open#1#2#3{\Decl@Mn@Delim{#1}{\mathopen}{#2}{#3}} \def\Decl@Mn@Close#1#2#3{\Decl@Mn@Delim{#1}{\mathclose}{#2}{#3}} \Decl@Mn@Open{\llangle}{mnomx}{'164} \Decl@Mn@Close{\rrangle}{mnomx}{'171} \Decl@Mn@Open{\lmoustache}{mnomx}{'245} \Decl@Mn@Close{\rmoustache}{mnomx}{'244} \makeatother % Widecheck \makeatletter \DeclareRobustCommand\widecheck[1]{{\mathpalette\@widecheck{#1}}} \def\@widecheck#1#2{% \setbox\z@\hbox{\m@th$#1#2$}% \setbox\tw@\hbox{\m@th$#1% \widehat{% \vrule\@width\z@\@height\ht\z@ \vrule\@height\z@\@width\wd\z@}$}% \dp\tw@-\ht\z@ \@tempdima\ht\z@ \advance\@tempdima2\ht\tw@ \divide\@tempdima\thr@@ \setbox\tw@\hbox{% \raise\@tempdima\hbox{\scalebox{1}[-1]{\lower\@tempdima\box \tw@}}}% {\ooalign{\box\tw@ \cr \box\z@}}} \makeatother % \mathraisebox{voffset}[height][depth]{something} \makeatletter \NewDocumentCommand\mathraisebox{moom}{% \IfNoValueTF{#2}{\def\@temp##1##2{\raisebox{#1}{$\m@th##1##2$}}}{% \IfNoValueTF{#3}{\def\@temp##1##2{\raisebox{#1}[#2]{$\m@th##1##2$}}% }{\def\@temp##1##2{\raisebox{#1}[#2][#3]{$\m@th##1##2$}}}}% \mathpalette\@temp{#4}} \makeatletter % udots (taken from yhmath) \makeatletter \def\udots{\mathinner{\mkern2mu\raise\p@\hbox{.} \mkern2mu\raise4\p@\hbox{.}\mkern1mu \raise7\p@\vbox{\kern7\p@\hbox{.}}\mkern1mu}} \makeatother %% Fix array \newcommand{\itexarray}[1]{\begin{matrix}#1\end{matrix}} %% \itexnum is a noop \newcommand{\itexnum}[1]{#1} %% Renaming existing commands \newcommand{\underoverset}[3]{\underset{#1}{\overset{#2}{#3}}} \newcommand{\widevec}{\overrightarrow} \newcommand{\darr}{\downarrow} \newcommand{\nearr}{\nearrow} \newcommand{\nwarr}{\nwarrow} \newcommand{\searr}{\searrow} \newcommand{\swarr}{\swarrow} \newcommand{\curvearrowbotright}{\curvearrowright} \newcommand{\uparr}{\uparrow} \newcommand{\downuparrow}{\updownarrow} \newcommand{\duparr}{\updownarrow} \newcommand{\updarr}{\updownarrow} \newcommand{\gt}{>} \newcommand{\lt}{<} \newcommand{\map}{\mapsto} \newcommand{\embedsin}{\hookrightarrow} \newcommand{\Alpha}{A} \newcommand{\Beta}{B} \newcommand{\Zeta}{Z} 
\newcommand{\Eta}{H} \newcommand{\Iota}{I} \newcommand{\Kappa}{K} \newcommand{\Mu}{M} \newcommand{\Nu}{N} \newcommand{\Rho}{P} \newcommand{\Tau}{T} \newcommand{\Upsi}{\Upsilon} \newcommand{\omicron}{o} \newcommand{\lang}{\langle} \newcommand{\rang}{\rangle} \newcommand{\Union}{\bigcup} \newcommand{\Intersection}{\bigcap} \newcommand{\Oplus}{\bigoplus} \newcommand{\Otimes}{\bigotimes} \newcommand{\Wedge}{\bigwedge} \newcommand{\Vee}{\bigvee} \newcommand{\coproduct}{\coprod} \newcommand{\product}{\prod} \newcommand{\closure}{\overline} \newcommand{\integral}{\int} \newcommand{\doubleintegral}{\iint} \newcommand{\tripleintegral}{\iiint} \newcommand{\quadrupleintegral}{\iiiint} \newcommand{\conint}{\oint} \newcommand{\contourintegral}{\oint} \newcommand{\infinity}{\infty} \newcommand{\bottom}{\bot} \newcommand{\minusb}{\boxminus} \newcommand{\plusb}{\boxplus} \newcommand{\timesb}{\boxtimes} \newcommand{\intersection}{\cap} \newcommand{\union}{\cup} \newcommand{\Del}{\nabla} \newcommand{\odash}{\circleddash} \newcommand{\negspace}{\!} \newcommand{\widebar}{\overline} \newcommand{\textsize}{\normalsize} \renewcommand{\scriptsize}{\scriptstyle} \newcommand{\scriptscriptsize}{\scriptscriptstyle} \newcommand{\mathfr}{\mathfrak} \newcommand{\statusline}[2]{#2} \newcommand{\tooltip}[2]{#2} \newcommand{\toggle}[2]{#2} % Theorem Environments \theoremstyle{plain} \newtheorem{theorem}{Theorem} \newtheorem{lemma}{Lemma} \newtheorem{prop}{Proposition} \newtheorem{cor}{Corollary} \newtheorem*{utheorem}{Theorem} \newtheorem*{ulemma}{Lemma} \newtheorem*{uprop}{Proposition} \newtheorem*{ucor}{Corollary} \theoremstyle{definition} \newtheorem{defn}{Definition} \newtheorem{example}{Example} \newtheorem*{udefn}{Definition} \newtheorem*{uexample}{Example} \theoremstyle{remark} \newtheorem{remark}{Remark} \newtheorem{note}{Note} \newtheorem*{uremark}{Remark} \newtheorem*{unote}{Note} %------------------------------------------------------------------- \begin{document} %------------------------------------------------------------------- \section*{linear logic} \hypertarget{context}{}\subsubsection*{{Context}}\label{context} \hypertarget{foundations}{}\paragraph*{{Foundations}}\label{foundations} [[!include foundations - contents]] \hypertarget{category_theory}{}\paragraph*{{$(0,1)$-Category theory}}\label{category_theory} [[!include (0,1)-category theory - contents]] \hypertarget{monoidal_categories}{}\paragraph*{{Monoidal categories}}\label{monoidal_categories} [[!include monoidal categories - contents]] \hypertarget{contents}{}\section*{{Contents}}\label{contents} \noindent\hyperlink{idea}{Idea}\dotfill \pageref*{idea} \linebreak \noindent\hyperlink{quantum_logic}{Quantum logic}\dotfill \pageref*{quantum_logic} \linebreak \noindent\hyperlink{resource_availability}{Resource availability}\dotfill \pageref*{resource_availability} \linebreak \noindent\hyperlink{game_semantics}{Game semantics}\dotfill \pageref*{game_semantics} \linebreak \noindent\hyperlink{as_a_relevant_logic}{As a relevant logic}\dotfill \pageref*{as_a_relevant_logic} \linebreak \noindent\hyperlink{definition}{Definition}\dotfill \pageref*{definition} \linebreak \noindent\hyperlink{variants}{Variants}\dotfill \pageref*{variants} \linebreak \noindent\hyperlink{categorial_semantics}{Categorial semantics}\dotfill \pageref*{categorial_semantics} \linebreak \noindent\hyperlink{autonomous_categories}{$*$-autonomous categories}\dotfill \pageref*{autonomous_categories} \linebreak \noindent\hyperlink{polycategories}{Polycategories}\dotfill 
\pageref*{polycategories} \linebreak
\noindent\hyperlink{game_semantics_2}{Game semantics}\dotfill \pageref*{game_semantics_2} \linebreak
\noindent\hyperlink{multiple_exponential_operators}{Multiple exponential operators}\dotfill \pageref*{multiple_exponential_operators} \linebreak
\noindent\hyperlink{related_concepts}{Related concepts}\dotfill \pageref*{related_concepts} \linebreak
\noindent\hyperlink{references}{References}\dotfill \pageref*{references} \linebreak

\hypertarget{idea}{}\subsection*{{Idea}}\label{idea}

Linear logic is sometimes thought of as a logic for reasoning about resource-sensitive issues, but it can also be understood categorically, interpreted via game semantics, related to Petri nets, or regarded as a particular form of [[quantum logic]]. A bit more formally:

\textbf{Linear logic} is a [[substructural logic]] in which the [[contraction rule]] and the [[weakening rule]] are omitted, or at least have their applicability restricted.

In the original definition of \hyperlink{Girard87}{Girard 87}, linear logic is the [[internal logic]] of, i.e.\ has [[categorical semantics]] in, [[star-autonomous categories]] (\hyperlink{Seely}{Seely 89, prop. 1.5}). But more generally, \emph{linear logic} has come to refer to the [[internal logic]] of any possibly-non-[[cartesian monoidal category|cartesian]] [[symmetric monoidal category|symmetric]] [[closed monoidal category]] (then usually called \emph{multiplicative intuitionistic linear logic} -- \emph{MILL}) or even [[polycategory]] (\hyperlink{Szabo78}{Szabo 78}; see the \href{linear%20type%20theory#HistoryCategoricalSemantics}{history section}, and see also \hyperlink{dePaiva89}{de Paiva 89}, \hyperlink{Blute91}{Blute 91}, \hyperlink{BentonBiermanPaivaHyland92}{Benton-Bierman-de Paiva-Hyland 92}, \hyperlink{HylandPaiva93}{Hyland-de Paiva 93}, \hyperlink{Bierman95}{Bierman 95}, \hyperlink{Barber97}{Barber 97}, \hyperlink{Schalk04}{Schalk 04}, \hyperlink{Mellies09}{Melli\`e{}s 09}). Under this interpretation, \emph{[[proof nets]]} (or the associated [[Kelly-Mac Lane graphs]]) of linear logic are similar to [[string diagrams]] for monoidal categories.

Indeed, these more general senses of linear logic still faithfully follow the original motivation for the term ``linear'' as connoting ``resource availability'', explained below (and in \hyperlink{Girard87}{Girard 87}), since the non-cartesianness of the [[tensor product]] means the absence of a [[diagonal]] map and hence the impossibility of [[functions]] depending on more than a single (linear) copy of their [[variables]].

In addition to these usual ``denotational'' categorical semantics, linear logic also has an ``operational'' categorical semantics, called the \emph{[[Geometry of Interaction]]}, in [[traced monoidal categories]].

Although linear logic is traditionally presented in terms of inference rules, it was apparently discovered by Girard while studying [[coherence spaces]].

\hypertarget{quantum_logic}{}\subsubsection*{{Quantum logic}}\label{quantum_logic}

Linear logic and [[linear type theory]] can be argued to be the proper incarnation of \emph{[[quantum logic]]} (see there for references). In this context the linearity of the logic, hence the absence of [[diagonal]] maps in its [[categorical semantics]] (in non-[[cartesian monoidal category|cartesian]] [[symmetric monoidal categories]]), reflects the \emph{[[no-cloning theorem]]} of [[quantum physics]].
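To see this failure of cloning concretely, consider the simplest linear-algebraic illustration (a standard computation, offered here only as an example): in [[vector spaces]] with the [[tensor product]], a putative cloning map $A \to A \otimes A$, $v \mapsto v \otimes v$, is not even linear, since
\begin{displaymath}
(v + w) \otimes (v + w) \;=\; v \otimes v + v \otimes w + w \otimes v + w \otimes w \;\neq\; v \otimes v + w \otimes w
\end{displaymath}
in general, whereas for the [[cartesian product]] the diagonal $v \mapsto (v, v)$ is unproblematic.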
\hypertarget{resource_availability}{}\subsubsection*{{Resource availability}}\label{resource_availability} Unlike traditional [[logics]], which deal with the \emph{[[truth]]} of \emph{[[propositions]]}, linear logic is often described as dealing with the \emph{availability} of \emph{resources}. A proposition, if it is true, remains true no matter how we use that fact in proving other propositions. By contrast, in using a resource $A$ to make available a resource $B$, $A$ itself may be consumed or otherwise modified. Linear logic deals with this by restricting our ability to duplicate or discard resources freely. For example, we have \begin{displaymath} \text{have cake} \vdash \text{eat cake} \end{displaymath} from which we can prove \begin{displaymath} \text{have cake},\; \text{have cake} \vdash \text{have cake} \wedge \text{eat cake} \end{displaymath} which by left [[contraction rule|contraction]] (duplication of inputs) in [[classical logic]] yields \begin{displaymath} \text{have cake} \vdash \text{have cake} \wedge \text{eat cake} \end{displaymath} Linear logic would disallow the contraction step and treat $\text{have cake},\; \text{have cake} \vdash A$ as explicitly meaning that \emph{two} slices of cake yield $A$. Disallowing contraction then corresponds to the fact that we can't turn one slice of cake into two (more's the pity), so you can't have your cake and eat it too. \hypertarget{game_semantics}{}\subsubsection*{{Game semantics}}\label{game_semantics} Linear logic can also be interpreted using a semantics of ``games'' or ``interactions''. Under this interpretation, each proposition in a sequent represents a game being played or a transaction protocol being executed. An assertion of, for instance, \begin{displaymath} P, Q \vdash R \end{displaymath} means roughly that if I am playing three simultaneous games of $P$, $Q$, and $R$, in which I am the left player in $P$ and $Q$ and the right player in $R$, then I have a strategy which will enable me to win at least one of them. Now the above statements about ``resources'' translate into saying that I have to play in all the games I am given and can't invent new ones on the fly. For example, $P \vdash P$ can be won by using the copycat strategy where one makes the two games identical except with left and right players reversed. \hypertarget{as_a_relevant_logic}{}\subsubsection*{{As a relevant logic}}\label{as_a_relevant_logic} Linear logic is closely related to notions of [[relevant logic]], which have been studied for much longer. The goal of relevant logic is to disallow statements like ``if pigs can fly, then grass is green'' which are true, under the usual logical interpretation of [[implication]], but in which the hypothesis has nothing to do with the conclusion. Clearly there is a relationship with the ``resource semantics'': if we want to require that all hypotheses are ``used'' in a proof then we need to disallow weakening. \hypertarget{definition}{}\subsection*{{Definition}}\label{definition} Linear logic is usually given in terms of [[sequent calculus]]. There is a set of \textbf{[[propositions]]} (although as remarked above, to be thought of more as resources to be acquired than as statements to be proved) which we construct through [[recursion]]. Each pair of [[lists]] of propositions is a \textbf{sequent} (written as usual with `$\vdash$' between the lists), some of which are \textbf{valid}; we determine which are valid also through recursion. 
Technically, the [[propositional calculus]] of linear logic also requires a set of \textbf{propositional variables} from which to start; this is usually identified with the set of [[natural numbers]] (so the variables are $p_0$, $p_1$, etc), although one can also consider the linear logic $LL(V)$ where $V$ is any initial [[set]] of propositional variables. Here we define the set of propositions: \begin{itemize}% \item Every propositional variable is a proposition. \item For each proposition $A$, there is a proposition $A^\perp$, the \textbf{[[negation]]} of $A$. \item For each proposition $A$ and proposition $B$, there are four additional propositions:\begin{itemize}% \item $A \& B$ (read `with'), the \textbf{additive [[logical conjunction|conjunction]]} of $A$ and $B$; \item $A \oplus B$ (read `plus'), the \textbf{additive [[disjunction]]} of $A$ and $B$; \item $A \otimes B$ (read `times'), the \textbf{[[multiplicative conjunction]]} of $A$ and $B$; \item $A \parr B$ (read `par' and sometimes written $A \mid B$), the \textbf{[[multiplicative disjunction]]} of $A$ and $B$. \end{itemize} \item There are also four constants to go with the four binary operations above:\begin{itemize}% \item $\top$ (read `top'), the \textbf{additive [[truth]]}; \item $\mathbf{0}$ (read `zero'), the \textbf{additive [[falsity]]}; \item $\mathbf{1}$ (read `one'), the \textbf{multiplicative truth}; \item $\bot$ (read `bottom'), the \textbf{multiplicative falsity}. \end{itemize} \item For each proposition $A$, there are two additional propositions:\begin{itemize}% \item $!{A}$ (read `[[of course]]'), the \textbf{[[exponential conjunction]]} of $A$; \item $?{A}$ (read `why not'), the \textbf{exponential disjunction} of $A$. \end{itemize} \end{itemize} The terms ``exponential'', ``multiplicative'', and ``additive'' come from the fact that ``exponentiation converts addition to multiplication'': we have $!{(A \& B)}\equiv !{A} \otimes !{B}$ and so on (see below). However, the connectives and constants can also be grouped in different ways. For instance, the multiplicative conjunction $\otimes$ and additive disjunction $\oplus$ are both [[positive types]], while the additive conjunction $\&$ and multiplicative disjunction $\parr$ are [[negative types]]. Similarly, the multiplicative truth $\mathbf{1}$ and the additive falsity $\mathbf{0}$ are positive, while the additive truth $\top$ and multiplicative falsity $\bot$ are negative. This grouping has the advantage that the similarity of symbols matches the adjective used. But conversely, the natural grouping by multiplicative/additive, or equivalently by de Morgan dual pairs, has led many authors to alter Girard's notation, in particular reverting to the category-theoretic $\times$ and $+$ for the additives $\&$ and $\oplus$, and introducing a different symbol such as $\odot$, $\bullet$ or (confusingly) $\oplus$ for Girard's $\parr$. But on this page we will stick to Girard's conventions for consistency. In [[relevant logic]], the terms ``conjunction'' and ``disjunction'' are often reserved for the additive versions $\&$ and $\oplus$, which are written with the traditional notations $\wedge$ and $\vee$. In this case, the multiplicative conjunction $A\otimes B$ is called \textbf{fusion} and denoted $A\circ B$, while the multiplicative disjunction $A\parr B$ is called \textbf{fission} and denoted $A+B$ (or sometimes, confusingly, $A\oplus B$). In relevant logic the symbol $\bot$ may also be used for the \emph{additive} falsity, here denoted $\mathbf{0}$. 
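For reference, the connectives and their units in Girard's notation, as used on this page, are summarized in the following table:
\begin{displaymath}
\begin{array}{l|cc|cc}
 & \text{conjunction} & \text{unit} & \text{disjunction} & \text{unit} \\
\hline
\text{additive} & \& & \top & \oplus & \mathbf{0} \\
\text{multiplicative} & \otimes & \mathbf{1} & \parr & \bot \\
\text{exponential} & {!} & & {?} &
\end{array}
\end{displaymath}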
Also, sometimes the additive connectives are called \textbf{extensional} and the multiplicatives \textbf{intensional}. Sometimes one does not define the operation of negation, defining only $p^\perp$ for a propositional variable $p$. It is a theorem that every proposition above is equivalent (in the sense defined below) to a proposition in which negation is applied only to propositional variables. We now define the valid sequents, where we write $A, B, C \vdash D, E, F$ to state the validity of the sequent consisting of the list $(A,B,C)$ and the list $(D,E,F)$ and use Greek letters for sublists: \begin{itemize}% \item The [[structural rule]]s:\begin{itemize}% \item The [[exchange rule]]: If a sequent is valid, then any [[permutation]] of it (created by permuting its left and right sides independently) is valid. \item The restricted [[weakening rule]]: If $\Gamma, \Delta \vdash \Theta$, then $\Gamma, !{A}, \Delta \vdash \Theta$, for any $A$; conversely and dually, if $\Gamma \vdash \Delta, \Theta$, then $\Gamma \vdash \Delta, ?{A}, \Theta$ for any $A$. \item The restricted [[contraction rule]]: If $\Gamma, !{A}, !{A}, \Delta \vdash \Theta$, then $\Gamma, !{A}, \Delta \vdash \Theta$; conversely and dually, if $\Gamma \vdash \Delta, ?{A}, ?{A}, \Theta$, then $\Gamma \vdash \Delta, ?{A}, \Theta$. \item The [[identity rule]]: Always, $A \vdash A$; \item The [[cut rule]]: If $\Gamma \vdash A, \Phi$ and $\Psi,A \vdash \Delta$, then $\Psi,\Gamma \vdash \Delta,\Phi$. \end{itemize} \item The logical rules for each operation:\begin{itemize}% \item If $\Gamma \vdash A, \Delta$, then $\Gamma, A^\perp \vdash \Delta$; conversely and dually, if $\Gamma, A \vdash \Delta$, then $\Gamma \vdash A^\perp, \Delta$. \item If $\Gamma, A, \Delta \vdash \Theta$ or $\Gamma, B, \Delta \vdash \Theta$, then $\Gamma, A \& B, \Delta \vdash \Theta$; conversely, if $\Gamma \vdash \Delta, A, \Theta$ and $\Gamma \vdash \Delta, B, \Theta$, then $\Gamma \vdash \Delta, A \& B, \Theta$. \item Dually, if $\Gamma \vdash \Delta, A, \Theta$ or $\Gamma \vdash \Delta, B, \Theta$, then $\Gamma \vdash \Delta, A \oplus B, \Theta$; conversely, if $\Gamma, A, \Delta \vdash \Theta$ and $\Gamma, B, \Delta \vdash \Theta$, then $\Gamma, A \oplus B, \Delta \vdash \Theta$. \item If $\Gamma, A, B, \Delta \vdash \Theta$, then $\Gamma, A \otimes B, \Delta \vdash \Theta$; conversely, if $\Gamma \vdash \Delta, A$ and $\Lambda \vdash B, \Theta$, then $\Gamma, \Lambda \vdash \Delta, A \otimes B, \Theta$. \item Dually, if $\Gamma \vdash \Delta, A, B, \Theta$, then $\Gamma \vdash \Delta, A \parr B, \Theta$; conversely, if $\Gamma, A \vdash \Delta$ and $B, \Theta \vdash \Lambda$, then $\Gamma, A \parr B, \Theta \vdash \Delta, \Lambda$. \item Always $\Gamma \vdash \Delta, \top, \Theta$; dually (there is no converse), $\Gamma, \mathbf{0}, \Delta \vdash \Theta$. \item If $\Gamma, \Delta \vdash \Theta$, then $\Gamma, \mathbf{1}, \Delta \vdash \Theta$; conversely, $\vdash \mathbf{1}$. \item Dually, if $\Gamma \vdash \Delta, \Theta$, then $\Gamma \vdash \Delta, \bot, \Theta$; conversely, $\bot \vdash$. \item If $\Gamma, A, \Delta \vdash \Theta$, then $\Gamma, !{A}, \Delta \vdash \Theta$; conversely, if $\Gamma \vdash \Delta, A, \Theta$, then $\Gamma \vdash \Delta, !{A}, \Theta$, whenever $\Gamma$ consists entirely of propositions of the form $!{-}$ while $\Delta$ and $\Theta$ consist entirely of propositions of the form $?{-}$. 
\item Dually, if $\Gamma \vdash \Delta, A, \Theta$, then $\Gamma \vdash \Delta, ?{A}, \Theta$; conversely, if $\Gamma, A, \Delta \vdash \Theta$, then $\Gamma, ?{A}, \Delta \vdash \Theta$, whenever $\Gamma$ and $\Delta$ consist entirely of propositions of the form $!{-}$ while $\Theta$ consists entirely of propositions of the form $?{-}$.
\end{itemize}
\end{itemize}

The main point of linear logic is the restricted use of the weakening and contraction rules; if these were universally valid (applying to any $A$ rather than only to $!{A}$ or $?{A}$), then the additive and multiplicative operations would be equivalent (in the sense defined below) and similarly $!{A}$ and $?{A}$ would be equivalent to $A$, which would give us [[classical logic]]. On the other hand, one can also remove the exchange rule to get a variety of [[noncommutative logic]]; one must then be careful about how to write the other rules (as we have been above).

As usual, there is a theorem of [[cut elimination]] showing that the cut rule and identity rule follow from all other rules and the special cases of the identity rule of the form $p \vdash p$ for a propositional variable $p$.

The propositions $A$ and $B$ are (propositionally) \textbf{equivalent} if $A \vdash B$ and $B \vdash A$ are both valid, which we express by writing $A \equiv B$. It is then a theorem that either may be swapped for the other anywhere in a sequent without affecting its validity. Up to equivalence, negation is an [[involution]], and the operations $\&$, $\oplus$, $\otimes$, and $\parr$ are all [[associative]], with respective [[identity elements]] $\top$, $\mathbf{0}$, $\mathbf{1}$, and $\bot$. These operations are also [[commutative operation|commutative]] (although this fails for the multiplicative connectives if we drop the exchange rule). The additive connectives are also [[idempotent]] (but the multiplicative ones are not).

\begin{remark}
There is a more refined notion of equivalence, where we pay attention to specific derivations $\pi: A \vdash B$ of sequents, and deem two derivations $\pi, \pi'$ of $A \vdash B$ \emph{Lambek-equivalent} if they map to the same morphism under any categorical semantics $S$; see below. Given a pair of derivations $\pi: A \vdash B$ and $\rho: B \vdash A$, it then makes sense to ask whether they are Lambek-inverse to one another (i.e., whether $S(\rho) = S(\pi)^{-1}$ under any semantics), so that the derivations exhibit an isomorphism $S(A) \cong S(B)$ under any semantics $S$. This equivalence relation $A \equiv_{Lambek} B$ is strictly stronger than propositional equivalence. It should be observed that all equivalences $A \equiv B$ listed below are in fact Lambek equivalences.
\end{remark}

We also have [[distributive law|distributive laws]] that explain the adjectives `additive', `multiplicative', and `exponential':
\begin{itemize}%
\item Multiplication distributes over addition if one is a conjunction and one is a disjunction:\begin{itemize}%
\item $A \otimes (B \oplus C) \equiv (A \otimes B) \oplus (A \otimes C)$ (and on the other side);
\item $A \parr (B \& C) \equiv (A \parr B) \& (A \parr C)$ (and on the other side);
\item $A \otimes \mathbf{0} \equiv \mathbf{0}$ (and on the other side);
\item $A \parr \top \equiv \top$ (and on the other side).
\end{itemize}
\item Exponentiation converts addition into multiplication if all are conjunctions or all are disjunctions:\begin{itemize}%
\item $!{(A \& B)} \equiv !{A} \otimes !{B}$;
\item $?{(A \oplus B)} \equiv ?{A} \parr ?{B}$;
\item $!{\top} \equiv \mathbf{1}$;
\item $?{\mathbf{0}} \equiv \bot$.
\end{itemize}
\end{itemize}

It is also a theorem that negation (except for the negations of propositional variables) can be defined (up to equivalence) recursively as follows:
\begin{itemize}%
\item $(A^\perp)^\perp \equiv A$;
\item $(A \& B)^\perp \equiv A^\perp \oplus B^\perp$ and vice versa;
\item $(A \otimes B)^\perp \equiv A^\perp \parr B^\perp$ and vice versa;
\item $\top^\perp \equiv \mathbf{0}$ and vice versa;
\item $\mathbf{1}^\perp \equiv \bot$ and vice versa;
\item $(!{A})^\perp \equiv ?{A^\perp}$ and vice versa.
\end{itemize}

The logical rules for [[negation]] can then be proved. In this way, linear logic in the original sense (interpreted in [[star-autonomous categories]]) has a perfect [[de Morgan duality]]. But observe that more general variants (interpreted in more general [[symmetric monoidal categories]]) need not; see for instance \hyperlink{HylandPaiva93}{Hyland-de Paiva 93}.

We can also restrict attention to sequents with one term on either side as follows: $\Gamma \vdash \Delta$ is valid if and only if $\bigotimes \Gamma \vdash \parr \Delta$ is valid, where $\bigotimes(A, B, C) \coloneqq A \otimes B \otimes C$, etc, and similarly for $\parr$ (using implicitly that these are associative, with identity elements to handle the [[empty sequence]]). We can even restrict attention to sequents with no term on the left side and one term on the right; $A \vdash B$ is valid if and only if $\vdash A \multimap B$ is valid, where $A \multimap B \coloneqq A^\perp \parr B$. In this way, it's possible to ignore sequents entirely and speak only of propositions and valid propositions, eliminating half of the logical rules in the process. However, this approach is not as beautifully symmetric as the full sequent calculus.

\hypertarget{variants}{}\subsection*{{Variants}}\label{variants}

The logic described above is full classical linear logic. There are many important [[fragments]] and variants of linear logic, such as:
\begin{itemize}%
\item [[multiplicative linear logic]] (MLL), which contains only $\otimes,\parr$ and their units $\mathbf{1},\bot$ as well as the negation $(-)^\perp$.
\item multiplicative-exponential linear logic (MELL), which contains only $\otimes,\parr,\mathbf{1},\bot,(-)^\perp$ and the exponential modalities $!,?$.
\item multiplicative-additive linear logic (MALL), which contains everything \emph{except} the exponential modalities $!,?$.
\item multiplicative intuitionistic linear logic (MILL), which contains only $\otimes,\mathbf{1},\multimap$ (the latter now as a primitive operation); in particular there is no longer the involutive negation $(-)^\perp$. The sequents are also restricted to have only one formula on the right.
\item intuitionistic linear logic (ILL), which contains all the additive connectives $\&,\oplus,\mathbf{0},\top$ as well as the intuitionistic multiplicatives $\otimes,\mathbf{1},\multimap$ and the exponential $!$, with one formula on the right as above. In this case the connectives are all independent of each other.
\item full intuitionistic linear logic (FILL), which in addition to ILL contains the multiplicative disjunction $\parr$, and perhaps the exponential $?$.
(Sometimes ILL without $\parr,?$ is also called ``full'' intuitionistic linear logic.)
\item non-commutative linear logics (braided or not)
\item ``light'' and ``soft'' linear logics, which limit the use of $!$ to constrain the computational complexity of proofs
\item first-order linear logic, which adds quantifiers $\exists$ and $\forall$ (sometimes denoted $\bigvee$ and $\bigwedge$), either over a fixed domain or over varying types. These quantifiers are usually considered ``additive''; for a theory that has a certain kind of ``multiplicative quantifier'' see [[bunched implication]].
\end{itemize}

One can also consider adding additional rules to linear logic. For instance, by adding the [[weakening rule]] (but not the [[contraction rule]]) one obtains [[affine logic]], whereas by adding contraction but not weakening one obtains [[relevance logic]]. Another rule that is sometimes considered is the [[mix rule]].

[[linear-non-linear logic|Linear-non-linear logic]] is an equivalent presentation of intuitionistic linear logic that decomposes the $!$ modality into an adjunction between a cartesian logic and a linear one in which cartesian variables can also appear.

Some models of linear logic allow for a \emph{codereliction} operation, which allows one to take the ``linear approximation'' of a proof $!(A) \to B$. These models lead to the development of \emph{differential linear logic}, the categorical semantics of which was laid out in \hyperlink{Blute-Cockett-Seely06}{Blute-Cockett-Seely 06}.

\hypertarget{categorial_semantics}{}\subsection*{{Categorial semantics}}\label{categorial_semantics}

We discuss the [[categorical semantics]] of linear logic. See also at \emph{[[relation between type theory and category theory]]}.

\hypertarget{autonomous_categories}{}\subsubsection*{{$*$-autonomous categories}}\label{autonomous_categories}

One way to explain linear logic to a category theorist is to say that its models are [[star-autonomous categories|$*$-autonomous categories]] with extra structure (\hyperlink{Seely}{Seely, 1989, prop. 1.5}). (If the underlying category is a [[suplattice]], then these are commutative [[quantales]]; see \hyperlink{Yetter90}{Yetter 90}.)

Firstly, there is a monoidal `[[tensor product|tensor]]' connective $A \otimes B$. [[negation|Negation]] $A^\bot$ is modelled by the [[dual object]] involution $(-)^*$, while [[linear implication]] $A\multimap B$ corresponds to the [[internal hom]], which can be defined as $(A\otimes B^\bot)^\bot$. There is a [[de Morgan dual]] of the tensor called `par', with $A \parr B = (A^\bot\otimes B^\bot)^\bot$. Tensor and par are the `multiplicative' connectives, which roughly speaking represent the parallel availability of resources.

The `additive' connectives $\&$ and $\oplus$, which correspond in another way to traditional [[conjunction]] and [[disjunction]], are modelled as usual by [[products]] and [[coproducts]]. \hyperlink{Seely}{Seely (1989)} notes that products are sufficient, as $*$-autonomy then guarantees the existence of coproducts; that is, they are also linked by [[de Morgan duality]].

Recall also that linear logic recaptures the notion of a resource that can be discarded or copied arbitrarily by the use of the [[modal logic|modal]] operator $!$, the [[exponential modality]]: $!A$ denotes an `$A$-factory', a resource that can produce zero or more $A$s on demand. It is modelled using a suitably monoidal [[comonad]] $!$ on the underlying $*$-autonomous category.
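For a concrete (if degenerate) example of such a model, consider finite-dimensional [[vector spaces]]: every [[compact closed category]] is $*$-autonomous, with
\begin{displaymath}
A^\bot \coloneqq A^* ,\qquad A \parr B \coloneqq (A^\bot \otimes B^\bot)^\bot \cong A \otimes B ,\qquad A \multimap B \cong A^* \otimes B ,
\end{displaymath}
so that in this model the multiplicative conjunction and disjunction coincide; non-degenerate models such as [[coherence spaces]] keep $\otimes$ and $\parr$ distinct. In the vector-space setting the exponential $!A$ may be interpreted as a cofree cocommutative coalgebra on $A$, along the lines of Murfet's \emph{Logic and linear algebra} cited below.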
There are, however, various inequivalent ways to make the $!$-modality precise; see \href{https://ncatlab.org/nlab/show/!-modality}{!-modality} for discussion.

An LL sequent
\begin{displaymath}
A_1,\ldots,A_n \vdash B_1,\ldots,B_m
\end{displaymath}
is interpreted as a morphism
\begin{displaymath}
\bigotimes_i A_i \to \parr_j B_j
\end{displaymath}
The comonoid structure on $!A$ then yields the weakening
\begin{displaymath}
\Gamma\otimes !A \to \Gamma \otimes I \to \Gamma
\end{displaymath}
and contraction
\begin{displaymath}
\Gamma\otimes !A \to \Gamma \otimes !A \otimes !A
\end{displaymath}
maps. The corresponding rules are interpreted by precomposing the interpretation of a sequent with one of these maps.

The (co)-[[Kleisli category]] of $!$ is [[cartesian closed category|cartesian closed]], and the product there coincides with the product in the base category. The [[exponential object|exponential]] (unsurprisingly for a Kleisli category) is $B^A \cong !A\multimap B$.

Particular monoidal and $*$-autonomous [[posets]] for modeling linear logic can be obtained by [[Day convolution]] from [[ternary frames]]. This includes Girard's \emph{phase spaces} as a particular example.

First-order linear logic is correspondingly modeled in a [[linear hyperdoctrine]].

\hypertarget{polycategories}{}\subsubsection*{{Polycategories}}\label{polycategories}

A different way to explain linear logic categorically (though equivalent, in the end) is to start with a categorical structure which lacks any of the connectives, but has sufficient structure to enable us to characterize them with universal properties. If we ignore the exponentials for now, such a structure is given by a [[polycategory]]. The polymorphisms
\begin{displaymath}
(A,B,C) \to (D,E,F)
\end{displaymath}
in a polycategory correspond to sequents $A,B,C \vdash D,E,F$ in linear logic. The multiplicative connectives are then characterized by representability and corepresentability properties:
\begin{displaymath}
\frac{(A,B) \to C}{A\otimes B \to C}
\end{displaymath}
and
\begin{displaymath}
\frac{A \to (B,C)}{A \to B\parr C}
\end{displaymath}
(Actually, we should allow arbitrarily many unrelated objects to carry through in both cases.) The additives are similarly characterized as categorical products and coproducts, in a polycategorically suitable sense. Finally, dual objects can be recovered as a sort of ``adjoint'':
\begin{displaymath}
\frac{(A,B) \to C}{A \to (B^*,C)}
\end{displaymath}
If all these representing objects exist, then we recover a $*$-autonomous category.

One merit of the polycategory approach is that it makes the role of the structural rules clearer, and also helps explain why $\parr$ sometimes seems like a disjunction and sometimes like a conjunction. Allowing contraction and weakening on the left corresponds to our polycategory being ``left [[cartesian multicategory|cartesian]]''; that is, we have ``diagonal'' and ``projection'' operations such as $Hom(A,A; B) \to Hom(A;B)$ and $Hom(;B) \to Hom(A;B)$. In the presence of these operations, a representing object is automatically a cartesian product; thus $\otimes$ coincides with $\&$. Similarly, allowing contraction and weakening on the right makes the polycategory ``right cocartesian'', which causes corepresenting objects to be coproducts and thus $\parr$ to coincide with $\oplus$. On the other hand, if we allow ``multi-composition'' in our polycategory, i.e.
we can compose a morphism $A \to (B,C)$ with one $(B,C)\to D$ to obtain a morphism $A\to D$, then our polycategory becomes a [[PROP]], and representing and corepresenting objects must coincide; thus $\otimes$ and $\parr$ become the same. This explains why $\parr$ has both a disjunctive and a conjunctive aspect. Of course, if in addition to multi-composition we have the left and right cartesian properties, then all four connectives coincide (including the categorical product and coproduct) and we have an [[additive category]]. \hypertarget{game_semantics_2}{}\subsection*{{Game semantics}}\label{game_semantics_2} We can interpret any proposition in linear logic as a game between two players: we and they. The overall rules are perfectly symmetric between us and them, although no individual game is. At any given moment in a game, exactly one of these four situations obtains: it is our turn, it is their turn, we have won, or they have won; the last two states continue forever afterwards (and the game is over). If it is our turn (or if they have won), then they are winning; if it is their turn (or if we have won), then we are winning. So there are two ways to win: because the game is over (and a winner has been decided), or because it is forever the other players' turn (either because the other players have no move or because every move results in the other players' turn again). This is a little complicated, but it's important in order to be able to distinguish the four constants: \begin{itemize}% \item In $\top$, it is their turn, but they have no moves; the game never ends, but we win. \item Dually, in $\mathbf{0}$, it is our turn, but we have no moves; the game never ends, but they win. \item In contrast, in $\mathbf{1}$, the game ends immediately, and we have won. \item Dually, in $\bot$, the game ends immediately, and they have won. \end{itemize} The binary operators show how to combine two games into a larger game: \begin{itemize}% \item In $A \& B$, it is their turn, and they must choose to play either $A$ or $B$. Once they make their choice, play continues in the chosen game, with ending and winning conditions as in that game. \item Dually, in $A \oplus B$, it is our turn, and we must choose to play either $A$ or $B$. Once we make our choice, play continues in the chosen game, with ending and winning conditions as in that game. \item In $A \otimes B$, play continues with both games in parallel. If it is our turn in either game, then it is our turn overall; if it is their turn in both games, then it is their turn overall. If either game ends, then play continues in the other game; if both games end, then the overall game ends. If we have won both games, then we have won overall; if they have won either game, then they have won overall. \item Dually, in $A \parr B$, play continues with both games in parallel. If it is their turn in either game, then it is their turn overall; if it is our turn in both games, then it is our turn overall. If either game ends, then play continues in the other game; if both games end, then the overall game ends. If they have won both games, then they have won overall; if we have won either game, then we have won overall. \end{itemize} So we can classify things as follows: \begin{itemize}% \item In a conjunction, they choose what game to play; in a disjunction, we have control. Whoever has control must win at least one game to win overall. \item In an addition, one game must be played; in a multiplication, all games must be played. 
\end{itemize}

To further clarify the difference between $\top$ and $\mathbf{1}$ (the additive and multiplicative versions of truth, both of which we win), consider $A \parr \top$ and $A \parr \mathbf{1}$. In $A \parr \top$, it is always their move (since it is their move in $\top$, hence their move in at least one game), so we win just as we win $\top$. (In fact, $A \parr \top \equiv \top$.) However, in $A \parr \mathbf{1}$, the game $\mathbf{1}$ ends immediately, so play continues as in $A$. We have won $\mathbf{1}$, so we only have to end the game to win overall, but there is no guarantee that this will happen. Indeed, in $\mathbf{0} \parr \mathbf{1}$, the game never ends and it is always our turn, so they win. (In $\bot \parr \mathbf{1}$, both games end immediately, and we win. In $A \otimes \mathbf{1}$, we must win both games to win overall, so this reduces to $A$; indeed, $A \otimes \mathbf{1} \equiv A$.)

Negation is easy:
\begin{itemize}%
\item To play $A^\perp$, simply swap roles and play $A$.
\end{itemize}

There are several ways to think of the exponentials. As before, they have control in a conjunction, while we have control in a disjunction. Whoever has control of $!{A}$ or $?{A}$ chooses how many copies of $A$ to play and must win them all to win overall. There are many variations on whether the player in control can spawn new copies of $A$ or close old copies of $A$ prematurely, and whether the other player can play different moves in different copies (whenever the player in control plays the same moves).

Other than the decisions made by the player in control of a game, all moves are made by transmitting resources. Ultimately, these come down to the propositional variables; in the game $p$, we must transmit a $p$ to them, while they must transmit a $p$ to us in $p^\perp$.

A game is \textbf{valid} if we have a strategy to win (whether by putting the game in a state where we have won or by guaranteeing that it is forever their turn). The soundness and completeness of this interpretation is the theorem that $A$ is a valid game if and only if $\vdash A$ is a valid sequent. (Recall that all questions of validity of sequents can be reduced to the validity of single propositions.)

Game semantics for linear logic was first proposed by [[Andreas Blass]], in \hyperlink{Blass1992}{Blass (1992)}. The semantics here is essentially the same as that proposed by Blass.

\hypertarget{multiple_exponential_operators}{}\subsection*{{Multiple exponential operators}}\label{multiple_exponential_operators}

Much as there are many [[exponential functions]] (say from $\mathbb{R}$ to $\mathbb{R}$), even though there is only one addition operation and one multiplication operation, so there can be many versions of the exponential operators $!$ and $?$. (However, there doesn't seem to be any analogue of the [[logarithm]] to convert between them.) More precisely, if we add to the language of linear logic two more operators, $!'$ and $?'$, and postulate of them the same rules as for $!$ and $?$, we cannot prove that $!{A} \equiv !'{A}$ and $?{A} \equiv ?'{A}$. In contrast, if we introduce $\&'$, $\bot'$, etc, we \emph{can} prove that the new operators are equivalent to the old ones.

In terms of the categorial interpretation above, there may be many comonads $!$; it is not determined by the underlying $*$-autonomous category. In terms of game/resource semantics, there are several slightly different interpretations of the exponentials.
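To see concretely why the additives are determined but the exponentials are not, here is a proof sketch using only the sequent rules above. If $\&'$ is a second connective with the same rules as $\&$, then
\begin{displaymath}
\frac{A \vdash A}{A \&' B \vdash A}
\qquad
\frac{B \vdash B}{A \&' B \vdash B}
\qquad\text{and hence}\qquad
A \&' B \vdash A \& B ,
\end{displaymath}
and symmetrically $A \& B \vdash A \&' B$, so $A \& B \equiv A \&' B$. The analogous attempt to derive $!{A} \vdash !'{A}$ gets stuck: the rule introducing $!'$ on the right may only be applied when every proposition remaining on the left is itself of the form $!'{(-)}$, and the hypothesis $!{A}$ is not of that form.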
One sometimes thinks of the exponentials as coming from infinitary applications of the other operations. For example:
\begin{itemize}%
\item $!{A} \coloneqq \mathbf{1} \& A \& (A \otimes A) \& (A \otimes A \otimes A) \& \cdots$,
\item $!{A} \coloneqq (\mathbf{1} \& A) \otimes (\mathbf{1} \& A) \otimes (\mathbf{1} \& A) \otimes \cdots$,
\item $!{A} \coloneqq \mathbf{1} \& k A \& (k^2/2) (A \otimes A) \& (k^3/6) (A \otimes A \otimes A) \& \cdots$ (which is $\mathrm{e}^{k A}$ in an appropriate sense), where $n A$ means an $n$-fold additive conjunction $A \& \cdots \& A$ for $n$ a [[natural number]], and we pretend that $k$ is a positive number such that $k^n/{n!}$ is always a natural number (which of course is impossible).
\end{itemize}
All of these justify the rules for the exponentials, so again we see that there may be many ways to satisfy these rules.

\hypertarget{related_concepts}{}\subsection*{{Related concepts}}\label{related_concepts}
\begin{itemize}%
\item [[bounded linear logic]]
\item [[game semantics]]
\item [[linear type theory]]
\item [[light logic]]
\begin{itemize}%
\item [[light linear logic]]
\item [[soft linear logic]]
\end{itemize}
\item [[proof net]]
\item [[linearly distributive category]]
\item [[transcendental syntax]]
\item [[propositions as projections]]
\item [[substructural logic]]
\begin{itemize}%
\item [[structural rule]]
\item [[affine logic]]
\item [[relevance logic]]
\item [[bunched logic]]
\item [[separation logic]]
\end{itemize}
\end{itemize}

\hypertarget{references}{}\subsection*{{References}}\label{references}

The original article is
\begin{itemize}%
\item \hypertarget{Girard87}{}[[Jean-Yves Girard]], \emph{Linear logic}, Theoretical Computer Science 50:1, 1987. (\href{http://iml.univ-mrs.fr/~girard/linear.pdf}{pdf})
\end{itemize}

Reviews include
\begin{itemize}%
\item [[Jean-Yves Girard]], \emph{Linear logic, its syntax and semantics} (\href{http://www.cs.brandeis.edu/~cs112/2006-cs112/docs/girard-intro.pdf}{pdf})
\item [[Jean-Yves Girard]], part III of \emph{[[Lectures on Logic]]}, European Mathematical Society 2011
\item [[Daniel Murfet]], \emph{Logic and linear algebra: an introduction} (\href{http://arxiv.org/abs/1407.2650}{arXiv:1407.2650})
\end{itemize}

The [[categorical semantics]] of linear logic in [[star-autonomous categories]] originally appeared in
\begin{itemize}%
\item \hypertarget{Seely}{}[[R. A. G. Seely]], \emph{Linear logic, $\ast$-autonomous categories and cofree coalgebras}, \emph{Contemporary Mathematics} 92, 1989. ([[SeelyLinearLogic.pdf:file]], \href{http://www.math.mcgill.ca/rags/nets/llsac.ps.gz}{ps.gz})
\end{itemize}

and for the special case of [[quantales]] in
\begin{itemize}%
\item \hypertarget{Yetter90}{}[[David Yetter]], \emph{Quantales and (noncommutative) linear logic}, Journal of Symbolic Logic 55 (1990), 41-64.
\end{itemize}

The more general case of multiplicative intuitionistic linear logic interpreted in [[symmetric monoidal categories]] was systematically developed in
\begin{itemize}%
\item \hypertarget{Szabo78}{}M.E. Szabo, \emph{Algebra of Proofs}, Studies in Logic and the Foundations of Mathematics, vol. 88 (1978), North-Holland.
\end{itemize}

(that was before Girard introduced the ``linear'' terminology). More recent articles exploring this include
\begin{itemize}%
\item \hypertarget{dePaiva89}{}[[Valeria de Paiva]], \emph{The Dialectica Categories}, \emph{Contemporary Mathematics} 92, 1989.
(\href{http://www.cs.bham.ac.uk/~vdp/publications/dial87.pdf}{web})
\end{itemize}
\begin{itemize}%
\item \hypertarget{Blute91}{}[[Richard Blute]], \emph{Linear logic, coherence, and dinaturality}, Dissertation, University of Pennsylvania 1991; published in Theoretical Computer Science 115:1 (1993), pp. 3-41
\end{itemize}
\begin{itemize}%
\item \hypertarget{BentonBiermanPaivaHyland92}{}Nick Benton, Gavin Bierman, [[Valeria de Paiva]], [[Martin Hyland]], \emph{Term assignments for intuitionistic linear logic}. Technical report 262, Cambridge 1992 (\href{http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.31.8666}{citeseer}, \href{http://www.cs.bham.ac.uk/~vdp/publications/tr262.ps}{ps})
\end{itemize}
\begin{itemize}%
\item \hypertarget{HylandPaiva93}{}[[Martin Hyland]], [[Valeria de Paiva]], \emph{Full Intuitionistic Linear Logic} (extended abstract). Annals of Pure and Applied Logic 64(3), pp. 273-291, 1993. (\href{http://www.cs.bham.ac.uk/~vdp/publications/fill.pdf}{pdf})
\end{itemize}
\begin{itemize}%
\item \hypertarget{Bierman95}{}G. Bierman, \emph{On Intuitionistic Linear Logic}, PhD thesis, Computing Laboratory, University of Cambridge, 1995 (\href{http://research.microsoft.com/~gmb/papers/thesis.pdf}{pdf})
\end{itemize}
\begin{itemize}%
\item \hypertarget{Barber97}{}Andrew Graham Barber, \emph{Linear Type Theories, Semantics and Action Calculi}, PhD thesis, Edinburgh 1997 (\href{http://www.lfcs.inf.ed.ac.uk/reports/97/ECS-LFCS-97-371/}{web}, \href{http://www.lfcs.inf.ed.ac.uk/reports/97/ECS-LFCS-97-371/ECS-LFCS-97-371.pdf}{pdf})
\end{itemize}

This is reviewed and further discussed in
\begin{itemize}%
\item \hypertarget{Schalk04}{}Andrea Schalk, \emph{What is a categorical model for linear logic?} (\href{http://www.cs.man.ac.uk/~schalk/notes/llmodel.pdf}{pdf})
\end{itemize}
\begin{itemize}%
\item [[Richard Blute]], Philip Scott, \emph{Category theory for linear logicians}, 2004 (\href{http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.83.6250}{citeseer})
\item \hypertarget{Mellies09}{}[[Paul-André Melliès]], \emph{Categorial Semantics of Linear Logic}, in \emph{Interactive models of computation and program behaviour}, Panoramas et synth\`e{}ses 27, 2009 (\href{http://www.pps.univ-paris-diderot.fr/~mellies/papers/panorama.pdf}{pdf})
\end{itemize}

The relation between dual intuitionistic linear logic and the $\pi$-calculus is given in
\begin{itemize}%
\item Luis Caires, Frank Pfenning, \emph{Session Types as Intuitionistic Linear Propositions}.
\end{itemize}

Noncommutative linear logic is discussed for instance in
\begin{itemize}%
\item [[Richard Blute]], F. Lamarche, Paul Ruet, \emph{Entropic Hopf algebras and models of non-commutative logic}, Theory and Applications of Categories, Vol. 10, No. 17, 2002, pp. 424--460. (\href{http://www.emis.ams.org/journals/TAC/volumes/10/17/10-17.pdf}{pdf})
\end{itemize}

Further discussion of [[linear type theory]] is for instance in
\begin{itemize}%
\item \emph{Chapter 7, Linear type theory} (\href{http://www.cs.cmu.edu/~fp/courses/linear/handouts/lintt.pdf}{pdf})
\item Anders Schack-Nielsen, Carsten Sch\"u{}rmann, \emph{Linear contextual modal type theory} (\href{http://www.itu.dk/~carsten/papers/lcmtt.pdf}{pdf})
\end{itemize}

See also
\begin{itemize}%
\item \hypertarget{Blass1992}{}[[Andreas Blass]], \emph{A game semantics for linear logic}, Annals of Pure and Applied Logic 56, pp. 183--220, 1992.
\item The \href{https://secure.wikimedia.org/wikipedia/en/wiki/Linear_logic}{article} on the English Wikipedia has good summaries of the meanings of the logical operators and of the commonly studied [[fragments]].
\end{itemize}

Discussion of application of linear logic to [[quantum logic]], [[quantum computing]] and generally to [[quantum physics]] includes
\begin{itemize}%
\item [[Vaughan Pratt]], \emph{Linear logic for generalized quantum mechanics}, in Proc. of \emph{Workshop on Physics and Computation (PhysComp'92)} (\href{http://boole.stanford.edu/pub/ql.pdf}{pdf}, \href{http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.137.7817}{web})
\item [[Ross Duncan]], \emph{Types for quantum mechanics}, 2006 (\href{http://homepages.ulb.ac.be/~rduncan/papers/rduncan-thesis.pdf}{pdf})
\item Gianpiero Cattaneo, Maria Luisa Dalla Chiara, Roberto Giuntini and Francesco Paoli, section 9 of \emph{Quantum Logic and Nonclassical Logics}, p. 127 in Kurt Engesser, Dov M. Gabbay, Daniel Lehmann (eds.), \emph{Handbook of Quantum Logic and Quantum Structures: Quantum Logic}, North Holland, 2009
\item Ugo Dal Lago, Claudia Faggian, \emph{On Multiplicative Linear Logic, Modality and Quantum Circuits} (\href{http://arxiv.org/abs/1210.0613}{arXiv:1210.0613})
\end{itemize}

Discussion of [[matrix factorization]] in [[Landau-Ginzburg models]] is in
\begin{itemize}%
\item [[Daniel Murfet]], \emph{Computing with cut systems} (\href{http://arxiv.org/abs/1402.4541}{arXiv:1402.4541})
\end{itemize}

Discussion of [[linear type theory]] without [[units]] is in
\begin{itemize}%
\item [[Robin Houston]], \emph{Linear Logic without Units} (\href{http://arxiv.org/abs/1305.2231}{arXiv:1305.2231})
\end{itemize}

Discussion of [[inductive types]] in the context of [[linear type theory]] is in
\begin{itemize}%
\item St\'e{}phane Gimenez, \emph{Towards Generic Inductive Constructions in Systems of Nets} (\href{http://www.imn.htwk-leipzig.de/WST2013/papers/paper_16.pdf}{pdf})
\end{itemize}

The categorical semantics of differential linear logic is introduced in:
\begin{itemize}%
\item \hypertarget{Blute-Cockett-Seely06}{}[[Richard Blute]], [[Robin Cockett]], [[Robert Seely]], \emph{Differential Categories}, Mathematical Structures in Computer Science 16.6 (2006), pp. 1049-1083. (\href{https://pdfs.semanticscholar.org/82b0/f9533b451d174f407b35e1c39e2376138ac2.pdf}{pdf})
\end{itemize}

[[!redirects linear logic]] [[!redirects linear logics]] [[!redirects classical linear logic]] [[!redirects classical linear logics]] [[!redirects intuitionistic linear logic]] [[!redirects multiplicative intuitionistic linear logic]]

\end{document}