\documentclass[12pt,titlepage]{article}

\usepackage{amsmath}
\usepackage{mathrsfs}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{mathtools}
\usepackage{graphicx}
\usepackage{color}
\usepackage{ucs}
\usepackage[utf8x]{inputenc}
\usepackage{xparse}
\usepackage{hyperref}

%----Macros----------
%
% Unresolved issues:
%
%  \righttoleftarrow
%  \lefttorightarrow
%
%  \color{} with HTML colorspec
%  \bgcolor
%  \array with options (without options, it's equivalent to the matrix environment)

% Of the standard HTML named colors, white, black, red, green, blue and yellow
% are predefined in the color package. Here are the rest.
\definecolor{aqua}{rgb}{0, 1.0, 1.0}
% NOTE(review): "fuschia" is a long-standing misspelling in this generated
% preamble; it is kept for backward compatibility, and the correctly spelled
% HTML color name "fuchsia" is provided as well.
\definecolor{fuschia}{rgb}{1.0, 0, 1.0}
\definecolor{fuchsia}{rgb}{1.0, 0, 1.0}
\definecolor{gray}{rgb}{0.502, 0.502, 0.502}
\definecolor{lime}{rgb}{0, 1.0, 0}
\definecolor{maroon}{rgb}{0.502, 0, 0}
\definecolor{navy}{rgb}{0, 0, 0.502}
\definecolor{olive}{rgb}{0.502, 0.502, 0}
\definecolor{purple}{rgb}{0.502, 0, 0.502}
\definecolor{silver}{rgb}{0.753, 0.753, 0.753}
\definecolor{teal}{rgb}{0, 0.502, 0.502}

% Because of conflicts, \space and \mathop are converted to
% \itexspace and \operatorname during preprocessing.

% itex: \space{ht}{dp}{wd}
%
% Height and baseline depth measurements are in units of tenths of an ex while
% the width is measured in tenths of an em.
\makeatletter
% \itexspace{ht}{dp}{wd}: an invisible strut/box of the requested size
% (dimensions in tenths of an ex / em, as documented above).
\newdimen\itex@wd%
\newdimen\itex@dp%
\newdimen\itex@thd%
\def\itexspace#1#2#3{\itex@wd=#3em%
\itex@wd=0.1\itex@wd%
\itex@dp=#2ex%
\itex@dp=0.1\itex@dp%
\itex@thd=#1ex%
\itex@thd=0.1\itex@thd%
\advance\itex@thd\the\itex@dp%
\makebox[\the\itex@wd]{\rule[-\the\itex@dp]{0cm}{\the\itex@thd}}}
\makeatother

% \tensor and \multiscript
\makeatletter
\newif\if@sup
\newtoks\@sups
\def\append@sup#1{\edef\act{\noexpand\@sups={\the\@sups #1}}\act}%
\def\reset@sup{\@supfalse\@sups={}}%
\def\mk@scripts#1#2{\if #2/ \if@sup ^{\the\@sups}\fi \else%
\ifx #1_ \if@sup ^{\the\@sups}\reset@sup \fi {}_{#2}%
\else \append@sup#2 \@suptrue \fi%
\expandafter\mk@scripts\fi}
\def\tensor#1#2{\reset@sup#1\mk@scripts#2_/}
\def\multiscripts#1#2#3{\reset@sup{}\mk@scripts#1_/#2%
\reset@sup\mk@scripts#3_/}
\makeatother

% \slash: overlays a slash on its argument (negation-style strike-through).
\makeatletter
\newbox\slashbox \setbox\slashbox=\hbox{$/$}
\def\itex@pslash#1{\setbox\@tempboxa=\hbox{$#1$}
  \@tempdima=0.5\wd\slashbox \advance\@tempdima 0.5\wd\@tempboxa
  \copy\slashbox \kern-\@tempdima \box\@tempboxa}
\def\slash{\protect\itex@pslash}
\makeatother

% math-mode versions of \rlap, etc
% from Alexander Perlis, "A complement to \smash, \llap, and lap"
%   http://math.arizona.edu/~aprl/publications/mathclap/
\def\clap#1{\hbox to 0pt{\hss#1\hss}}
\def\mathllap{\mathpalette\mathllapinternal}
\def\mathrlap{\mathpalette\mathrlapinternal}
\def\mathclap{\mathpalette\mathclapinternal}
\def\mathllapinternal#1#2{\llap{$\mathsurround=0pt#1{#2}$}}
\def\mathrlapinternal#1#2{\rlap{$\mathsurround=0pt#1{#2}$}}
\def\mathclapinternal#1#2{\clap{$\mathsurround=0pt#1{#2}$}}

% Renames \sqrt as \oldsqrt and redefine root to result in \sqrt[#1]{#2}
\let\oldroot\root
\def\root#1#2{\oldroot #1 \of{#2}}
\renewcommand{\sqrt}[2][]{\oldroot #1 \of{#2}}

% Manually declare the txfonts symbolsC font
\DeclareSymbolFont{symbolsC}{U}{txsyc}{m}{n}
\SetSymbolFont{symbolsC}{bold}{U}{txsyc}{bx}{n}
\DeclareFontSubstitution{U}{txsyc}{m}{n}

% Manually declare the stmaryrd font
\DeclareSymbolFont{stmry}{U}{stmry}{m}{n}
\SetSymbolFont{stmry}{bold}{U}{stmry}{b}{n}

% Manually declare the MnSymbolE font
\DeclareFontFamily{OMX}{MnSymbolE}{}
\DeclareSymbolFont{mnomx}{OMX}{MnSymbolE}{m}{n}
\SetSymbolFont{mnomx}{bold}{OMX}{MnSymbolE}{b}{n}
\DeclareFontShape{OMX}{MnSymbolE}{m}{n}{
    <-6>  MnSymbolE5
   <6-7>  MnSymbolE6
   <7-8>  MnSymbolE7
   <8-9>  MnSymbolE8
   <9-10> MnSymbolE9
  <10-12> MnSymbolE10
  <12->   MnSymbolE12}{}

% Declare specific arrows from txfonts without loading the full package
\makeatletter
% Redeclare a math symbol even if it already exists (undefine first).
\def\re@DeclareMathSymbol#1#2#3#4{%
  \let#1=\undefined
  \DeclareMathSymbol{#1}{#2}{#3}{#4}}
\re@DeclareMathSymbol{\neArrow}{\mathrel}{symbolsC}{116}
\re@DeclareMathSymbol{\neArr}{\mathrel}{symbolsC}{116}
\re@DeclareMathSymbol{\seArrow}{\mathrel}{symbolsC}{117}
\re@DeclareMathSymbol{\seArr}{\mathrel}{symbolsC}{117}
\re@DeclareMathSymbol{\nwArrow}{\mathrel}{symbolsC}{118}
\re@DeclareMathSymbol{\nwArr}{\mathrel}{symbolsC}{118}
\re@DeclareMathSymbol{\swArrow}{\mathrel}{symbolsC}{119}
\re@DeclareMathSymbol{\swArr}{\mathrel}{symbolsC}{119}
\re@DeclareMathSymbol{\nequiv}{\mathrel}{symbolsC}{46}
\re@DeclareMathSymbol{\Perp}{\mathrel}{symbolsC}{121}
\re@DeclareMathSymbol{\Vbar}{\mathrel}{symbolsC}{121}
\re@DeclareMathSymbol{\sslash}{\mathrel}{stmry}{12}
\re@DeclareMathSymbol{\bigsqcap}{\mathop}{stmry}{"64}
\re@DeclareMathSymbol{\biginterleave}{\mathop}{stmry}{"6}
\re@DeclareMathSymbol{\invamp}{\mathrel}{symbolsC}{77}
\re@DeclareMathSymbol{\parr}{\mathrel}{symbolsC}{77}
\makeatother

% \llangle, \rrangle, \lmoustache and \rmoustache from MnSymbolE
\makeatletter
\def\Decl@Mn@Delim#1#2#3#4{%
  \if\relax\noexpand#1%
    \let#1\undefined
  \fi
  \DeclareMathDelimiter{#1}{#2}{#3}{#4}{#3}{#4}}
\def\Decl@Mn@Open#1#2#3{\Decl@Mn@Delim{#1}{\mathopen}{#2}{#3}}
\def\Decl@Mn@Close#1#2#3{\Decl@Mn@Delim{#1}{\mathclose}{#2}{#3}}
\Decl@Mn@Open{\llangle}{mnomx}{'164}
\Decl@Mn@Close{\rrangle}{mnomx}{'171}
\Decl@Mn@Open{\lmoustache}{mnomx}{'245}
\Decl@Mn@Close{\rmoustache}{mnomx}{'244}
\makeatother

% Widecheck
\makeatletter
\DeclareRobustCommand\widecheck[1]{{\mathpalette\@widecheck{#1}}}
\def\@widecheck#1#2{%
    \setbox\z@\hbox{\m@th$#1#2$}%
    \setbox\tw@\hbox{\m@th$#1%
       \widehat{%
          \vrule\@width\z@\@height\ht\z@
          \vrule\@height\z@\@width\wd\z@}$}%
    \dp\tw@-\ht\z@ \@tempdima\ht\z@ \advance\@tempdima2\ht\tw@
    \divide\@tempdima\thr@@ \setbox\tw@\hbox{%
       \raise\@tempdima\hbox{\scalebox{1}[-1]{\lower\@tempdima\box\tw@}}}%
    {\ooalign{\box\tw@ \cr \box\z@}}}
\makeatother

% \mathraisebox{voffset}[height][depth]{something}
\makeatletter
\NewDocumentCommand\mathraisebox{moom}{%
  \IfNoValueTF{#2}{\def\@temp##1##2{\raisebox{#1}{$\m@th##1##2$}}}{%
    \IfNoValueTF{#3}{\def\@temp##1##2{\raisebox{#1}[#2]{$\m@th##1##2$}}%
    }{\def\@temp##1##2{\raisebox{#1}[#2][#3]{$\m@th##1##2$}}}}%
  \mathpalette\@temp{#4}}
% Fixed: this was erroneously a second \makeatletter, leaving @ a letter.
\makeatother

% udots (taken from yhmath)
\makeatletter
\def\udots{\mathinner{\mkern2mu\raise\p@\hbox{.}
\mkern2mu\raise4\p@\hbox{.}\mkern1mu
\raise7\p@\vbox{\kern7\p@\hbox{.}}\mkern1mu}}
\makeatother

%% Fix array
\newcommand{\itexarray}[1]{\begin{matrix}#1\end{matrix}}
%% \itexnum is a noop
\newcommand{\itexnum}[1]{#1}

%% Renaming existing commands
\newcommand{\underoverset}[3]{\underset{#1}{\overset{#2}{#3}}}
\newcommand{\widevec}{\overrightarrow}
\newcommand{\darr}{\downarrow}
\newcommand{\nearr}{\nearrow}
\newcommand{\nwarr}{\nwarrow}
\newcommand{\searr}{\searrow}
\newcommand{\swarr}{\swarrow}
\newcommand{\curvearrowbotright}{\curvearrowright}
\newcommand{\uparr}{\uparrow}
\newcommand{\downuparrow}{\updownarrow}
\newcommand{\duparr}{\updownarrow}
\newcommand{\updarr}{\updownarrow}
\newcommand{\gt}{>}
\newcommand{\lt}{<}
\newcommand{\map}{\mapsto}
\newcommand{\embedsin}{\hookrightarrow}
\newcommand{\Alpha}{A}
\newcommand{\Beta}{B}
\newcommand{\Zeta}{Z}
\newcommand{\Eta}{H}
\newcommand{\Iota}{I}
\newcommand{\Kappa}{K}
\newcommand{\Mu}{M}
\newcommand{\Nu}{N}
\newcommand{\Rho}{P}
\newcommand{\Tau}{T}
\newcommand{\Upsi}{\Upsilon}
\newcommand{\omicron}{o}
\newcommand{\lang}{\langle}
\newcommand{\rang}{\rangle}
\newcommand{\Union}{\bigcup}
\newcommand{\Intersection}{\bigcap}
\newcommand{\Oplus}{\bigoplus}
\newcommand{\Otimes}{\bigotimes}
\newcommand{\Wedge}{\bigwedge}
\newcommand{\Vee}{\bigvee}
\newcommand{\coproduct}{\coprod}
\newcommand{\product}{\prod}
\newcommand{\closure}{\overline}
\newcommand{\integral}{\int}
\newcommand{\doubleintegral}{\iint}
\newcommand{\tripleintegral}{\iiint}
\newcommand{\quadrupleintegral}{\iiiint}
\newcommand{\conint}{\oint}
\newcommand{\contourintegral}{\oint}
\newcommand{\infinity}{\infty}
\newcommand{\bottom}{\bot}
\newcommand{\minusb}{\boxminus}
\newcommand{\plusb}{\boxplus}
\newcommand{\timesb}{\boxtimes}
\newcommand{\intersection}{\cap}
\newcommand{\union}{\cup}
\newcommand{\Del}{\nabla}
\newcommand{\odash}{\circleddash}
\newcommand{\negspace}{\!}
\newcommand{\widebar}{\overline}
\newcommand{\textsize}{\normalsize}
% NOTE(review): \scriptsize is deliberately rebound to a math-style command
% by the itex translation layer.
\renewcommand{\scriptsize}{\scriptstyle}
\newcommand{\scriptscriptsize}{\scriptscriptstyle}
\newcommand{\mathfr}{\mathfrak}
\newcommand{\statusline}[2]{#2}
\newcommand{\tooltip}[2]{#2}
\newcommand{\toggle}[2]{#2}

% Theorem Environments
\theoremstyle{plain}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}{Lemma}
\newtheorem{prop}{Proposition}
\newtheorem{cor}{Corollary}
\newtheorem*{utheorem}{Theorem}
\newtheorem*{ulemma}{Lemma}
\newtheorem*{uprop}{Proposition}
\newtheorem*{ucor}{Corollary}
\theoremstyle{definition}
\newtheorem{defn}{Definition}
\newtheorem{example}{Example}
\newtheorem*{udefn}{Definition}
\newtheorem*{uexample}{Example}
\theoremstyle{remark}
\newtheorem{remark}{Remark}
\newtheorem{note}{Note}
\newtheorem*{uremark}{Remark}
\newtheorem*{unote}{Note}

%-------------------------------------------------------------------

\begin{document}

%-------------------------------------------------------------------

\section*{logical framework}

\hypertarget{context}{}\subsubsection*{{Context}}\label{context}

\hypertarget{deduction_and_induction}{}\paragraph*{{Deduction and
Induction}}\label{deduction_and_induction} [[!include deduction and induction - contents]] \hypertarget{type_theory}{}\paragraph*{{Type theory}}\label{type_theory} [[!include type theory - contents]] \hypertarget{foundations}{}\paragraph*{{Foundations}}\label{foundations} [[!include foundations - contents]] \hypertarget{contents}{}\section*{{Contents}}\label{contents} \noindent\hyperlink{idea}{Idea}\dotfill \pageref*{idea} \linebreak \noindent\hyperlink{overview}{Overview}\dotfill \pageref*{overview} \linebreak \noindent\hyperlink{Synthetic}{Synthetic presentations}\dotfill \pageref*{Synthetic} \linebreak \noindent\hyperlink{Analytic}{Analytic presentation}\dotfill \pageref*{Analytic} \linebreak \noindent\hyperlink{HOAS}{Higher-order abstract syntax}\dotfill \pageref*{HOAS} \linebreak \noindent\hyperlink{the_power_of_weak_frameworks}{The power of weak frameworks}\dotfill \pageref*{the_power_of_weak_frameworks} \linebreak \noindent\hyperlink{implementations}{Implementations}\dotfill \pageref*{implementations} \linebreak \noindent\hyperlink{References}{References}\dotfill \pageref*{References} \linebreak \hypertarget{idea}{}\subsection*{{Idea}}\label{idea} A \emph{logical framework} is a formal [[metalanguage]] for [[deductive systems]], such as [[logic]], [[natural deduction]], [[type theories]], [[sequent calculus]], etc. Of course, like any formal system, these systems can be described in any sufficiently strong metalanguage. However, all logical systems of this type share certain distinguishing features, so it is helpful to have a particular metalanguage which is well-adapted to describing systems with those features. Much of the description below is taken from \hyperlink{Harper}{(Harper)}. \hypertarget{overview}{}\subsection*{{Overview}}\label{overview} The sentences of a logical framework are called [[judgments]]. 
It turns out that in deductive systems, there are two kinds of non-basic forms that arise very commonly, which we may call
\begin{itemize}%
\item \emph{[[hypothetical judgments]]}: one judgment is a logical consequence of some others.
\item \emph{generic judgments}: a judgment that is made generally for all values of some parameters, each ranging over a ``domain'' or ``syntactic category''. (Note that a ``syntactic category'' in this sense is unrelated to the notion of [[syntactic category]] that is a [[category]] built out of syntax; here we mean ``category'' in its informal sense of a subgrouping, so that for instance we have one syntactic category of ``types'' and another of ``terms''.)
\end{itemize}
These two forms turn out to have many parallel features, e.g. [[reflexivity]] and [[transitivity]] of hypothetical judgments correspond to [[variable]]-use and [[substitution]] in generic judgments. Appealing to the [[propositions as types]] principle, therefore, it is convenient to describe a system in which they are actually both instances of the same thing. That is, we identify the notion of \emph{evidence for a judgment} with the notion of \emph{object of a syntactic category}. This leads to a notion that we will call an \emph{LF-type}. Thus we will have types such as
\begin{itemize}%
\item The \emph{LF-type of evidence for some judgment}.
\item The \emph{LF-type of objects of a syntactic category}.
\end{itemize}
We will also have some general type-forming operations. Perhaps surprisingly, it turns out that
\begin{itemize}%
\item \emph{[[dependent product types]]}
\end{itemize}
are all that we need. There is a potential confusion of terminology, because these \emph{LF-types} in a logical framework (being itself a [[type theory]]) are distinct from the objects that may be called ``types'' in any particular logic we might be talking about \emph{inside} the logical framework.
Thus, for instance, when formalizing [[Martin-Lof type theory]] in a logical framework, there is an ``LF-type'' which is the type of objects of the syntactic category of MLTT-types. This is furthermore distinct from a [[type of types]], which is itself an object of the syntactic category of MLTT-types, i.e. a term belonging to the LF-type of such. The type theory of a logical framework often includes a second layer called \emph{LF-kinds}, which enables us to classify families of LF-types. For instance, the universe $Type$ of all LF-types is an LF-kind, as is the collection $A\to Type$ of all families of LF-types dependent on some LF-type $A$. The LF-types and LF-kinds together are very similar to a [[pure type system]] with two sorts $Type$ and $Kind$, with axiom $Type:Kind$ and rules $(Type,Kind)$ and $(Type,Type)$, although there are some minor technical differences such as the treatment of definitional equality (PTS's generally use untyped conversion, whereas logical frameworks are often formulated in a way so that only canonical forms exist). Thus, we might have the following hierarchy of ``universes'', which we summarize to fix the notation:
\begin{itemize}%
\item $Kind$, the sort of LF-kinds
\item $Type$, the LF-kind of LF-types, i.e. $Type:Kind$.
\item $tp$ or $type$, the LF-type of \emph{all} types in some object theory being discussed in LF, i.e. $tp : Type$.
\item $\mathcal{U}_i$, a type-of-types in such an object-theory, i.e. $\mathcal{U}_i:tp$.
\end{itemize}
Once we have set up the logical framework as a language, there are then two approaches to describing a given logic inside of it. See (\hyperlink{Harper}{Harper}), and the other \hyperlink{References}{references}, for more details.
\hypertarget{Synthetic}{}\subsubsection*{{Synthetic presentations}}\label{Synthetic}
In a synthetic presentation, we use LF-types to represent the syntactic objects and judgments of the [[object theory]].
Thus, if the object theory is a type theory, then in LF we have things like:
\begin{itemize}%
\item an LF-type $tp$ of object-theory types
\item an LF-type $tm$ of object-theory terms
\item a dependent LF-type $of : tm \to tp \to Type$ expressing the object-theory typing judgment. That is, for each object-theory term $a:tm$ and each object-theory type $A:tp$, we have an LF-type $of(a,A)$ expressing the object-theory judgment ``$a:A$'' that $a$ is of type $A$. According to [[propositions as types]] (at the level of the metatheory, sometimes called ``judgments as types''), the elements of $of(a,A)$ are ``proofs'' that $a:A$.
\end{itemize}
Note that we do not have to explicitly carry around an ambient context, as we sometimes do when presenting type theories in a more explicit style of a [[deductive system]]. This is because the notions of hypothetical and generic judgments are built into the logical framework and handled automatically by \emph{its} contexts. We will discuss this further \hyperlink{HOAS}{below}. Synthetic presentations are preferred by the school of \hyperlink{HHP}{Harper-Honsell-Plotkin} and are generally used with implementations such as [[Twelf]]. They are very flexible and can be used to represent many different object-theories. Moreover, they generally support an \textbf{adequacy theorem}, that there is a compositional bijection between the syntactic objects of the object-theory, as usually presented, and the canonical forms of appropriate LF-type in its LF-presentation. Here ``compositional'' means that the bijection respects substitution. Note that the adequacy theorem is a correspondence at the level of \emph{syntax}; it does not even incorporate the object-theory notion of definitional equality! Two object-theory terms such as $(\lambda x.x)y$ and $y$ that are definitionally equal (by [[beta-reduction]]) are syntactically distinct, and hence also correspond to distinct syntactic entities in the LF-encoding.
The definitional equality that relates them is represented by an element of the LF-type $defeq(\dots)$ encoding the definitional-equality judgment of the object-theory. This is appropriate because such LF-encodings are used, among other things, for the \emph{study of the syntax} of the object-theory, e.g. for proving properties of its definitional equality. However, synthetic presentations do not make maximal use of the framework in the case when the object-theory is also a type theory whose judgments are ``analytic''. Here ``synthetic'' means roughly ``requires evidence'' whereas ``analytic'' means roughly ``obvious''.
\hypertarget{Analytic}{}\subsubsection*{{Analytic presentation}}\label{Analytic}
An analytic presentation is only possible for certain kinds of object-theories, generally those which are type theories similar to LF itself. In this case, we represent object-theory types by LF-types themselves. Thus we still have the LF-type $tp$ of object-theory types, but instead of the LF-type $tm$ of terms and the dependent LF-type $of$ representing the object-theory typing judgment, we have
\begin{itemize}%
\item a dependent LF-type $el : tp \to Type$
\end{itemize}
which assigns to each object-theory type, the LF-type of its elements. In other words, the typing judgment of the object-theory is encoded \emph{by the typing judgment} of the meta-theory. Now we have to make a choice about how to represent the definitional equality of the object-theory. A consistent choice is to also represent it by the definitional equality of the meta-theory. That is, in addition to merely giving ``axioms'' such as $tp$ and $el$, we must give \emph{equations} representing the rules of the object-theory as [[equalities]] in the logical framework.
For instance, we must have a [[beta-reduction]] rule such as \begin{verbatim}app A B (lam A B F) M = F M\end{verbatim} If the object-theory is itself a dependent type theory whose only definitional equalities are beta-reductions like this, then if we make the coercion $el$ implicit, we can think of the resulting encoding as analogous to a [[pure type system]] with \emph{three} sorts, $tp$, $Type$, and $Kind$, with $tp:Type$ and $Type:Kind$. However, from a practical point-of-view, rather than extending the logical framework with ad hoc definitional equalities to represent a particular object-theory, often what is actually done is that equality is defined as another type family with explicitly-introduced constructors. In other words, we use the analytic representation of types, but the synthetic representation of definitional equality. For example, in [[Twelf]], the above equation could be represented by first assuming an LF-type family \begin{verbatim}eq : {A:tp} el A -> el A -> type\end{verbatim} (Twelf uses braces as notation for the [[dependent product]]), and then postulating a constant \begin{verbatim}beta : {A:tp} {B:tp} eq (app A B (lam A B F)) (F M)\end{verbatim} in addition to the other axioms of equality. The analytic encoding is associated with \hyperlink{ML}{Martin-Lof}. While convenient for the description of rules in type theories, it is often less appropriate for the purposes of metatheoretic analysis. For instance, in the hybrid style with the LF-type family $eq$, the terms in $el(A)$ will involve explicit coercions along equalities in $eq$. This destroys the adequacy theorem, since coercions along definitional equalities are generally silent in the usual presentation of a theory. Moreover, one must assert explicitly that dependent types respect definitional equalities, and for multiply-dependent types this requires [[dependent products]] in the object-theory. 
On the other hand, the ``fully analytic'' version where definitional equalities in the object-theory are also definitional equalities in the meta-theory involves adding additional equations to the meta-theory, which in general can make it impossible to reason about. One needs a decision procedure for these equalities even to be able to check proofs. Thus, analytic approaches are less general and flexible; they are best adapted to \emph{describing} the rules and semantics of a dependent type theory, whereas synthetic approaches are better for reasoning about syntax and for studying more general object-theories.
\hypertarget{HOAS}{}\subsubsection*{{Higher-order abstract syntax}}\label{HOAS}
In both synthetic and analytic presentations, we use [[higher-order abstract syntax]] (HOAS). Roughly, this means that \emph{variables} in the object-theory are not terms of some LF-type, but are represented by actual LF-variables. For instance, when describing a type theory containing [[function types]] synthetically, we would have
\begin{itemize}%
\item an LF-term $arr : tp \to tp \to tp$, where for object-theory types $A:tp$ and $B:tp$, the term $arr(A,B):tp$ represents their function-type
\item an LF-term $app : tm \to tm \to tm$, where $app(f,a)$ represents the function application $f(a)$
\item an LF-term $lam : (tm \to tm) \to tm$, representing lambda abstraction.
\end{itemize}
The point is that the argument of $lam$ (the ``body'' of the lambda abstraction) is not a ``term containing a free variable $x$'' but rather an LF-function from object-theory terms to object-theory terms. This is intended to be the function ``substitute'' which knows about the body of the lambda-abstraction, and when given an argument it substitutes it for the variable in that body and returns the result. This approach completely avoids dealing with the problems of variable binding and substitution in the object language, by making use of the binding and substitution in the metalanguage LF.
One might say that the variables in LF are the ``universal notion of variable'' which is merely reused by all object-theories. \hypertarget{the_power_of_weak_frameworks}{}\subsubsection*{{The power of weak frameworks}}\label{the_power_of_weak_frameworks} It may be tempting to think of the LF-types such as $tp$ and $tm$ as [[inductive type|inductively]] defined by their specified constructors (such as $arr$ for $tp$, and $app$ and $lam$ for $tm$). However, this is incorrect; LF does not have inductive types. In fact, this weakness is essential in order to guarantee ``adequacy'' of the HOAS encoding. Suppose, for instance, that $tm$ were inductively defined inside of LF. Then we could define a function $tm\to tm$ by pattern-matching on the structure of $tm$, doing one thing if $tm$ were a lambda-abstraction and another thing if it were a function application. But such a function is definitely \emph{not} the sort of thing that we want to be able to pass to the LF-function $lam$! By disallowing such matching, though, we can guarantee that the only functions $tm\to tm$ we can define and pass to $lam$ correspond to ``substituting in a fixed term'' as we intended. As an even simpler example, suppose we consider an object-theory containing just one LF-type $nat$ together with constructors $z : nat$ and $s : nat \to nat$. Although we would like to think of $nat$ as representing the natural numbers, because of the lack of an induction principle, the LF-type $nat \to nat$ certainly cannot be shown to contain all the functions from natural numbers to natural numbers (essentially, we can only construct the constant functions and those incrementing their argument by a fixed constant). On the other hand, to some extent it is possible to get around this restriction by taking a \emph{relational} rather than a functional point-of-view. 
For example, addition of natural numbers can be defined as a type family \begin{verbatim}add : nat -> nat -> nat -> type\end{verbatim} together with a pair of constructors \begin{verbatim}add/z : {N:nat} add z N N. add/s : {M:nat}{N:nat}{P:nat} add M N P -> add (s M) N (s P).\end{verbatim} Now, it is still not possible to prove \emph{inside} LF that $add$ is a total functional relation (i.e., that for all {\colorbox[rgb]{1.00,0.93,1.00}{\tt M\char58nat}} and {\colorbox[rgb]{1.00,0.93,1.00}{\tt N\char58nat}} there exists a unique {\colorbox[rgb]{1.00,0.93,1.00}{\tt P\char58nat}} such that {\colorbox[rgb]{1.00,0.93,1.00}{\tt add\char32M\char32N\char32P}}). However, in this case that is certainly easy to verify by inspection, and the [[Twelf]] proof assistant has facilities for verifying such properties automatically (though in general checking totality is better supported than checking uniqueness). There are tricks one can play in order to use a HOAS-like representation even in a framework that has true inductive types such as [[Coq]]. For instance, \hyperlink{XORN18}{XORN18} uses ``parametrized HOAS'' in which the higher-order functions take as domain an \emph{arbitrary}, unspecified, type, thereby eliminating the possibility of defining functions inductively over elements of any specific type. However, this apparently requires [[parametricity]] axioms in order to work, which are consistent, but false in natural set-theoretic and categorical models. \hypertarget{implementations}{}\subsection*{{Implementations}}\label{implementations} One of the uses of a logical framework is that as a type theory itself, it can be implemented in a computer. This provides a convenient system in which one can ``program'' the rules of any other specific type theory or logic which one wants to study. For a list of logical framework implementations, see \emph{\href{http://www.cs.cmu.edu/~fp/lfs-impl.html}{Specific logical Frameworks and Implementations}}. 
Historically, the first logical framework implementation was [[Automath]]. The goal of the Automath project was to provide a tool for the formalization of mathematics without [[foundations|foundational]] prejudice. Many modern logical frameworks carry influences of this. Then inspired by the development of [[Martin-Löf dependent type theory]] was the [[Edinburgh Logical Framework]] (ELF). The [[logic]] and [[type theory]]-approaches were later combined in the [[Elf]] language. This gave rise to [[Twelf]]. [[Isabelle]] is a logical framework that does not have dependent types, but instead works in a ``predicate logic'' framework with only one layer of dependency, proof-irrelevant propositions over a base [[simply typed lambda calculus]]. Unlike Twelf, it is designed for practical use in working \emph{in} object-languages rather than proving meta-theorems about them. Its most common application is to [[higher-order logic]], but there are also Isabelle formulations of [[first-order logic]], [[ZFC]], and a general library for [[sequent calculus]].
\hypertarget{References}{}\subsection*{{References}}\label{References}
The original logical framework using a synthetic approach was introduced in
\begin{itemize}%
\item [[Bob Harper]], Furio Honsell, and Gordon Plotkin, \emph{A framework for defining logics}
\end{itemize}
while the analytic version was proposed by
\begin{itemize}%
\item [[Per Martin-Lof]], \emph{On the meanings of the logical constants and the justifications of the logical laws}
\end{itemize}
General overviews include:
\begin{itemize}%
\item [[Frank Pfenning]], \emph{Logical frameworks -- a brief introduction} (\href{http://www.cs.cmu.edu/~fp/papers/mdorf01.pdf}{pdf})
\item [[Frank Pfenning]], \emph{Logical frameworks} In Alan Robinson and Andrei Voronkov (eds.) \emph{Handbook of Automated Reasoning}, chapter 17, pages 1063--1147. Elsevier Science Publishers, 1999. (\href{http://www-2.cs.cmu.edu/twelf/notes/handbook00.ps}{ps}).
\item [[Frank Pfenning]], \emph{Logical frameworks web site} (\href{http://www.cs.cmu.edu/~fp/lfs.html}{web}), including an extensive bibliography and a list of implementations
\item Randy Pollack, \emph{Some recent logical frameworks} (2010) (\href{http://homepages.inf.ed.ac.uk/rpollack/export/canonicalLF_talk.pdf}{pdf})
\item [[UF-IAS-2012]], \emph{\href{http://uf-ias-2012.wikispaces.com/Logical+Frameworks}{Logical frameworks}}.
\item [[Robert Harper]], \emph{\href{http://www.cs.cmu.edu/~rwh/papers/lfias/lf.pdf}{Notes on logical frameworks}}
\end{itemize}
A number of examples of encoding object-theories into LF can be found in
\begin{itemize}%
\item Arnon Avron, Furio Honsell, Ian A. Mason, and Robert Pollack, \emph{Using typed lambda calculus to implement formal systems on a machine}
\end{itemize}
Other references:
\begin{itemize}%
\item Bruno Xavier, Carlos Olarte, Giselle Reis, Vivek Nigam, \emph{Mechanizing Linear Logic in Coq}, \href{http://www.cs.cmu.edu/~iliano/projects/metaCLF2/inc/dl/papers/lsfa17.pdf}{pdf}
\end{itemize}
\begin{itemize}%
\item Mary Southern, Gopalan Nadathur, \emph{Towards a Logic for Reasoning About LF Specifications}, \href{https://arxiv.org/abs/1806.10199}{arxiv}
\end{itemize}
[[!redirects logical frameworks]]
[[!redirects higher-order abstract syntax]]
\end{document}