\documentclass[12pt,titlepage]{article} \usepackage{amsmath} \usepackage{mathrsfs} \usepackage{amsfonts} \usepackage{amssymb} \usepackage{amsthm} \usepackage{mathtools} \usepackage{graphicx} \usepackage{color} \usepackage{ucs} \usepackage[utf8x]{inputenc} \usepackage{xparse} \usepackage{hyperref} %----Macros---------- % % Unresolved issues: % % \righttoleftarrow % \lefttorightarrow % % \color{} with HTML colorspec % \bgcolor % \array with options (without options, it's equivalent to the matrix environment) % Of the standard HTML named colors, white, black, red, green, blue and yellow % are predefined in the color package. Here are the rest. \definecolor{aqua}{rgb}{0, 1.0, 1.0} \definecolor{fuschia}{rgb}{1.0, 0, 1.0} \definecolor{gray}{rgb}{0.502, 0.502, 0.502} \definecolor{lime}{rgb}{0, 1.0, 0} \definecolor{maroon}{rgb}{0.502, 0, 0} \definecolor{navy}{rgb}{0, 0, 0.502} \definecolor{olive}{rgb}{0.502, 0.502, 0} \definecolor{purple}{rgb}{0.502, 0, 0.502} \definecolor{silver}{rgb}{0.753, 0.753, 0.753} \definecolor{teal}{rgb}{0, 0.502, 0.502} % Because of conflicts, \space and \mathop are converted to % \itexspace and \operatorname during preprocessing. % itex: \space{ht}{dp}{wd} % % Height and baseline depth measurements are in units of tenths of an ex while % the width is measured in tenths of an em. \makeatletter \newdimen\itex@wd% \newdimen\itex@dp% \newdimen\itex@thd% \def\itexspace#1#2#3{\itex@wd=#3em% \itex@wd=0.1\itex@wd% \itex@dp=#2ex% \itex@dp=0.1\itex@dp% \itex@thd=#1ex% \itex@thd=0.1\itex@thd% \advance\itex@thd\the\itex@dp% \makebox[\the\itex@wd]{\rule[-\the\itex@dp]{0cm}{\the\itex@thd}}} \makeatother % \tensor and \multiscript \makeatletter \newif\if@sup \newtoks\@sups \def\append@sup#1{\edef\act{\noexpand\@sups={\the\@sups #1}}\act}% \def\reset@sup{\@supfalse\@sups={}}% \def\mk@scripts#1#2{\if #2/ \if@sup ^{\the\@sups}\fi \else% \ifx #1_ \if@sup ^{\the\@sups}\reset@sup \fi {}_{#2}% \else \append@sup#2 \@suptrue \fi% \expandafter\mk@scripts\fi} \def\tensor#1#2{\reset@sup#1\mk@scripts#2_/} \def\multiscripts#1#2#3{\reset@sup{}\mk@scripts#1_/#2% \reset@sup\mk@scripts#3_/} \makeatother % \slash \makeatletter \newbox\slashbox \setbox\slashbox=\hbox{$/$} \def\itex@pslash#1{\setbox\@tempboxa=\hbox{$#1$} \@tempdima=0.5\wd\slashbox \advance\@tempdima 0.5\wd\@tempboxa \copy\slashbox \kern-\@tempdima \box\@tempboxa} \def\slash{\protect\itex@pslash} \makeatother % math-mode versions of \rlap, etc % from Alexander Perlis, "A complement to \smash, \llap, and lap" % http://math.arizona.edu/~aprl/publications/mathclap/ \def\clap#1{\hbox to 0pt{\hss#1\hss}} \def\mathllap{\mathpalette\mathllapinternal} \def\mathrlap{\mathpalette\mathrlapinternal} \def\mathclap{\mathpalette\mathclapinternal} \def\mathllapinternal#1#2{\llap{$\mathsurround=0pt#1{#2}$}} \def\mathrlapinternal#1#2{\rlap{$\mathsurround=0pt#1{#2}$}} \def\mathclapinternal#1#2{\clap{$\mathsurround=0pt#1{#2}$}} % Renames \sqrt as \oldsqrt and redefine root to result in \sqrt[#1]{#2} \let\oldroot\root \def\root#1#2{\oldroot #1 \of{#2}} \renewcommand{\sqrt}[2][]{\oldroot #1 \of{#2}} % Manually declare the txfonts symbolsC font \DeclareSymbolFont{symbolsC}{U}{txsyc}{m}{n} \SetSymbolFont{symbolsC}{bold}{U}{txsyc}{bx}{n} \DeclareFontSubstitution{U}{txsyc}{m}{n} % Manually declare the stmaryrd font \DeclareSymbolFont{stmry}{U}{stmry}{m}{n} \SetSymbolFont{stmry}{bold}{U}{stmry}{b}{n} % Manually declare the MnSymbolE font \DeclareFontFamily{OMX}{MnSymbolE}{} \DeclareSymbolFont{mnomx}{OMX}{MnSymbolE}{m}{n} 
\SetSymbolFont{mnomx}{bold}{OMX}{MnSymbolE}{b}{n} \DeclareFontShape{OMX}{MnSymbolE}{m}{n}{ <-6> MnSymbolE5 <6-7> MnSymbolE6 <7-8> MnSymbolE7 <8-9> MnSymbolE8 <9-10> MnSymbolE9 <10-12> MnSymbolE10 <12-> MnSymbolE12}{} % Declare specific arrows from txfonts without loading the full package \makeatletter \def\re@DeclareMathSymbol#1#2#3#4{% \let#1=\undefined \DeclareMathSymbol{#1}{#2}{#3}{#4}} \re@DeclareMathSymbol{\neArrow}{\mathrel}{symbolsC}{116} \re@DeclareMathSymbol{\neArr}{\mathrel}{symbolsC}{116} \re@DeclareMathSymbol{\seArrow}{\mathrel}{symbolsC}{117} \re@DeclareMathSymbol{\seArr}{\mathrel}{symbolsC}{117} \re@DeclareMathSymbol{\nwArrow}{\mathrel}{symbolsC}{118} \re@DeclareMathSymbol{\nwArr}{\mathrel}{symbolsC}{118} \re@DeclareMathSymbol{\swArrow}{\mathrel}{symbolsC}{119} \re@DeclareMathSymbol{\swArr}{\mathrel}{symbolsC}{119} \re@DeclareMathSymbol{\nequiv}{\mathrel}{symbolsC}{46} \re@DeclareMathSymbol{\Perp}{\mathrel}{symbolsC}{121} \re@DeclareMathSymbol{\Vbar}{\mathrel}{symbolsC}{121} \re@DeclareMathSymbol{\sslash}{\mathrel}{stmry}{12} \re@DeclareMathSymbol{\bigsqcap}{\mathop}{stmry}{"64} \re@DeclareMathSymbol{\biginterleave}{\mathop}{stmry}{"6} \re@DeclareMathSymbol{\invamp}{\mathrel}{symbolsC}{77} \re@DeclareMathSymbol{\parr}{\mathrel}{symbolsC}{77} \makeatother % \llangle, \rrangle, \lmoustache and \rmoustache from MnSymbolE \makeatletter \def\Decl@Mn@Delim#1#2#3#4{% \if\relax\noexpand#1% \let#1\undefined \fi \DeclareMathDelimiter{#1}{#2}{#3}{#4}{#3}{#4}} \def\Decl@Mn@Open#1#2#3{\Decl@Mn@Delim{#1}{\mathopen}{#2}{#3}} \def\Decl@Mn@Close#1#2#3{\Decl@Mn@Delim{#1}{\mathclose}{#2}{#3}} \Decl@Mn@Open{\llangle}{mnomx}{'164} \Decl@Mn@Close{\rrangle}{mnomx}{'171} \Decl@Mn@Open{\lmoustache}{mnomx}{'245} \Decl@Mn@Close{\rmoustache}{mnomx}{'244} \makeatother % Widecheck \makeatletter \DeclareRobustCommand\widecheck[1]{{\mathpalette\@widecheck{#1}}} \def\@widecheck#1#2{% \setbox\z@\hbox{\m@th$#1#2$}% \setbox\tw@\hbox{\m@th$#1% \widehat{% \vrule\@width\z@\@height\ht\z@ \vrule\@height\z@\@width\wd\z@}$}% \dp\tw@-\ht\z@ \@tempdima\ht\z@ \advance\@tempdima2\ht\tw@ \divide\@tempdima\thr@@ \setbox\tw@\hbox{% \raise\@tempdima\hbox{\scalebox{1}[-1]{\lower\@tempdima\box \tw@}}}% {\ooalign{\box\tw@ \cr \box\z@}}} \makeatother % \mathraisebox{voffset}[height][depth]{something} \makeatletter \NewDocumentCommand\mathraisebox{moom}{% \IfNoValueTF{#2}{\def\@temp##1##2{\raisebox{#1}{$\m@th##1##2$}}}{% \IfNoValueTF{#3}{\def\@temp##1##2{\raisebox{#1}[#2]{$\m@th##1##2$}}% }{\def\@temp##1##2{\raisebox{#1}[#2][#3]{$\m@th##1##2$}}}}% \mathpalette\@temp{#4}} \makeatletter % udots (taken from yhmath) \makeatletter \def\udots{\mathinner{\mkern2mu\raise\p@\hbox{.} \mkern2mu\raise4\p@\hbox{.}\mkern1mu \raise7\p@\vbox{\kern7\p@\hbox{.}}\mkern1mu}} \makeatother %% Fix array \newcommand{\itexarray}[1]{\begin{matrix}#1\end{matrix}} %% \itexnum is a noop \newcommand{\itexnum}[1]{#1} %% Renaming existing commands \newcommand{\underoverset}[3]{\underset{#1}{\overset{#2}{#3}}} \newcommand{\widevec}{\overrightarrow} \newcommand{\darr}{\downarrow} \newcommand{\nearr}{\nearrow} \newcommand{\nwarr}{\nwarrow} \newcommand{\searr}{\searrow} \newcommand{\swarr}{\swarrow} \newcommand{\curvearrowbotright}{\curvearrowright} \newcommand{\uparr}{\uparrow} \newcommand{\downuparrow}{\updownarrow} \newcommand{\duparr}{\updownarrow} \newcommand{\updarr}{\updownarrow} \newcommand{\gt}{>} \newcommand{\lt}{<} \newcommand{\map}{\mapsto} \newcommand{\embedsin}{\hookrightarrow} \newcommand{\Alpha}{A} \newcommand{\Beta}{B} \newcommand{\Zeta}{Z} 
\newcommand{\Eta}{H} \newcommand{\Iota}{I} \newcommand{\Kappa}{K} \newcommand{\Mu}{M} \newcommand{\Nu}{N} \newcommand{\Rho}{P} \newcommand{\Tau}{T} \newcommand{\Upsi}{\Upsilon} \newcommand{\omicron}{o} \newcommand{\lang}{\langle} \newcommand{\rang}{\rangle} \newcommand{\Union}{\bigcup} \newcommand{\Intersection}{\bigcap} \newcommand{\Oplus}{\bigoplus} \newcommand{\Otimes}{\bigotimes} \newcommand{\Wedge}{\bigwedge} \newcommand{\Vee}{\bigvee} \newcommand{\coproduct}{\coprod} \newcommand{\product}{\prod} \newcommand{\closure}{\overline} \newcommand{\integral}{\int} \newcommand{\doubleintegral}{\iint} \newcommand{\tripleintegral}{\iiint} \newcommand{\quadrupleintegral}{\iiiint} \newcommand{\conint}{\oint} \newcommand{\contourintegral}{\oint} \newcommand{\infinity}{\infty} \newcommand{\bottom}{\bot} \newcommand{\minusb}{\boxminus} \newcommand{\plusb}{\boxplus} \newcommand{\timesb}{\boxtimes} \newcommand{\intersection}{\cap} \newcommand{\union}{\cup} \newcommand{\Del}{\nabla} \newcommand{\odash}{\circleddash} \newcommand{\negspace}{\!} \newcommand{\widebar}{\overline} \newcommand{\textsize}{\normalsize} \renewcommand{\scriptsize}{\scriptstyle} \newcommand{\scriptscriptsize}{\scriptscriptstyle} \newcommand{\mathfr}{\mathfrak} \newcommand{\statusline}[2]{#2} \newcommand{\tooltip}[2]{#2} \newcommand{\toggle}[2]{#2} % Theorem Environments \theoremstyle{plain} \newtheorem{theorem}{Theorem} \newtheorem{lemma}{Lemma} \newtheorem{prop}{Proposition} \newtheorem{cor}{Corollary} \newtheorem*{utheorem}{Theorem} \newtheorem*{ulemma}{Lemma} \newtheorem*{uprop}{Proposition} \newtheorem*{ucor}{Corollary} \theoremstyle{definition} \newtheorem{defn}{Definition} \newtheorem{example}{Example} \newtheorem*{udefn}{Definition} \newtheorem*{uexample}{Example} \theoremstyle{remark} \newtheorem{remark}{Remark} \newtheorem{note}{Note} \newtheorem*{uremark}{Remark} \newtheorem*{unote}{Note} %------------------------------------------------------------------- \begin{document} %------------------------------------------------------------------- \section*{graph} \begin{quote}% This page is about the notion in [[combinatorics]]. For other notions of the same name see at \emph{[[graph of a function]]} and \emph{[[graph of a functor]]}. 
\end{quote} \vspace{.5em} \hrule \vspace{.5em} \hypertarget{context}{}\subsubsection*{{Context}}\label{context} \hypertarget{graph_theory}{}\paragraph*{{Graph theory}}\label{graph_theory} [[!include graph theory - contents]] \hypertarget{contents}{}\section*{{Contents}}\label{contents} \noindent\hyperlink{idea}{Idea}\dotfill \pageref*{idea} \linebreak \noindent\hyperlink{definitions}{Definitions}\dotfill \pageref*{definitions} \linebreak \noindent\hyperlink{undirected_graphs}{Undirected graphs}\dotfill \pageref*{undirected_graphs} \linebreak \noindent\hyperlink{directed_graphs}{Directed graphs}\dotfill \pageref*{directed_graphs} \linebreak \noindent\hyperlink{undirected_graphs_as_directed_graphs_with_an_involution}{Undirected graphs as directed graphs with an involution}\dotfill \pageref*{undirected_graphs_as_directed_graphs_with_an_involution} \linebreak \noindent\hyperlink{orientations}{Orientations}\dotfill \pageref*{orientations} \linebreak \noindent\hyperlink{auxiliary_definitions}{Auxiliary definitions}\dotfill \pageref*{auxiliary_definitions} \linebreak \noindent\hyperlink{morphisms_of_graphs}{Morphisms of graphs}\dotfill \pageref*{morphisms_of_graphs} \linebreak \noindent\hyperlink{notions_of_subgraphs_from_the_npov}{Notions of subgraphs from the nPOV}\dotfill \pageref*{notions_of_subgraphs_from_the_npov} \linebreak \noindent\hyperlink{undirected_graphs_as_1complexes_barycentric_subdivision}{Undirected graphs as 1-complexes, barycentric subdivision}\dotfill \pageref*{undirected_graphs_as_1complexes_barycentric_subdivision} \linebreak \noindent\hyperlink{flavors_of_graphs}{Flavors of graphs}\dotfill \pageref*{flavors_of_graphs} \linebreak \noindent\hyperlink{lawveres_remarks_on_graph_theory}{Lawvere's remarks on graph theory}\dotfill \pageref*{lawveres_remarks_on_graph_theory} \linebreak \noindent\hyperlink{related_concepts}{Related concepts}\dotfill \pageref*{related_concepts} \linebreak \noindent\hyperlink{references}{References}\dotfill \pageref*{references} \linebreak \hypertarget{idea}{}\subsection*{{Idea}}\label{idea} A \textbf{graph} is a collection of \emph{[[vertices]]} and \emph{[[edges]]}; each [[edge]] links a pair of [[vertices]], defining a relationship of \emph{incidence} between vertices and edges. There are several variations on the idea, described below. This is the sense of graph in [[combinatorics]]; the other sense in high-school algebra, which interprets a [[morphism]] $f: A \to B$ as a [[subobject]] of the [[product]] $A \times B$, is unrelated; see \emph{[[graph of a function]]} for more on this. \hypertarget{definitions}{}\subsection*{{Definitions}}\label{definitions} Let $V$ and $E$ be [[sets]]; call an element of $V$ a \textbf{vertex} and an element of $E$ an \textbf{edge}. A \textbf{graph} is given by $V$, $E$, and a mapping $d$ that interprets edges as pairs of vertices. Exactly what this means depends on how one defines `mapping that interprets' and `pair'; the possibilities are given below. 
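For concreteness, here is a minimal example of such data (the labels $x$, $y$, $z$, $e_1$, $e_2$ are generic, chosen only for illustration):
\begin{displaymath}
V = \{x, y, z\}, \qquad E = \{e_1, e_2\}, \qquad d(e_1) = \{x, y\}, \qquad d(e_2) = \{y, z\},
\end{displaymath}
so that the edge $e_1$ links the vertices $x$ and $y$, while $e_2$ links $y$ and $z$. The definitions below differ only in what kind of `pair' the value $d(e)$ is allowed to be (ordered or unordered, with or without repeated vertices) and in whether $d$ is required to be [[injective function|injective]].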
We will need the following notation:
\begin{itemize}%
\item $V^2$ is the [[cartesian product]] of $V$ with itself, the set of ordered pairs $(x,y)$ of vertices;
\item $\Delta_V$ is the [[diagonal subset]] of $V^2$, the set of pairs $(x,x)$, so that its [[complement]] $V^2 \setminus \Delta_V$ is the set of pairs as above where $x \neq y$;
\item $\left\langle{V \atop 2}\right\rangle$ is the [[quotient set]] of $V^2$ in which $(x,y)$ is identified with $(y,x)$, the set of unordered pairs $\{x,y\}$ of vertices;
\item $\left({V \atop 2}\right)$ is the quotient set of $V^2 \setminus \Delta_V$ in which $(x,y)$ is identified with $(y,x)$, the set of unordered pairs where $x \neq y$.
\end{itemize}

\hypertarget{undirected_graphs}{}\subsubsection*{{Undirected graphs}}\label{undirected_graphs}

We are now ready for the first batch of definitions.
\begin{itemize}%
\item For a \textbf{simple graph}, a pair of vertices is a [[subset]] of $V$ of [[cardinality]] $2$, and we interpret edges as unordered pairs of vertices in a one-to-one way. Thus a simple graph is given by $V$, $E$, and an [[injective function]] $d: E \hookrightarrow \left({V \atop 2}\right)$. Among graph theorists, this is often the default meaning of `graph' unless another is specified.
\item For a \textbf{[[multigraph]]}, a pair of vertices is the same as above, but we interpret edges as pairs of vertices in a many-to-one way. Thus a multigraph is given by $V$, $E$, and an arbitrary [[function]] $d: E \to \left({V \atop 2}\right)$.
\item For a \textbf{loop graph}, a pair of vertices is any subset of the form $\{x,y\}$, where $x = y$ is allowed, and we interpret edges as pairs of vertices in a one-to-one way again. Thus a loop graph is given by $V$, $E$, and an injective function $d: E \hookrightarrow \left\langle{V \atop 2}\right\rangle$.
\item For a \textbf{[[pseudograph]]}, a pair of vertices is as in a loop graph, while edges are interpreted as pairs of vertices as in a multigraph. Thus a pseudograph is given by $V$, $E$, and a function $d: E \to \left\langle{V \atop 2}\right\rangle$.
\end{itemize}

Note: While `simple graph' is unambiguous, the other terms above are not. In particular, `multigraph' sometimes means a pseudograph, `pseudograph' sometimes means a loop graph, and `loop graph' sometimes means a pseudograph. These could be made unambiguous by saying `simple multigraph', `simple loop graph', and `multipseudograph', respectively, but we will try to keep our terminology short. An old-fashioned (e.g. \hyperlink{Harary1953}{p. 142}) term for `simple graph' is `linear graph', traces of which remain in the usual term `linear hypergraph' in combinatorics (i.e. a hypergraph any two edges of which intersect in at most one element of the ground set).

\hypertarget{directed_graphs}{}\subsubsection*{{Directed graphs}}\label{directed_graphs}

In all four of the above, edges are interpreted as \emph{unordered} pairs. If we instead interpret edges as \emph{ordered} pairs, then we get four new concepts:
\begin{itemize}%
\item A \textbf{directed graph} consists of $V$, $E$, and an injective function $d: E \hookrightarrow V^2 \setminus \Delta_V$;
\item a \textbf{directed multigraph} consists of $V$, $E$, and a function $d: E \to V^2 \setminus \Delta_V$;
\item a \textbf{directed loop graph} consists of $V$, $E$, and an injective function $d: E \hookrightarrow V^2$;
\item a \textbf{[[directed pseudograph]]} consists of $V$, $E$, and a function $d: E \to V^2$.
\end{itemize}

Directed pseudographs are commonly used in [[category theory]], where they are often called `directed graphs', `digraphs', or (less ambiguously) `[[quivers]]'. The same terminological ambiguities as above apply here as well, and they can be resolved in the same way, including using `simple directed graph' for a directed graph if necessary. One can also use `undirected' to emphasise that the previous definitions, rather than these, are meant.

It is always possible to interpret any kind of graph as a directed pseudograph (a quiver), in which there happens to be at most one edge between a given pair of vertices, or there happen to be no loops (or alternatively exactly one of every possible kind of loop), or in which there is an edge from $x$ to $y$ if and only if there is an edge from $y$ to $x$, or some mixture of these.

\hypertarget{undirected_graphs_as_directed_graphs_with_an_involution}{}\subsubsection*{{Undirected graphs as directed graphs with an involution}}\label{undirected_graphs_as_directed_graphs_with_an_involution}

There is an alternative definition of an undirected graph allowing loops and multiple edges (cf. \hyperlink{Serre1977}{Serre 1977}), which begins with the structure of a quiver $s,t : E \rightrightarrows V$ and then asks in addition for a [[fixed point]] free [[involution]] on edges $i : E \to E$ swapping source and target, i.e., such that
\begin{displaymath}
i(i(e)) = e, \quad i(e) \ne e, \quad s(i(e)) = t(e), \quad t(i(e)) = s(e) .
\end{displaymath}
Of course, since the source $s : E \to V$ and target $t : E \to V$ functions determine each other in the presence of the involution $i : E \to E$, it is enough to give, say, $s$ and $i$ to define an undirected graph. In that case, one might alternatively view $E$ as a set of ``half-edges'' or ``flags'' (with $i$ the involution swapping the two halves of an edge), and even lift the condition that $i$ has no fixed points to allow for the possibility of undirected graphs with ``dangling'' or ``open'' edges (i.e., with only one side attached to a vertex).

Although this definition of undirected graphs with open edges is standard (cf. [[ribbon graph]]), \hyperlink{Kock2016BM}{Kock (2016b)} remarks that ``it does not naturally lead to good notions of morphisms, beyond isomorphisms''. A slight variation of this definition with a more natural notion of morphism was introduced by \hyperlink{JoyalKockQPL}{Joyal and Kock (2009)}: they define a ``Feynman graph'' as a triple of [[finite sets]] $V, E, H$ together with a triple consisting of a function $t : H \to V$, an injection $s : H \to E$, and a fixed point free involution $i : E \to E$. (See also \hyperlink{Kock2016GHP}{Kock (2016a)} for further discussion.)

\hypertarget{orientations}{}\subsubsection*{{Orientations}}\label{orientations}

An \textbf{orientation} of an undirected graph is the choice of a direction for every edge. Formally, if we define undirected graphs as above to be quivers $E \rightrightarrows V$ equipped with a fixed point free involution $i : E \to E$, then an orientation corresponds to the choice of a subset $E^+ \subseteq E$ such that $E$ is the [[disjoint union]] $E = E^+ \uplus i(E^+)$. Any orientation of an undirected graph induces a corresponding directed graph $E^+ \rightrightarrows V$.

In many situations, though, it is useful to consider a given undirected graph equipped with one of its many possible orientations. For example, various [[graph invariants]] (such as the [[flow polynomial]], or [[W. T.
Tutte|Tutte]]'s original definition of the [[Tutte polynomial]]) can be defined for an arbitrary orientation of a graph, but are independent of the choice of orientation.

\hypertarget{auxiliary_definitions}{}\subsection*{{Auxiliary definitions}}\label{auxiliary_definitions}

The term \textbf{arc} is often used for an \emph{ordered} edge, while \textbf{line} is sometimes used for an \emph{unordered} edge. We say that an arc $e$ with $d(e) = (x,y)$ is an arc \textbf{from $x$ to $y$}, while a line $e$ such that $d(e) = \{x,y\}$ is a line \textbf{between $x$ and $y$}. In either case, a \textbf{loop} is an edge from a vertex to itself or between a vertex and itself; only (possibly directed) loop graphs and pseudographs can have loops.

Given any sort of graph, we can define a [[binary relation]] on $V$; say that $x$ and $y$ are \textbf{adjacent}, written $x \sim y$, if there exists an edge $e$ such that $d(e) = (x,y)$ or $d(e) = \{x,y\}$. A directed loop graph is determined entirely by this relation; we may say that it \emph{is} $V$ equipped with a binary relation. Then a simple directed graph is $V$ equipped with an [[irreflexive relation]] (or equivalently a [[reflexive relation]]), and an undirected loop graph is $V$ equipped with a [[symmetric relation]].

A graph is \textbf{[[finite graph|finite]]} if $V$ and $E$ are both [[finite sets]]. Given a [[linear ordering]] of the [[vertices]] of a [[finite graph]], its \textbf{adjacency matrix} is a square [[matrix]] whose $(i,j)$th entry gives the number of edges $e$ between the $i$th and $j$th vertices or from the $i$th vertex to the $j$th vertex. In the examples above where a graph is determined by a binary relation on $V$, matrix multiplication corresponds to composition of relations.

\hypertarget{morphisms_of_graphs}{}\subsection*{{Morphisms of graphs}}\label{morphisms_of_graphs}

An \textbf{[[isomorphism]]} from $G = (V,E,d)$ to $G' = (V',E',d')$ consists of a bijection $f: V \to V'$, together with a bijection from $E$ to $E'$ (also denoted $f$) such that $f$ commutes with $d$; that is, $d(f(e)) = (f(x),f(y))$ or $d(f(e)) = \{f(x),f(y)\}$ whenever $d(e) = (x,y)$ or $d(e) = \{x,y\}$ (as appropriate). Two graphs $G$ and $G'$ are \textbf{isomorphic} if there exists such an isomorphism. Then [[finite graphs]] $G$ and $G'$ are isomorphic if and only if they have the same number of vertices and, for some ordering of their vertices, they have the same adjacency matrix.

A \textbf{[[morphism]]} from $G$ to $G'$ should consist of functions $f: V \to V'$ and $f: E \to E'$ such that $f$ commutes with $d$. However, when using a notion of graph where loops are forbidden, some authors allow $f(e)$ to be undefined if $d(e) = (x,y)$ or $d(e) = \{x,y\}$ but $f(x) = f(y)$. The difference amounts to whether one interprets a simple graph as a special kind of loop graph in which no loops exist (the first kind of morphism) or in which each vertex has a unique loop (the second kind of morphism). Either way, an isomorphism (as defined above) is precisely an invertible morphism.

[[Mike Shulman]]: Isn't there something backwards about defining ``isomorphic'' and \emph{then} ``isomorphism'' and \emph{then} ``morphism''? Doesn't the logic generally flow in the other direction (especially around here)?

[[David Roberts]]: well at least there's a historical precedent: this is how Bourbaki would have done it via structures :)

\emph{Toby}: That, and it's simpler to state the definition of `isomorphic' than `isomorphism'.
Not to mention that graph theorists, in my experience, tend to care much more about the property of being isomorphic than the structure of having an isomorphism. As for `morphism', there's even disagreement about what that should be; I think that my definition is the obvious correct one, but it disagrees with the one at, for example, \href{http://en.wikipedia.org/wiki/graph}{Wikipedia} (although they had my definition in the past); both versions give the same notion of isomorphism, however. [[Mike Shulman]]: Well, but we aren't graph theorists here, are we? Isn't it okay for us to rephrase their definitions in the more categorically sensible order? \emph{Toby}: I disagree that `morphism' before `isomorphism' is more categorially sensible. That's like defining $\leq$ before $=$; sometimes useful, sometimes not. Since I am getting ambiguity about morphisms in what I find online, I'd prefer to do isomorphisms first. With luck, I'll find some terminology to distinguish the two kinds of morphisms, or we can make up our own. [[Mike Shulman]]: Okay, I'll accept that sometimes it makes sense to have isomorphisms before morphisms. Certainly there are other situations in which the notion of isomorphism is more obvious, or less subject to debate, than the notion of morphism. But I am happy that now ``isomorphism'' comes before ``isomorphic.'' [[RonnieBrown]]: I hope it is helpful to add the reference below, which also contains quite a few references to categorical treatments of graphs. Under the second notion of morphism (where simple graphs are identified with sets equipped with a symmetric reflexive relation), the [[category of simple graphs]] has many desirable properties (q.v.). \hypertarget{notions_of_subgraphs_from_the_npov}{}\subsection*{{Notions of subgraphs from the nPOV}}\label{notions_of_subgraphs_from_the_npov} A usual definition of \emph{subgraph} in combinatorics is, roughly: \emph{subset}. More precisely, if \emph{undirected simple graph} means \emph{pair $(V,E)$ of two sets, with $E\subseteq[V]^2$ any subset of the set of all two-element subsets of $V$}, then a usual meaning of \emph{subgraph} of $(V,E)$ is (cf. e.g. \hyperlink{DiestelGraphTheoryFourthEd}{p. 3}): any pair $(W,F)$ with $W\subseteq V$, $F\subseteq E$, and\footnote{The last condition is to ensure that $(W,F)$ is itself again a graph, which would not be guaranteed if we defined subgraph to be just any pair of subsets of the respective sets $V$ and $E$.} $F\subseteq [W]^2$. In particular, such a subgraph $(W,F)$ may have isolated vertices which are not isolated in the ambient graph $G$, and $(W,F)$ need not be an \emph{induced} subgraph, which by definition is a subgraph in the above sense for which $F = [W]^2\cap E$. In words: the edge-set of an \emph{induced} subgraph must contain all edges \emph{induced} within the ambient graph by the vertex set of the subgraph.\footnote{This happens to be the usual notion of substructure in model theory, for any relational structure.} Another usual notion of subgraph in combinatorics is\footnote{Somewhat counterintuitively (with regard to connotations of the words \emph{spanning} and \emph{induced}), a \emph{spanning} subgraph need not be \emph{induced}, and in fact it never is, \emph{except} if the subgraph is the graph itself.} \emph{spanning} subgraph: this means just any subgraph $(W,F)$ in the above sense with $W=V$. There are three kinds of spanning subgraphs which are the most studied: [[Hamilton circuit]]s\footnote{We here follow A. 
Bondy's choice of words in \hyperlink{HandbookOfCombinatoricsVol1}{p. 20}, both in the decision whether to use \emph{hamiltonian} or \emph{Hamilton}, and whether to use \emph{cycle} or \emph{circuit}. The term \emph{circuit} is less usual than \emph{cycle} in combinatorics, but less ambiguous, not longer, and more clearly signalling that the combinatorial notion is meant (not one of the many other meanings of \emph{cycle}). One argument in favor of \emph{Hamilton} is that \emph{any} circuit, by itself, is \emph{hamiltonian}.}, [[perfect matching]]s and [[spanning tree]]s.

From the [[nPOV]], it is often possible to describe notions of subgraph in terms of types of monomorphisms in categories of graphs; for example,
\begin{itemize}%
\item \emph{subgraphs} are [[monos]], and
\item \emph{induced subgraphs} are [[regular monos]]
\end{itemize}
in the [[category of simple graphs]], and similarly for suitable categories of other types of graph.

One synonym, in the nLab,\footnote{Incidentally, the term \emph{full} was in use in mid-twentieth century graph theory, then seems to have fallen out of favor.} for \emph{induced subgraph} is \emph{full subgraph}, for brevity, and for harmony with other uses of \emph{full} in category theory (but also for more precise reasons). The precise meaning of \emph{subgraph} depends on the chosen formalization of \emph{graph}, needless to say.

\hypertarget{undirected_graphs_as_1complexes_barycentric_subdivision}{}\subsection*{{Undirected graphs as 1-complexes, barycentric subdivision}}\label{undirected_graphs_as_1complexes_barycentric_subdivision}

Recall that a [[simplicial complex]] of dimension one consists of the data of a set $V$ together with a set $S$ of non-empty subsets of $V$ of cardinality at most $2$, which contains all of the [[singleton]] subsets. In other words, a 1-dimensional simplicial complex is essentially the same thing as a simple graph, with the set of edges being determined by the set of simplices and vice versa:
\begin{displaymath}
E = \{\{x,y\} \mid \{x,y\} \in S, x \ne y\} \qquad S = \{\{x\} \mid x \in V\} \cup E
\end{displaymath}
For this reason, simple graphs are sometimes referred to as \textbf{simplicial graphs} \hyperlink{GrossTucker}{(Gross \& Tucker 1987)}. On the other hand, an undirected graph $G$ with loops or multiple edges can more generally be seen as a 1-dimensional [[CW-complex]] (or more precisely, it has a [[geometric realization]] $|G|$ as a CW-complex in which 0-cells correspond to vertices and 1-cells to edges).

Given a general undirected graph, it is always possible to obtain a simple graph through the process of \emph{[[barycentric subdivision]]}. Let $G$ be a graph with vertex set $V$ and edge set $E$. The \textbf{barycentric subdivision} of $G$ is the graph $G'$ with vertex set $V \cup E$, and with an edge joining $v \in V$ to $e \in E$ just in case $v$ is incident to (i.e., at either end of) $e$ in $G$. It is easy to check that $G'$ contains no loops, while the two-fold barycentric subdivision $G''$ contains no loops or multiple edges; in other words, it is a simple graph. (More generally, the $n$-fold barycentric subdivision contains no circuit of length $\le n$.)

Part of the reason for the importance of simple graphs is that many ``topological'' properties of a graph $G$ (such as [[planar graph|planarity]], first [[Betti number]], etc., which can be defined in terms of the geometric realization of $G$) are preserved under barycentric subdivision.
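For instance, here is a minimal worked example (the labels $x$, $y$, $e_1$, $e_2$ are generic, chosen only for illustration): let $G$ be the multigraph with two vertices joined by two parallel edges,
\begin{displaymath}
V = \{x, y\}, \qquad E = \{e_1, e_2\}, \qquad d(e_1) = d(e_2) = \{x, y\} .
\end{displaymath}
Its barycentric subdivision $G'$ has vertex set $\{x, y, e_1, e_2\}$ and four edges, joining $x$ to $e_1$, $e_1$ to $y$, $y$ to $e_2$, and $e_2$ to $x$; that is, $G'$ is a $4$-cycle, with each edge of $G$ replaced by a path of length $2$ through the new vertex corresponding to it. Here $G'$ is already a simple graph, and both $G$ and $G'$ have the circle as geometric realization, so for example the first Betti number (equal to $1$ here) is unchanged.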
(Although obviously, not all \emph{graph-theoretic} properties are preserved. For example, barycentric subdivision always produces a [[bipartite graph]]). \hypertarget{flavors_of_graphs}{}\subsection*{{Flavors of graphs}}\label{flavors_of_graphs} \begin{itemize}% \item [[reflexive graph]] \item [[directed graph]] \end{itemize} \hypertarget{lawveres_remarks_on_graph_theory}{}\subsection*{{Lawvere's remarks on graph theory}}\label{lawveres_remarks_on_graph_theory} At the [[Como]] conference in 1990, [[William Lawvere]] gave a videotaped lecture including the following remarks: \begin{quote}% I have great problems reading books on graph theory, books and papers on graph theory, because they never tell you exactly what they are talking about. Sometimes the graphs are word inaudible, even when played slower, sometimes they are absolutely reflexive, sometimes they are not. Even if they go so far as talking about homomorphisms, I still don't know exactly what that is, i.e., which category are we in? What they should do is admit that they are working in three or four \emph{different} categories and they don't know how to pass from one to the other, and so on, and inaudible words to simplify.\newline But no, they prefer to talk in a vague way and smushing these together. inaudible tried to understand some of the problems of graph theorists and get bogged? locked? in the first page. Does anybody actually know what a graph minor is? some interjection from the audience Graph minor. Big problem. (..) you see, this famous inaudible works problem on graph minors. Looks like that that might be interesting. But I can't determine \emph{exactly} what it is, because, if you read the first parts of the paper, they waffle, you see, they don't give you a \emph{property} (\ldots{}) (William Lawvere in his 1990 lecture at [[Como]], Italy, Villa Olmo) \end{quote} \hypertarget{related_concepts}{}\subsection*{{Related concepts}}\label{related_concepts} \begin{itemize}% \item \textbf{graph} \item [[omega-graph]] \item [[planar graph]], [[connected graph]] \item [[empty graph]] \item [[ribbon graph]] \item [[hypergraph]] \item [[quiver]], [[McKay quiver]] \item [[n-quiver]] \item [[relation]] \item [[category]], [[free category]] \item [[Feynman diagram]] \item [[graph complex]], [[formality of the little n-disk operad]] \end{itemize} \hypertarget{references}{}\subsection*{{References}}\label{references} Textbooks: \begin{itemize}% \item Frank Harary (1969), \emph{Graph Theory}, Addison-Wesley. \item Frank Harary and E.M. Palmer (1973), \emph{Graphical Enumeration}, Academic Press. \item [[Jean-Pierre Serre]] (1977), \emph{Trees}, Springer. \item [[W. T. Tutte]] (1984), \emph{Graph Theory}, Addison-Wesley. \item Jonathan L. Gross and Thomas W. Tucker (1987), \emph{Topological Graph Theory}, Wiley. \item Gunther Schmidt and Thomas Str\"o{}hlein (1993), \emph{Relations and Graphs: Discrete Mathematics for Computer Scientists}, EATCS Monographs on Theoretical Computer Science, Springer. \item A. Bondy. Basic Graph Theory: Paths and Circuits. Elsevier Amsterdam, 1995, Vol. 1, pp. 1--110 \item Chris Godsil and Gordon Royle (2001), \emph{Algebraic Graph Theory}, Springer. \item R. Diestel, Graph Theory, 4. ed., Springer 2010 \end{itemize} Other references: \begin{itemize}% \item [[Bill Lawvere]] (1989), \emph{Qualitative distinctions between some toposes of generalized graphs}, in Categories in computer science and logic (Boulder, CO, 1987), volume 92 of \emph{Contemporary Mathematics}, 261--299. 
American Mathematical Society, Providence, RI.
\item E. Babson, H. Barcelo, M. de Longueville, R. Laubenbacher, \emph{A Homotopy Theory for Graphs}, \href{http://arxiv.org/abs/math/0403146}{arXiv:math/0403146}
\item [[Ronnie Brown]], I. Morris, J. Shrimpton, and C.D. Wensley (2008), \emph{Graphs of Morphisms of Graphs}, Electronic Journal of Combinatorics, A1 of Volume 15(1), 1--28.
\item [[Frank Harary]] (1953), \emph{On the notion of balance of a signed graph}, The Michigan Mathematical Journal, Volume 2, Issue 2, 143--146.
\item [[André Joyal]] and [[Joachim Kock]], \emph{Feynman graphs, and nerve theorem for compact symmetric multicategories} (extended abstract), in Proceedings of the 6th International Workshop on Quantum Physics and Logic (Oxford 2009), Electronic Notes in Theoretical Computer Science 270 (2) (2011), 105--113. \href{https://arxiv.org/abs/0908.2675}{arXiv:0908.2675}
\item [[Joachim Kock]], \emph{Graphs, hypergraphs, and properads}, Collect. Math. 67 (2016), 155--190. \href{https://arxiv.org/abs/1407.3744}{arXiv:1407.3744}
\item [[Joachim Kock]], \emph{Cospan construction of the graph category of Borisov and Manin}, \href{https://arxiv.org/abs/1611.10342}{arXiv:1611.10342}
\end{itemize}

Discussion of use of [[simplicial complexes]] in graph theory:
\begin{itemize}%
\item MO, \emph{\href{http://mathoverflow.net/questions/161586/what-have-simplicial-complexes-ever-done-for-graph-theory}{What have simplicial complexes ever done for graph theory?}}
\end{itemize}

[[!redirects linear graph]]
[[!redirects graph]]
[[!redirects graphs]]
[[!redirects undirected graph]]
[[!redirects undirected graphs]]
[[!redirects simple directed graph]]
[[!redirects simple directed graphs]]
[[!redirects graph theory]]

\end{document}