\documentclass[12pt,titlepage]{article} \usepackage{amsmath} \usepackage{mathrsfs} \usepackage{amsfonts} \usepackage{amssymb} \usepackage{amsthm} \usepackage{mathtools} \usepackage{graphicx} \usepackage{color} \usepackage{ucs} \usepackage[utf8x]{inputenc} \usepackage{xparse} \usepackage{hyperref} %----Macros---------- % % Unresolved issues: % % \righttoleftarrow % \lefttorightarrow % % \color{} with HTML colorspec % \bgcolor % \array with options (without options, it's equivalent to the matrix environment) % Of the standard HTML named colors, white, black, red, green, blue and yellow % are predefined in the color package. Here are the rest. \definecolor{aqua}{rgb}{0, 1.0, 1.0} \definecolor{fuschia}{rgb}{1.0, 0, 1.0} \definecolor{gray}{rgb}{0.502, 0.502, 0.502} \definecolor{lime}{rgb}{0, 1.0, 0} \definecolor{maroon}{rgb}{0.502, 0, 0} \definecolor{navy}{rgb}{0, 0, 0.502} \definecolor{olive}{rgb}{0.502, 0.502, 0} \definecolor{purple}{rgb}{0.502, 0, 0.502} \definecolor{silver}{rgb}{0.753, 0.753, 0.753} \definecolor{teal}{rgb}{0, 0.502, 0.502} % Because of conflicts, \space and \mathop are converted to % \itexspace and \operatorname during preprocessing. % itex: \space{ht}{dp}{wd} % % Height and baseline depth measurements are in units of tenths of an ex while % the width is measured in tenths of an em. 
\makeatletter \newdimen\itex@wd% \newdimen\itex@dp% \newdimen\itex@thd% \def\itexspace#1#2#3{\itex@wd=#3em% \itex@wd=0.1\itex@wd% \itex@dp=#2ex% \itex@dp=0.1\itex@dp% \itex@thd=#1ex% \itex@thd=0.1\itex@thd% \advance\itex@thd\the\itex@dp% \makebox[\the\itex@wd]{\rule[-\the\itex@dp]{0cm}{\the\itex@thd}}} \makeatother % \tensor and \multiscript \makeatletter \newif\if@sup \newtoks\@sups \def\append@sup#1{\edef\act{\noexpand\@sups={\the\@sups #1}}\act}% \def\reset@sup{\@supfalse\@sups={}}% \def\mk@scripts#1#2{\if #2/ \if@sup ^{\the\@sups}\fi \else% \ifx #1_ \if@sup ^{\the\@sups}\reset@sup \fi {}_{#2}% \else \append@sup#2 \@suptrue \fi% \expandafter\mk@scripts\fi} \def\tensor#1#2{\reset@sup#1\mk@scripts#2_/} \def\multiscripts#1#2#3{\reset@sup{}\mk@scripts#1_/#2% \reset@sup\mk@scripts#3_/} \makeatother % \slash \makeatletter \newbox\slashbox \setbox\slashbox=\hbox{$/$} \def\itex@pslash#1{\setbox\@tempboxa=\hbox{$#1$} \@tempdima=0.5\wd\slashbox \advance\@tempdima 0.5\wd\@tempboxa \copy\slashbox \kern-\@tempdima \box\@tempboxa} \def\slash{\protect\itex@pslash} \makeatother % math-mode versions of \rlap, etc % from Alexander Perlis, "A complement to \smash, \llap, and lap" % http://math.arizona.edu/~aprl/publications/mathclap/ \def\clap#1{\hbox to 0pt{\hss#1\hss}} \def\mathllap{\mathpalette\mathllapinternal} \def\mathrlap{\mathpalette\mathrlapinternal} \def\mathclap{\mathpalette\mathclapinternal} \def\mathllapinternal#1#2{\llap{$\mathsurround=0pt#1{#2}$}} \def\mathrlapinternal#1#2{\rlap{$\mathsurround=0pt#1{#2}$}} \def\mathclapinternal#1#2{\clap{$\mathsurround=0pt#1{#2}$}} % Renames \sqrt as \oldsqrt and redefine root to result in \sqrt[#1]{#2} \let\oldroot\root \def\root#1#2{\oldroot #1 \of{#2}} \renewcommand{\sqrt}[2][]{\oldroot #1 \of{#2}} % Manually declare the txfonts symbolsC font \DeclareSymbolFont{symbolsC}{U}{txsyc}{m}{n} \SetSymbolFont{symbolsC}{bold}{U}{txsyc}{bx}{n} \DeclareFontSubstitution{U}{txsyc}{m}{n} % Manually declare the stmaryrd font 
\DeclareSymbolFont{stmry}{U}{stmry}{m}{n} \SetSymbolFont{stmry}{bold}{U}{stmry}{b}{n} % Manually declare the MnSymbolE font \DeclareFontFamily{OMX}{MnSymbolE}{} \DeclareSymbolFont{mnomx}{OMX}{MnSymbolE}{m}{n} \SetSymbolFont{mnomx}{bold}{OMX}{MnSymbolE}{b}{n} \DeclareFontShape{OMX}{MnSymbolE}{m}{n}{ <-6> MnSymbolE5 <6-7> MnSymbolE6 <7-8> MnSymbolE7 <8-9> MnSymbolE8 <9-10> MnSymbolE9 <10-12> MnSymbolE10 <12-> MnSymbolE12}{} % Declare specific arrows from txfonts without loading the full package \makeatletter \def\re@DeclareMathSymbol#1#2#3#4{% \let#1=\undefined \DeclareMathSymbol{#1}{#2}{#3}{#4}} \re@DeclareMathSymbol{\neArrow}{\mathrel}{symbolsC}{116} \re@DeclareMathSymbol{\neArr}{\mathrel}{symbolsC}{116} \re@DeclareMathSymbol{\seArrow}{\mathrel}{symbolsC}{117} \re@DeclareMathSymbol{\seArr}{\mathrel}{symbolsC}{117} \re@DeclareMathSymbol{\nwArrow}{\mathrel}{symbolsC}{118} \re@DeclareMathSymbol{\nwArr}{\mathrel}{symbolsC}{118} \re@DeclareMathSymbol{\swArrow}{\mathrel}{symbolsC}{119} \re@DeclareMathSymbol{\swArr}{\mathrel}{symbolsC}{119} \re@DeclareMathSymbol{\nequiv}{\mathrel}{symbolsC}{46} \re@DeclareMathSymbol{\Perp}{\mathrel}{symbolsC}{121} \re@DeclareMathSymbol{\Vbar}{\mathrel}{symbolsC}{121} \re@DeclareMathSymbol{\sslash}{\mathrel}{stmry}{12} \re@DeclareMathSymbol{\bigsqcap}{\mathop}{stmry}{"64} \re@DeclareMathSymbol{\biginterleave}{\mathop}{stmry}{"6} \re@DeclareMathSymbol{\invamp}{\mathrel}{symbolsC}{77} \re@DeclareMathSymbol{\parr}{\mathrel}{symbolsC}{77} \makeatother % \llangle, \rrangle, \lmoustache and \rmoustache from MnSymbolE \makeatletter \def\Decl@Mn@Delim#1#2#3#4{% \if\relax\noexpand#1% \let#1\undefined \fi \DeclareMathDelimiter{#1}{#2}{#3}{#4}{#3}{#4}} \def\Decl@Mn@Open#1#2#3{\Decl@Mn@Delim{#1}{\mathopen}{#2}{#3}} \def\Decl@Mn@Close#1#2#3{\Decl@Mn@Delim{#1}{\mathclose}{#2}{#3}} \Decl@Mn@Open{\llangle}{mnomx}{'164} \Decl@Mn@Close{\rrangle}{mnomx}{'171} \Decl@Mn@Open{\lmoustache}{mnomx}{'245} \Decl@Mn@Close{\rmoustache}{mnomx}{'244} \makeatother % 
Widecheck \makeatletter \DeclareRobustCommand\widecheck[1]{{\mathpalette\@widecheck{#1}}} \def\@widecheck#1#2{% \setbox\z@\hbox{\m@th$#1#2$}% \setbox\tw@\hbox{\m@th$#1% \widehat{% \vrule\@width\z@\@height\ht\z@ \vrule\@height\z@\@width\wd\z@}$}% \dp\tw@-\ht\z@ \@tempdima\ht\z@ \advance\@tempdima2\ht\tw@ \divide\@tempdima\thr@@ \setbox\tw@\hbox{% \raise\@tempdima\hbox{\scalebox{1}[-1]{\lower\@tempdima\box \tw@}}}% {\ooalign{\box\tw@ \cr \box\z@}}} \makeatother % \mathraisebox{voffset}[height][depth]{something} \makeatletter \NewDocumentCommand\mathraisebox{moom}{% \IfNoValueTF{#2}{\def\@temp##1##2{\raisebox{#1}{$\m@th##1##2$}}}{% \IfNoValueTF{#3}{\def\@temp##1##2{\raisebox{#1}[#2]{$\m@th##1##2$}}% }{\def\@temp##1##2{\raisebox{#1}[#2][#3]{$\m@th##1##2$}}}}% \mathpalette\@temp{#4}} \makeatletter % udots (taken from yhmath) \makeatletter \def\udots{\mathinner{\mkern2mu\raise\p@\hbox{.} \mkern2mu\raise4\p@\hbox{.}\mkern1mu \raise7\p@\vbox{\kern7\p@\hbox{.}}\mkern1mu}} \makeatother %% Fix array \newcommand{\itexarray}[1]{\begin{matrix}#1\end{matrix}} %% \itexnum is a noop \newcommand{\itexnum}[1]{#1} %% Renaming existing commands \newcommand{\underoverset}[3]{\underset{#1}{\overset{#2}{#3}}} \newcommand{\widevec}{\overrightarrow} \newcommand{\darr}{\downarrow} \newcommand{\nearr}{\nearrow} \newcommand{\nwarr}{\nwarrow} \newcommand{\searr}{\searrow} \newcommand{\swarr}{\swarrow} \newcommand{\curvearrowbotright}{\curvearrowright} \newcommand{\uparr}{\uparrow} \newcommand{\downuparrow}{\updownarrow} \newcommand{\duparr}{\updownarrow} \newcommand{\updarr}{\updownarrow} \newcommand{\gt}{>} \newcommand{\lt}{<} \newcommand{\map}{\mapsto} \newcommand{\embedsin}{\hookrightarrow} \newcommand{\Alpha}{A} \newcommand{\Beta}{B} \newcommand{\Zeta}{Z} \newcommand{\Eta}{H} \newcommand{\Iota}{I} \newcommand{\Kappa}{K} \newcommand{\Mu}{M} \newcommand{\Nu}{N} \newcommand{\Rho}{P} \newcommand{\Tau}{T} \newcommand{\Upsi}{\Upsilon} \newcommand{\omicron}{o} \newcommand{\lang}{\langle} 
\newcommand{\rang}{\rangle} \newcommand{\Union}{\bigcup} \newcommand{\Intersection}{\bigcap} \newcommand{\Oplus}{\bigoplus} \newcommand{\Otimes}{\bigotimes} \newcommand{\Wedge}{\bigwedge} \newcommand{\Vee}{\bigvee} \newcommand{\coproduct}{\coprod} \newcommand{\product}{\prod} \newcommand{\closure}{\overline} \newcommand{\integral}{\int} \newcommand{\doubleintegral}{\iint} \newcommand{\tripleintegral}{\iiint} \newcommand{\quadrupleintegral}{\iiiint} \newcommand{\conint}{\oint} \newcommand{\contourintegral}{\oint} \newcommand{\infinity}{\infty} \newcommand{\bottom}{\bot} \newcommand{\minusb}{\boxminus} \newcommand{\plusb}{\boxplus} \newcommand{\timesb}{\boxtimes} \newcommand{\intersection}{\cap} \newcommand{\union}{\cup} \newcommand{\Del}{\nabla} \newcommand{\odash}{\circleddash} \newcommand{\negspace}{\!} \newcommand{\widebar}{\overline} \newcommand{\textsize}{\normalsize} \renewcommand{\scriptsize}{\scriptstyle} \newcommand{\scriptscriptsize}{\scriptscriptstyle} \newcommand{\mathfr}{\mathfrak} \newcommand{\statusline}[2]{#2} \newcommand{\tooltip}[2]{#2} \newcommand{\toggle}[2]{#2} % Theorem Environments \theoremstyle{plain} \newtheorem{theorem}{Theorem} \newtheorem{lemma}{Lemma} \newtheorem{prop}{Proposition} \newtheorem{cor}{Corollary} \newtheorem*{utheorem}{Theorem} \newtheorem*{ulemma}{Lemma} \newtheorem*{uprop}{Proposition} \newtheorem*{ucor}{Corollary} \theoremstyle{definition} \newtheorem{defn}{Definition} \newtheorem{example}{Example} \newtheorem*{udefn}{Definition} \newtheorem*{uexample}{Example} \theoremstyle{remark} \newtheorem{remark}{Remark} \newtheorem{note}{Note} \newtheorem*{uremark}{Remark} \newtheorem*{unote}{Note} %------------------------------------------------------------------- \begin{document} %------------------------------------------------------------------- \section*{determinant} \hypertarget{context}{}\subsubsection*{{Context}}\label{context} \hypertarget{algebra}{}\paragraph*{{Algebra}}\label{algebra} [[!include higher algebra - 
contents]] \hypertarget{contents}{}\section*{{Contents}}\label{contents} \noindent\hyperlink{idea}{Idea}\dotfill \pageref*{idea} \linebreak \noindent\hyperlink{definition}{Definition}\dotfill \pageref*{definition} \linebreak \noindent\hyperlink{preliminaries_on_exterior_algebra}{Preliminaries on exterior algebra}\dotfill \pageref*{preliminaries_on_exterior_algebra} \linebreak \noindent\hyperlink{determinant_of_a_matrix}{Determinant of a matrix}\dotfill \pageref*{determinant_of_a_matrix} \linebreak \noindent\hyperlink{properties}{Properties}\dotfill \pageref*{properties} \linebreak \noindent\hyperlink{cramers_rule}{Cramer's rule}\dotfill \pageref*{cramers_rule} \linebreak \noindent\hyperlink{characteristic_polynomial_and_cayleyhamilton_theorem}{Characteristic polynomial and Cayley-Hamilton theorem}\dotfill \pageref*{characteristic_polynomial_and_cayleyhamilton_theorem} \linebreak \noindent\hyperlink{over_the_real_numbers_volume_and_orientation}{Over the real numbers: volume and orientation}\dotfill \pageref*{over_the_real_numbers_volume_and_orientation} \linebreak \noindent\hyperlink{AsAPolynomialInTracesofPowers}{As a polynomial in traces of powers}\dotfill \pageref*{AsAPolynomialInTracesofPowers} \linebreak \noindent\hyperlink{in_terms_of_berezinian_integrals}{In terms of Berezinian integrals}\dotfill \pageref*{in_terms_of_berezinian_integrals} \linebreak \noindent\hyperlink{related_entries}{Related entries}\dotfill \pageref*{related_entries} \linebreak \noindent\hyperlink{references}{References}\dotfill \pageref*{references} \linebreak \hypertarget{idea}{}\subsection*{{Idea}}\label{idea} The determinant is the (essentially unique) universal alternating multilinear map. 
\hypertarget{definition}{}\subsection*{{Definition}}\label{definition} \hypertarget{preliminaries_on_exterior_algebra}{}\subsubsection*{{Preliminaries on exterior algebra}}\label{preliminaries_on_exterior_algebra} Let [[Vect]]${}_k$ be the [[category]] of [[vector space|vector spaces]] over a [[field]] $k$, and assume for the moment that the [[characteristic]] $char(k) \neq 2$. For each $j \geq 0$, let \begin{displaymath} sgn_j \colon S_j \to \hom(k, k) \end{displaymath} be the 1-dimensional [[sign representation]] on the [[symmetric group]] $S_j$, taking each transposition $(i j)$ to $-1 \in k^\ast$. We may linearly extend the sign action of $S_j$, so that $sgn$ names a (right) $k S_j$-[[module]] with underlying [[vector space]] $k$. At the same time, $S_j$ acts on the $j^{th}$ [[tensor product]] of a vector space $V$ by permuting tensor factors, giving a left $k S_j$-module structure on $V^{\otimes j}$. We define the [[Schur functor]] \begin{displaymath} \Lambda^j \colon Vect_k \to Vect_k \end{displaymath} by the formula \begin{displaymath} \Lambda^j(V) = sgn_j \otimes_{k S_j} V^{\otimes j}. \end{displaymath} It is called the $j^{th}$ \textbf{alternating power} (of $V$). Another point of view on the alternating power is via [[superalgebra]]. For any [[cosmos]] $\mathbf{V}$ let $CMon(\mathbf{V})$ be the category of [[commutative monoid]] objects in $\mathbf{V}$. The forgetful functor $CMon(\mathbf{V}) \to \mathbf{V}$ has a [[left adjoint]] \begin{displaymath} \exp(V) = \sum_{n \geq 0} V^{\otimes n}/S_n \end{displaymath} whose values are naturally regarded as graded by degree $n$. This applies in particular to $\mathbf{V}$ the category of [[supervector spaces]]; if $V$ is a supervector space concentrated in odd degree, say with component $V_{odd}$, then each symmetry $\sigma: V \otimes V \to V \otimes V$ maps $v \otimes w \mapsto -w \otimes v$ for elements $v, w \in V_{odd}$. 
It follows that the graded component $\exp(V)_n$ is concentrated in $parity(n)$ degree, with component $\Lambda^n(V_{odd})$. \begin{prop} \label{}\hypertarget{}{} There is a canonical natural isomorphism $\Lambda^n(V \oplus W) \cong \sum_{j + k = n} \Lambda^j(V) \otimes \Lambda^k(W)$. \end{prop} \begin{proof} Again take $\mathbf{V}$ to be the category of supervector spaces. Since the left adjoint $\exp: \mathbf{V} \to CMon(\mathbf{V})$ preserves [[coproducts]] and since the tensor product $\otimes$ of $\mathbf{V}$ provides the coproduct for commutative monoid objects, we have a natural isomorphism \begin{displaymath} \exp(V \oplus W) \cong \exp(V) \otimes \exp(W). \end{displaymath} Examining the grade $n$ component $\exp(V \oplus W)_n$, this leads to an identification \begin{displaymath} \exp(V \oplus W)_n = \sum_{j + k = n} \exp(V)_j \otimes \exp(W)_k. \end{displaymath} and now the result follows by considering the case where $V, W$ are concentrated in odd degree. \end{proof} \begin{cor} \label{}\hypertarget{}{} If $V$ is $n$-[[dimension|dimensional]], then $\Lambda^j(V)$ has dimension $\binom{n}{j}$. In particular, $\Lambda^n(V)$ is 1-dimensional. \end{cor} \begin{proof} By induction on dimension. If $\dim(V) = 1$, we have that $\Lambda^0(V)$ and $\Lambda^1(V)$ are $1$-dimensional, and clearly $\Lambda^n(V) = 0$ for $n \geq 2$, at least when $char(k) \neq 2$. We then infer \begin{displaymath} \itexarray{ \Lambda^j(V \oplus k) & \cong & \sum_{p + q = j} \Lambda^p(V) \otimes \Lambda^q(k) \\ & \cong & \Lambda^j(V) \oplus \Lambda^{j-1}(V) } \end{displaymath} where the dimensions satisfy the same recurrence relation as for [[binomial coefficients]]: $\binom{n+1}{j} = \binom{n}{j} + \binom{n}{j-1}$. \end{proof} More concretely: if $e_1, \ldots, e_n$ is a basis for $V$, then expressions of the form $e_{n_1} \otimes \ldots \otimes e_{n_j}$ form a [[basis of a vector space|basis]] for $V^{\otimes j}$. 
Let $e_{n_1} \wedge \ldots \wedge e_{n_j}$ denote the [[image]] of this element under the [[quotient]] map $V^{\otimes j} \to \Lambda^j(V)$. We have \begin{displaymath} e_{n_1} \wedge \ldots \wedge e_{n_i} \wedge e_{n_{i+1}} \wedge \ldots \wedge e_{n_j} = -e_{n_1} \wedge \ldots \wedge e_{n_{i+1}} \wedge e_{n_i} \wedge \ldots \wedge e_{n_j} \end{displaymath} (consider the transposition in $S_j$ which swaps $i$ and $i+1$) and so we may take only such expressions on the left where $n_1 \lt \ldots \lt n_j$ as forming a spanning set for $\Lambda^j(V)$, and indeed these form a basis. The number of such expressions is $\binom{n}{j}$. \begin{remark} \label{}\hypertarget{}{} In the case where $char(k) = 2$, the same development may be carried out by simply decreeing that $e_{n_1} \wedge \ldots \wedge e_{n_j} = 0$ whenever $n_i = n_{i'}$ for some pair of distinct indices $i$, $i'$. \end{remark} Now let $V$ be an $n$-dimensional space, and let $f \colon V \to V$ be a [[linear map]]. By the proposition, the map \begin{displaymath} \Lambda^n(f) \colon \Lambda^n(V) \to \Lambda^n(V), \end{displaymath} being an [[endomorphism]] on a 1-dimensional space, is given by multiplying by a scalar $D(f) \in k$. It is manifestly [[functor|functorial]] since $\Lambda^n$ is, i.e., $D(f g) = D(f) D(g)$. The quantity $D(f)$ is called the \textbf{determinant} of $f$. \hypertarget{determinant_of_a_matrix}{}\subsubsection*{{Determinant of a matrix}}\label{determinant_of_a_matrix} We see then that if $V$ is of dimension $n$, \begin{displaymath} \det \colon End(V) \to k \end{displaymath} is a [[homomorphism]] of multiplicative [[monoids]]; by commutativity of multiplication in $k$, we infer that \begin{displaymath} \det(U A U^{-1}) = \det(A) \end{displaymath} for each [[inverse|invertible]] [[linear map]] $U \in GL(V)$. 
If we choose a [[basis of a vector space|basis]] of $V$ so that we have an identification $GL(V) \cong Mat_n(k)$, then the determinant gives a [[function]] \begin{displaymath} \det \colon Mat_n(k) \to k \end{displaymath} that takes products of $n \times n$ [[matrices]] to products in $k$. The determinant however is of course independent of choice of basis, since any two choices are related by a change-of-basis matrix $U$, where $A$ and its transform $U A U^{-1}$ have the same determinant. By following the definitions above, we can give an explicit formula: \begin{displaymath} \det(A) = \sum_{\sigma \in S_n} sgn(\sigma) \prod_{i = 1}^n a_{i \sigma(i)}. \end{displaymath} \hypertarget{properties}{}\subsection*{{Properties}}\label{properties} We work over [[fields]] of arbitrary [[characteristic]]. The determinant satisfies the following properties, which taken together uniquely characterize the determinant. Write a square [[matrix]] $A$ as a row of column [[vectors]] $(v_1, \ldots, v_n)$. \begin{enumerate}% \item $\det$ is separately linear in each column vector: \begin{displaymath} \det(v_1, \ldots, a v + b w, \ldots, v_n) = a\det(v_1, \ldots, v, \ldots, v_n) + b\det(v_1, \ldots, w, \ldots, v_n) \end{displaymath} \item $\det(v_1, \ldots, v_n) = 0$ whenever $v_i = v_j$ for distinct $i, j$. \item $\det(I) = 1$, where $I$ is the identity matrix. \end{enumerate} Other properties may be worked out, starting from the explicit formula or otherwise: \begin{itemize}% \item If $A$ is a diagonal matrix, then $\det(A)$ is the product of its diagonal entries. \item More generally, if $A$ is an upper (or lower) triangular matrix, then $\det(A)$ is the product of the diagonal entries. \item If $E/k$ is a [[field extension]] and $f$ is a $k$-linear map $V \to V$, then $\det(f) = \det(E \otimes_k f)$. 
Using the preceding properties and the [[Jordan normal form]] of a matrix, this means that $\det(f)$ is the product of its [[eigenvalues]] (counted with multiplicity), as computed in the [[algebraic closure]] of $k$. \item If $A^t$ is the transpose of $A$, then $\det(A^t) = \det(A)$. \end{itemize} \hypertarget{cramers_rule}{}\subsubsection*{{Cramer's rule}}\label{cramers_rule} A simple observation which flows from these basic properties is \begin{prop} \label{}\hypertarget{}{} \textbf{(Cramer's Rule)} Let $v_1, \ldots, v_n$ be column vectors of dimension $n$, and suppose \begin{displaymath} w = \sum_j a_j v_j. \end{displaymath} Then for each $i$ we have \begin{displaymath} a_i \det(v_1, \ldots, v_i, \ldots, v_n) = \det(v_1, \ldots, w, \ldots, v_n) \end{displaymath} where $w$ occurs as the $i^{th}$ column vector on the right. \end{prop} \begin{proof} This follows straightforwardly from properties 1 and 2 above. \end{proof} For instance, given a square matrix $A$ such that $\det(A) \neq 0$, and writing $A = (v_1, \ldots, v_n)$, this allows us to solve for a vector $a$ in an equation \begin{displaymath} A \cdot a = w \end{displaymath} and we easily conclude that $A$ is invertible if $\det(A) \neq 0$. \begin{remark} \label{}\hypertarget{}{} This holds true even if we replace the field $k$ by an arbitrary commutative [[ring]] $R$, and we replace the condition $\det(A) \neq 0$ by the condition that $\det(A)$ is a unit. (The entire development given above goes through, \emph{mutatis mutandis}.) \end{remark} \hypertarget{characteristic_polynomial_and_cayleyhamilton_theorem}{}\subsubsection*{{Characteristic polynomial and Cayley-Hamilton theorem}}\label{characteristic_polynomial_and_cayleyhamilton_theorem} Given a linear endomorphism $f: M\to M$ of a finite rank free unital module over a commutative unital ring, one can consider the zeros of the [[characteristic polynomial]] $\det(t \cdot 1_V - f)$. 
The coefficients of the polynomial are the concern of the [[Cayley-Hamilton theorem]]. \hypertarget{over_the_real_numbers_volume_and_orientation}{}\subsubsection*{{Over the real numbers: volume and orientation}}\label{over_the_real_numbers_volume_and_orientation} A useful intuition to have for determinants of [[real number|real]] matrices is that they measure \emph{change of volume}. That is, an $n \times n$ matrix with real entries will map a standard unit cube in $\mathbb{R}^n$ to a parallelepiped in $\mathbb{R}^n$ (quashed to lie in a hyperplane if the matrix is singular), and the determinant is, up to sign, the volume of the parallelepiped. It is easy to convince oneself of this in the planar case by a simple dissection of a parallelogram, rearranging the dissected pieces in the style of Euclid to form a rectangle. In algebraic terms, the dissection and rearrangement amount to applying shearing or [[elementary column operations]] to the matrix which, by the properties discussed earlier, leave the determinant unchanged. These operations transform the matrix into a diagonal matrix whose determinant is the area of the corresponding rectangle. This procedure easily generalizes to $n$ dimensions. The sign itself is a matter of interest. An invertible transformation $f \colon V \to V$ is said to be \textbf{[[orientation]]-preserving} if $\det(f)$ is positive, and \textbf{orientation-reversing} if $\det(f)$ is negative. Orientations play an important role throughout geometry and algebraic topology, for example in the study of orientable manifolds (where the tangent bundle as $GL(n)$-bundle can be lifted to a $GL_+(n)$-bundle structure, $GL_+(n) \hookrightarrow GL(n)$ being the subgroup of matrices of positive determinant). See also [[KO-theory]]. 
Finally, we include one more property of determinants which pertains to matrices with real coefficients (which works slightly more generally for matrices with coefficients in a [[local field]]): \hypertarget{AsAPolynomialInTracesofPowers}{}\subsubsection*{{As a polynomial in traces of powers}}\label{AsAPolynomialInTracesofPowers} If $A$ is an $n \times n$ [[matrix]], the determinant of its [[exponential]] equals the [[exponential]] of its [[trace]] \begin{displaymath} \det(\exp(A)) = \exp(tr(A)) \,. \end{displaymath} More generally, the determinant of $A$ is a [[polynomial]] in the [[traces]] of the [[powers]] of $A$: For $2 \times 2$-matrices: \begin{displaymath} \det(A) \;=\; \tfrac{1}{2}\left( tr(A)^2 - tr(A^2) \right) \end{displaymath} For $3 \times 3$-matrices: \begin{displaymath} \det(A) \;=\; \tfrac{1}{6} \left( (tr(A))^3 - 3 tr(A^2) tr(A) + 2 tr(A^3) \right) \end{displaymath} For $4 \times 4$-matrices: \begin{displaymath} \det(A) \;=\; \tfrac{1}{24} \left( (tr(A))^4 - 6 tr(A^2)(tr(A))^2 + 3 (tr(A^2))^2 + 8 tr(A^3) tr(A) - 6 tr(A^4) \right) \end{displaymath} Generally for $n \times n$-matrices (\hyperlink{KondratyukKrivoruchenko92}{Kondratyuk-Krivoruchenko 92, appendix B}): \begin{equation} \det(A) \;=\; \underset{ { k_1,\cdots, k_n \in \mathbb{N} } \atop { \underoverset{\ell = 1}{n}{\sum} \ell k_\ell = n } }{\sum} \underoverset{ l = 1 }{ n }{\prod} \frac{ (-1)^{k_l + 1} }{ l^{k_l} k_l ! } \left(tr(A^l)\right)^{k_l} \label{DeterminantAsPolynomialInTracesOfPowers}\end{equation} \begin{proof} It is enough to prove this for semisimple matrices $A$ (matrices that are [[diagonalizable matrix|diagonalizable]] upon passing to the [[algebraic closure]] of the ground field) because this [[subset]] of matrices is [[Zariski topology|Zariski]] [[dense subset|dense]] (using for example the nonvanishing of the [[discriminant]] of the [[characteristic polynomial]]) and the set of $A$ for which the equation holds is [[Zariski topology|Zariski]] [[closed subset|closed]]. 
Thus, without loss of generality we may suppose that $A$ is [[diagonal matrix|diagonal]] with $n$ [[eigenvalues]] $\lambda_1, \ldots, \lambda_n$ along the diagonal, where the statement can be rewritten as follows. Letting $p_k = tr(A^k) = \lambda_1^k + \ldots + \lambda_n^k$, the following identity holds: \begin{displaymath} \prod_{i=1}^n \lambda_i = \underset{ { k_1,\cdots, k_n \in \mathbb{N} } \atop { \underoverset{\ell = 1}{n}{\sum} \ell k_\ell = n } }{\sum} \underoverset{ l = 1 }{ n }{\prod} \frac{ (-1)^{k_l + 1} }{ l^{k_l} k_l ! } p_l^{k_l} \end{displaymath} This of course is just a [[polynomial]] identity, one closely related to various of the [[Newton identities]] that concern symmetric polynomials in indeterminates $x_1, \ldots, x_n$. Thus we again let $p_k = x_1^k + \ldots + x_n^k$, and define the [[elementary symmetric polynomials]] $\sigma_k = \sigma_k(x_1, \ldots, x_n)$ via the generating function identity \begin{displaymath} \sum_{k \geq 0} \sigma_k t^k = \prod_{i=1}^n (1 + x_i t). \end{displaymath} Then we compute \begin{displaymath} \itexarray{ \sum_{k \geq 0} \sigma_k t^k & = & \prod_{i=1}^n (1 + x_i t) \\ & = & \exp\left(\sum_{i=1}^n \log(1 + x_i t)\right) \\ & = & \exp\left(\sum_{i=1}^n \sum_{k \geq 1} (-1)^{k+1} \frac{x_i^k}{k} t^k \right)\\ & = & \exp\left( \sum_{k \geq 1} (-1)^{k+1} \frac{p_k}{k} t^k\right) } \end{displaymath} and simply match coefficients of $t^n$ in the initial and final series expansions, where we easily compute \begin{displaymath} x_1 x_2 \ldots x_n = \sum_{n = k_1 + 2k_2 + \ldots + n k_n} \prod_{l=1}^n \frac1{(k_l)!} \left(\frac{p_l}{l}\right)^{k_l} (-1)^{k_l+1} \end{displaymath} This completes the proof. 
\end{proof} \hypertarget{in_terms_of_berezinian_integrals}{}\subsection*{{In terms of Berezinian integrals}}\label{in_terms_of_berezinian_integrals} see [[Pfaffian]] for the moment \hypertarget{related_entries}{}\subsection*{{Related entries}}\label{related_entries} \begin{itemize}% \item [[matrix]], [[linear algebra]], [[exterior algebra]], [[characteristic polynomial]] \item [[quasideterminant]], [[Berezinian]],[[Jacobian]], [[Pfaffian]], [[hafnian]], [[Wronskian]] \item [[resultant]], [[discriminant]], [[hyperdeterminant]] \item [[functional determinant]] \item [[determinant line]], [[determinant line bundle]], [[Pfaffian line bundle]] \item [[density bundle]] \end{itemize} \hypertarget{references}{}\subsection*{{References}}\label{references} See also \begin{itemize}% \item Wikipedia, \emph{\href{https://en.wikipedia.org/wiki/Determinant}{Determinant}} \end{itemize} One derivation of the formula \eqref{DeterminantAsPolynomialInTracesOfPowers} for the determinant as a polynomial in traces of powers is spelled out in appendix B of \begin{itemize}% \item L. A. Kondratyuk, I. Krivoruchenko, \emph{Superconducting quark matter in $SU(2)$ colour group}, Zeitschrift für Physik A Hadrons and Nuclei March 1992, Volume 344, Issue 1, pp 99–115 (\href{https://doi.org/10.1007/BF01291027}{doi:10.1007/BF01291027}) \end{itemize} [[!redirects determinants]] [[!redirects Cramer's rule]] [[!redirects Cramer's rules]] \end{document}