%% 252labsAUT97.tex -- a LaTeX source file Autumn 1997 %% "linalg.sty" is a special style file. Get it with this source! %% Usually location: /u/ma/gustafson/tex/texinput/linalg.sty %% \documentstyle[11pt,linalg,multicol]{report} %\solutionsfalse% DON'T PRINT SOLUTIONS \solutionstrue% UNCOMMENT to print solutions. Don't change previous! \def\SCHEDULE{% \begin{itemize} % % WARNING: Look at a printout before trying to choose the problems. % The numbers below are historic, not the ones of a printout. % Usually, only collect for 9 weeks, I-IX only. % Read below for making changes to this file!!!!!! \small\sf \item[] Take-home I [1,2,3,4]: Week 2 \item[] Take-home II [5,6,7,8,9,10]: Week 3 \item[] Take-home III [11,12,13,14,15,16] Week 4 \item[] Take-home IV [17,18,19,20,21]: Week 5 \item[] Take-home V [22,23,24,25,26,27,28]: Week 6 \item[] Take-home VI [29,30,31,32,33,34,35,36] Week 7 \item[] Take-home VII [37,38,39,40,41,42,43,44]: Week 8 \item[] Take-home VIII [45,46,47,48,49,50,51,52]: Week 9 \item[] Take-home IX [53,54,55,56] Week 10 \end{itemize} } %% %% INSTRUCTIONS: %% To modify a problem, change both the problem and its solution, %% which are located in the order PROBLEM+SOLUTION, with no other %% problems intervening. %% %% To move a problem, move both the problem and solution to the new %% location. All numbering is taken care of by the macros. If you %% want to keep the current numbering scheme, then a new problem %% should be moved into the place previously occupied. %% %% To demote a problem to OPTIONAL, with numbering like 1a, 2c, etc, %% change the "\prob{...}{...}" to "\probx{..}{..}". That's right, %% add "x". %% %% To promote an optional problem to a REQUIRED problem, remove the %% "x", i.e., "\probx{..}{..}" becomes "\prob{...}{...}". %% %% NOTES are added using the "\NOTES{...}" macro. They will not be %% printed unless "\notestrue" appears at the top of the source. 
%% The definition made at the top of this source allows the NOTES to %% contain verbatim material. %% % % BOOLEAN set in "linalg.sty" for switching solutions in/out % Normally, this is FALSE to print just the problems. % \ifsolutions\long\gdef\NOTES{}\else\long\gdef\NOTES#1{}\fi \pagestyle{plain} \begin{document} \ifsolutions\else\begin{multicols}{2}\fi \begin{center} {\Large\bf Math 252}\\ {\Large\bf Applied Linear Algebra}\\ {\Large\bf \ifsolutions Problem Notes on \fi Take-Home Exams I to IX} \end{center} % Collection of the take-home exam will be made in weekly increments. % WARNING: Look at a printout before trying to choose the problems. % The numbers below are historic, not the ones of a printout. % Usually, only collect for 9 weeks, I-IX only. % \ifsolutions\else \SCHEDULE \fi \begin{exercises} \prob{1.}{($2\times 2$ Linear System)} Find all solutions to $x_1-5x_2=0$, $-x_1+5x_2=0$. \SOL{1.}{ To find all solutions to the system $x_1-5x_2=0$, $-x_1+5x_2=0$, use any method known to you, such as elimination. The strange part of this problem is the expression for the answer. To explain geometrically why the answer must be strange, observe that $-x_1+5x_2=0$ is the same as the first equation $x_1-5x_2=0$, therefore there are not two equations, but only one! The set of planar points satisfying the two equations is exactly the set of points on the straight line $x_1-5x_2=0$ (an infinite number of points). The {\bf standard form of the solution} is obtained by solving for $x_1$ in terms of $x_2$, e.g., $x_1=5x_2$, then write out the vector solution $X$ as follows: $$X=\colvector{x_1 \\ x_2}=\colvector{5x_2 \\ x_2} =x_2\colvector{5 \\ 1}$$ To each value of $x_2$ corresponds a solution of the system of equations, i.e., there are infinitely many solutions. } % end solution \probx{1a.}{($2\times 4$ Linear System)} Find all solutions to $x_1-x_2 + 7x_3 -x_4=0$, $2x_1+3x_2-8x_3+x_4=0$. 
\SOL{1a.}{ %% A:=genmatrix({x1-x2 + 7*x3 -x4=0,2*x1+3*x2-8*x3+x4=0},{x1,x2,x3,x4}); Subtract two times the first equation from the second to get $5x_2-22x_3+3x_4=0$. Divide the new equation by $5$ to get $x_2-\frac{22}{5}x_3+\frac{3}{5}x_4=0$. Keep this as the replacement for the second equation. Add it to the first equation to get its replacement $x_1+\frac{13}{5}x_3-\frac{2}{5}x_4=0$. The replacement equations are therefore $$ x_1+\frac{13}{5}x_3-\frac{2}{5}x_4=0,$$ $$ x_2-\frac{22}{5}x_3+\frac{3}{5}x_4=0,$$ which correspond exactly to the {\em reduced row echelon form} of the system. The {\em general solution} is $$\colvectorC{x_1\\ x_2\\ x_3\\ x_4} = x_3\colvector{-\frac{13}{5}\\ \frac{22}{5} \\ 1 \\ 0} + x_4\colvector{\frac{2}{5}\\ -\frac{3}{5}\\ 0 \\ 1}. $$ } % end solution \prob{2.}{($4\times 4$ Linear System)} Find a fraction-free Gauss-Elimination form and the Reduced Row-echelon form for the following equations: $x_1 -2x_2 + x_3 + x_4=2$, $3x_1 + 2x_3 - 2x_4= -8$, $4x_2-x_3-x_4 = 1$, $5x_1+3x_3-x_4=0$. \global\edef\FIRSTPROB{\LASTPROB}% \LASTPROB==last problem number \SOL{2.}{ A fraction-free Gauss-Elimination form can be obtained from the reduced row echelon form by multiplying each row by a suitable factor, to clear the fractions. In reality, there are infinitely many fraction-free forms, so there is no way to give an answer that everyone will arrive at. It turns out that the reduced row echelon form is also fraction-free, so it can be reported as the fraction-free answer! The reduced row-echelon form is obtained from the augmented matrix by row operations, using the basic pivot algorithm. 
The answer for {\em both questions}:
$$
\rref=\left(\begin{array}{rrrr|r}
1 & 0 & 0 & 4 & 0 \\
0 & 1 & 0 & -2 & 0 \\
0 & 0 & 1 & -7 & 0 \\
0 & 0 & 0 & 0 & 1
\end{array}\right)
$$
%% eq:=x1-2*x2+x3+x4=2,3*x1+2*x3-2*x4=-8,4*x2-x3-x4=1,5*x1+3*x3-x4=0;
%% a:=genmatrix([eq],[x1,x2,x3,x4],1); rref(a);
} % end solution
\prob{3.}{($4\times 3$ Linear System)}
Find all solutions to the $4\times 3$ system
$x_1+x_2-x_3=0$,
$4x_1-x_2+5x_3=0$,
$-2x_1+x_2-2x_3=0$,
$3x_1+2x_2-6x_3=0$.
\SOL{3.}{
The augmented matrix and its reduced row echelon form are:
$$
\text{aug}=\left(\begin{array}{rrr|r}
1 & 1 & -1 & 0 \\
4 & -1 & 5 & 0 \\
-2 & 1 & -2 & 0 \\
3 & 2 & -6 & 0 \\
\end{array}\right),~~
\rref=\left(\begin{array}{rrr|r}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & 1 & 0 \\
0 & 0 & 0 & 0 \\
\end{array}\right)
$$
It follows that $X=0$ is the only solution.
} % end solution
\probx{3a.}{($3\times 4$ Linear System)}
Find all solutions to
$x_1-x_2+x_3-x_4= -2$,
$-2x_1+3x_2-x_3+2x_4=5$,
$4x_1-2x_2+2x_3-3x_4=6$.
\SOL{3a.}{
The augmented matrix and its reduced row echelon form are:
$$
\text{aug}=\left(\begin{array}{rrrr|r}
1 & -1 & 1 & -1 & -2 \\
-2 & 3 & -1 & 2 & 5 \\
4 & -2 & 2 & -3 & 6 \\
\end{array}\right),~~
\rref=\left(\begin{array}{rrrr|r}
\FF & 0 & 0 & -1/2 & 5 \\
0 & \FF & 0 & 1/4 & 4 \\
0 & 0 & \FF & -1/4 & -3 \\
\end{array}\right)
$$
The {\bf standard form of the solution} is obtained by identifying
the {\bf lead variables} and the {\bf arbitrary variables}:
\begin{quote}
\sf Variables $x_1$, $x_2$ and $x_3$ are the {\bf lead variables},
because they correspond to a leading $1$ in the RREF. See the boxed
$1$'s above. Variable $x_4$ is the {\bf arbitrary variable}, because
arbitrary variables are the variables left over after removal of the
lead variables.
\end{quote}
The standard form of the solution $X$ is obtained by replacing each
lead variable with its corresponding equation, obtained from the RREF.
The arbitrary variables are left untouched.
Then:
$$
X=\colvector{x_1 \\ x_2 \\ x_3 \\ x_4}
=\colvectorC{\frac{1}{2}x_4 +5 \\ \frac{-1}{4}x_4 +4\\ \frac{1}{4}x_4-3 \\ x_4}
=x_4\colvectorC{\frac{1}{2} \\ \frac{-1}{4} \\ \frac{1}{4} \\ 1}
+\colvector{5 \\ 4\\ -3 \\ 0}
$$
} % end solution
\probx{3b.}{($3\times 4$ Linear System)}
Use Gauss-Jordan elimination to find the general solution:
$$\begin{array}{rcrcrcrcr}
x_1&+&2x_2&+&4x_3&-&x_4&=&3 \\
3x_1&+&4x_2&+&5x_3&-&x_4&=&7 \\
x_1&+&3x_2&+&4x_3&+&5x_4&=&4
\end{array}
$$
\SOL{3b.}{
The augmented matrix and its reduced row echelon form are:
$$
aug=\left(\begin{array}{rrrr|r}
1 & 2 & 4 & -1 & 3 \\
3 & 4 & 5 & -1 & 7 \\
1 & 3 & 4 & 5 & 4 \\
\end{array}\right),~~
\rref=\left(\begin{array}{rrrr|r}
\FF & 0 & 0 & -5 & 1 \\
0 & \FF & 0 & 6 & 1 \\
0 & 0 & \FF & -2 & 0 \\
\end{array}\right)
$$
\begin{quote}
\sf Variables $x_1$, $x_2$ and $x_3$ are the {\bf lead variables},
because they correspond to a leading $1$ in the RREF. See the boxed
$1$'s above. Variable $x_4$ is the {\bf arbitrary variable}, because
arbitrary variables are the variables left over after removal of the
lead variables.
\end{quote}
The {\bf standard form of the solution} is obtained by replacing the
lead variables $x_1$, $x_2$, $x_3$ by their equations ($x_1=5x_4+1$,
$x_2=-6x_4+1$, $x_3=2x_4$), but the arbitrary variable $x_4$ is
untouched. Then:
$$
X=\colvector{x_1 \\ x_2 \\ x_3 \\ x_4}
=\colvectorC{5x_4 +1 \\ -6x_4 + 1\\ 2x_4 \\ x_4}
=x_4\colvector{5 \\ -6 \\ 2 \\ 1}
+\colvector{1 \\ 1\\ 0 \\ 0}
$$
{\bf Important}: This {\em method} for writing out $X$ applies {\em
only} in case the equations are in reduced echelon form. A matrix $C$
is in {\bf reduced row echelon form} provided each nonzero row starts
with a {\bf leading} $1$, and above and below that leading $1$ appear
only zeros.
} % end solution
\prob{4.}{(Inverse Matrix)}
Compute the inverse of the matrix whose rows are
$[1,0,0]$, $[-2,0,1]$, $[4,6,1]$.
\SOL{4.}{ The inverse of the matrix $A$ whose rows are $[1,0,0]$, $[-2,0,1]$, $[4,6,1]$ is obtained by forming the augmented matrix $B=\aug{A}{I}$ and then the RREF of $B$. Alternatively, it can be computed from the adjugate or adjoint formula, using cofactors of $A$. The answer: \def\AA{1 & 0 & 0} \def\BB{-1 & -1/6 & 1/6} \def\CC{2 & 1 & 0} $$ A^{-1} = \threebythree{\AA}{\BB}{\CC} $$ } % end solution \probx{4a.}{(Invertible Matrices)} Explain why the matrix whose rows are $[1,0,0]$, $[-2,0,0]$, $[4,6,1]$ is not invertible. \SOL{4a.}{ The matrix whose rows are $[1,0,0]$, $[-2,0,0]$, $[4,6,1]$ is not invertible, because the rows are dependent (the first and second rows are dependent). Alternatively, the rank is less than the row dimension. Finally, a third way to analyze it comes from the theory of determinants: a matrix is invertible if and only if its determinant is nonzero. } % end solution \prob{5.}{(Rank, Nullity and Nullspace)} Find the rank, nullity and a basis for the null space, given $A$ has rows $[1,-1,2,3]$, $[0,1,4,3]$, $[1,0,6,6]$. \SOL{5.}{ Given $A$ has rows $[1,-1,2,3]$, $[0,1,4,3]$, $[1,0,6,6]$, then the reduced row echelon form of the augmented matrix $\aug{A}{0}$ is %% a:=matrix([[1,-1,2,3],[0,1,4,3],[1,0,6,6]]); \def\AA{\FF & 0 & 6 & 6 & 0} \def\BB{ 0 & \FF & 4 & 3 & 0} \def\CC{0 & 0 & 0 & 0 & 0} $$ \rref(A)=\left(\begin{array}{rrrr|r} \AA \\ \BB \\ \CC \end{array}\right) $$ The rank and nullity are both $2$. The standard form of the solution $X$ to the equation $AX=0$ is obtained using lead variables $x_1$, $x_2$ and arbitrary variables $x_3$, $x_4$ as follows: $$ X=\colvector{x_1 \\ x_2 \\ x_3 \\ x_4} =\colvectorC{-6x_3 -6x_4 \\ -4x_3-3x_4 \\ x_3 \\ x_4} =x_3\colvector{-6 \\ -4 \\ 1 \\ 0} +x_4\colvector{-6 \\ -3 \\ 0 \\ 1} $$ A basis for the null space can be read off from this answer: $$ \colvector{-6 \\ -4 \\ 1 \\ 0},~~~ \colvector{-6 \\ -3 \\ 0 \\ 1}. 
$$ } % end solution \probx{5a.}{(Rank, Nullity and Nullspace)} Find the rank, nullity and a basis for the solution space of $AX=0$, where $$ A=\left(\begin{array}{rrrrr} 1 & 2 & 1 & -1 & 3 \\ 1 & 2 & 2 & 1 & 2 \\ 2 & 4 & 2 & -1 & 7 \end{array}\right),~~ X=\colvector{x \\ y \\ z \\ u \\ v}. $$ \SOL{5a.}{ Rather than form $\aug{A}{0}$, we work with $A$ itself and find the RREF and also the standard form of the solution: %% a:=matrix([[1,2,1,-1,3],[1,2,2,1,2],[2,4,2,-1,7]]); $$ A=\left(\begin{array}{rrrrr} 1 & 2 & 1 & -1 & 3 \\ 1 & 2 & 2 & 1 & 2 \\ 2 & 4 & 2 & -1 & 7 \end{array}\right),~~ \rref(A)= \left(\begin {array}{ccccc} \FF & 2& 0 & 0 & 7\\\noalign{\medskip} 0& 0&\FF & 0 &-3\\\noalign{\medskip} 0& 0& 0 &\FF & 1\end {array}\right) . $$ The rank is 3, the nullity 2. The solution is read off from the RREF by observing that the {\bf lead variables} are $x$, $z$, $u$ and the {\bf arbitrary variables} are $y$, $v$. Then $$ X =\colvector{x \\ y \\ z \\ u \\ v} =\colvectorC{-2y-7v \\ y \\ 3v \\ -v \\ v} =y\colvector{-2 \\ 1 \\ 0 \\ 0 \\ 0} +v\colvector{-7 \\ 0 \\ 3 \\ -1 \\ 1} . $$ A basis for the solution space of $AX=0$ is given by $$ \colvector{-2 \\ 1 \\ 0 \\ 0 \\ 0},~~~ \colvector{-7 \\ 0 \\ 3 \\ -1 \\ 1} . $$ } % end solution \probx{5b.}{(Rank, Nullity and Nullspace)} Find the rank, nullity and a basis for the solution space of $AX=0$, given $$A=\left(\begin{array}{rrrr} 1 &-1 &2 &3 \\ -2 &2 &-4 &-6 \\ 2 &-2 &4 &6 \\ 3 &-3 &6 &9 \end{array}\right),~~~~ X=\left(\begin{array}{r} x \\ y \\ z \\ w \end{array}\right). $$ \SOL{5b.}{ Rather than form $\aug{A}{0}$, we work with $A$ itself and find the RREF and also the standard form of the solution: %% a:=matrix([[1,-1,2,3],[-2,2,-4,-6],[2,-2,4,6],[3,-3,6,9]]); $$ A= \left(\begin{array}{rrrr} 1 &-1 & 2 & 3 \\ -2 & 2 &-4 &-6 \\ 2 &-2 & 4 & 6 \\ 3 &-3 & 6 & 9 \end{array}\right),~~ \rref(A)= \left(\begin {array}{cccc} \FF &-1 & 2 & 3 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \end{array}\right). 
$$ The solution is read off from the RREF by observing that the {\bf lead variable} is $x$ and the {\bf arbitrary variables} are $y$, $z$, $w$. Then $$ X =\colvector{x \\ y \\ z \\ w} =\colvectorC{y-2z-3w \\ y \\ z \\ w} =y\colvector{1 \\ 1 \\ 0 \\ 0} +z\colvector{-2 \\ 0 \\ 1 \\ 0} +w\colvector{-3 \\ 0 \\ 0 \\ 1} . $$ The rank is 1, the nullity is 3 and a basis for the null space is $$ \colvector{1 \\ 1 \\ 0 \\ 0},~~ \colvector{-2 \\ 0 \\ 1 \\ 0},~~ \colvector{-3 \\ 0 \\ 0 \\ 1} . $$ } % end solution \end{exercises} \NOTES{ {\large\bf Maple notes on problems \FIRSTPROB--\LASTPROB}. Linear equations can be entered into {\tt maple} without converting to matrix form. While this form is not convenient for solving the equations, there is a conversion routine called {\tt genmatrix} which creates either the coefficient matrix or the augmented matrix. An example: \footnotesize\rm \begin{verbatim} # Maple V.3 [Augmented matrix errors in V.1, V.2] eq:=[2*x+3*y-z=0,3*x-4*y+5*z=8,y-z=9]; ans:=[x,y,z]; a:=genmatrix(eq,ans); # [ 2 3 -1 ] # a:= [ 3 -4 5 ] # [ 0 1 -1 ] # aug:=genmatrix(eq,ans,1); # This is what your [ 2 3 -1 0 ] # book calls the aug:= [ 3 -4 5 8 ] # augmented matrix. [ 0 1 -1 9 ] \end{verbatim} \normalsize\rm The fraction--free Gauss-Jordan forms are not unique. The preferred form is given by the {\tt maple} function {\tt ffgausselim}. This form combined with the {\tt maple} command {\tt backsub} can be used to find the solution to a linear system. The Reduced Row Echelon form (RREF) is unique. In {\tt maple}, the command is called {\tt rref} or {\tt gaussjord}, one being a synonym for the other. From this form the general solution of a linear system can determined by back-substitution using {\tt backsub}. The RREF method is preferred for most applications done by hand computation. This form identifies the dependent variables as those corresponding to the leading 1's. The other variables will appear as arbitrary constants in the general solution. 
For example, if the reduced form of an augmented matrix is (leading 1's boxed) $$\left(\begin{array}{rrrr|r} \FF &-1 &0 &0 & 4\\ 0 &0 &\FF &0 & -5\\ 0 &0 &0 &\FF & 2\\ 0 &0 &0 &0 &0 \end{array}\right),$$ then $x_1$, $x_3$ and $x_4$ are dependent variables while $x_2$ appears in the answer as an arbitrary constant: $$ \left(\begin{array}{r} x_1 \\ x_2 \\ x_3 \\ x_4 \end{array}\right) =\left(\begin{array}{r} 4\\ 0 \\ -5 \\ 2 \end{array}\right) +x_2\left(\begin{array}{r} 1 \\ 1 \\ 0 \\ 0 \end{array}\right). $$ The actual form of the answer in {\tt maple} will contain variable names starting with the letter {\tt t}. The factorization of the answer into basis elements for the kernel is not automatic. Here is an example of how to pass from the output of {\tt gausselim} to the general solution, using {\tt backsub}: \footnotesize\rm \begin{verbatim} with(linalg): a:=matrix([[1,3,-2,0,2,0],[2,6,-5,-2,4,-3],[0,0,5,10,0,15],[2,6,0,8,4,18]]); b:=matrix([[0],[-1],[5],[6]]); c:=augment(a,b); M:=gausselim(c); backsub(M); # # [ 1 3 -2 0 2 0 ] [ 0 ] # [ 2 6 -5 -2 4 -3 ] [ -1 ] #a:= [ ] b := [ ] # [ 0 0 5 10 0 15 ] [ 5 ] # [ 2 6 0 8 4 18 ] [ 6 ] # # [ 1 3 -2 0 2 0 0 ] [ 1 3 -2 0 2 0 0 ] # [ 2 6 -5 -2 4 -3 -1 ] [ 0 0 -1 -2 0 -3 -1 ] #c := [ ] M := [ ] # [ 0 0 5 10 0 15 5 ] [ 0 0 0 0 0 6 2 ] # [ 2 6 0 8 4 18 6 ] [ 0 0 0 0 0 0 0 ] # # [ - 3 t2 - 4 t4 - 2 t5, t2, - 2 t4, t4, t5, 1/3 ] \end{verbatim} \normalsize\rm The format of the general solution obtained above is not what is normally written in a hand computation. Below is the general solution in the usual hand--written format: \footnotesize\rm \begin{verbatim} # [ x1 ] [ 0 ] [ -3 ] [ -4 ] [ -2 ] # [ x2 ] [ 0 ] [ 1 ] [ 0 ] [ 0 ] # [ x3 ] [ 0 ] [ 0 ] [ -2 ] [ 0 ] # [ ] = [ ] + t2 [ ] + t4 [ ] + t5 [ ] # [ x4 ] [ 0 ] [ 0 ] [ 1 ] [ 0 ] # [ x5 ] [ 0 ] [ 0 ] [ 0 ] [ 1 ] # [ x6 ] [ 1/3 ] [ 0 ] [ 0 ] [ 0 ] \end{verbatim} \normalsize\rm A {\tt maple} procedure can be written to display the above general solution. 
The source file: \begin{center}{\tt /u/cl/maple/gensol}.\end{center} \footnotesize\rm \begin{verbatim} # file "gensol" with(linalg): # Uses linalg package gensolution:=proc(M) # M:=rref(augment(A,b)): local x,y,n,w,v,s,u,i: x:=backsub(M): # Solve Ax=b n:=coldim(matrix([eval(x)])): # Get number of vars y:=[seq(x[i],i=1..n)]: # Make list of ans w:=[seq(t.i,i=1..n)]: # Make list of vars v:=[seq(x.i,i=1..n)]: # Make list of vars s:=matrix(n,1,subs(seq(w[i]=0,i=1..n),eval(y)) ): # Particular solution for i from 1 to n do u:=matrix(n,1,map(diff,eval(y),w[i])): # basis vector if norm(u) <> 0 then s:=eval(s)+w[i]*eval(u): fi: # for variable w[i] od: s:=matrix(n,1,v)=eval(s): # Write out general solution RETURN(s): # as an equation. end: \end{verbatim} \normalsize\rm As an example of how to use this procedure consider the following: \footnotesize\rm \begin{verbatim} read gensol: a:=matrix([[4,-1,2,6],[-1,5,-1,-3],[3,4,1,3] ]); b:=matrix([[b1],[b2],[b3]]); c:=augment(a,b); M:=gausselim(c); M[3,5]:=0: # The system is consistent if and only if M[3,5]:=0 gensolution(M); # # [ 4 -1 2 6 ] [ b1 ] [ 4 -1 2 6 b1 ] # a := [ -1 5 -1 -3 ] b := [ b2 ] c := [ -1 5 -1 -3 b2 ] # [ 3 4 1 3 ] [ b3 ] [ 3 4 1 3 b3 ] # # # [ 4 -1 2 6 b1 ] This system is inconsistent # M := [ 0 19/4 -1/2 -3/2 b2 + 1/4 b1 ] unless the last row is all # [ 0 0 0 0 b3 - b1 - b2 ] zeros: b3 - b1 - b2=0. # # [ x1 ] [ 1/19 b2 + 5/19 b1 ] [ -9/19 ] [ -27/19 ] # [ x2 ] [ 4/19 b2 + 1/19 b1 ] [ 2/19 ] [ 6/19 ] # [ ] = [ ] + t3 [ ] + t4 [ ] # [ x3 ] [ 0 ] [ 1 ] [ 0 ] # [ x4 ] [ 0 ] [ 0 ] [ 1 ] \end{verbatim} \normalsize\rm \medskip }% end notes \begin{exercises} \prob{6.}{(Row Space and Column Space)} Find bases for the row space, the column space and the null space of the matrix $$ A=\left(\begin{array}{rrrr} 4 & 1 & -3 & 5 \\ 2 & 0 & 0 & -2 \\ 6 & 2 & -6 & 12 \end{array}\right). $$ \global\edef\FIRSTPROB{\LASTPROB} \SOL{6.}{ Row reduction to RREF will give a basis for the row space of $A$. 
Row reduction for the transpose of $A$ will give a basis for the column space of $A$. %% a:=matrix([[4,1,-3,5],[2,0,0,-2],[6,2,-6,12]]); %% b:=matrix([[ 4 ,2 ,6] , [1 , 0 , 2 ], [ -3 , 0 , -6], [ 5 ,-2 ,12]]); $$ A=\left(\begin{array}{rrrr} 4 & 1 & -3 & 5 \\ 2 & 0 & 0 & -2 \\ 6 & 2 & -6 & 12 \end{array}\right),~~ \rref(A)=\left(\begin{array}{rrrr} 1 & 0 & 0 & -1 \\ 0 & 1 & -3 & 9 \\ 0 & 0 & 0 & 0 \\ \end{array}\right). $$ $$ A^{t}=\left(\begin{array}{rrr} 4 & 2 & 6 \\ 1 & 0 & 2 \\ -3 & 0 & -6 \\ 5 & -2 & 12 \end{array}\right),~~ \rref(A^t)=\left(\begin{array}{rrr} 1 & 0 & 2 \\ 0 & 1 & -1 \\ 0 & 0 & 0 \\ 0 & 0 & 0 \\ \end{array}\right). $$ The row space is generated by the basis $$ \def\AA{1,0,0,-1} \def\BB{0,1,-3,9} \rowvectorC{\AA},~~~ \rowvectorC{\BB}. $$ The column space is generated by the basis $$ \def\AA{1 \\ 0 \\ 2} \def\BB{0 \\ 1 \\ -1} \colvector{\AA},~~~ \colvector{\BB}. $$ The null space is generated from $\rref(\aug{A}{0})$ by solving the equation $AX=0$ for the standard form of the solution $X$, as follows: $$ \rref(\aug{A}{0})=\left(\begin{array}{rrrr|r} \FF & 0 & 0 & -1 & 0 \\ 0 & \FF & -3 & 9 & 0 \\ 0 & 0 & 0 & 0 & 0 \\ \end{array}\right), $$ $$ X=\colvector{x_1 \\ x_2 \\ x_3 \\ x_4} =\colvectorC{x_4 \\ 3x_3-9x_4\\ x_3 \\ x_4} =x_3\colvector{0 \\ 3 \\ 1 \\ 0} +x_4\colvector{1 \\ -9 \\ 0 \\ 1}. $$ A basis for the nullspace is therefore $$ \colvector{0 \\ 3 \\ 1 \\ 0},~~~ \colvector{1 \\ -9 \\ 0 \\ 1}. $$ } % end solution \probx{8b.}{(Transpose of a Product of Symmetric Matrices)} Prove that $(AB)^t = BA$ for symmetric $n \times n$ matrices $A$ and $B$. \SOL{8b.}{ To prove that $(AB)^t = BA$ for symmetric $n \times n$ matrices $A$ and $B$, begin with the theorem $(AB)^t=B^tA^t$ and use the hypothesis $A^t=A$, $B^t=B$. } % end solution \prob{9.}{(False Determinant Rules)} Give an example in dimension 2 where $\det(A+B) \ne \det(A)+\det(B)$. \SOL{9.}{ There are many examples in dimension 2 where $\det(A+B) \ne \det(A)+\det(B)$. 
The easiest to find is $A=B=I$. There are many others.
} % end solution
\prob{10.}{(Determinant of an Orthogonal Matrix)}
Assume $A^{-1} = A^t$. Prove that $\det(A) = \pm 1$.
\SOL{10.}{
Assume $A^{-1} = A^t$. We are to prove that $\det(A) = \pm 1$.
The determinant rules $\det(AB)=\det(A)\det(B)$ and
$\det(A)=\det(A^t)$ can be combined to show $\det(A)^2=1$, hence the
claimed result.
} % end solution
\probx{10a.}{(Permutation Matrices)}
Let $P$ be a $3\times 3$ matrix obtained from the identity matrix by
interchanging columns. Argue from the cofactor expansion rule that
$\det(P)=\pm 1$.
\SOL{10a.}{
Let $P$ be a $3\times 3$ matrix obtained from the identity matrix by
interchanging columns. Then $P$ must be one of the six matrices below:
$$
\def\AA{1 & 0 & 0}
\def\BB{0 & 1 & 0}
\def\CC{0 & 0 & 1}
\threebythree{\AA}{\BB}{\CC}, ~~
\threebythree{\AA}{\CC}{\BB}, ~~
\threebythree{\BB}{\AA}{\CC},
$$
$$
\def\AA{1 & 0 & 0}
\def\BB{0 & 1 & 0}
\def\CC{0 & 0 & 1}
\threebythree{\BB}{\CC}{\AA}, ~~
\threebythree{\CC}{\AA}{\BB}, ~~
\threebythree{\CC}{\BB}{\AA}.
$$
The cofactor expansion rule can be used to evaluate each of the six
determinants and verify in each case that $\det(P)=\pm 1$.
} % end solution
\prob{11.}{(Characteristic Equation)}
Prove that $\dd\det(A-\lambda I)$ equals
$$(-\lambda)^3 + \mbox{trace}(A)(-\lambda)^2 +
(\sum_{i=1}^3 M_{ii})(-\lambda) + \det(A)$$
where $M_{ij}$ is the minor determinant of element $ij$ of matrix $A$
and trace$(A)$ is the sum of the diagonal elements of $A$.
\SOL{11.}{
The expansion of
$$
\det(A-\lambda I) =
\left| \begin{array}{ccc}
a_{11}-\lambda & a_{12} & a_{13} \\
a_{21} & a_{22}-\lambda & a_{23} \\
a_{31} & a_{32} & a_{33}-\lambda
\end{array} \right|
$$
using the cofactor expansion rule is long and tedious, but direct.
} % end solution
\probx{11a.}{(Vandermonde Determinant)}
Let
$$A=\left(\begin{array}{rrr}
1 &1 &1 \\
x &y &z \\
x^2 &y^2 &z^2
\end{array}\right).
$$
Prove that $\det(A) = (y-x)(z-x)(z-y)$, by viewing the determinant as
a quadratic polynomial in $x$ having roots $y$ and $z$.
\SOL{11a.}{
Let
$$f(x)=\det\left(\begin{array}{rrr}
1 &1 &1 \\
x &y &z \\
x^2 &y^2 &z^2
\end{array}\right).
$$
Then
$$f(y)=\det\left(\begin{array}{rrr}
1 &1 &1 \\
y &y &z \\
y^2 &y^2 &z^2
\end{array}\right)
=0, ~~
f(z)=\det\left(\begin{array}{rrr}
1 &1 &1 \\
z &y &z \\
z^2 &y^2 &z^2
\end{array}\right)
=0,
$$
because a determinant vanishes if two columns are the same. By the
cofactor expansion rule, $f(x)$ is a quadratic polynomial in $x$, and
since two roots are known, $f(x)=c(x-y)(x-z)$, for some $c$, by the
factor theorem of college algebra. It's easy to check that $c$ is the
coefficient of $x^2$ in the cofactor expansion, and this can be
evaluated directly as $(z-y)$. Left out of this solution are the
required page references to relevant textbooks (for the theorems used)
and display of the required calculations.
} % end solution
\probx{12.}{(Determinants)}
Prove that the determinant of any product of upper and lower
triangular matrices is the product of the diagonal entries of all the
matrices involved.
\SOL{12.}{
The result depends upon the formula $\det(AB)=\det(A)\det(B)$, valid
for $n\times n$ matrices $A$ and $B$. The formula implies that the
determinant of a product of matrices is the product of their
individual determinants. To finish the proof, it suffices to show
that the determinant of a triangular matrix is the product of its
diagonal elements. This last result is done by appeal to the cofactor
expansion rule.
} % end solution
\prob{11b.}{(Determinants)}
Evaluate $\det(A)$:
$$A=\left(\begin{array}{rrrr}
a &b &0 &0 \\
c &d &0 &0 \\
0 &0 &a &-b \\
0 &0 &c &d
\end{array}\right)
$$
\SOL{11b.}{
The answer:
$$
\det\left(\begin{array}{rrrr}
a &b &0 &0 \\
c &d &0 &0 \\
0 &0 &a &-b \\
0 &0 &c &d
\end{array}\right)
= (ad)^2-(bc)^2
$$
because
$$
\det(A)=
\det\left(\begin{array}{rr}
a &b \\
c &d
\end{array}\right)
\cdot
\det\left(\begin{array}{rr}
a &-b \\
c &d
\end{array}\right)
=(ad-bc)(ad+bc)
$$
} % end solution
\prob{13.}{(Cramer's Rule)}
Solve by Cramer's rule:
$x_1+x_2+x_3=6$, $2x_1-x_2=0$, $2x_1+x_3=1$.
\probx{13a.}{(Cramer's Rule)}
Solve by Cramer's Rule:
$$\left(\begin{array}{rrrr}
1 &1 &1 &1 \\
2 &0 &-1 &-1 \\
0 &0 &3 &6 \\
0 &0 &0 &-1
\end{array}\right)
\left(\begin{array}{r} x \\ y \\ z \\ w \end{array}\right)
=
\left(\begin{array}{r} 6 \\ 4 \\ 3 \\ 5 \end{array}\right).
$$
\probx{13b.}{(Cramer's Rule)}
Use Cramer's rule to calculate the unknown $x_3$ if $x_1, x_2$ and
$x_3$ satisfy the following system of linear equations:
$$\begin{array}{rcrcrcr}
2x_1&-&x_2&+&2x_3&=&2 \\
x_1&+&10x_2&-&3x_3&=&5 \\
-x_1&+&x_2&+&5x_3&=&-7
\end{array}
$$
\SOL{13b.}{
By Cramer's rule, the unknown $x_3$ is given by $\det(A_3)/\det(A)$,
where
%% a:=matrix([[2,-1,2],[1,10,-3],[-1,1,5]]); b:=vector([2,5,-7]);
$$
A=
\left(\begin {array}{ccc} 2&-1&2\\\noalign{\medskip}
1&10&-3\\\noalign{\medskip} -1&1&5\end {array}\right),~~
A_3=
\left(\begin {array}{ccc} 2&-1&2\\\noalign{\medskip}1&10&5
\\\noalign{\medskip}-1&1&-7\end {array}\right).
$$
Therefore, $x_3=-1$.
} % end solution
\prob{8.}{(Inverse by Two Methods)}
Show that the matrix
$$
A=\left(\begin{array}{rrr}
-2 & 1 & 1 \\
0 & 1 & 1 \\
-3 & 0 & 6 \end{array}\right)
$$
is invertible and find the inverse matrix $A^{-1}$ by two methods.
\SOL{8.}{ The answer is $$ A^{-1}=\left(\begin{array}{rrr} -2 & 1 & 1 \\ 0 & 1 & 1 \\ -3 & 0 & 6 \end{array}\right)^{-1} = \left(\begin {array}{ccc} -1/2&1/2&0 \\\noalign{\medskip}1/4&3/4&-1/6 \\\noalign{\medskip}-1/4&1/4&1/6 \end {array}\right). $$ The first method is the RREF method, in which $\dd \rref(\aug{A}{I})$ is computed, giving $\dd\aug{I}{A^{-1}}$. The second method is the adjoint method, which amounts to computing one $3\times 3$ determinant and six $2\times 2$ determinants. Kindly show all details, by hand. The adjoint matrix (transpose of the matrix of cofactors) and the $3\times 3$ determinant are given by $$ \adjoint(A)= \left(\begin {array}{ccc} 6&-6&0 \\\noalign{\medskip}-3&-9&2 \\\noalign{\medskip}3&-3&-2 \end {array}\right), ~~ \det(A)=-12. $$ } % end solution \prob{8a.}{(Inverse of a Matrix)} %% a:=matrix([[1,0,2,3],[-1,1,0,4],[2,1,-1,3],[-1,0,0,0]]); inverse(a); Calculate $A^{-1}$ if it exists: $$A=\left(\begin{array}{rrrr} 1 &0 &2 &3 \\ -1 &1 &0 &4 \\ 2 &1 &-1 &3 \\ -1 &0 &0 &0 \end{array}\right) $$ \SOL{8a.}{ %% a:=matrix([[1,0,2,3],[-1,1,0,4],[2,1,-1,3],[-1,0,0,0]]); inverse(a); The answer, obtained from $\rref(\aug{A}{I})$: $$A=\left(\begin{array}{rrrr} 1 &0 &2 &3 \\ -1 &1 &0 &4 \\ 2 &1 &-1 &3 \\ -1 &0 &0 &0 \end{array}\right),~~ A^{-1}= \left(\begin {array}{cccc} 0&0&0&-1 \\\noalign{\medskip}-4&9&-8&-29 \\\noalign{\medskip}-1&3&-3&-10 \\\noalign{\medskip}1&-2&2&7 \end {array}\right). $$ } % end solution \probx{7.}{(Inverse by the Adjoint Method)} Compute by the adjoint method the inverse of the $4 \times 4$ Hilbert matrix $$ A= \left(\begin {array}{cccc} 1&1/2&1/3& 1/4\\\noalign{\medskip}1/2&1/3&1/4&1/5 \\\noalign{\medskip}1/3&1/4&1/5&1/6 \\\noalign{\medskip}1/4&1/5&1/6&1/7 \end {array}\right) $$ %% $H=(h_{ij})$, $h_{ij}=1/(i+j-1)$ Show all steps in the evolution of the solution, in particular, show explicitly the computation of $\det(A)$ and the $16$ cofactors, and exhibit the final transposition of the matrix of cofactors. 
\SOL{7.}{ The {\bf adjoint method for the inverse} refers to the formula $$\dd A^{-1}=\frac{1}{\det(A)}\adjoint(A),$$ where $\adjoint(A)$ is the transpose of the matrix of cofactors of $A$. This problem is tedious without a computer algebra system, therefore, it is in your best interest to use {\tt maple} for many of the steps. The determinant of $A$ is $\dd \det(A)={\frac {1}{6048000}}$. Please show the computation steps for this determinant, using the cofactor expansion rule. The adjoint matrix is given by $$ \adjoint(A)= \left(\begin {array}{cccc} {\frac {1} {378000}}&-{\frac {1}{50400}}&{\frac { 1}{25200}}&-{\frac {1}{43200}} \\\noalign{\medskip}-{\frac {1}{50400} }&{\frac {1}{5040}}&-{\frac {1}{2240}} &{\frac {1}{3600}}\\\noalign{\medskip} {\frac {1}{25200}}&-{\frac {1}{2240}}& {\frac {3}{2800}}&-{\frac {1}{1440}} \\\noalign{\medskip}-{\frac {1}{43200} }&{\frac {1}{3600}}&-{\frac {1}{1440}} &{\frac {1}{2160}}\end {array}\right). $$ Show all 16 steps in computing this matrix. Do the first one by hand and the others by machine. For example, the matrix formed from $A$ by deleting row 1 and column 1 produces the first cofactor $C_{11}$ as follows: $$ M_{11}= \left(\begin {array}{ccc} 1/3&1/4&1/5 \\\noalign{\medskip}1/4&1/5&1/6 \\\noalign{\medskip}1/5&1/6&1/7 \end {array}\right), ~~ \det(M_{11})=\frac{1}{378000},~~ C_{11}=(-1)^{1+1}\det(M_{11})=\frac{1}{378000}. $$ The matrix of cofactors is $[C_{ij}]$, but $\adjoint(A)$ is not this matrix, but instead the transpose! Leaving out a step produces the wrong matrix, but for this example, the inverse is symmetric, and the classic mistake (forgetting the transpose) does not surface. The answer for the inverse is $$ A^{-1}=\frac{1}{\det(A)}\adjoint(A)= \left(\begin {array}{cccc} 16&-120& 240&-140\\\noalign{\medskip}-120&1200& -2700&1680\\\noalign{\medskip}240&- 2700&6480&-4200\\\noalign{\medskip}- 140&1680&-4200&2800\end {array}\right) . $$ Some {\tt maple} hints. The command {\tt A:=hilbert(4);} enters the matrix. 
And {\tt minor(A,1,1);} produces the matrix formed by deleting row 1 and column 1. Evaluate determinants with {\tt det(B);} where $B$ is a square matrix. The maple command {\tt transpose(B);} is used to form the transpose of a matrix $B$. The command {\tt adjoint(A);} computes the transpose of the matrix of cofactors of $A$. The purpose of the exercise is to learn how to deal with large problems. Do enough hand computation to feel comfortable. Be driven to {\tt maple} by tedium, after you have already obtained many of the correct answers by hand. } % end solution \prob{14.}{(Independence of Vectors)} Use determinants and textbook theorems to determine whether the following vectors are linearly dependent. $$ a_1=\left(\begin{array}{r} -5 \\ 1 \\ 0 \end{array}\right), ~~ a_2=\left(\begin{array}{r} 1 \\ -1 \\ -1\end{array}\right), ~~ a_3=\left(\begin{array}{r} 6 \\ 1 \\ 0 \end{array}\right). $$ \SOL{14.}{ According to Cramer's Rule, the vectors are linearly independent if and only if the matrix $A$ whose columns are $a_1$, $a_2$, $a_3$ has rank $3$. This can be tested effectively with the RREF. %% a:=matrix([[-5,1,6],[1,-1,1],[0,-1,0]]); rref(a); $$ A= \left(\begin {array}{ccc} -5&1&6\\\noalign{\medskip}1&-1&1 \\\noalign{\medskip}0&-1&0\end {array}\right) ,~~ \rref(A)= \left(\begin {array}{ccc} 1&0&0\\\noalign{\medskip}0&1&0 \\\noalign{\medskip}0&0&1\end {array}\right) . $$ The rank is 3, therefore the vectors are linearly independent. } % end solution \probx{14a.}{(Divisibility)} Let the $3\times 3$ matrix $A$ be formed by taking as its rows the $9$ digits of three $3$-digit integers, e.g., for $228$, $266$, $323$ the matrix is $$A=\left(\begin{array}{ccc} 2 & 2 & 8\\ 2 & 6 & 6 \\ 3 & 2 & 3 \end{array}\right).$$ Prove using Cramer's rule: if an integer $m$ divides each number, then $m$ divides $\det(A)$ (e.g., $19$ divides $228$, $266$ and $323$ implies $19$ divides $\det(A)$ in the illustration). 
\SOL{14a.}{ As an illustration, let $$A= \left(\begin{array}{ccc} 2 & 2 & 8\\ 2 & 6 & 6 \\ 3 & 2 & 3 \end{array}\right),~~ b=\colvector{228 \\ 266 \\ 323},~~ X=\colvectorC{100 \\ 10 \\ 1}. $$ Then the system of equations $AX=b$ is satisfied. According to Cramer's rule, each of the integer entries of $X$ is a quotient $\det(C)/\det(A)$ for some matrix $C$. In particular, the third component of $X$ equal to one implies that $\det(A)=\det(C)$, where $C$ is the matrix $A$ with the last column replaced by $b$. Write the last column of $C$ in factored form (19 factors out of each entry) and see what it says. } % end solution \end{exercises} \NOTES{ \medskip {\large\bf Maple notes on problem \FIRSTPROB-\LASTPROB}. The method for computing an inverse matrix suggested in most linear algebra texts involves augmentation of the original matrix $A$ with the identity matrix $I$ of the same size to create a new matrix $C$. This matrix $C$ is subjected to $RREF$ to determine the inverse $A^{-1}$. A second method is possible, which is based upon Cramer's Rule. The formula $$A^{-1}=(1/\det(A))\mbox{adjoint}(A)$$ is reproduced by these hand calculations: \begin{itemize} \item[(a)] Compute the matrix $M$ of {\bf minors} of $A$, i.e., $M_{ij}$ is the minor determinant of element $a_{ij}$ in matrix $A$. \item[(b)] Introduce signs into the elements of $M$ by the {\bf Checkerboard Rule}: element $M_{ij}$ gets a negative sign if $i+j$ is odd. The new matrix is called $C$; it is the matrix of {\bf cofactors} of $A$. \item[(c)] Transpose the matrix $C$ to obtain the {\bf adjoint} matrix of $A$, called $D$. {\tt Maple} can produce this matrix directly from $A$ by the command {\tt D:=adjoint(A)}. \item[(d)] Compute the {\bf determinant} $\det(A)$ and divide it into each element of the adjoint $D$; this is the inverse $A^{-1}$. \end{itemize} {\tt Maple} can compute the determinant with command {\tt det(A)} and the inverse in one step with the command {\tt inverse(A)}. 
The classical hand computation of Cramer's Rule should be learned by everyone because it appears often in scientific literature. To this end we consider the following {\tt maple} example: \footnotesize\rm \begin{verbatim} with(linalg): a:=matrix([[1,0,-1],[-1,1,0],[0,0,-1]]); b:=vector([3,-5,2]); #The column b replaces columns 1, 2, 3 of matrix a: a1:=augment(b,col(a,2),col(a,3)); # Replace col 1 by b a2:=augment(col(a,1),b,col(a,3)); # Replace col 2 by b a3:=augment(col(a,1),col(a,2),b); # Replace col 3 by b #The answers x, y, z are quotients of determinants: x:=det(a1)/det(a); y:=det(a2)/det(a); z:=det(a3)/det(a); [ 1 0 -1 ] a := [ -1 1 0 ] b := [ 3, -5, 2 ] [ 0 0 -1 ] [ 3 0 -1 ] [ 1 3 -1 ] [ 1 0 3 ] a1 := [ -5 1 0 ] a2 := [ -1 -5 0 ] a3 := [ -1 1 -5 ] [ 2 0 -1 ] [ 0 2 -1 ] [ 0 0 2 ] # Solve aX=b for X=vector([x,y,z]): x := 1 y := -4 z := -2 \end{verbatim} \normalsize\rm The {\bf rank} of a matrix $A$ is the number of nonzero rows in the Reduced Row Echelon form of $A$. The {\bf nullity} of $A$ is the number of variables minus the rank, which in the case of a square matrix, equals the number of zero rows in the RREF. It is a common and fatal error to compute the nullity as the number of zero rows in the RREF (it only applies when the matrix happens to be square)! There is a maple command {\tt rank} which applies to compute the rank of a matrix $A$: {\tt rank(A)}. There is presently no command for the nullity, because it depends upon the number of variables, and only you can know if the given $A$ is augmented or not. Be warned that application of the {\tt rank} command to an augmented matrix can fail to give the correct answer: the augmented column may produce an inconsistent RREF and hence an incorrect count of nonzero rows! There are various {\tt maple} commands available for computing the {\bf rank} and {\bf kernel} (or {\tt nullspace}) of a matrix $A$. They are: {\tt rref}, {\tt gausselim}, {\tt rank}, {\tt kernel}. 
The first two produce forms from which the rank can be deduced. The command {\tt rank(A)} gives this number directly. A basis for the solutions of $Ax=0$ can be found by the command {\tt kernel(A)}. A common error with the latter command is to apply it to an augmented matrix, which casts the problem $Ax=0$ into the wrong space dimension. {\tt Maple} can be used to compute a basis for the {\bf row space} of a matrix $A$. The command is {\tt rowspace(A)}. The {\bf column space} of a matrix $A$ has a basis which can be obtained by the {\tt maple} command {\tt colspace(A)}. The {\bf nullspace} of matrix $A$ is the set of all solutions $x$ of the equation $Ax=0$. A basis for the nullspace is obtained by the {\tt maple} command {\tt nullspace(A)} or {\tt kernel(A)} (the terms {\em nullspace} and {\em kernel} are equivalent). Here is an example: \footnotesize\rm \begin{verbatim} with(linalg): A := matrix(3,2,[2,0,3,4,0,5]); rowspace(A); colspace(A); kernel(A); kernel(transpose(A)); A1:=rref(A); A2:=rref(transpose(A)); # # [ 2 0 ] # A := [ 3 4 ] # [ 0 5 ] # # {[ 1, 0 ], [ 0, 1 ]} # # {[1, 0, -15/8 ], [ 0, 1, 5/4 ]} # # {} # # {[ 15/8, -5/4, 1 ]} # # [ 1 0 ] [ 1 0 -15/8 ] # A1 := [ 0 1 ] A2 := [ ] # [ 0 0 ] [ 0 1 5/4 ] \end{verbatim} \normalsize\rm As is apparent from this example, the commands {\tt colspace} and {\tt rowspace} can be replaced by extraction of nonzero rows from the reduced row echelon forms for $A$ and $A^t$. \bigskip }%% END NOTES {\large\bf Eigenvalues and Eigenvectors}. Find the eigenvalues, eigenvectors, geometric multiplicity and algebraic multiplicity. 
\begin{exercises} \prob{15.}{($2$ Eigenvalues and $3$ Eigenvectors)} $$A=\left(\begin{array}{rrr} 5 &4 &2 \\ 4 &5 &2 \\ 2 &2 &2 \end{array}\right).$$ \global\edef\FIRSTPROB{\LASTPROB} \SOL{15.}{ To find the eigenvalues of the matrix %% a:=matrix([[5,4,2],[4,5,2],[2,2,2]]); $$A=\left(\begin{array}{rrr} 5 &4 &2 \\ 4 &5 &2 \\ 2 &2 &2 \end{array}\right)$$ it is required to solve for the roots of the characteristic equation $$0=\det(A-\lambda I)= 10-21\,\lambda+12\,{\lambda}^{2}-{\lambda}^{3} =\left (10-\lambda\right )\left (\lambda-1\right )^{2} $$ The eigenvalues are therefore $10$, $1$ and $1$. The eigenvector for $\lambda=10$ is found by solving $AX=10X$ or equivalently $(A-10I)X=0$, which is a null space problem. The RREF of $A-(10)I$ is found (with some effort) and we obtain eigenvalue, eigenvector pair $$\lambda=10,~~~ X=\colvector{2 \\ 2 \\ 1}.$$ In a similar way, we find the RREF of $A-(1)I$ has a basis of two elements, giving the eigenvalue, eigenvector pairs $$ \lambda=1,~~ X=\colvector{0\\ 1\\ -2 }, ~~~~ \lambda=1,~~ X= \colvector{ 1\\ 0\\ -2 }. $$ Left out of these notes is the tedious task of finding RREF's and standard forms of the solution for two null space problems (one problem for each eigenvalue). In a student solution, these details must {\em not} be left out! } % end solution \probx{15a.}{($1$ Eigenvalue and $1$ Eigenvector)} $$A=\left(\begin{array}{rrr} 0 &1 &0 \\ 0 &0 &1 \\ 1 &-3 &3 \end{array}\right).$$ \SOL{15a.}{ To find the eigenvalues of the matrix %% a:=matrix([[0,1,0],[0,0,1],[1,-3,3]]); $$A=\left(\begin{array}{rrr} 0 &1 &0 \\ 0 &0 &1 \\ 1 &-3 &3 \end{array}\right)$$ requires solving for the roots of the characteristic equation $$0=\det(A-\lambda I)= 3\,{\lambda}^{2}-{\lambda}^{3}-3\,\lambda+1 =(1-\lambda)^3. $$ There is only one eigenvalue and therefore only one null space problem to solve, namely $AX=X$ or $(A-I)X=0$. 
The RREF of $A-I$ is found to have rank 2, nullity 1, so there is only one eigenvalue, eigenvector pair: $$ \lambda=1,~~ X=\colvector{1\\ 1\\ 1}.$$ } % end solution \prob{16.}{($1$ Eigenvalue and $1$ Eigenvector)} $$A=\left(\begin{array}{rrr} -3 &-7 &-5 \\ 2 &4 &3 \\ 1 &2 &2 \end{array}\right).$$ \SOL{16.}{ To find the eigenvalues of the matrix %% a:=matrix([[-3,-7,-5],[2,4,3],[1,2,2]]); $$A=\left(\begin{array}{rrr} -3 &-7 &-5 \\ 2 &4 &3 \\ 1 &2 &2 \end{array}\right)$$ requires solving for the roots of the characteristic equation $$0=\det(A-\lambda I)=(1-\lambda)^3.$$ There is only one eigenvalue, eigenvector pair: $$ \lambda=1,~~ X=\colvector{-3\\ 1\\ 1}.$$ } % end solution \probx{16a.}{($3$ Eigenvalues and $3$ Eigenvectors)} $$A=\left(\begin{array}{rrr} 5 & 0 & 2 \\ 0 & 7 & -2 \\ 2 & -2 & 6 \end{array}\right).$$ \SOL{16a.}{ To find the eigenvalues of the matrix %% a:=matrix([[5,0,2],[0,7,-2],[2,-2,6]]); $$A=\left(\begin{array}{rrr} 5 & 0 & 2 \\ 0 & 7 & -2 \\ 2 & -2 & 6 \end{array}\right).$$ requires solving for the roots of the characteristic equation $$0=\det(A-\lambda I)= \left (\lambda-3\right ) \left (6-\lambda\right ) \left (\lambda-9 \right ) .$$ The eigenpairs are $$ \lambda=3,~~ X=\colvector{ -2\\ 1\\ 2 }, ~~~~ \lambda=6,~~ X=\colvector{ 2\\ 2\\ 1 }, ~~~~ \lambda=9,~~ X=\colvector{ 1\\ -2\\ 2 }. $$ } % end solution \end{exercises} \NOTES{ {\large\bf Maple notes on problems \FIRSTPROB--\LASTPROB}. The notions of {\bf algebraic multiplicity} and {\bf geometric multiplicity} are integer counts taken from certain calculations. Both require that the {\bf characteristic equation} be solved: $$ \det(A - \lambda I) = 0.$$ The {\em algebraic multiplicity} of $\lambda$ is the number of times root $\lambda$ is repeated. For example, in the equation $$ (\lambda - 1)^3(\lambda + 3)^2(\lambda^2+16)=0$$ the roots $1$, $-3$, $4i$, $-4i$ are repeated 3, 2, 1, 1 times respectively, hence their algebraic multiplicities are 3, 2, 1, 1. 
The {\em geometric multiplicity} of a root $\lambda$ of the characteristic equation is the number of independent eigenvectors for $\lambda$, that is, the number of independent solutions $x$ to the equation $(A - \lambda I)x = 0$. This number can be found {\em without} computing eigenvectors. Precisely, the {\bf geometric multiplicity} of root $\lambda$ is the number of arbitrary variables in the general solution. This number is exactly the {\bf nullity} of the matrix $A - \lambda I$, which is the number of variables minus the {\bf rank}. If the eigenvectors are not needed, then it suffices to compute {\tt rank(A-$\lambda$ I)}. If the eigenvectors are actually needed, then {\tt maple} determines the count as the number of basis vectors in the calculation {\tt kernel(D)} where $D = A - \lambda I$. A third way to obtain the count is to apply the {\tt maple} command {\tt eigenvects(A)}, which contains the desired count in an encrypted syntax (along with additional information). Here is an example which shows how to compute the eigenvalues and eigenvectors and incidentally calculate the algebraic and geometric multiplicities: \footnotesize\rm \begin{verbatim} with(linalg): A:=matrix([[1,3,-2,0],[2,6,-5,-2],[0,0,5,10],[2,6,0,8]]); J:=diag(1,1,1,1); # # [ 1 3 -2 0 ] [ 1 0 0 0 ] # [ 2 6 -5 -2 ] [ 0 1 0 0 ] # A := [ ] J:= [ ] # [ 0 0 5 10 ] [ 0 0 1 0 ] # [ 2 6 0 8 ] [ 0 0 0 1 ] # u:=[eigenvals(A)]; # Make an eigenvalue list # # Brackets are not a mistake! # 1/2 1/2 # u := [0, 0, 10 + I 43 , 10 - I 43 ] # # Algebraic multiplicities are 2,1,1 because 0 is repeated. # # Now solve the kernel problems for all eigenvalues u[1]..u[4]. # v1:=kernel(evalm(A-u[1]*J)); v2:=kernel(evalm(A-u[2]*J)); # Duplicate computation! 
v3:=kernel(evalm(A-u[3]*J)); v4:=kernel(evalm(A-u[4]*J)); # v1:={[ -3, 1, 0, 0 ], [ -4, 0, -2, 1 ]} # v2:={[ -3, 1, 0, 0 ], [ -4, 0, -2, 1 ]} evalf(map(evalc,v3[1]),3); # [ - .150 - .328 I, - .500 - .656 I, 1., .500 - .656 I ] evalf(map(evalc,v4[1]),3); # [ - .150 - .328 I, - .500 - .656 I, 1., .500 - .656 I ] # # Geometric multiplicities: ev=0 mult=2 # ev=10.00 + 6.56 I mult=1 # ev=10.00 - 6.56 I mult=1 \end{verbatim} \normalsize\rm It is possible to compute the eigenvalues of a matrix $A$ numerically by the maple command {\tt Eigenvals}, rather than symbolically, as is done by {\tt eigenvals(A)}. This is recommended for those cases when the {\tt maple} output from {\tt eigenvals(A)} is too complicated to read. The answers usually require interpretation, as in the following, where one eigenvalue of 0 is computed as $-0.874\times 10^{-9}$: \footnotesize\rm \begin{verbatim} with(linalg): A:=matrix([[1,3,-2,0],[2,6,-5,-2],[0,0,5,10],[2,6,0,8]]); evalf(Eigenvals(A),3); # Numeric eigenvalues # are not used for computations! # -9 # [ -.874*10 , 10.00 + 6.56 I, 10.00 - 6.56 I, 0 ] # \end{verbatim} \normalsize\rm } %% END NOTES {\large\bf Differential Equations}. The general solution of a matrix differential equation $x'(t)=Ax(t)$ can be written as $x(t)=\sum_{k=1}^n c_kX_ke^{\lambda_k t}$ where $c_1$ to $c_n$ are arbitrary constants, $\lambda_1$ to $\lambda_n$ are the distinct eigenvalues of $A$ with corresponding eigenvectors $X_1$ to $X_n$. This solution works only when $A$ has $n$ distinct eigenvalues. \begin{exercises} \prob{17.}{(Differential Equations)} Find the general solution of the system of differential equations $x'(t) =A x(t)$ where $$ A=\left(\begin{array}{rrr} 2 & -4 & 4 \\ 0 & -2 & 1 \\ 0 & 0 & -1 \end{array}\right). 
$$ %%A:=matrix([[2,-4,4],[0,-2,1],[0,0,-1]]); eigenvalues=2,-2,-1 \global\edef\FIRSTPROB{\LASTPROB} \SOL{17.}{ To solve differential equations $x'=Ax$ where $A$ is a square matrix we apply the standard theorem that says, for dimension 3, $$x = c_1v_1\exp(\lambda_1 t) + c_2v_2\exp(\lambda_2 t) + c_3v_3\exp(\lambda_3 t)$$ where $\lambda_1$, $\lambda_2$, $\lambda_3$ are the distinct eigenvalues of the matrix $A$ with corresponding eigenvectors $v_1$, $v_2$, $v_3$. The symbols $c_1$, $c_2$, $c_3$ represent arbitrary constants in the general solution. The above method is applicable only in the case where $A$ has distinct eigenvalues. Methods exist to solve the problem for a general matrix $A$, however, the theory is beyond the scope of the linear algebra already developed. The general solution of the system of differential equations $x'(t) =A x(t)$ where $$ A=\left(\begin{array}{rrr} 2 & -4 & 4 \\ 0 & -2 & 1 \\ 0 & 0 & -1 \end{array}\right) $$ %%A:=matrix([[2,-4,4],[0,-2,1],[0,0,-1]]); eigenvalues=2,-2,-1 %%[2, 1, {[ 1, 0, 0 ]}], [-2, 1, {[ 1, 1, 0 ]}], [-1, 1, {[ 0, 1, 1 ]}] is given by $$x(t) = c_1\exp(2 t)\colvector{1\\ 0 \\ 0} + c_2\exp(-2t)\colvector{1 \\ 1 \\ 0} + c_3\exp(-t)\colvector{0 \\ 1 \\ 1}$$ } % end solution \probx{17a.}{(Differential Equations)} Solve for the vector solution $x(t)$ in $x'(t) = Ax(t)$, given $$A=\left(\begin{array}{rrr} 4 &0 &1 \\ 0 &2 &1 \\ 0 &0 &3 \end{array}\right).$$ %A:=matrix([[4,0,1],[0,2,1],[0,0,3]]); \SOL{17a.}{ The vector solution $x(t)$ in $x'(t) = Ax(t)$, given $$A=\left(\begin{array}{rrr} 4 &0 &1 \\ 0 &2 &1 \\ 0 &0 &3 \end{array}\right),$$ %A:=matrix([[4,0,1],[0,2,1],[0,0,3]]); eigenvalues=2,4,3 %% [2, 1, {[ 0, 1, 0 ]}], [4, 1, {[ 1, 0, 0 ]}], [3, 1, {[ 1, -1, -1 ]}] is given by $$x(t) = c_1\exp(2 t)\colvector{0\\ 1 \\ 0} + c_2\exp(4t)\colvector{1 \\ 0 \\ 0} + c_3\exp(3t)\colvector{1 \\ -1 \\ -1}$$ } % end solution \prob{18.}{(Diagonalization)} Test for diagonalizability and find the diagonal form. 
$$A=\left(\begin{array}{rrr} 3 &-1 &-1 \\ 1 &1 &-1 \\ 1 &-1 &1 \end{array}\right).$$ \SOL{18.}{ A practical test for diagonalizability of a square matrix $A$ is as follows: \begin{quote} {\bf (a)} Compute all eigenvalues of $A$. If they are distinct, then $A$ is diagonalizable.\\ {\bf (b)} If test (a) fails, then compute the eigenvectors of $A$. If the number of independent eigenvectors equals the dimension of $A$ then $A$ is diagonalizable. \end{quote} If either case (a) or (b) holds, then $A$ is diagonalizable and its diagonal form is the diagonal matrix of eigenvalues. The {\tt maple} command {\tt Eigenvals(A)} (Cap E, not lowercase e) is not very useful for deciding case (a) because the numerical values may be distinct but the actual values identical --- see the example above. The command {\tt eigenvects(A)} can be used to decide case (b). By standard theory, case (b) holds whenever case (a) holds, so you might deduce that case (a) can be eliminated. However, computational complexity often dictates that (a) be checked first. Diagonalizability for $$A=\left(\begin{array}{rrr} 3 &-1 &-1 \\ 1 &1 &-1 \\ 1 &-1 &1 \end{array}\right)$$ %% A:=matrix([[3,-1,-1],[1,1,-1],[1,-1,1]]); %% [1, 1, {[ 1, 1, 1 ]}], [2, 2, {[ 1, 0, 1 ], [ 1, 1, 0 ]}] is tested by first computing the eigenvalues. They are $1$, $2$, $2$, not distinct, so we have to check the geometric multiplicity of $\lambda=2$. If it's $2$, then the total geometric multiplicity is $3$ and $A$ is diagonalizable with diagonal form $$\threebythree{1 & 0 & 0}{0 & 2 & 0}{0 & 0 & 2}.$$ Otherwise, its not diagonalizable. The geometric multiplicity question is equivalent to finding the nullity of $A-2I$. So we find the rank of $A-2I$ and subtract from $3$ (the number of variables). A quick check gives rank $1$, so the nullity is $2$ and $A$ is diagonalizable. } % end solution \probx{18a.}{(Diagonalization)} Test for diagonalizability and find the diagonal form. 
$$A=\left(\begin{array}{rrrr} -2 &-2 &0 &0 \\ -5 &1 &0 &0 \\ 0 &0 &2 &-1 \\ 0 &0 &5 &-2 \end{array}\right).$$ \SOL{18a.}{ Diagonalizability for $$A=\left(\begin{array}{rrrr} -2 &-2 &0 &0 \\ -5 &1 &0 &0 \\ 0 &0 &2 &-1 \\ 0 &0 &5 &-2 \end{array}\right).$$ %% A:=matrix([[-2,-2,0,0],[-5,1,0,0],[0,0,2,-1],[0,0,5,-2]]); %% [3., 1., {[ 1., -2.5, 0, 0 ]}], [-4., 1., {[ 1., 1., 0, 0 ]}], %% [ - 1. I, 1., {[ 0, 0, .4 - .2 I, 1. ]}] is tested by first computing the eigenvalues. They are $3$, $-4$, $i$ and $-i$ ($i=\sqrt{-1}$). So the eigenvalues are distinct and $A$ is diagonalizable with diagonal form $$\fourbyfour{3 & 0 & 0 & 0}{0 & -4 & 0 & 0}{0 & 0 & i & 0}{0 & 0 & 0 & -i}.$$ } % end solution \prob{19.}{(Orthogonal Matrices)} Find an orthogonal matrix $Q$ such that $Q^tAQ$ is diagonal: $$A=\left(\begin{array}{rrrr} 1 &-1 &0 &0 \\ -1 &3 &0 &0 \\ 0 &0 &0 &0 \\ 0 &0 &0 &2 \end{array}\right).$$ \SOL{19.}{ A square matrix $Q$ is {\bf orthogonal} if each column has length 1 and the columns of $Q$ are pairwise orthogonal, that is, $X\cdot Y=0$ for any pair of columns $X$ and $Y$ of $Q$. The matrix $P$ of eigenvectors of a symmetric matrix $A$ already satisfies $P^{-1}AP=D$, where $D$ is the diagonal matrix of eigenvalues. An orthogonal matrix $Q$ is constructed from $P$ by changing its columns to unit vectors, accomplished by dividing each column by its length. Then $Q$ is orthogonal, $Q^{-1}=Q^t$, and the equation $Q^tAQ=D$ holds. The eigenvalues and eigenvectors of the matrix $$A=\left(\begin{array}{rrrr} 1 &-1 &0 &0 \\ -1 &3 &0 &0 \\ 0 &0 &0 &0 \\ 0 &0 &0 &2 \end{array}\right)$$ %% a:=matrix([[1,-1,0,0],[-1,3,0,0],[0,0,0,0],[0,0,0,2]]); eigenvals(a); %% eigenvalues = 0,2,2+sqrt(2),2-sqrt(2) are found to be $$ \lambda=2+\sqrt{2},~~ X=\colvector{1-\sqrt{2}\\ 1\\ 0\\ 0} , ~~~~ \lambda=2-\sqrt{2},~~ X=\colvector{ 1+\sqrt{2}\\ 1\\ 0\\ 0} , $$ $$ \lambda=0,~~ X=\colvector{ 0\\ 0\\ 1\\ 0 }, ~~~~ \lambda=2,~~ X=\colvector{ 0\\ 0\\ 0\\ 1 }. 
$$ Let $a=\sqrt{4-2\sqrt{2}}$, $b=\sqrt{4+2\sqrt{2}}$ be the norms of the first two eigenvectors. The matrix $Q$ of {\em normalized eigenvectors} is given by $$ Q= \left( \begin{array}{cccc} (1-\sqrt{2})/a & (1+\sqrt{2})/b & 0 & 0 \\ 1/a & 1/b & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 \end{array} \right) $$ %% Q:=matrix([[(1-sqrt(2))/a,(1+sqrt(2))/b,0,0],[1/a,1/b,0,0], %% [0,0,1,0], [0,0,0,1]]); It is possible to show directly that the columns of $Q$ have unit length and are pairwise orthogonal. Therefore, $Q$ is orthogonal, which means $Q^{-1}=Q^t$. Finally, $Q^tAQ$ is the diagonal matrix of eigenvalues: $$ \left( \begin{array}{cccc} 2+\sqrt2 & 0 & 0 & 0 \\ 0 & 2-\sqrt2 & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 2 \end{array} \right). $$ } % end solution \end{exercises} \NOTES{ \medskip {\large\bf Maple notes on \FIRSTPROB--\LASTPROB}. A symmetric matrix $A$ with distinct eigenvalues can be transformed to diagonal form $D=\mbox{diag}(\lambda_1,\ldots,\lambda_n)$ by the matrix $Q$ of its normalized eigenvectors: $Q^{-1}AQ = D$. The algorithm for finding $Q$ is as follows: \begin{itemize} \item[(a)] Let {\tt u:=[eigenvals(A)]}. The values should be distinct. \item[(b)] Evaluate $$\mbox{\tt v:=kernel(evalm(A-u[i]*J))}$$ for $i=1,\ldots,n$. Save the $n$ columns $$\mbox{\tt evalm(v/sqrt(dotprod(v,v)))}$$ in a matrix $Q$ using initially {\tt Q:=v} and then {\tt Q:=augment(Q,v)} to add columns one at a time until $n$ columns are filled. \end{itemize} There is the possibility of using {\tt eigenvects(A)} to produce the values {\tt v} of step (b) above. Sometimes {\tt norm(v,2)} produces unevaluated absolute values --- use {\tt sqrt(dotprod(v,v))} instead of {\tt norm(v,2)}. It is remarked that the above algorithm applies only to symmetric matrices. Application to nonsymmetric matrices is considered a logical error (the calculation may produce no error message but the answer is likely incorrect). 
\footnotesize\rm \begin{verbatim} with(linalg): A:=matrix([[16,-3/2],[-3/2,4]]); v:=[eigenvals(A)]; n:=coldim(A): J:=diag(seq(1,i=1..n)): for i from 1 to n do w:=kernel(evalm(A-v[i]*J)); y:=evalm(w[1]/sqrt(dotprod(w[1],w[1]))): y:=evalf(map(evalc,y),3): if i=1 then Q:=matrix(n,1,y): else Q:=augment(Q,matrix(n,1,y)): fi: od: Q = evalf(eval(Q),3); # # Applies only to symmetric matrices! # # [ 16 -3/2 ] [ .990 .122 ] # A := [ ] Q = [ ] # [ -3/2 4 ] [ -.119 .991 ] # # 1/2 1/2 # v := [10 + 3/2 17 , 10 - 3/2 17 ] # \end{verbatim} \normalsize\rm \medskip } % END NOTES {\large\bf Quadratic Forms}. Write the given quadratic as as $AX\cdot X$ for some symmetric matrix $A$. \begin{exercises} \prob{20.}{(Quadratic Form in $\RR^4$)} $x_1^2 - x_2^2 + x_1x_3 - x_2x_4 + x_3^2 + x_4^2$ \global\edef\FIRSTPROB{\LASTPROB} \SOL{20.}{ The quadratic $x_1^2 - x_2^2 + x_1x_3 - x_2x_4 + x_3^2 + x_4^2$ transforms to $AX \cdot X$ where $$ \def\AA{1 &0 & 1/2 & 0 } \def\BB{0 &-1 & 0 & -1/2 } \def\CC{1/2 &0 & 1 & 1 } \def\DD{0 &-1/2& 0 & 1 } X=\colvector{x_1 \\ x_2 \\ x_3 \\ x_4},~~ A=\fourbyfour{\AA}{\BB}{\CC}{\DD}. $$ {\large\bf Example}. Write the quadratic form $$x_1^2 - x_2^2 + x_1x_3 - 3x_2x_4 + 4x_3^2 + x_4^2=10$$ in the matrix form $X^t A X = 10$ for some symmetric matrix $A$. Define $$ X=\left(\begin{array}{r} x_1 \\ x_2 \\ x_3 \\ x_4 \end{array}\right),~~~ A=\left(\begin{array}{rrrr} 1 &0 & 1/2 & 0 \\ 0 &-1 & 0 & -3/2 \\ 1/2 &0 & 4 & 0 \\ 0 &-3/2 & 0 & 1 \end{array}\right). $$ The trick in defining $A$ is is to assign diagonal entries in $A$ to corresponding square terms in the quadratic form, but to cross terms like $-3x_2x_4$ assign {\bf two} off--diagonal entries (e.g., assign $-3/2$ to symmetric entries $a_{24}$ and $a_{42}$ of $A$). 
} % end solution \probx{20a.}{(Quadratic Form in $\RR^3$)} $-x^2 + xy + y^2 - 4xz + 4yz + z^2$ \SOL{20a.}{ The quadratic $-x^2 + xy + y^2 - 4xz + 4yz + z^2$ transforms to $AX \cdot X$ where $$ \def\AA{-1 &1/2 & -2 } \def\BB{1/2 &1 & 2 } \def\CC{-2 &2 & 1 } X=\colvector{x \\ y \\ z},~~ A=\threebythree{\AA}{\BB}{\CC}. $$ } % end solution \end{exercises} \NOTES{ {\large\bf Maple notes on \FIRSTPROB--\LASTPROB}. To calculate the orthogonal matrix $R$ (the {\bf rotation} matrix) that transforms a quadratic form $x^tAx=c$ into its {\bf normal form} $X^tDX=c$ ($D$ is a diagonal matrix), the algorithm for symmetric matrices is followed to create an orthogonal matrix $Q$ of eigenvectors of $A$ such that $D=Q^{-1}AQ$ is the diagonal matrix of eigenvalues of $A$. Then $R$ is the matrix $Q$ of that algorithm and the normal form results by taking the change of variables $X=Qx$. \medskip {\large\bf Example}. Find the rotation matrix $Q$ and the standard form for the quadratic $16x^2-3xy + 4y^2=10$, using {\tt maple}. 
{\bf Solution}: \footnotesize\rm \begin{verbatim} with(linalg): k:=10:eq:= 16*x^2 - 3*x*y + 4*y^2=k; # The problem A:=matrix([[16,-3/2],[-3/2,4]]); v:=[eigenvals(A)]; n:=coldim(A): J:=array(identity,1..n,1..n): for i from 1 to n do w:=kernel(evalm(A-v[i]*J)); y:=evalm(w[1]/sqrt(dotprod(w[1],w[1]))): y:=evalf(map(evalc,y),3): if i=1 then Q:=matrix(n,1,y): else Q:=augment(Q,matrix(n,1,y)): fi: od: X:=matrix(n,1,[X1,X2]): # New variables of standard form eval(X)=evalf(eval(Q),3)*matrix(n,1,[x1,x2]); # Rotation formulas DD:=diag(seq(v[j],j=1..n)); FORM:=evalm(transpose(X) &* DD &* X); # A 1 by 1 matrix collect(FORM[1,1],[X1,X2])=k; # Write in standard form # 2 2 # eq := 16 x - 3 x y + 4 y = 10 # # [ 16 -3/2 ] # A := [ ] # [ -3/2 4 ] # # 1/2 1/2 # v := [10 + 3/2 17 , 10 - 3/2 17 ] # # [ X1 ] [ .990 .119 ] [ x1 ] # [ ] = [ ] [ ] # [ X2 ] [ -.119 .990 ] [ x2 ] # # [ 1/2 ] # [ 10 + 3/2 17 0 ] # D := [ ] # [ 1/2 ] # [ 0 10 - 3/2 17 ] # # 1/2 2 1/2 2 # (10 + 3/2 17 ) X1 + (10 - 3/2 17 ) X2 = 10 # \end{verbatim} \normalsize\rm } %% END NOTES \medskip {\large\bf Standard Form of a Quadratic}. Write the quadratic in standard form $\lambda_1 X^2 + \lambda_2 Y^2=c$ and display both the rotation matrix $R$ and the standard form. \begin{exercises} \prob{21.}{(Quadratic Forms)} $3x^2 - 2xy = 5$ \global\edef\FIRSTPROB{\LASTPROB} \SOL{21.}{ %% eq:= 3*x^2 - 2*x*y + 0*y^2=5; # The problem %% A:=matrix([[3,-2/2],[-2/2,0]]); The normal form of $3x^2 - 2xy = 5$ is $$ \left (3/2+{\frac {\sqrt {13}}{2}} \right ){{\it X1}}^{2}+\left (3/2-{ \frac {\sqrt {13}}{2}}\right ){{\it X2 }}^{2}=5, ~~ \def\AA{0.96& 0.29} \def\BB{- 0.29& 0.96} \colvector{X1 \\ X2}= \twobytwo{\AA}{\BB}\colvector{x \\ y}. 
$$ } % end solution \probx{21a.}{(Quadratic Forms)} $x^2 - 3xy + 4y^2=1$ \SOL{21a.}{ %% eq:= x^2 - 3*x*y + 4*y^2=1; # The problem %% A:=matrix([[1,-3/2],[-3/2,4]]); The normal form of $x^2 - 3xy + 4y^2=1$ is $$ \left (5/2+{\frac {3\,\sqrt {2}}{2}} \right ){{\it X1}}^{2}+\left (5/2-{ \frac {3\,\sqrt {2}}{2}}\right ){{\it X2}}^{2}=1, ~~ \def\AA{- 0.380& 0.923} \def\BB{0.926& 0.383} \colvector{X1 \\ X2}= \twobytwo{\AA}{\BB}\colvector{x \\ y}. $$ } % end solution \probx{21b.}{(Quadratic Forms)} $2x^2 + xy + y^2 = 4$ \SOL{21b.}{ %% k:=4: eq:= 2*x^2 + x*y + y^2=k; # The problem %% A:=matrix([[2,1/2],[1/2,1]]); The normal form of $2x^2 + xy + y^2 = 4$ is $$ \left (3/2+{\frac {\sqrt {2}}{2}} \right ){{\it X1}}^{2}+\left (3/2-{ \frac {\sqrt {2}}{2}}\right ){{\it X2} }^{2}=4, ~~ \def\AA{0.923&- 0.380} \def\BB{0.383& 0.926} \colvector{X1 \\ X2}= \twobytwo{\AA}{\BB}\colvector{x \\ y}. $$ } % end solution \prob{22.}{(Linear Combinations)} Compute the result of the linear combination $2u+v-3w$ where $$u=\colvector{1\\ 1\\ -2},~~~ v=\colvector{0\\ 0\\ 2},~~~ w=\colvector{9\\ 1\\ -4}.$$ \SOL{22.}{ The result of the linear combination is $$2u+v-3w =2\colvector{1\\ 1\\ -2}+\colvector{0\\ 0\\ 2}- 3\colvector{9\\ 1\\ -4} =\colvector{-25 \\ -1 \\ 10} .$$ } % end solution \probx{22a.}{(Equality of Vectors)} Let $$u=\colvector{1\\ x\\ -2},~ v=\colvectorC{x+1\\ 0\\ 2},~ w=\colvectorC{9\\ 1\\ -4x}$$ The linear combination $p=2u+v-3w$ depends upon $x$. Is there a value of $x$ such that $$p=\colvector{ -22.9\\ -0.8\\ 11.2}?$$ \SOL{22a.}{ %% p:=2*vector([1, x, -2]) +vector([x+1, 0, 2]) -3*vector([9, 1, -4*x]); %% evalm(p); The linear combination $p$ is given by $$p=2\colvector{1\\ x\\ -2} +\colvectorC{x+1\\ 0\\ 2} -3\colvectorC{9\\ 1\\ -4x} =\colvectorC{x-24\\ 2x-3 \\ 12x-2} .$$ There is a value of $x$ such that $$p=\colvector{ -22.9\\ -0.8\\ 11.2}$$ exactly when the components agree, i.e., $x-24=-22.9$, $2x-3=-0.8$, $12x-3=11.2$. This happens for $x=1.1$ (check all three equations!). 
} % end solution \prob{23.}{(Largest Linearly Independent Set)} Extract from the list $$\colvector{-1\\ 1\\ 0}, ~~~ \colvector{-2\\ 2 \\ 0}, ~~~ \colvector{1\\ 0\\ 1}, ~~~ \colvector{0\\ 1\\ 1}$$ a largest set of linearly independent vectors. \SOL{23.}{ To extract a largest linearly independent set of vectors from a list $v_1$, $v_2$, $v_3$, $v4$ requires an algorithm be followed. The algorithm begins by choosing the set to be the single element $v_1$, already an independent set. Try to add $v_2$ to the set. This will fail if $v_2$ is a multiple of $v_1$. Then try to add $v_3$ to the set. It will fail if $v_3$ is a combination of vectors already in the set. Finally, try to add $v_4$ to the set, which fails if $v_4$ is a combination of vectors already in the set. When complete, the set is independent by construction and of largest size. The largest independent set from the list $$\colvector{-1\\ 1\\ 0}, ~~~ \colvector{-2\\ 2 \\ 0}, ~~~ \colvector{1\\ 0\\ 1}, ~~~ \colvector{0\\ 1\\ 1}$$ has size $2$. The algorithm gives a basis $$\colvector{-1\\ 1\\ 0}, ~~~ \colvector{1\\ 0\\ 1}. $$ An effective way to test independence of vectors is to find the RREF of the matrix with these vectors as columns. If the rank is the same as the number of columns, then the set is independent. } % end solution \probx{23a.}{(Vector Spaces)} Extract from the list $x$, $x-1$, $2x+1$, $x^2-x$, $x^2+x$ a largest list of linearly independent functions. \SOL{23a.}{ The list $x$, $x-1$, $2x+1$, $x^2-x$, $x^2+x$ can be mapped to a list of column vectors $$\colvector{0\\ 1\\ 0}, ~~~ \colvector{-1\\ 1 \\ 0}, ~~~ \colvector{1\\ 2\\ 0}, ~~~ \colvector{0\\ -1\\ 1}, ~~~ \colvector{0\\ 1\\ 1}$$ where $$ \colvector{c_1\\ c_2\\ c_3} ~~~ \leftrightarrow ~~~ c_1+c_2x+c_3x^2. $$ Then the largest independent list if column vectors corresponds exactly to the largest set of independent polynomials. 
The RREF method gives the largest list as $$\colvector{0\\ 1\\ 0}, ~~~ \colvector{-1\\ 1 \\ 0}, ~~~ \colvector{0\\ -1\\ 1} $$ and therefore the largest list of independent polynomials is $x$, $x-1$, $x^2-x$ (the size is 3, there are many independent lists of size 3). } % end solution \prob{24a.}{(Basis for a Subspace)} Find a basis for the set of vectors $ \vec{v}=\colvector{x\\ y\\ z}$ in $\RR^3$ that satisfy the equation $ 5x+6y-2z=0 $. \SOL{24a.}{ To find a basis for the set of vectors $ X=\colvector{x\\ y\\ z}$ in $\RR^3$ that satisfy the equation $ 5x+6y-2z=0 $, find the RREF of the matrix $A$ given above and write out the complete solution $X$ of the linear system $AX=0$: $$ X=\colvector{x\\ y\\ z} =\colvectorC{(-6/5)y + (2/5)z \\ y\\ z} =y\colvectorC{-6/5 \\ 1 \\ 0} +z\colvectorC{2/5 \\ 0\\ 1}. $$ A basis can be read off the standard form of the solution $X$: $$ \colvectorC{-6/5 \\ 1 \\ 0}, ~~~ \colvectorC{2/5 \\ 0\\ 1}. $$ } % end solution \probx{24.}{(Subspace Criterion)} Show that the set of vectors $\overrightarrow v=\colvector{x\\ y\\ z}$ in $\RR^3$ that satisfy the equation $ 5x+6y-2z=0 $ is a subspace of $\RR^3$. \SOL{24.}{ To prove a set is a subspace of $\RR^3$ it suffices to establish the conditions in the {\bf Subspace Criterion}: \begin{quote} {\bf (a)} If $X$ and $Y$ are in the set, then so is $X+Y$.\\ {\bf (b)} If $X$ is in the set and $k$ is a constant, then $kX$ is in the set. \end{quote} A set already known to be a subspace of $\RR^3$ has a basis of one, two or three elements (unless the set is the origin). A plane given by an equation $ax+by+cz=0$ represents a subspace with basis of two elements, because two independent vectors determine a plane. A line through the origin given by a vector equation $$\dd \colvector{x\\ y\\ z}=t\colvector{v_1\\ v_2\\ v_3}$$ is a subspace with a basis of one element. 
To show that the set of vectors $X=\colvector{x\\ y\\ z}$ in $\RR^3$ that satisfy the equation $ 5x+6y-2z=0 $ is a subspace of $\RR^3$, write the equation as a matrix equation $AX=0$ where $$ \def\AA{5 & 6 & -2} A=\left(\begin{array}{rrr} \AA \end{array}\right). $$ It is routine to check (a) and (b) for the equation $AX=0$. For example, to check (a), let $AX=0$ and $BY=0$, then $A(X+Y)=AX+AY=0+0=0$, so $X+Y$ is in the set. Item (b) is similar. } % end solution \probx{24b.}{(Vector Space Basis)} Find a basis for the solution space of the linear differential equation $y''' -6y''+11y' -6y=0$. \SOL{24b.}{ A basis for the solution space of a homogeneous linear differential equation can be extracted from the {\bf General Solution} of the equation. The basis is obtained formally from the general solution by identifying the functions multiplying the arbitrary constants in the general solution. From the general solution $$y(x) = C_1 \exp(x) + C_2 \exp(- 3 x) + C_3 \exp(- 2 x)$$ we can infer that $\exp(x)$, $\exp(-3x)$, $\exp(-2x)$ is a basis for the solution space of the differential equation $y''' + 4y'' + y' - 6y=0$. %% x:='x':y:='y':u:=y(x): u1:=diff(y(x),x): %% u2:=diff(y(x),x,x): u3:=diff(y(x),x,x,x): %% de:=u3-6*u2+11*u1-6*u = 0: %% dsolve(de,y(x)); A basis for the solution space of the linear differential equation $y''' -6y''+11y' -6y=0$ is $$e^x,~~~ e^{2x}, ~~~ e^{3x}.$$ } % end solution \end{exercises} \NOTES{ {\bf Maple Notes on Differential Equations}. The general solution of a differential equation can be found by generalizing this example: \footnotesize\rm \begin{verbatim} u:=y(x): u1:=diff(y(x),x): u2:=diff(y(x),x,x): u3:=diff(y(x),x,x,x): de:=u3+4*u2+u1-6*u = 0: dsolve(de,y(x)); # y(x) = _C1 exp(x) + _C2 exp(- 3 x) + _C3 exp(- 2 x) \end{verbatim} \normalsize\rm \medskip } % END NOTES \medskip {\large\bf Shadow Projection and Dot Product}. Let $$u=\colvectorC{t \\ e^t\\ e^{2t}}, ~~~ v=\colvector{t+1\\ e^{t-1}\\ e^{1-t}}$$ and let $t=0.6$. 
Compute the following answers and display the result in 3-digit accuracy. \begin{exercises} \prob{25.}{(Vector Projection)} The vector projection of $u$ onto the direction of $v$, Proj$_v(u)$. \SOL{25.}{ The vector projection of $u$ onto the direction of $v$ is $$ \mbox{\bf Proj}_v (u)=\colvectorC{2.2 \\ 0.9 \\ 2.0}.$$ The intent of this problem is to examine various situations where the idea of shadow projection is useful. } \end{exercises} \NOTES{ {\bf Maple notes on vector algebra}. The {\tt maple} commands {\tt vector}, {\tt evalm}, {\tt norm} can be used. Here is an example: \footnotesize\rm \begin{verbatim} with(linalg): u:=vector([1,x,-2]): v:=vector([x+1,0,2]): w:=vector([9,1,-4*x]): dotprod(u,v); # x - 3 p:=evalm(2*u-v+3*w); # p := [ 28 - x, 2 x + 3, - 6 - 12 x ] q:=subs(x=2.1,eval(p)); # [ 25.9, 7.2, -31.2 ] norm(q,2); # 41.18361325 sqrt(q[1]^2+q[2]^2+q[3]^2); # 41.18361325 sqrt(dotprod(q,q)); # 41.18361325 \end{verbatim} \normalsize\rm It is remarked that {\tt norm(q,2)} stands for the Euclidean norm of the vector {\tt q}, whereas {\tt norm(q)} is the maximum norm. The components of vector {\tt q} are {\tt q[1], q[2], q[3]}. Some basic formulas: \bigskip \begin{tabular}{l|l} \bf Math Formula & \tt Maple \bf Formula \\ \hline Proj$_{\vec{b}}(\vec{a})=\displaystyle\frac{\vec{a}\cdot \vec{b}}{\vec{b}\cdot \vec{b}}\vec{b}$ & \tt p:=evalm((dotprod(a,b)/dotprod(b,b))*b) \\[2em] $\vec{a}\cdot \vec{b} = |\vec{a}||\vec{b}|\cos(\theta)$ & \tt theta:=arccos(dotprod(a,b)/(norm(a,2)*norm(b,2))) \end{tabular} } % END NOTES \begin{exercises} \probx{25a.}{(Altitudes of a Triangle)} The three altitudes of the triangle determined by $u$ and $v$. \SOL{25a.}{ The three altitudes of the triangle determined by $u$ and $v$ are approximately $1.33$, $2.23$ and $2.14$. } % end solution \prob{26.}{(Angle between Vectors)} The acute angle between $u$ and $v$. \SOL{26.}{ The acute angle between $u$ and $v$ is $35.6$ degrees. 
} % end solution \probx{26a.}{(Angles of a Parallelogram)} All angles in the parallelogram determined by $u$ and $v$. \SOL{26a.}{ The angles in the parallelogram determined by $u$ and $v$ are $35.6$ and $144.4$ degrees. } % end solution \probx{26b.}{(Angle in a Triangle)} A triangle in $\RR^2$ has vertices $P=(3,-1), Q=(4,5)$ and $R=(-1,-2)$. Find the cosine of the angle $\theta$ at the vertex $P$. \SOL{26b.}{ The cosine of the angle $\theta=113.5^\circ$ is $-0.3987$. } \end{exercises} \NOTES{ A maple example: \footnotesize\rm \begin{verbatim} with(linalg): # Find cosine of the angle at vertex P in a triangle P:=vector([1,4]): Q:=vector([3,7]): R:=vector([2,12]): a:=evalm(R-P): b:=evalm(Q-P): ans:=evalf(dotprod(a,b)/(norm(a,2)*norm(b,2))); \end{verbatim} \normalsize\rm } % END NOTES \medskip {\large\bf Cross Product and Scalar Triple Product}. Let $$u=\colvectorC{t^2\\ e^t\\ e^{-t}}, ~~~ v=\colvectorC{t\\ e^{2t}\\ e^{-2t}}, ~~~ w=\colvector{1\\ -1\\ 0}$$ and let $t=0.5$. Compute the following answers and display the result in 3-digit accuracy. \begin{exercises} \prob{27.}{(Area)} The area of the parallelogram determined by $u$ and $v$. \SOL{27.}{ The area of the parallelogram determined by $u$ and $v$ is $1.07$. } \end{exercises} \NOTES{ A maple example: \footnotesize\rm \begin{verbatim} with(linalg): u:=vector([t^2,exp(t),t*exp(t)]): v:=vector([t,exp(t),exp(-t)]): w:=vector([1,1,-1]): t:=0.55: area:=norm(crossprod(u,v),2); # area := .8554853324 \end{verbatim} \normalsize\rm } % END NOTES \begin{exercises} \probx{27a.}{(Volume)} The volume of the parallelepiped determined by $u$, $v$, $w$. \SOL{27a.}{ The volume of the parallelepiped determined by $u$, $v$, $w$ is $1.25$. 
} \end{exercises} \NOTES{ A maple example: \footnotesize\rm \begin{verbatim} with(linalg): u:=vector([t^2,exp(t),t*exp(t)]): v:=vector([t,exp(t),exp(-t)]): w:=vector([1,1,-1]): t:=0.55: volume:=abs(dotprod(crossprod(u,v),w)); # volume := .1264705293 \end{verbatim} \normalsize\rm } % END NOTES \begin{exercises} \prob{28.}{(Cross Product)} The cross product of $u \times w$ and $v \times w$. \SOL{28.}{ The cross product of $u \times w$ and $v \times w$ is $$(u \times w) \times (v \times w) = \colvector{-1.25\\ 1.25\\ 0.00}.$$ } % end solution \probx{28a.}{(Cross Product Magnitude)} The magnitude of the cross product of the diagonals of the parallelogram determined by $u$ and $v$. \SOL{28a.}{ The magnitude of the cross product of the diagonals is $2.145$. } % end solution \prob{29.}{(Near Point)} Find the point $R$ on the line $L$ through $P=(1,-4,6)$ and $Q=(3,1,2)$ that is closest to the origin. \SOL{29.}{ The point $R$ on the line $L$ through $P=(1,-4,6)$ and $Q=(3,1,2)$ that is closest to the origin is $R=(2.87,0.67,2.27)$. It is found from $R\cdot T=0$ where $T$ is tangent to the line. Since $R=P+tT$ for some $t$, it follows that $R=P-\mbox{\bf Proj}_T(P)$. } % end solution \prob{30.}{(Equation of a Plane)} Find the equation of the plane passing through the three points $P=(t,t^2,t^3)$, $Q=(t\sin(t),t\cos(t),t^2)$, $R=(\sin(t),\cos(t),t)$ where $t=0.886$. \SOL{30.}{ The equation of the plane passing through the three points $P=(t,t^2,t^3)$, $Q=(t\sin(t),t\cos(t),t^2)$, $R=(\sin(t),\cos(t),t)$ where $t=0.886$ is $$0.256x -0.246y -0.0476z = 0.$$ The tricky part of this problem is to observe that the origin is in the plane, therefore the cross product of $P$ and $R$ is a normal $N$ to the plane, whose equation must be $N\cdot X=0$. There are other ways to do the problem, giving equivalent answers. } \end{exercises} \NOTES{ {\tt Maple} produces varying answers containing powers $10^{-10}$, which for practical purposes are zero terms. 
A {\tt maple} example: \footnotesize\rm \begin{verbatim} with(linalg): # Plane equation given three points P, Q, R P:=vector([1,0,1]): Q:=vector([1,1,-2]): R:=vector([0,1,1]): dotprod(evalm(vector([x,y,z])-P),crossprod(evalm(Q-P),evalm(R-P)))=0; \end{verbatim} \normalsize\rm The idea of this example can be carried out for the present problem, however, beware of stuffing in the value of $t$ too early in the computation. } % END NOTES \begin{exercises} \probx{30a.}{(Intersection of Planes)} Find an algebraic description of the intersection of the two planes $3x-y+4z=3$ and $-4x-2y+7z=8$. Plot the two planes on one set of axes and show the intersection set. \SOL{30a.}{ The planes $3x-y+4z=3$ and $-4x-2y+7z=8$ intersect along a line whose direction is $T=(1,-37,-10)$. A point in the intersection set is $P=(0,-11,-2)$. The parametric form of the line of intersection is $X=P+tT$. One mystery is how to find $P$. Solve the mystery by solving for $x$, $y$, $z$ in the set of two equations in three unknowns. This is an RREF problem for the augmented matrix. Another mystery is how to find $T$. Solve this one with the cross product of the normals to the two planes. The cross product of the normals is a vector whose direction is the intersection of the planes. } % end solution \end{exercises} \NOTES{ A {\tt maple} example: \footnotesize\rm \begin{verbatim} with(linalg): # Find the equation of intersection for planes x+y-z=1 and x+2y-4z=2 # Solving simultaneously, y-3z=1, so x=0,z=0,y=1 is in both planes. n1:=vector([1,1,-1]): n2:=vector([1,2,-4]): # Normals to the planes v:=crossprod(n1,n2): vector([x,y,z]) = evalm(vector([0,1,0]) + t*v); # Line equation parametric form # [ x, y, z ] = [ - 2 t, 1 + 3 t, t ] \end{verbatim} \normalsize\rm A {\tt maple} plotting example. 
\footnotesize\rm
\begin{verbatim}
with(plots):
# Plot planes 3x-y+z=4 and 2x+y+3z=1 on same axes, show intersection
plot3d({4-3*x+y,(1-2*x-y)/3},x=0..10,y=0..10);
\end{verbatim}
\normalsize\rm \medskip } % END NOTES
\begin{exercises}
\prob{31.}{(Angle between Planes)} Find the angle in degrees between the two planes $3x-y+4z=3$ and $-4x-2y+7z=8$.
\SOL{31.}{ The angle in degrees between the two planes $3x-y+4z=3$ and $-4x-2y+7z=8$ is $64.9$. Use the angle between the normals to the two planes. The angle between two planes is not uniquely determined because the angle could be either obtuse or acute. It is for this reason that the angle between two planes is often taken to be the {\em acute angle between the normals}. } % end solution
\end{exercises}
\NOTES{ Some {\tt maple} examples:
\footnotesize\rm
\begin{verbatim}
with(linalg):
# Find the angle in radians between x+y-z=1 and x+2y-4z=2
n1:=vector([1,1,-1]): n2:=vector([1,2,-4]): # Normals to the planes
theta:=evalf(arccos(dotprod(n1,n2)/(norm(n1,2)*norm(n2,2))));
# theta := .4908826781
\end{verbatim}
\normalsize\rm } % END NOTES
\begin{exercises}
\prob{32.}{(Parametric form of a Line)} Use vector methods to find a parametric representation of the line that passes through the point $(3,4,7)$ and is parallel to the line of intersection of the planes defined by the equations $ x+2y+11z=0$ and $ 5x+11y+58z=0$.
\SOL{32.}{ The parametric representation of the line that passes through the point $(3,4,7)$ and is parallel to the line of intersection of the planes defined by the equations $ x+2y+11z=0$ and $ 5x+11y+58z=0$ is $$ X=\colvector{3\\ 4 \\ 7} + (-t)\colvector{5\\ 3 \\ -1}. $$ } % end solution
\prob{37.}{(Circular Helix Tangents and Normals)} Sketch the circular helix $f(t)=\colvectorC{\cos 2t\\ \sin 2t\\ t}$ for $0 \le t \le 4\pi$ and draw on the curve at $t=\pi/4$ the tangent and normal vectors to the curve.
\SOL{37.}{ The circular helix $\dd f(t)=\colvectorC{\cos 2t\\ \sin 2t\\ t}$ for $0 \le t \le 4\pi$ is a space curve on the cylinder $x^2+y^2=1$. The $z$-component is $z=t$ which moves at speed 1. The curve wraps around the cylinder like a coil spring. The tangent vector points in the direction of the curve. The normal points to the center of the circle of curvature. } % end solution \prob{38.}{(Unit Tangent and Acceleration)} Compute the unit tangent vector and the (non-unit) acceleration vector at $t=1$ for the vector function $$f(t)=\colvectorC{t\\ t^2\\ t^3\\ \sin \pi t\\ \cos \pi t}.$$ \SOL{38.}{ The unit tangent vector and the (non-unit) acceleration vector at $t=1$ for the vector function $$f(t)=\colvectorC{t\\ t^2\\ t^3\\ \sin \pi t\\ \cos \pi t}$$ are obtained by calculations that parallel 3D and 2D methods: find vectors $f'(1)/|f'(1)|$ and $f''(1)$. The answers: $$\frac{f'(1)}{|f'(1)|}= \frac{1}{\sqrt{14+\pi^2}}\colvectorC{1\\ 2 \\ 3 \\ -\pi \\ 0}, ~~~~ f''(1)=\colvectorC{0\\ 2 \\ 6 \\ 0 \\ \pi^2}$$ For comparison, $\sqrt{14+\pi^2}=4.89$. } % end solution \end{exercises} \NOTES{ {\bf Maple notes on \LASTPROB}. Vector notation from maple's {\tt linalg} package is assumed. The position, velocity and acceleration vectors can be directly computed in symbolic form. Here is an example: \footnotesize\rm \begin{verbatim} with(linalg): f:= t -> vector([cos(t),sin(t),t^2]): v:= t -> map(diff,f(t),t): a:= t -> map(diff,f(t),t,t): f(t); # 2 # [ cos(t), sin(t), t ] v(t); # [ - sin(t), cos(t), 2 t ] a(t); # [ - cos(t), - sin(t), 2 ] \end{verbatim} \normalsize\rm } % END NOTES \begin{exercises} \prob{39.}{(Arc Length)} Compute the arc length on $[0,2]$ for $$f(t)=\colvectorC{2t^3/3\\ 1+t^{4.5}\\ 1-t^{4.5}}.$$ \SOL{39.}{ Arc length is computed from $\int_a^b |f'(t)|dt$. The answer: $$\int_0^2 \sqrt{4t^4+4.5^2t^7+4.5^2t^7}dt=32.55.$$ The secret seems to be factoring $t^4$ out of the square root and then making a change of variable in the integral. 
} % end solution
\prob{40.}{(Curvature and Radius of Curvature)} Compute the curvature and radius of curvature for $$f(t)=\colvectorC{3\sin t\\ 4\cos t}.$$
\SOL{40.}{ There are separate formulas for the curvature in space dimensions 2 and 3. This one uses the planar formula: $$\kappa=\frac{|x'y''-x''y'|}{|x'^2+y'^2|^{3/2}},~~ x=3\sin t,~~ y=4\cos t.$$ The answer: $$R=\frac{1}{\kappa} =\frac{1}{12}\left( 16- 7\cos^2(t) \right)^{3/2} =\frac{1}{12}\left(9+7\sin^2 t\right)^{3/2}.$$ } % end solution
\probx{40a.}{(Circle of Curvature)} Sketch the circle of curvature for $y= \ln \cos x$ at $x=\pi/4$.
\SOL{40a.}{ To sketch the circle of curvature it is necessary to compute its radius $R=1/\kappa$ and the center of the circle of curvature, whose location has the form $\vec{r}+R \vec{n}$ where $\vec{r}$ is position and $\vec{n}$ is the standard normal vector. The curve $y= \ln \cos x$ at $x=\pi/4$ has curvature $\kappa=1/\sqrt2$, obtained by the special 2D formula. Also, $\vec{r}=(\pi/4,-\log 2/2)$ and $\vec{n}=(-1/\sqrt2, -1/\sqrt2)$. The center of the circle of curvature is $$C=\left( \frac{\pi}{4} - 1,~ -\frac{1}{2} \log 2 - 1 \right).$$ Use {\tt maple} to produce the curve plot and then sketch the circle of curvature by hand. } % end solution
\probx{40b.}{(Curvature and Torsion)} For the space curve $\dd f(t)=\colvectorC{a\cos t\\ a\sin t\\ bt}$, where $a>0$, calculate the curvature $\kappa$ and the torsion $\tau$.
\SOL{40b.}{ For the space curve $\dd f(t)=\colvectorC{a\cos t\\ a\sin t\\ bt}$, where $a>0$, calculate the curvature $\kappa$ and the torsion $\tau$. Some calculations: $$ T=\frac{1}{\sqrt{a^2+b^2}}\colvectorC{-a\sin t\\ a\cos t \\ b}, ~~~ N=\colvectorC{-\cos t\\ -\sin t \\ 0}, ~~~ f'(t)\times f''(t) = \colvectorC{ab\sin t \\ -ab\cos t \\ a^2}. $$ The curvature is $\kappa=a/(a^2+b^2)$ and the torsion is $\tau=b/(a^2+b^2)$. } % end solution
\end{exercises}
\NOTES{ {\bf Maple notes on \LASTPROB}.
The plotting of space curves in {\tt maple} will be illustrated in the example below. The problem is to plot a circular helix space curve.
\footnotesize\rm
\begin{verbatim}
with(plots):
spacecurve([cos(t),sin(t),t],t=0..4*Pi);
\end{verbatim}
\normalsize\rm \medskip } % END NOTES
\begin{exercises}
\prob{41.}{(Unit Normal and Unit Binormal)} Find the unit normal and unit binormal at $t=0$ for the vector function $$f(t)=\colvectorC{1\\ t\\ t^2}.$$
\SOL{41.}{ The unit normal $N$ and unit binormal $B$ at $t=0$ for the vector function $$f(t)=\colvectorC{1\\ t\\ t^2}$$ are $$N=\colvector{0\\ 0\\ 1}, ~~~ B=\colvector{1\\ 0 \\ 0}.$$ The unit normal is calculated by differentiation of the formula for the unit tangent followed by division to make it a unit vector. Don't try to apply general formulas: it is the process you are trying to learn; this is not a memorization project! } % end solution
\probx{41a.}{(Binormal)} Given $a>0$, calculate the unit binormal vector $B$ for the space curve $f(t)=\colvectorC{a\cos t\\ a\sin t\\ bt}$.
\SOL{41a.}{ The calculations were done above in connection with the curvature and torsion for this curve. The unit binormal vector $B$ is $$B=\frac{1}{\sqrt{a^2+b^2}}\colvectorC{b\sin t\\ -b\cos t \\ a}.$$ } % end solution
\prob{42.}{(Kinematics)} Compute the velocity, speed, acceleration at $t=\pi/4$ for the space curve $$f(t)=\colvectorC{\cos t\\ \sin t\\ t^5}.$$
\SOL{42.}{ The speed, velocity and acceleration at $t=\pi/4$ for the space curve $$f(t)=\colvectorC{\cos t\\ \sin t\\ t^5}$$ are $$ v=2.15,~~~ {\bf v}= \colvectorC{-1/\sqrt2 \\ 1/\sqrt2 \\ 1.9}, ~~~ {\bf a} = \colvectorC{-1/\sqrt2 \\ -1/\sqrt2 \\ 9.69}. $$ } % end solution
\probx{42a.}{(Kinematics)} An airplane in level flight at 600mph releases a bomb at 30000 feet. At what speed and elapsed time does the bomb hit the ground?
\SOL{42a.}{ The speed of the airplane does affect the answer. The time is $43.2$ seconds. The vertical speed at impact is $1389.4$ feet per second; combining it with the horizontal speed of $880$ feet per second ($600$ mph) gives the speed of impact $\sqrt{880^2+1389.4^2}\approx 1644.7$ feet per second.
} % end solution
\probx{42b.}{(Kinematics)} For the plane trajectory $$ f(t)=\colvectorC{A(t\cos t - \sin t) \\ A(t\sin t + \cos t)}, ~~~ A>0,$$ calculate the velocity vector ${\bf \vec{v}}$, the speed $v$, and the acceleration vector ${\bf \vec{a}}$.
\SOL{42b.}{ For the plane trajectory $$ f(t)=\colvectorC{A(t\cos t - \sin t) \\ A(t\sin t + \cos t)}, ~~~ A>0,$$ the velocity and acceleration are $$ {\bf \vec{v}}=f'(t)=A\colvectorC{-t\sin t \\ t\cos t}, ~~~ {\bf \vec{a}}=f''(t)=A\colvectorC{-\sin t-t\cos t \\ \cos t -t\sin t}. $$ The speed is $v=|f'(t)|=A|t|$. } % end solution
\prob{43.}{(Tangential and Normal Components)} For the plane trajectory $$ f(t)=\colvectorC{A(t\cos t - \sin t) \\ A(t\sin t + \cos t)}, ~~~ A>0,$$ calculate the tangential component of the acceleration $a_T$ and the normal component of the acceleration $a_N$.
\SOL{43.}{ The previous problem did most of the computations. The tangential component is $a_T=T\cdot {\bf a}$. The normal component $a_N$ is obtained from ${\bf a}= a_T T + a_N N$ by solving for $a_N$ in the identity $|{\bf a}|^2=a_T^2+a_N^2$. The answers: $a_T=A$, $a_N=A|t|$. } % end solution
\prob{33.}{(Level Curve Sketch)} Sketch the level curves of $z=2x^2+3y^2$ for $z=1$, $2$, $4$, $10$.
\SOL{33.}{ The project is to sketch the ellipses $2x^2+3y^2=1$, $2x^2+3y^2=2$, $2x^2+3y^2=4$, $2x^2+3y^2=10$. All have center at the origin and they are concentric. This is best done by hand. } % end solution
\probx{33a.}{(Sketch Curves of Constant Voltage)} Sketch the curves of constant voltage $v$ for $v=1$, $0.75$, $0.5$, $0.25$, $0.125$: $$\displaystyle v=\sqrt{1-4x^2-9y^2}.$$
\SOL{33a.}{ The curves of constant voltage $v$ are the concentric ellipses $4x^2+9y^2=1-1^2$, $4x^2+9y^2=1-0.75^2$, $4x^2+9y^2=1-0.5^2$, $4x^2+9y^2=1-0.25^2$, $4x^2+9y^2=1-0.125^2$. All have center at the origin. Best done by hand. } % end solution
\probx{33b.}{(Domain and Level Curves)} Find the domain and range of $f(x,y,z)=xy/z$ and plot the level surface $f(x,y,z)=1$.
\SOL{33b.}{ The domain of $f(x,y,z)=xy/z$ is all $(x,y,z)$ with $z\ne 0$. The range is all of $\RR^1$. A plot of the level surface $f(x,y,z)=1$ is the surface plot $z=xy$, which is done in {\tt maple} with the command {\tt plot3d(x*y,x=-10..10,y=-10..10)}. General surfaces are plotted in {\tt maple} by similar methods using the command {\tt plot3d}. Here is an example: \footnotesize\rm {\tt plot3d(sin(x*y),x=-Pi..Pi,y=-Pi..Pi,style=PATCH);} \normalsize\rm A distinguishing feature of this example is that $z$ does not appear as a variable in the plot command! It is a common mistake to write the mathematical equation into the {\tt maple} plot command. This error results in the plot command dealing with variables $x$, $y$, $z$: no plot structure is generated and no plot appears. No error messages are emitted, however, so you can't tell there is a mistake! The idea of using the variable $z$ is as follows: \footnotesize\rm {\tt z:= sin(x*y):}\newline {\tt plot3d(z,x=-Pi..Pi,y=-Pi..Pi,style=PATCH);} \normalsize\rm The unix mouse and dialog box interface allows printing the figure to a postscript file. The postscript file is sent to the laser printer with the command {\tt print -p plotoutput} (in a terminal window). For most purposes, a screen print or hand copy will suffice. } % end solution \prob{34.}{(Computer Plot of a Surface)} Plot by computer $\displaystyle z=(2x^2+3y^2)e^{1-x^2-y^2}$. Part of the problem is to determine the optimum plot domain. \SOL{34.}{ A {\tt maple} computer plot is done with } % \NOTES{ \begin{center} \verb|plot3d((2*x^2+3*y^2)*exp(1-x^2-y^2),x=a..b,y=c..d)| \end{center} where $a$, $b$, $c$, $d$ are adjusted to make the plot domain correct. The idea behind choosing the values is the observation that $e^x$ is zero for plotting purposes when $x$ is very negative. } % END NOTES \prob{35.}{(Gradient Field Magnitude)} Compute the gradient field of $z=y\tan(y-x)$ and determine its magnitude along the line $y=x$. 
\SOL{35.}{ The magnitude of the gradient field is the vector norm of the gradient vector. Along $y=x$ the vector simplifies and effectively depends only upon $x$. The gradient field of $z=y\tan(y-x)$ is $$ \grad(z) = \colvectorC{ \frac{\partial z}{\partial x} \\ \frac{\partial z}{\partial y} } = \colvectorC{ -y\sec^2(y-x) \\ \tan(y-x)+y\sec^2(y-x) } . $$ Its value along the line $y=x$ is $$ \grad(z) = \colvectorC{ -y\sec^2(0) \\ \tan(0)+y\sec^2(0) } . $$ Since $\sec(0)=1$ and $\tan(0)=0$, the above can be simplified to give magnitude $|y|\sqrt2$ (or $|x|\sqrt2$). } % end solution \prob{36.}{(Limits of Vector Functions)} Compute $$\displaystyle \lim_{t\to 1} f(t), ~~~ \lim_{t\to 1} g(t)$$ for the vector functions $$f(t)=\colvectorC{\frac{\sin (t-1)}{t-1}\\ e^{1-t}}, ~~ g(t)=\colvectorC{\tan(\frac{1}{t})\\t\\ t^2\\ t^3}.$$ \SOL{36.}{ Limits of vector quantities are taken by component: each 3D vector limit problem is in disguise three normal limit problems from calculus. The answers: $$\displaystyle \lim_{t\to 1} f(t)=\colvector{1\\ 1}, ~~~ \lim_{t\to 1} g(t) = \colvectorC{\tan 1\\ 1 \\ 1 \\ 1}.$$ } % end solution \prob{44.}{(Directional Derivative)} Compute the directional derivative of $f(x,y,z)=xe^y - ye^z + xyz$ at $(0,0,1)$ in direction $\colvector{1\\ 1\\-3}$. \SOL{44.}{ The directional derivative is $\dd \frac{1-e}{\sqrt{11}}$. } % end solution \probx{44a.}{(Continuity of Partials)} Let $\displaystyle f(x,y) = (x^2+y^2)\sin(1/\sqrt{x^2+y^2})$ except at the origin, where $f(0,0)=0$. Show that $f$ is differentiable but the partials are not continuous at $x=0$, $y=0$. \SOL{44a.}{ This ``proof'' is constructed by computing the partial derivatives of $f(x,y) = (x^2+y^2)\sin(1/\sqrt{x^2+y^2})$ at all $x$, $y$, and then in particular at $x=y=0$. All calculations are successful, but continuity fails at $x=y=0$. 
Some steps: $$ f_x(x,y) = 2\,x\sin({\frac {1}{\sqrt {{x}^{2}+{y} ^{2}}}})-{\frac {\cos({\frac {1}{ \sqrt {{x}^{2}+{y}^{2}}}})x}{\sqrt {{x }^{2}+{y}^{2}}}}, ~~~~ f_y(x,y)= 2\,y\sin({\frac {1}{\sqrt {{x}^{2}+{y} ^{2}}}})-{\frac {\cos({\frac {1}{ \sqrt {{x}^{2}+{y}^{2}}}})y}{\sqrt {{x }^{2}+{y}^{2}}}} . $$ The derivatives at $x=y=0$ are found as follows: $$ \lim_{h\to 0} \frac{f(h,0)}{h} = \lim_{h\to 0} h\sin(\frac {1}{|h|}) = 0, ~~~ \lim_{k\to 0} \frac{f(0,k)}{k} = \lim_{k\to 0} k\sin(\frac {1}{|k|}) = 0. $$ The lack of continuity is verified by taking the limit along different paths approaching the origin. } % end solution \prob{45.}{(Direction of Maximum Temperature Increase)} The temperature $T=50-x^2-y^2$ on a plate increases most rapidly in some direction $\dd\colvectorC{a\\ b}$, at the point $x=1$, $y=0$. Find the direction. \SOL{45.}{ The gradient of $T$ points in the direction of maximum increase of $T$. The answer: $\dd \colvector{-2 \\ 0}$. } % end solution \probx{45a.}{(Maximum Temperature)} Find the hottest point on or inside a solid with shape $x^2+y^2+z^2 \le 1$ having temperature distribution $T=1000\exp(-x^2-y^2-z^2-10)$. \SOL{45a.}{ The hottest point is at $x=y=z=0$, because $T$ is smaller at other points, due to the form of the exponential term. } % end solution \prob{46.}{(Partials)} Let $$\begin{array}{lcl} w &=& x^2-y^2, \\ x &=& u^2-v^2, \\ y &=& 2uv. \end{array} $$ Use the chain rule to calculate $\partial w / \partial u$ and $\partial w / \partial v$. Check your answers by writing $w$ as a function of $u$ and $v$. \SOL{46.}{ The answers: $4u^3-12uv^2$ and $4v^3 - 12vu^2$. } \end{exercises} \NOTES{ {\bf Maple notes on \LASTPROB}. Partial derivatives and gradients are computed with {\tt maple} as in the example below. A gradient is computed at a given point. The symbolic value of the gradient below is {\tt z(s,t)}, which is too complicated to print here. 
\footnotesize\rm
\begin{verbatim}
with(linalg):
w:=x^2*y^2 - x*y; x:=u^2+v^2; y:=u-v;
z:= (s,t) -> subs({u=s,v=t},vector([diff(w,u),diff(w,v)]));
#
#                                            d        d
# z := (s,t) -> subs({u = s, v = t}, vector([---- w, ---- w]))
#                                            du       dv
z(1,0); # [3, -1]
z(2,3); # [-243, 513]
\end{verbatim}
\normalsize\rm \medskip } % END NOTES
\begin{exercises}
\prob{47.}{(Tangent Plane and Normal Line)} Find the tangent plane equation and the normal line equations for $x^2-y^2+z^2=1$ at $x=1$, $y=2$, $z=2$.
\SOL{47.}{ Find the tangent plane equation and the normal line equations. The surface $x^2-y^2+z^2=1$ is a level surface of $F=x^2-y^2+z^2-1$, therefore $\grad(F)$ is a normal to the surface. At $x=1$, $y=2$, $z=2$, $\grad(F)=\colvector{2\\ -4 \\ 4}$. The tangent plane equation is $2(x-1)-4(y-2)+4(z-2)=0$. The normal line is given by $$X=\colvector{1 \\ 2 \\ 2} + t\colvector{2 \\ -4 \\ 4}.$$ } % end solution
\probx{47a.}{(Tangent Plane and Normal Line)} Find the tangent plane equation and the normal line equations for $z=\arctan(y/x)$ at $x= -4$, $y=4$.
\SOL{47a.}{ The normal line equation is given by the {\bf point} of application and the {\bf direction} of the normal to the tangent plane, i.e., the gradient of the surface at the given point. The line equation has three possible forms. When not specified, choose the vector form $\vec{r} = \vec{P} + t\vec{v}$. The plane equation can be given in several forms. The preferred form is $$a(x-x_0)+b(y-y_0)+c(z-z_0)=0$$ where $(x,y,z)$ is any point in the plane, $(x_0,y_0,z_0)$ is a fixed point in the plane and the normal to the plane is $(a,b,c)$. The answers: $$ X=\colvector{-4 \\ 4 \\ -\pi/4} + (-t)\colvector{ 1/8 \\ 1/8 \\ 1}, ~~~ \frac{1}{8}(x +4) + \frac{1}{8}(y-4) + (z+\frac{\pi}{4})=0.$$ } % end solution
\probx{47b.}{(Tangent Plane and Symmetric Normal Line Equation)} For the surface defined by the equation $ 2x-\sin(xyz)=2 $ find an equation for the tangent plane, and symmetric equations for the normal line, at the point $(1, \pi ,1)$.
%%% %%% End of Exam II in old format. %%% \SOL{47b.}{ The symmetric form of a line is $\frac{x-x_0}{a} = \frac{y-y_0}{b} = \frac{z-z_0}{c}$ where $\vec{v}=(a,b,c)$ is the line direction and $\vec{P}=(x_0,y_0,z_0)$ is the given point of application. The answers: $$ \frac{x-1}{2+\pi} = \frac{y-\pi}{1} = \frac{z-1}{\pi}, ~~~ (2+\pi)(x -1) + (1)(y-\pi) + \pi(z-1)=0. $$ } % end solution \prob{48.}{(Line Integral)} Evaluate $\displaystyle \int_C F({\bf x})\cdot d{\bf x}$ where ${\bf x}=(x_1,x_2)$, $F({\bf x})=\colvectorC{x_1^2-4\\ x_2^2-x_1}$ and $C$ is the upper half of the ellipse with center at the origin and semiaxes $a=4$, $b=2$. \SOL{48.}{ The real work in line integrals is to write the curve in parametric form. This task usually dominates the solution time. Each line integral is normally one or more ordinary calculus integrals. An ellipse with semiaxes $a$ and $b$ and center $(0,0)$ can be written as $x^2/a^2 + y^2/b^2 = 1$ which in parametric form is $x=a\cos(t)$, $y=b\sin(t)$. The upper half uses $0 \le t \le \pi$ while the lower half uses $\pi \le t \le 2\pi$. Use notation ${\bf x}=(x,y)$. The ellipse can be parameterized by the equations $x=4\cos t$, $y=2\sin t$, $0\le t \le \pi$. Then $$F({\bf x})d{\bf x} = -64\cos^2 t \sin t + 16\sin t + 8\sin^2 t \cos t - 8\cos^2 t.$$ Integrating over $[0,\pi]$ gives the answer $\dd -\frac{32}{3} - 4\pi$. This is the correct answer for parameterization counterclockwise. For the wrong parameterization (clockwise), the answer is the negative of this one. } % end solution \end{exercises} \NOTES{ {\bf Maple notes on \LASTPROB}. Line integrals are computed in {\tt maple} by reduction to an ordinary integral with limits. Here is an example of a clever syntax for handling the problem of line integrals: {\bf Example}: Evaluate $\oint_C Mdx+Ndy$ where $M=xy$, $N=2x^2y$ and $C$ is the curve $x=t^2$, $y=2t$ from $t=0$ to $t=2$. 
\footnotesize\rm
\begin{verbatim}
M:=x*y: N:=2*y*(x^2):
x:=t^2: y:=2*t: a:=0: b:=2:
dx:=diff(x,t): dy:=diff(y,t):
f:=M*dx+N*dy:
int(f,t=a..b); # answer: 1664/15
\end{verbatim}
\normalsize\rm } % END NOTES
\begin{exercises}
\probx{48a.}{(Line Integrals)} Calculate the work done by the force $F=\colvectorC{2xy\\ 2y}$ when a particle moves counterclockwise around the triangle with vertices $(0,0)$, $(0,1)$ and $(1,0)$.
\SOL{48a.}{ A triangle can be parameterized by {\bf the method of segments}. This is because the triangle is the union of three line segments. Each line segment has vertices $P$ and $Q$ with a direction taken from $P$ to $Q$. The parameterization method is to write any $(x,y)$ on the segment as $(x,y)=P + t(Q-P)$, $0 \le t \le 1$. This method applies easily to polygonal figures and has the advantage that ordinary integrals arising in the problem always have limits of integration $t=0$ to $t=1$. The curve $C=C_1+C_2+C_3$, where $C_1$, $C_2$, $C_3$ are the line segments making up the edges of the triangle. The first one is $x=t$, $y=0$, $0\le t \le 1$. Then $$F({\bf x})d{\bf x} = \colvector{2t(0) \\ 2(0)}\cdot \colvector{dt \\ 0} = 0, ~~~ \int_0^1 F({\bf x})d{\bf x} =0.$$ Similarly, the other two integrals are $2/3$ and $-1$. So the total answer for the work is $-1/3$. A general method emerges for parameterizing a line segment with endpoints $P$ (the tail) and $Q$ (the head). The formula is $X=P + t(Q-P)$, $0 \le t \le 1$. It was applied here to each of the three line segments making up the edge of the triangle. This method is called the {\bf head minus tail rule}. } % end solution
\prob{49.}{(Line Integral)} Consider the line integral $$ \int_C (3x^2e^y\,dx + x^3e^y\,dy), $$ where $C$ is the broken line curve from $(0,0)$ to $(1,0)$ to $(1,1)$. Evaluate it directly from the definition.
\SOL{49.}{ We are to compute directly the line integral $ \int_C (3x^2e^y\,dx + x^3e^y\,dy) $.
If $C$ is the broken line curve from $(0,0)$ to $(1,0)$ to $(1,1)$, then $C$ consists of two line segments $C_1$ and $C_2$, which can be parameterized by the {\bf head minus tail rule}. On the first one, $\colvector{x \\ y}=\colvector{0 \\ 0} + t\colvector{1 \\ 0}$, so $x=t$, $y=0$, $0 \le t \le 1$. Then $$F({\bf x})d{\bf x} = \colvector{3t^2 \\ t^3}\cdot \colvector{dt \\ 0} = 3t^2dt, ~~~ \int_0^1 F({\bf x})d{\bf x} =1.$$ The other one is $e-1$, so the total is $e$, the value of the line integral. } % end solution \prob{50.}{(Conservative Field)} Given $$F=\colvectorC{\tan^2 x - y/(x^2+y^2)\\ x/(x^2+y^2) - e^y},$$ show that $\nabla f = F$ has solution $$f=\tan x - x +\arctan(y/x) - e^y$$ and determine all possible solutions $f$. \SOL{50.}{ Given $F=(\tan^2 x - y/(x^2+y^2), x/(x^2+y^2) - e^y)$, show that $\nabla f = F$ has solution $f=\tan x - x +\arctan(y/x) - e^y$ and determine all possible solutions $f$. The first part of the solution verifies the formula $\nabla f = F$. It is done by expanding the left side: $$\nabla f = \colvector{f_x \\ f_y} =\colvector{\sec^2 x - 1 - (y/x^2)/(1+(y/x)^2) \\ (1/x)/(1+(y/x)^2) - e^y }.$$ After clearing fractions, it is clear that $\nabla f = F$. The second part of the solution show that {\em any} solution of $\nabla g = F$ must satisfy $g=f+c$ for some constant $c$. The idea is to write the vector equation $\nabla g = F$ as two equations $$g_x = \tan^2 x - y/(x^2+y^2), ~~~ g_y=x/(x^2+y^2) - e^y,$$ and then integrate them to get the relation $g=f+c$. Integrating the first on $x$ gives $g(x,y)=g(0,y)+\tan x -x - \arctan(x/y)$. Substitution into the second equation $g_y=x/(x^2+y^2) - e^y$ gives $$x/(x^2+y^2) - e^y = g_y(0,y) + (x/y^2)/(1+(x/y)^2).$$ Then $-e^y=g_y(0,y)$, so $g(0,y)=-e^y+C$ for some constant $C$. The identity $\arctan(u)+\arctan(1/u)=\pi/2$ can be proved by arguing that $\arctan(u) + \arctan(1/u)$ has derivative zero. The constant value can be evaluated as $\pi/2$ by letting $u$ approach infinity. 
In conclusion, $$g(x,y)=g(0,y)+\tan x -x - \arctan(x/y) = -e^y+\tan x -x +\arctan(y/x)+\frac{\pi}{2}+C,$$ in short, $g=f+c$ for some constant $c$. } % end solution \prob{51.}{(Conservative Fields and Line Integrals)} Let $F=\colvectorC{x+y^2\\ 2xy}$. Calculate $\int_C F({\bf x})\cdot d{\bf x}$ where $C$ is a smooth curve from $(1,4)$ to $(3,7)$. \SOL{51.}{ Let $F=\colvectorC{x+y^2\\ 2xy}$. The equation $\nabla f = F$ can be solved for $f$: $$f=\frac{1}{2} x^2 + xy^2 + c.$$ This is done by writing down the two equations $$f_x = x+y^2, ~~~ f_y = 2xy,$$ and then solving these equations for $f$, as done above in another example. According to the theory of conservative fields, $$\int_C F({\bf x})\cdot d{\bf x}=f(3,7)-f(1,4)=135.$$ The equation $\nabla \int_C F\cdot dx = F$ is an obvious generalization of the calculus theorem $$(d/dt)\int_0^t f(x)dx = f(t).$$ Unfortunately, the 2D result is sometimes false. It is true if the field $F$ is conservative. } % end solution \probx{51a.}{(Conservative Field and Line Integrals)} Consider the line integral $ \int_C (3x^2e^y\,dx + x^3e^y\,dy) $ where $C$ is the broken line curve from $(0,0)$ to $(1,0)$ to $(1,1)$. Show that $\vec{F}=\colvectorC{3x^2e^y\\ x^3e^y}$ is $\nabla f $ for some function $f$ and then apply the theory of conservative fields to evaluate the integral. \SOL{51a.}{ The equation $$\vec{F}=\colvectorC{3x^2e^y\\ x^3e^y}=\nabla f $$ can be solved for the function $f=x^3e^y+c$ using methods outlined in earlier examples. Then the theory of conservative fields applies to evaluate the integral: $$ \int_C (3x^2e^y\,dx + x^3e^y\,dy)=f(1,1)-f(0,0)=e.$$ It should be observed that the broken line path has nothing to do with the answer, only the endpoints are significant. 
} % end solution \prob{52.}{(Double Integrals)} Evaluate $$\displaystyle \int_0^1\int_{\sqrt{y}}^1 \sqrt{3-x^3} dx dy.$$ \SOL{52.}{ The inner integral in $$\displaystyle \int_0^1\int_{\sqrt{y}}^1 \sqrt{3-x^3} dx dy$$ is an {\em elliptic integral} and has no indefinite integral. The iterated integral theorem, sometimes called Fubini's Theorem, is used to switch the order of integration and make it possible to evaluate the double integral directly. View the given integral as one side of the iterated integral theorem formula. Use the other side of the formula to do the actual evaluation. Due to the iterated integral theorem, $$\displaystyle \int_0^1\int_{\sqrt{y}}^1 \sqrt{3-x^3} dx dy$$ can be rewritten in the form $$\displaystyle \int_a^b\int_{c(y)}^{d(y)} \sqrt{3-x^3} dy dx$$ and evaluated exactly by elementary methods. The exact answer is $\dd \frac{2}{9}\left(\sqrt{27} - \sqrt{8}\right)=0.526$. } % end solution \end{exercises} \NOTES{ {\bf Maple notes on double integrals}. Double integrals are computed in {\tt maple} by application of the iterated integral theorem. The results are often numeric, as shown by the following example: {\bf Example}: Evaluate $\displaystyle \int_0^1\int_{\sqrt{y}}^1 \sqrt{3-x^3} dx dy$ numerically. \footnotesize\rm \begin{verbatim} int(sqrt(3-x^3),x=sqrt(y)..1): int(",y=0..1): evalf(",3); # answer: 0.524 [yes, maple makes a small error.] # [Double integrals can be tricky!] \end{verbatim} \normalsize\rm Using {\tt maple} to compute double integrals has been criticized by students. They say that the trouble with double integrals is to determine which iterated form to use. The evaluation of the integrals is usually the easy part. The difficulty for everyone is in deciding upon the limits of integration in the two possible ways to present the iterated integral. 
} % END NOTES \begin{exercises} \probx{52a.}{(Volume by the Double Integral)} Find the volume of the solid bounded by the plane $x+y+2z=4$ and the three coordinate planes $x=0$, $y=0$, $z=0$. \SOL{52a.}{ Apply the theory of volumes to conclude that the answer is the integral of $z$ over some domain $D$ in the $xy$--plane. The problem is to find a description of $D$. Apply the iterated integral theorem to $D$ in order to evaluate the double integral as follows: $$\mbox{Vol}=\int\int_D z dxdy = \int_0^4 \int_0^{4-y}\left(2-\frac{x}{2} -\frac{y}{2}\right)dxdy=\frac{16}{3}.$$ } % end solution \prob{53.}{(Green's Theorem)} Find $$\displaystyle\int_{\partial\Omega} e^x\cos y dx + e^x \sin y dy$$ over the region $\Omega$ enclosed by the triangle with vertices $(0,0)$, $(0,4)$, $(4,0)$. \SOL{53.}{ By Green's theorem, $$\int_{\partial\Omega} e^x\cos y dx + e^x \sin y dy= \int\int_\Omega 2e^x\sin y dx dy.$$ The region $\Omega$ enclosed by the triangle with vertices $(0,0)$, $(0,4)$, $(4,0)$ can be described as $0\le y \le 4$, $0 \le x \le 4-y$. By the iterated integral theorem, $$\int\int_\Omega 2e^x\sin y dx dy=2\int_0^4\left( \int_0^{4-y} e^xdx\right)\sin y dy = \cos 4 -\sin 4 + e^4 -2.$$ This last integral is unpleasant to do, even with integral tables, because it reduces to $\int e^{-y}\sin y dy$. {\tt Maple} is somewhat easier than integral table methods. } % end solution \prob{54.}{(Green's Theorem)} Use Green's theorem to calculate the area of the quadrilateral with vertices $(0,0)$, $(1,0)$, $(4,4)$, $(4,7)$. \SOL{54.}{ The {\em consecutive vertices} are $(0,0)$, $(1,0)$, $(4,4)$, $(4,7)$. A figure is helpful in seeing that $\Omega$ obtained by removing a smaller right triangle from a larger right triangle. So the area is $8$, by geometry. Doing it with Green's theorem uses the area formula $\dd \int_{\partial \Omega} (-y)dx$. The boundary curve consists of four line segments, $C_1$ to $C_4$. 
The values of the line integral over these four curves are $0$, $-6$, $0$, $14$. The sum of the answers is again $8$. } % end solution \prob{55.}{(Green's Theorem)} Use Green's Theorem to evaluate the line integral $$ \oint_{\partial \Omega} (\cos x\sin y - xy)\,dx + \sin x\cos y\,dy $$ where $\Omega$ is the half-disk defined by $$ \Omega=\{ (x,y) : x^2+y^2 \le 1\quad \mbox{and}\quad x \ge 0 \}. $$ \SOL{55.}{ By Green's Theorem, $$ \oint_{\partial \Omega} (\cos x\sin y - xy)\,dx + \sin x\cos y\,dy = \int\int_\Omega x dxdy. $$ The half-disk $ \Omega$ can be written as $-1\le y \le 1$, $0 \le x \le \sqrt{1-y^2}$. Then the iterated integral theorem for double integrals gives the final answer $$\int\int_\Omega x dxdy=\int_{-1}^1 \int_0^{\sqrt{1-y^2}} xdx dy = \int_{-1}^1 \frac{1-y^2}{2}dy= \frac{2}{3}.$$ } % end solution \prob{56.}{(Green's Theorem)} Let $F=\colvectorC{y^3\\ x^3}$. Compute $\mbox{\rm curl}\, F$ and $\oint_{\partial \Omega} F\cdot T ds$. The set $\Omega$ is the disk $x^2+y^2 \le 1$, $T$ is the unit tangent on $\partial \Omega$ and $ds$ is the arc length increment. \SOL{56.}{ The answers: $\mbox{\rm curl}(F)=\colvectorC{0\\ 0\\ 3x^2-3y^2}$, $x=\cos t$, $y=\sin t$, $0\le t\le 2\pi$, $T=\colvectorC{-\sin t\\ \cos t}$, $ds=\sqrt{x'^2+y'^2}dt=1dt$, $$F\cdot Tds=F\cdot d{\bf x}=(-\sin^4 t + \cos^4 t)dt = \cos 2t dt,$$ $$\mbox{\rm curl}(F)\cdot \vec{k} dxdy = (3x^2-3y^2)dxdy.$$ The evaluation of the line integral gives $$\int_0^{2\pi} F\cdot Tds = \int_0^{2\pi} \cos 2t dt = 0.$$ The double integral of $\mbox{\rm curl}(F)\cdot \vec{k} dxdy $ over the disk can be done using polar coordinates, giving the new integral $$\int_0^{2\pi} \int_0^1 (3r^2\cos^2 \theta - 3r^2\sin^2\theta)rdrd\theta=\int_0^{2\pi} \cos 2\theta d\theta = 0.$$ } % end solution \probx{56a.}{(Green's Theorem)} Let $F=\colvectorC{y^3\\ x^3}$. Compute $\mbox{\rm div}\, F$ and $\oint_{\partial \Omega} F\cdot N ds$. 
The set $\Omega$ is the disk $x^2+y^2 \le 1$, $N$ is the unit outer normal on $\partial \Omega$ and $ds$ is the arc length increment. \SOL{56a.}{ The answers: $\mbox{\rm div}\, F=0$, $N=\colvector{x\\ y}$, $x=\cos t$, $y=\sin t$, $0\le t \le 2\pi$, $ds=dt$, $$F\cdot Nds=(xy^3+yx^3)ds=(\cos t \sin^3 t + \sin t \cos^3 t)dt,$$ $$\int_{\partial\Omega} F \cdot Nds = \int_0^{2\pi} \sin t \cos t dt=0,$$ $$\int\int_{\Omega} \mbox{\rm div}\, F \, dxdy = \int\int_{\Omega} 0dxdy = 0.$$ } % end solution \probx{56b.}{(Green's Theorem)} Prove that the flow is irrotational: $$F=(xe^{\tan x}, y\tan( e^y))$$ \SOL{56b.}{ The flow $F$ is irrotational if its curl is zero. $$\mbox{\rm curl}(F)= \left| \begin{array}{ccc} \vec{i} & \vec{j} & \vec{k} \\ \partial_x & \partial_y & \partial_z \\ xe^{\tan x} & y\tan( e^y) & 0 \end{array} \right| = 0.$$ } % end solution %% LEFTOFF \end{exercises} \ifsolutions\else\end{multicols}\fi \NOTES{ \begin{multicols}{2} \small\sf \noindent {\large\bf Instructions for Math 252, Applied Linear Algebra}. The list of \arabic{probl}{} problems is required for 100\% credit. A solution must be attempted on each of the problems. {\bf Working Together}. A group may consult with other groups to check on answers or methods. A group is to submit one solution, not multiple solutions. Group size is 1 to 3 persons. {\bf Handwritten Solutions}. All problems are to be solved at a desk with pencil and paper. Use maple as an {\em assist} when appropriate to check steps or to do details that are subject to human error (Less than 5\% of the course has anything to do with maple). In particular, maple is not to blame for wrong answers, you are to blame. Ditto for hand calculators and other assists to computation or graphing. {\bf Partial Credit}. Correct answers are worth less than 2\%, because the correct answers are published. 
Grading is based upon what is written on the paper, the steps shown and the logic exhibited, and references to the textbook and other notable references, such as a physics or engineering monograph. You will be taught by example what is expected in a solution. Generally, the textbook is too brief, and it is a bad example of how to write up a solution. References by theorem name and page are important. Sentences that explain mathematical steps are essential to written solutions. {\bf Format}. Submit solutions in one-sided format, stapled in the upper left corner. A problem starts on a new page, with the statment first. If you have no solution, then submit a blank page with the problem statement. Please observe the usual rules for solution: state the problem, solve it, and give all details, especially page references to the textbook. {\bf Bradley Gygi's format}. His personal method assigns 60\% of the page width on the left for the solution and the 40\% on the right is filled in later, with English sentences, page references, names of methods, theorems, formulas in the book, formulas from trig, college algebra and calculus. He leaves adequate space between lines and never, never writes irrelevant equations on the left 60\%. Figures are small but detailed. Actually, Bradley wrote the problem in BLACK, the solution in BLUE and the comments on the right in RED. The color coding can be a help during review. {\bf Extra Credit}. The supplementary problems may be used to increase your total score, up to 100\%. For example, supplementary problem 1a may be appended to take-home exam I in order to increase credit on problem 1. However, credit earned on one problem does not apply to other problems, that is, you can't use supplementary problem 1a to get credit on problem 2. If a problem has no supplemental parts, then no option exists. Supplementary problems may not be interleaved, they are in an appendix by themselves, at the end of the exam. 
They will be considered only in case the requested problem has less than full credit. For example, suppose problem 1 has credit 95\%, but problem 1a appears in the appendix and has credit 97\%. Then the credit on problem 1 will be 97\%, the better score of the two. Groups have time to submit more supplementary problems and therefore there is some advantage to working in a group. {\bf Graphs}. Maple graphs are expected to be presented in compressed format, about $2\times 2$ inches, preferably pasted-up onto your engineering paper, or traced. Use a screen print to get the size right (see the notes below) or else trace the graph from the screen or draw a likeness by hand. {\bf Maple and Calculator Results}. Maple session output is for you, not for the report. Keep the report handwritten and write a note that you checked it with maple or derived the result with maple commands. Pencil in the essence of the maple idea, not a log script. The same remarks apply to graphing calculators. \begin{center} \SCHEDULE \end{center} \end{multicols} \eject \begin{multicols}{2} \normalsize\rm \begin{center} {\Large\bf Notes on\\ World Wide Web \\ Documents} \end{center} The two most popular WEB readers on local unix systems are {\bf lynx} and {\bf xmosaic}. The first, {\tt lynx}, is {\tt VT-100} terminal based and it works well from a modem or xterm window. It has limited graphics. The NCSA product called {\tt xmosaic} is made for X-windows. There are versions for the PC and MAC platforms. To set up your system for either WEB reader, issue this unix command in a terminal window or else place in your {\tt .cshrc} resource file. \begin{center} {\tt setenv WWW\_HOME http://www.math.utah.edu} \end{center} In the WEB reader, you will gain access to faculty and math department pages which contain useful information. Especially useful in this course are the ones found under {\tt faculty} and {\tt Grant Gustafson}. 
These pages contain the source for this document, especially the {\tt maple} examples, ready to paste into maple, and also the typeset copy of this document, in case you leave yours at home. Updates to the printed copy are made periodically, to correct errors or to clarify the text. There are no manual pages for {\tt lynx} or {\tt xmosaic}. It is understood by users of the WEB readers that help and documentation are in the reader itself. In the case of {\tt lynx}, it is mapped to the question mark key. For {\tt xmosaic}, there is a radio button for help in the upper right corner of the window. Additional help is available on modems and internet connections, from home or work, under the MATH page. There are pages on unix, editors, maple and matlab. It is expected that you will scan these pages for useful information related to printing graphs, making reports and programming in languages like C, Fortran, Pascal, Maple, Matlab. \begin{center} {\Large\bf Notes on Printing\\ Multiple Graphs} \end{center} The problems suggest making several graphs. While easy on a video monitor it generates too much paper in a report. Explained here for our local unix system is {\em a method for printing several graphs on a single sheet of paper}. The idea is to {\bf display} the graphs on the screen, {\bf resize} and {\bf move} each until the screen is correct, then {\bf print the screen}. Up to 8 graphs can be displayed by this mechanism. All will appear on a single sheet of paper. The {\tt resize} and {\tt move} commands are available on mouse menu 1. Press \MOUSELEFT{} to activate the menu. With these two commands it is possible to shrink a graphic and reposition it anywhere on the screen. Once the screen is loaded with the graphics it can be printed to the {\tt b129lab1} laser printer by the following unix shell command. The command is to be issued in a console or terminal window. 
{ \footnotesize\rm \verb|alias paw "xdpr -root -device ps -portrait -Pb129lab1| \verb|paw| } One issue of the {\tt alias} command above installs the command {\tt paw} and thereafter, until logout, you may use {\tt paw} to print the screen. The command name {\tt paw} stands for {\large\bf P}rint {\large\bf A}ll {\large\bf W}indows (sometimes called print screen). Unix wizards can install the command {\tt paw} permanently by copying the {\tt alias} line above to the unix resource file {\tt .cshrc} in their root directory. The file {\tt .cshrc} is read at login and therefore the command {\tt paw} will always be available. Another possibility is to copy the following lines into the file {\tt \$HOME/.twmrc} (the first line should already be in the source!). The commands will appear on a mouse button in X-windows. To effect the change, restart {\tt twm} or log out and log in. \footnotesize\rm \verb|"Print Window" !"xdpr -device ps &"| \verb|"Print Screen" !"xdpr -root -device ps -portrait &"| \normalsize\rm Newer accounts probably already have these features installed. If you have an old account, then the file ".twmrc" can be updated by the following steps in a local terminal window: \begin{center} \verb|mv $HOME/.twmrc $HOME/.twmrc.old| \\ \verb|cp -p /usr/skel/.twmrc $HOME/.twmrc| \end{center} \end{multicols} } \end{document}