//$$ example.cpp                       Example of use of matrix package

#define WANT_STREAM                    // include.h will get stream fns
#define WANT_MATH                      // include.h will get math fns
                                       // newmatap.h will get include.h

#include "newmatap.h"                  // need matrix applications

#include "newmatio.h"                  // need matrix output routines

#ifdef use_namespace
using namespace NEWMAT;                // access NEWMAT namespace
#endif


// demonstration of matrix package on linear regression problem

void test1(Real* y, Real* x1, Real* x2, int nobs, int npred)
{
   cout << "\n\nTest 1 - traditional, bad\n";

   // traditional sum of squares and products method of calculation
   // but not adjusting means; maybe subject to round-off error

   // make matrix of predictor values with 1s into col 1 of matrix
   int npred1 = npred+1;               // number of cols including col of ones.
   Matrix X(nobs,npred1);
   X.Column(1) = 1.0;

   // load x1 and x2 into X
   // [use << rather than = when loading arrays]
   X.Column(2) << x1; X.Column(3) << x2;

   // vector of Y values
   ColumnVector Y(nobs); Y << y;

   // form sum of squares and product matrix
   // [use << rather than = for copying Matrix into SymmetricMatrix]
   SymmetricMatrix SSQ; SSQ << X.t() * X;

   // calculate estimate
   // [bracket last two terms to force this multiplication first]
   // [ .i() means inverse, but inverse is not explicitly calculated]
   ColumnVector A = SSQ.i() * (X.t() * Y);
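   // [A solves the normal equations (X.t()*X) * A = X.t()*Y]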

   // Get variances of estimates from diagonal elements of inverse of SSQ
   // get inverse of SSQ - we need it for finding D
   DiagonalMatrix D; D << SSQ.i();
   ColumnVector V = D.AsColumn();

   // Calculate fitted values and residuals
   ColumnVector Fitted = X * A;
   ColumnVector Residual = Y - Fitted;
   Real ResVar = Residual.SumSquare() / (nobs-npred1);

   // Get diagonals of Hat matrix (an expensive way of doing this)
   DiagonalMatrix Hat; Hat << X * (X.t() * X).i() * X.t();
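   // [the diagonal elements are the leverages: Hat(i) measures how strongly
   //  observation i determines its own fitted value]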

   // print out answers
   cout << "\nEstimates and their standard errors\n\n";

   // make vector of standard errors
   ColumnVector SE(npred1);
   for (int i=1; i<=npred1; i++) SE(i) = sqrt(V(i)*ResVar);
   // use concatenation function to form matrix and use matrix print
   // to get two columns
   cout << setw(11) << setprecision(5) << (A | SE) << endl;

   cout << "\nObservations, fitted value, residual value, hat value\n";

   // use concatenation again; select only columns 2 to 3 of X
   cout << setw(9) << setprecision(3) <<
      (X.Columns(2,3) | Y | Fitted | Residual | Hat.AsColumn());
   cout << "\n\n";
}

void test2(Real* y, Real* x1, Real* x2, int nobs, int npred)
{
   cout << "\n\nTest 2 - traditional, OK\n";

   // traditional sum of squares and products method of calculation
   // with subtraction of means - less subject to round-off error
   // than test1

   // make matrix of predictor values
   Matrix X(nobs,npred);

   // load x1 and x2 into X
   // [use << rather than = when loading arrays]
   X.Column(1) << x1; X.Column(2) << x2;

   // vector of Y values
   ColumnVector Y(nobs); Y << y;

   // make vector of 1s
   ColumnVector Ones(nobs); Ones = 1.0;

   // calculate means (averages) of x1 and x2 [ .t() takes transpose]
   RowVector M = Ones.t() * X / nobs;

   // and subtract means from x1 and x2
   Matrix XC(nobs,npred);
   XC = X - Ones * M;

   // do the same to Y [use Sum to get sum of elements]
   ColumnVector YC(nobs);
   Real m = Sum(Y) / nobs; YC = Y - Ones * m;

   // form sum of squares and product matrix
   // [use << rather than = for copying Matrix into SymmetricMatrix]
   SymmetricMatrix SSQ; SSQ << XC.t() * XC;

   // calculate estimate
   // [bracket last two terms to force this multiplication first]
   // [ .i() means inverse, but inverse is not explicitly calculated]
   ColumnVector A = SSQ.i() * (XC.t() * YC);

   // calculate estimate of constant term
   // [AsScalar converts 1x1 matrix to Real]
   Real a = m - (M * A).AsScalar();
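   // [the fitted plane passes through the point of means, so
   //  a = mean(y) - M * A recovers the constant term]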

   // Get variances of estimates from diagonal elements of inverse of SSQ
   // [ we are taking inverse of SSQ - we need it for finding D ]
   Matrix ISSQ = SSQ.i(); DiagonalMatrix D; D << ISSQ;
   ColumnVector V = D.AsColumn();
   Real v = 1.0/nobs + (M * ISSQ * M.t()).AsScalar();
                                       // for calc variance of const
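   // [v * ResVar estimates the variance of the constant term:
   //  var(a) = sigma^2 * (1/nobs + M * SSQ.i() * M.t())]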

   // Calculate fitted values and residuals
   int npred1 = npred+1;
   ColumnVector Fitted = X * A + a;
   ColumnVector Residual = Y - Fitted;
   Real ResVar = Residual.SumSquare() / (nobs-npred1);

   // Get diagonals of Hat matrix (an expensive way of doing this)
   Matrix X1(nobs,npred1); X1.Column(1)<<Ones; X1.Columns(2,npred1)<<X;
   DiagonalMatrix Hat; Hat << X1 * (X1.t() * X1).i() * X1.t();

   // print out answers
   cout << "\nEstimates and their standard errors\n\n";
   cout.setf(ios::fixed, ios::floatfield);
   cout << setw(11) << setprecision(5) << a << " ";
   cout << setw(11) << setprecision(5) << sqrt(v*ResVar) << endl;
   // make vector of standard errors
   ColumnVector SE(npred);
   for (int i=1; i<=npred; i++) SE(i) = sqrt(V(i)*ResVar);
   // use concatenation function to form matrix and use matrix print
   // to get two columns
   cout << setw(11) << setprecision(5) << (A | SE) << endl;
   cout << "\nObservations, fitted value, residual value, hat value\n";
   cout << setw(9) << setprecision(3) <<
      (X | Y | Fitted | Residual | Hat.AsColumn());
   cout << "\n\n";
}

void test3(Real* y, Real* x1, Real* x2, int nobs, int npred)
{
   cout << "\n\nTest 3 - Cholesky\n";

   // traditional sum of squares and products method of calculation
   // with subtraction of means - using Cholesky decomposition

   Matrix X(nobs,npred);
   X.Column(1) << x1; X.Column(2) << x2;
   ColumnVector Y(nobs); Y << y;
   ColumnVector Ones(nobs); Ones = 1.0;
   RowVector M = Ones.t() * X / nobs;
   Matrix XC(nobs,npred);
   XC = X - Ones * M;
   ColumnVector YC(nobs);
   Real m = Sum(Y) / nobs; YC = Y - Ones * m;
   SymmetricMatrix SSQ; SSQ << XC.t() * XC;

   // Cholesky decomposition of SSQ
   LowerTriangularMatrix L = Cholesky(SSQ);

   // calculate estimate
   ColumnVector A = L.t().i() * (L.i() * (XC.t() * YC));
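   // [SSQ = L * L.t(), so this is SSQ.i() * (XC.t()*YC) evaluated via the
   //  triangular factor: apply L.i(), then L.t().i()]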

   // calculate estimate of constant term
   Real a = m - (M * A).AsScalar();

   // Get variances of estimates from diagonal elements of inverse of SSQ
   DiagonalMatrix D; D << L.t().i() * L.i();
   ColumnVector V = D.AsColumn();
   Real v = 1.0/nobs + (L.i() * M.t()).SumSquare();
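   // [(L.i() * M.t()).SumSquare() equals M * SSQ.i() * M.t(), the same
   //  quantity used in test2 for the variance of the constant term]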

   // Calculate fitted values and residuals
   int npred1 = npred+1;
   ColumnVector Fitted = X * A + a;
   ColumnVector Residual = Y - Fitted;
   Real ResVar = Residual.SumSquare() / (nobs-npred1);

   // Get diagonals of Hat matrix (an expensive way of doing this)
   Matrix X1(nobs,npred1); X1.Column(1)<<Ones; X1.Columns(2,npred1)<<X;
   DiagonalMatrix Hat; Hat << X1 * (X1.t() * X1).i() * X1.t();

   // print out answers
   cout << "\nEstimates and their standard errors\n\n";
   cout.setf(ios::fixed, ios::floatfield);
   cout << setw(11) << setprecision(5) << a << " ";
   cout << setw(11) << setprecision(5) << sqrt(v*ResVar) << endl;
   ColumnVector SE(npred);
   for (int i=1; i<=npred; i++) SE(i) = sqrt(V(i)*ResVar);
   cout << setw(11) << setprecision(5) << (A | SE) << endl;
   cout << "\nObservations, fitted value, residual value, hat value\n";
   cout << setw(9) << setprecision(3) <<
      (X | Y | Fitted | Residual | Hat.AsColumn());
   cout << "\n\n";
}

void test4(Real* y, Real* x1, Real* x2, int nobs, int npred)
{
   cout << "\n\nTest 4 - QR triangularisation\n";

   // QR triangularisation method

   // load data - 1s into col 1 of matrix
   int npred1 = npred+1;
   Matrix X(nobs,npred1); ColumnVector Y(nobs);
   X.Column(1) = 1.0; X.Column(2) << x1; X.Column(3) << x2; Y << y;

   // do Householder triangularisation
   // no need to deal with constant term separately
   Matrix X1 = X; // Want copy of matrix
   ColumnVector Y1 = Y;
   UpperTriangularMatrix U; ColumnVector M;
   QRZ(X1, U); QRZ(X1, Y1, M); // Y1 now contains resids
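   // [the first call factorises X into an orthogonal-column matrix (left in
   //  X1) times U; the second forms M = X1.t()*Y and replaces Y1 by the
   //  residuals Y - X1*M]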
   ColumnVector A = U.i() * M;
   ColumnVector Fitted = X * A;
   Real ResVar = Y1.SumSquare() / (nobs-npred1);

   // get variances of estimates
   U = U.i(); DiagonalMatrix D; D << U * U.t();
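   // [X.t()*X equals (original U).t() * (original U), so D now holds the
   //  diagonal of (X.t()*X).i(), needed for the standard errors]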

   // Get diagonals of Hat matrix
   DiagonalMatrix Hat; Hat << X1 * X1.t();

   // print out answers
   cout << "\nEstimates and their standard errors\n\n";
   ColumnVector SE(npred1);
   for (int i=1; i<=npred1; i++) SE(i) = sqrt(D(i)*ResVar);
   cout << setw(11) << setprecision(5) << (A | SE) << endl;
   cout << "\nObservations, fitted value, residual value, hat value\n";
   cout << setw(9) << setprecision(3) <<
      (X.Columns(2,3) | Y | Fitted | Y1 | Hat.AsColumn());
   cout << "\n\n";
}

void test5(Real* y, Real* x1, Real* x2, int nobs, int npred)
{
   cout << "\n\nTest 5 - singular value\n";

   // Singular value decomposition method

   // load data - 1s into col 1 of matrix
   int npred1 = npred+1;
   Matrix X(nobs,npred1); ColumnVector Y(nobs);
   X.Column(1) = 1.0; X.Column(2) << x1; X.Column(3) << x2; Y << y;

   // do SVD
   Matrix U, V; DiagonalMatrix D;
   SVD(X,D,U,V); // X = U * D * V.t()
   ColumnVector Fitted = U.t() * Y;
   ColumnVector A = V * ( D.i() * Fitted );
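   // [Fitted currently holds U.t()*Y, so A = V * D.i() * U.t() * Y,
   //  the least squares solution for X = U * D * V.t()]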
   Fitted = U * Fitted;
   ColumnVector Residual = Y - Fitted;
   Real ResVar = Residual.SumSquare() / (nobs-npred1);

   // get variances of estimates
   D << V * (D * D).i() * V.t();
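   // [X.t()*X = V * D * D * V.t(), so D now holds the diagonal of (X.t()*X).i()]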

   // Get diagonals of Hat matrix
   DiagonalMatrix Hat; Hat << U * U.t();
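   // [the columns of U are an orthonormal basis for the column space of X,
   //  so U * U.t() is the projection (hat) matrix]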

   // print out answers
   cout << "\nEstimates and their standard errors\n\n";
   ColumnVector SE(npred1);
   for (int i=1; i<=npred1; i++) SE(i) = sqrt(D(i)*ResVar);
   cout << setw(11) << setprecision(5) << (A | SE) << endl;
   cout << "\nObservations, fitted value, residual value, hat value\n";
   cout << setw(9) << setprecision(3) <<
      (X.Columns(2,3) | Y | Fitted | Residual | Hat.AsColumn());
   cout << "\n\n";
}

int main()
{
   cout << "\nDemonstration of Matrix package\n";
   cout << "\nPrint a real number (may help lost memory test): " << 3.14159265 << "\n";

   // Test for any memory not deallocated after running this program
   Real* s1; { ColumnVector A(8000); s1 = A.Store(); }

   {
      // the data

#ifndef ATandT
      Real y[]  = { 8.3, 5.5, 8.0, 8.5, 5.7, 4.4, 6.3, 7.9, 9.1 };
      Real x1[] = { 2.4, 1.8, 2.4, 3.0, 2.0, 1.2, 2.0, 2.7, 3.6 };
      Real x2[] = { 1.7, 0.9, 1.6, 1.9, 0.5, 0.6, 1.1, 1.0, 0.5 };
#else    // for compilers that do not understand aggregates
      Real y[9], x1[9], x2[9];
      y[0]=8.3; y[1]=5.5; y[2]=8.0; y[3]=8.5; y[4]=5.7;
      y[5]=4.4; y[6]=6.3; y[7]=7.9; y[8]=9.1;
      x1[0]=2.4; x1[1]=1.8; x1[2]=2.4; x1[3]=3.0; x1[4]=2.0;
      x1[5]=1.2; x1[6]=2.0; x1[7]=2.7; x1[8]=3.6;
      x2[0]=1.7; x2[1]=0.9; x2[2]=1.6; x2[3]=1.9; x2[4]=0.5;
      x2[5]=0.6; x2[6]=1.1; x2[7]=1.0; x2[8]=0.5;
#endif

      int nobs = 9;                    // number of observations
      int npred = 2;                   // number of predictor values

      // we want to find the values of a0,a1,a2 to give the best
      // fit of y[i] with a0 + a1*x1[i] + a2*x2[i]
      // Also print diagonal elements of hat matrix, X*(X.t()*X).i()*X.t()

      // this example demonstrates five methods of calculation

      Try
      {
         test1(y, x1, x2, nobs, npred);
         test2(y, x1, x2, nobs, npred);
         test3(y, x1, x2, nobs, npred);
         test4(y, x1, x2, nobs, npred);
         test5(y, x1, x2, nobs, npred);
      }
      CatchAll { cout << Exception::what(); }
   }

#ifdef DO_FREE_CHECK
   FreeCheck::Status();
#endif
   Real* s2; { ColumnVector A(8000); s2 = A.Store(); }
   cout << "\n\nThe following test does not work with all compilers - see documentation\n";
   cout << "Checking for lost memory: "
      << (unsigned long)s1 << " " << (unsigned long)s2 << " ";
   if (s1 != s2) cout << " - error\n"; else cout << " - ok\n";

   return 0;

}