GCC Code Coverage Report
Directory: ./ Exec Total Coverage
File: src/ell/mat.c Lines: 4 89 4.5 %
Date: 2017-05-26 Branches: 0 22 0.0 %

Line Branch Exec Source
1
/*
2
  Teem: Tools to process and visualize scientific data and images             .
3
  Copyright (C) 2013, 2012, 2011, 2010, 2009  University of Chicago
4
  Copyright (C) 2008, 2007, 2006, 2005  Gordon Kindlmann
5
  Copyright (C) 2004, 2003, 2002, 2001, 2000, 1999, 1998  University of Utah
6
7
  This library is free software; you can redistribute it and/or
8
  modify it under the terms of the GNU Lesser General Public License
9
  (LGPL) as published by the Free Software Foundation; either
10
  version 2.1 of the License, or (at your option) any later version.
11
  The terms of redistributing and/or modifying this software also
12
  include exceptions to the LGPL that facilitate static linking.
13
14
  This library is distributed in the hope that it will be useful,
15
  but WITHOUT ANY WARRANTY; without even the implied warranty of
16
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17
  Lesser General Public License for more details.
18
19
  You should have received a copy of the GNU Lesser General Public License
20
  along with this library; if not, write to Free Software Foundation, Inc.,
21
  51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
22
*/
23
24
25
#include "ell.h"
26
27
/*
** ell_3m_mul_f: 3x3 float matrix product, m3 = _m1 * _m2.
**
** Both operands are copied into temporaries before multiplying, so the
** output may alias either input (m3 == _m1 or m3 == _m2 is safe).
*/
void
ell_3m_mul_f(float m3[9], const float _m1[9], const float _m2[9]) {
  float left[9], right[9];

  ELL_3M_COPY(left, _m1);
  ELL_3M_COPY(right, _m2);
  ELL_3M_MUL(m3, left, right);
}
35
36
/*
** ell_3m_mul_d: 3x3 double matrix product, m3 = _m1 * _m2.
**
** Inputs are staged through temporaries so that m3 may alias
** either _m1 or _m2 without corrupting the result.
*/
void
ell_3m_mul_d(double m3[9], const double _m1[9], const double _m2[9]) {
  double left[9], right[9];

  ELL_3M_COPY(left, _m1);
  ELL_3M_COPY(right, _m2);
  ELL_3M_MUL(m3, left, right);
}
44
45
/*
** ell_3m_pre_mul_f: in-place update _m = _m * x (floats).
** A temporary holds the product so _m can then be overwritten.
*/
void
ell_3m_pre_mul_f(float _m[9], const float x[9]) {
  float product[9];

  ELL_3M_MUL(product, _m, x);
  ELL_3M_COPY(_m, product);
}
51
52
/*
** ell_3m_pre_mul_d: in-place update _m = _m * x (doubles).
** A temporary holds the product so _m can then be overwritten.
*/
void
ell_3m_pre_mul_d(double _m[9], const double x[9]) {
  double product[9];

  ELL_3M_MUL(product, _m, x);
  ELL_3M_COPY(_m, product);
}
58
59
/*
** ell_3m_post_mul_f: in-place update _m = x * _m (floats).
** Note x appears on the LEFT of the product, unlike ell_3m_pre_mul_f.
*/
void
ell_3m_post_mul_f(float _m[9], const float x[9]) {
  float product[9];

  ELL_3M_MUL(product, x, _m);
  ELL_3M_COPY(_m, product);
}
65
66
/*
** ell_3m_post_mul_d: in-place update _m = x * _m (doubles).
** Note x appears on the LEFT of the product, unlike ell_3m_pre_mul_d.
*/
void
ell_3m_post_mul_d(double _m[9], const double x[9]) {
  double product[9];

  ELL_3M_MUL(product, x, _m);
  ELL_3M_COPY(_m, product);
}
72
73
/*
** ell_3m_det_f: determinant of a 3x3 float matrix.
*/
float
ell_3m_det_f(float m[9]) {
  float det = ELL_3M_DET(m);
  return det;
}
77
78
/*
** ell_3m_det_d: determinant of a 3x3 double matrix.
*/
double
ell_3m_det_d(double m[9]) {
  double det = ELL_3M_DET(m);
  return det;
}
82
83
/*
** ell_3m_inv_f: inverse of 3x3 float matrix m, written into i.
** The third argument of ELL_3M_INV receives the determinant of m;
** it is discarded here.  No singularity check is performed.
*/
void
ell_3m_inv_f(float i[9], const float m[9]) {
  float determinant;

  ELL_3M_INV(i, m, determinant);
}
89
90
/*
** ell_3m_inv_d: inverse of 3x3 double matrix m, written into i.
** The third argument of ELL_3M_INV receives the determinant of m;
** it is discarded here.  No singularity check is performed.
*/
void
ell_3m_inv_d(double i[9], const double m[9]) {
  double determinant;

  ELL_3M_INV(i, m, determinant);
}
96
97
/* -------------------------------------------------------------------- */
98
/* -------------------------------------------------------------------- */
99
/* -------------------------------------------------------------------- */
100
101
/*
** ell_4m_mul_f: 4x4 float matrix product, m3 = _m1 * _m2.
**
** Operands are copied to temporaries first so the output may alias
** either input (m3 == _m1 or m3 == _m2 is safe).
*/
void
ell_4m_mul_f(float m3[16], const float _m1[16], const float _m2[16]) {
  float left[16], right[16];

  ELL_4M_COPY(left, _m1);
  ELL_4M_COPY(right, _m2);
  ELL_4M_MUL(m3, left, right);
}
109
110
/*
** ell_4m_mul_d: 4x4 double matrix product, m3 = _m1 * _m2.
**
** Operands are copied to temporaries first so the output may alias
** either input (m3 == _m1 or m3 == _m2 is safe).
*/
void
ell_4m_mul_d(double m3[16], const double _m1[16], const double _m2[16]) {
  double left[16], right[16];

  ELL_4M_COPY(left, _m1);
  ELL_4M_COPY(right, _m2);
  ELL_4M_MUL(m3, left, right);
}
118
119
/*
** ell_4m_pre_mul_f: in-place update _m = _m * x (floats).
** A temporary holds the product so _m can then be overwritten.
*/
void
ell_4m_pre_mul_f(float _m[16], const float x[16]) {
  float product[16];

  ELL_4M_MUL(product, _m, x);
  ELL_4M_COPY(_m, product);
}
125
126
/*
** ell_4m_pre_mul_d: in-place update _m = _m * x (doubles).
** A temporary holds the product so _m can then be overwritten.
**
** Renamed from the typo "ell_4m_pre_mMul_d" for consistency with
** ell_4m_pre_mul_f and ell_3m_pre_mul_d; this is the name the rest
** of the ell_*_pre_mul_* family (and the Teem public API) uses.
*/
void
ell_4m_pre_mul_d(double _m[16], const double x[16]) {
  double product[16];

  ELL_4M_MUL(product, _m, x);
  ELL_4M_COPY(_m, product);
}
132
133
/*
** ell_4m_post_mul_f: in-place update _m = x * _m (floats).
** Note x appears on the LEFT of the product, unlike ell_4m_pre_mul_f.
*/
void
ell_4m_post_mul_f(float _m[16], const float x[16]) {
  float product[16];

  ELL_4M_MUL(product, x, _m);
  ELL_4M_COPY(_m, product);
}
139
140
/*
** ell_4m_post_mul_d: in-place update _m = x * _m (doubles).
** Note x appears on the LEFT of the product.
*/
void
ell_4m_post_mul_d(double _m[16], const double x[16]) {
  double product[16];

  ELL_4M_MUL(product, x, _m);
  ELL_4M_COPY(_m, product);
}
146
147
/*
** ell_4m_det_f: determinant of a 4x4 float matrix.
*/
float
ell_4m_det_f(float m[16]) {
  float det = ELL_4M_DET(m);
  return det;
}
151
152
/*
** ell_4m_det_d: determinant of a 4x4 double matrix.
*/
double
ell_4m_det_d(double m[16]) {
  double det = ELL_4M_DET(m);
  return det;
}
156
157
/*
** _4INV: expands to statements computing the inverse of a 4x4 matrix
** via the classical adjugate (cofactor) formula.  The expanding scope
** must declare, with exactly these names:
**   m   -- input 4x4 matrix, 16 elements, row-major
**   i   -- output inverse, 16 elements, row-major
**   det -- scalar of the element type; clobbered with ELL_4M_DET(m)
** Each i[k] is a signed 3x3 minor determinant (checkerboard sign
** pattern, transposed indices) divided by det.  There is NO check
** for det == 0; a singular m divides by zero.
*/
#define _4INV \
  det = ELL_4M_DET(m); \
  i[ 0] =  _ELL_3M_DET((m)[ 5],(m)[ 6],(m)[ 7], \
                       (m)[ 9],(m)[10],(m)[11], \
                       (m)[13],(m)[14],(m)[15])/det; \
  i[ 1] = -_ELL_3M_DET((m)[ 1],(m)[ 2],(m)[ 3], \
                       (m)[ 9],(m)[10],(m)[11], \
                       (m)[13],(m)[14],(m)[15])/det; \
  i[ 2] =  _ELL_3M_DET((m)[ 1],(m)[ 2],(m)[ 3], \
                       (m)[ 5],(m)[ 6],(m)[ 7], \
                       (m)[13],(m)[14],(m)[15])/det; \
  i[ 3] = -_ELL_3M_DET((m)[ 1],(m)[ 2],(m)[ 3], \
                       (m)[ 5],(m)[ 6],(m)[ 7], \
                       (m)[ 9],(m)[10],(m)[11])/det; \
  i[ 4] = -_ELL_3M_DET((m)[ 4],(m)[ 6],(m)[ 7], \
                       (m)[ 8],(m)[10],(m)[11], \
                       (m)[12],(m)[14],(m)[15])/det; \
  i[ 5] =  _ELL_3M_DET((m)[ 0],(m)[ 2],(m)[ 3], \
                       (m)[ 8],(m)[10],(m)[11], \
                       (m)[12],(m)[14],(m)[15])/det; \
  i[ 6] = -_ELL_3M_DET((m)[ 0],(m)[ 2],(m)[ 3], \
                       (m)[ 4],(m)[ 6],(m)[ 7], \
                       (m)[12],(m)[14],(m)[15])/det; \
  i[ 7] =  _ELL_3M_DET((m)[ 0],(m)[ 2],(m)[ 3], \
                       (m)[ 4],(m)[ 6],(m)[ 7], \
                       (m)[ 8],(m)[10],(m)[11])/det; \
  i[ 8] =  _ELL_3M_DET((m)[ 4],(m)[ 5],(m)[ 7], \
                       (m)[ 8],(m)[ 9],(m)[11], \
                       (m)[12],(m)[13],(m)[15])/det; \
  i[ 9] = -_ELL_3M_DET((m)[ 0],(m)[ 1],(m)[ 3], \
                       (m)[ 8],(m)[ 9],(m)[11], \
                       (m)[12],(m)[13],(m)[15])/det; \
  i[10] =  _ELL_3M_DET((m)[ 0],(m)[ 1],(m)[ 3], \
                       (m)[ 4],(m)[ 5],(m)[ 7], \
                       (m)[12],(m)[13],(m)[15])/det; \
  i[11] = -_ELL_3M_DET((m)[ 0],(m)[ 1],(m)[ 3], \
                       (m)[ 4],(m)[ 5],(m)[ 7], \
                       (m)[ 8],(m)[ 9],(m)[11])/det; \
  i[12] = -_ELL_3M_DET((m)[ 4],(m)[ 5],(m)[ 6], \
                       (m)[ 8],(m)[ 9],(m)[10], \
                       (m)[12],(m)[13],(m)[14])/det; \
  i[13] =  _ELL_3M_DET((m)[ 0],(m)[ 1],(m)[ 2], \
                       (m)[ 8],(m)[ 9],(m)[10], \
                       (m)[12],(m)[13],(m)[14])/det; \
  i[14] = -_ELL_3M_DET((m)[ 0],(m)[ 1],(m)[ 2], \
                       (m)[ 4],(m)[ 5],(m)[ 6], \
                       (m)[12],(m)[13],(m)[14])/det; \
  i[15] =  _ELL_3M_DET((m)[ 0],(m)[ 1],(m)[ 2], \
                       (m)[ 4],(m)[ 5],(m)[ 6], \
                       (m)[ 8],(m)[ 9],(m)[10])/det
207
208
/*
** ell_4m_inv_f: inverse of 4x4 float matrix m, written into i.
** The _4INV macro refers to "i", "m", and "det" by name, so these
** identifiers must not be renamed.  det is clobbered and discarded;
** there is no singularity check.
*/
void
ell_4m_inv_f(float i[16], const float m[16]) {
  float det;

  _4INV;
}
214
215
/*
** ell_4m_inv_d: inverse of 4x4 double matrix m, written into i.
** The _4INV macro refers to "i", "m", and "det" by name, so these
** identifiers must not be renamed.  det is clobbered and discarded;
** there is no singularity check.
*/
void
ell_4m_inv_d(double i[16], const double m[16]) {
  double det;

  _4INV;
}
221
222
/*
** ell_6m_mul_d: 6x6 double matrix product, AB = A * B.
** Storage is row-major: element (r,c) lives at index c + 6*r.
** Silently a no-op if any pointer is NULL.  AB is written as the
** product is accumulated, so AB must not alias A or B.
*/
void
ell_6m_mul_d(double AB[36], const double A[36], const double B[36]) {
  unsigned int row, col, k;
  double sum;

  if (!( AB && A && B )) {
    return;
  }
  for (row=0; row<6; row++) {
    for (col=0; col<6; col++) {
      /* dot product of row "row" of A with column "col" of B */
      sum = 0;
      for (k=0; k<6; k++) {
        sum += A[k + 6*row]*B[col + 6*k];
      }
      AB[col + 6*row] = sum;
    }
  }
  return;
}
241
242
/*
** ell_3m_rotate_between_d: fill rot (3x3, row-major) with a rotation
** matrix taking vector "from" to vector "to".
**
** Implementation of Moller & Hughes' "fromtorot"; thanks to:
** http://jgt.akpeters.com/papers/MollerHughes99/code.html
**
** NOTE(review): the Moller-Hughes construction assumes "from" and
** "to" are unit-length -- not verified here; confirm at call sites.
** Silently a no-op if any pointer is NULL.
*/
void
ell_3m_rotate_between_d(double rot[9], double from[3], double to[3]) {
  double vv[3];          /* from x to: the rotation axis (unnormalized) */
  double e, h, f;        /* e = cos(angle); f = |e|; h = 1/(1+e) */

  if (!( rot && from && to)) {
    return;
  }
  ELL_3V_CROSS(vv, from, to);
  e = ELL_3V_DOT(from, to);
  f = AIR_ABS(e);
  if (f > 0.9999999) {   /* "from" and "to"-vector almost parallel */
    /* the cross product vv is near zero, so the general formula below
       is numerically unusable; instead build the rotation as the
       composition of two reflections (Householder transforms) through
       planes defined via an axis nearly orthogonal to "from" */
    double tu[3], tv[3]; /* temporary storage vectors */
    double xx[3];        /* vector most nearly orthogonal to "from" */
    double c1, c2, c3;   /* coefficients for later use */
    int i, j;

    xx[0] = AIR_ABS(from[0]);
    xx[1] = AIR_ABS(from[1]);
    xx[2] = AIR_ABS(from[2]);

    /* set xx to the coordinate axis along from's smallest-magnitude
       component; the exact tie-breaking of these comparisons matters
       only in choosing which (equally good) axis is used */
    if (xx[0] < xx[1]) {
      if (xx[0] < xx[2]) {
        xx[0] = 1.0; xx[1] = xx[2] = 0.0;
      } else {
        xx[2] = 1.0; xx[0] = xx[1] = 0.0;
      }
    } else {
      if (xx[1] < xx[2]) {
        xx[1] = 1.0; xx[0] = xx[2] = 0.0;
      } else {
        xx[2] = 1.0; xx[0] = xx[1] = 0.0;
      }
    }

    /* reflection plane normals (unnormalized) */
    tu[0] = xx[0] - from[0]; tu[1] = xx[1] - from[1]; tu[2] = xx[2] - from[2];
    tv[0] = xx[0] - to[0];   tv[1] = xx[1] - to[1];   tv[2] = xx[2] - to[2];

    c1 = 2.0 / ELL_3V_DOT(tu, tu);
    c2 = 2.0 / ELL_3V_DOT(tv, tv);
    c3 = c1 * c2  * ELL_3V_DOT(tu, tv);

    /* rot = (I - c2 tv tv^T)(I - c1 tu tu^T), expanded */
    for (i = 0; i < 3; i++) {
      for (j = 0; j < 3; j++) {
        rot[3*i + j] =  - c1 * tu[i] * tu[j]
                     - c2 * tv[i] * tv[j]
                     + c3 * tv[i] * tu[j];
      }
      rot[3*i + i] += 1.0;    /* add the identity term */
    }
  } else { /* the most common case, unless "from"="to", or "from"=-"to" */
    /* Rodrigues-style formula using the unnormalized axis vv;
       hv* cache repeated products of h with vv components */
    double hvx, hvz, hvxy, hvxz, hvyz;
    h = 1.0/(1.0 + e);      /* optimization by Gottfried Chen */
    hvx = h * vv[0];
    hvz = h * vv[2];
    hvxy = hvx * vv[1];
    hvxz = hvx * vv[2];
    hvyz = hvz * vv[1];
    rot[3*0 + 0] = e + hvx * vv[0];
    rot[3*0 + 1] = hvxy - vv[2];
    rot[3*0 + 2] = hvxz + vv[1];

    rot[3*1 + 0] = hvxy + vv[2];
    rot[3*1 + 1] = e + h * vv[1] * vv[1];
    rot[3*1 + 2] = hvyz - vv[0];

    rot[3*2 + 0] = hvxz - vv[1];
    rot[3*2 + 1] = hvyz + vv[0];
    rot[3*2 + 2] = e + hvz * vv[2];
  }
  return;
}
318