/*
  Teem: Tools to process and visualize scientific data and images.
  Copyright (C) 2013, 2012, 2011, 2010, 2009 University of Chicago
  Copyright (C) 2008, 2007, 2006, 2005 Gordon Kindlmann
  Copyright (C) 2004, 2003, 2002, 2001, 2000, 1999, 1998 University of Utah

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public License
  (LGPL) as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.
  The terms of redistributing and/or modifying this software also
  include exceptions to the LGPL that facilitate static linking.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with this library; if not, write to Free Software Foundation, Inc.,
  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

#include "ten.h"
#include "privateTen.h"

const char *
tenDWMRIModalityKey = "modality";

const char *
tenDWMRIModalityVal = "DWMRI";

const char *
tenDWMRINAVal = "n/a";

const char *
tenDWMRIBValueKey = "DWMRI_b-value";

const char *
tenDWMRIGradKeyFmt = "DWMRI_gradient_%04u";

const char *
tenDWMRIBmatKeyFmt = "DWMRI_B-matrix_%04u";

const char *
tenDWMRINexKeyFmt = "DWMRI_NEX_%04u";

const char *
tenDWMRISkipKeyFmt = "DWMRI_skip_%04u";

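/*
** For reference, a sketch of how these keys typically appear among the
** key/value pairs of a DWI NRRD header (the values shown are illustrative
** only, not taken from any particular dataset):
**
**   modality:=DWMRI
**   DWMRI_b-value:=1000
**   DWMRI_gradient_0000:=0 0 0
**   DWMRI_gradient_0001:=0.99859 -0.037167 -0.037828
**   DWMRI_NEX_0001:=2
**   DWMRI_skip_0003:=true
*/
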
/*
******** tenDWMRIKeyValueParse
**
** Parses through key-value pairs in the NRRD header to determine the
** list of diffusion-sensitizing gradient directions, or B-matrices
** (depending on what was found), according to the NAMIC conventions.
** This requires, among other things, that ndwi have exactly one
** axis with kind nrrdKindList (or nrrdKindVector), which is taken to
** be the DWI axis.
**
** Either *ngradP or *nbmatP is set to a newly-allocated nrrd
** containing this information, and the other one is set to NULL.
** The (scalar) b-value is stored in *bP. The image values that are
** to be skipped are stored in the *skipP array (allocated here),
** and the length of that array is stored in *skipNumP. Unlike the skip
** array used internally with tenEstimate, this is just a simple 1-D
** array; it is not a list of pairs of (index,skipBool).
*/
int
tenDWMRIKeyValueParse(Nrrd **ngradP, Nrrd **nbmatP, double *bP,
                      unsigned int **skipP, unsigned int *skipNumP,
                      const Nrrd *ndwi) {
  static const char me[]="tenDWMRIKeyValueParse";
  char tmpKey[AIR_STRLEN_MED],
    key[AIR_STRLEN_MED], *val;
  const char *keyFmt;
  int dwiAxis;
  unsigned int axi, dwiIdx, dwiNum, valNum, valIdx, parsedNum,
    nexNum, nexIdx, skipIdx, *skipLut;
  Nrrd *ninfo;
  double *info, normMax, norm;
  airArray *mop, *skipArr;

  if (!( ngradP && nbmatP && skipP && skipNumP && bP && ndwi )) {
    biffAddf(TEN, "%s: got NULL pointer", me);
    return 1;
  }

  /* check modality */
  val = nrrdKeyValueGet(ndwi, tenDWMRIModalityKey);
  if (!val) {
    biffAddf(TEN, "%s: didn't have \"%s\" key", me, tenDWMRIModalityKey);
    return 1;
  }
  if (strncmp(tenDWMRIModalityVal, val + strspn(val, AIR_WHITESPACE),
              strlen(tenDWMRIModalityVal))) {
    biffAddf(TEN, "%s: \"%s\" value was \"%s\", not \"%s\"", me,
             tenDWMRIModalityKey, val, tenDWMRIModalityVal);
    return 1;
  }
  val = (char *)airFree(val);

  /* learn b-value */
  val = nrrdKeyValueGet(ndwi, tenDWMRIBValueKey);
  if (!val) {
    biffAddf(TEN, "%s: didn't have \"%s\" key", me, tenDWMRIBValueKey);
    return 1;
  }
  if (1 != sscanf(val, "%lg", bP)) {
    biffAddf(TEN, "%s: couldn't parse float from value \"%s\" "
             "for key \"%s\"", me,
             val, tenDWMRIBValueKey);
    return 1;
  }
  val = (char *)airFree(val);

  /* find single DWI axis, set dwiNum to its size */
  dwiAxis = -1;
  for (axi=0; axi<ndwi->dim; axi++) {
    /* the use of nrrdKindVector here is out of deference to how ITK's
       itkNrrdImageIO.cxx uses nrrdKindVector for VECTOR pixels */
    if (nrrdKindList == ndwi->axis[axi].kind
        || nrrdKindVector == ndwi->axis[axi].kind) {
      if (-1 != dwiAxis) {
        biffAddf(TEN, "%s: already saw %s or %s kind on axis %d", me,
                 airEnumStr(nrrdKind, nrrdKindList),
                 airEnumStr(nrrdKind, nrrdKindVector), dwiAxis);
        return 1;
      }
      dwiAxis = axi;
    }
  }
  if (-1 == dwiAxis) {
    biffAddf(TEN, "%s: did not see \"%s\" kind on any axis", me,
             airEnumStr(nrrdKind, nrrdKindList));
    return 1;
  }
  dwiNum = ndwi->axis[dwiAxis].size;

  /* figure out if we're parsing gradients or b-matrices */
  sprintf(tmpKey, tenDWMRIGradKeyFmt, 0);
  val = nrrdKeyValueGet(ndwi, tmpKey);
  if (val) {
    valNum = 3;
  } else {
    valNum = 6;
    sprintf(key, tenDWMRIBmatKeyFmt, 0);
    val = nrrdKeyValueGet(ndwi, key);
    if (!val) {
      biffAddf(TEN, "%s: saw neither \"%s\" nor \"%s\" key", me,
               tmpKey, key);
      return 1;
    }
  }
  val = (char *)airFree(val);

  /* set up parsing and allocate one of output nrrds */
  if (3 == valNum) {
    keyFmt = tenDWMRIGradKeyFmt;
    ninfo = *ngradP = nrrdNew();
    *nbmatP = NULL;
  } else {
    keyFmt = tenDWMRIBmatKeyFmt;
    *ngradP = NULL;
    ninfo = *nbmatP = nrrdNew();
  }
  if (nrrdMaybeAlloc_va(ninfo, nrrdTypeDouble, 2,
                        AIR_CAST(size_t, valNum),
                        AIR_CAST(size_t, dwiNum))) {
    biffMovef(TEN, NRRD, "%s: couldn't allocate output", me);
    return 1;
  }
  info = (double *)(ninfo->data);

  /* set up skip list recording */
  mop = airMopNew();
  skipArr = airArrayNew((void**)skipP, skipNumP, sizeof(unsigned int), 16);
  airMopAdd(mop, skipArr, (airMopper)airArrayNix, airMopAlways);
  skipLut = AIR_CALLOC(dwiNum, unsigned int);
  airMopAdd(mop, skipLut, airFree, airMopAlways);
  if (!skipLut) {
    biffAddf(TEN, "%s: couldn't allocate skip LUT", me);
    airMopError(mop); return 1;
  }

  /* parse values in ninfo */
  for (dwiIdx=0; dwiIdx<dwiNum; dwiIdx++) {
    sprintf(key, keyFmt, dwiIdx);
    val = nrrdKeyValueGet(ndwi, key);
    if (!val) {
      biffAddf(TEN, "%s: didn't see \"%s\" key", me, key);
      airMopError(mop); return 1;
    }
    airToLower(val);
    if (!strncmp(tenDWMRINAVal, val + strspn(val, AIR_WHITESPACE),
                 strlen(tenDWMRINAVal))) {
      /* have no sensible gradient or B-matrix info here, and must skip */
      for (valIdx=0; valIdx<valNum; valIdx++) {
        info[valIdx] = AIR_NAN;
      }
      skipIdx = airArrayLenIncr(skipArr, 1);
      (*skipP)[skipIdx] = dwiIdx;
      skipLut[dwiIdx] = AIR_TRUE;
      /* can't have NEX on a skipped gradient or B-matrix */
      val = (char *)airFree(val);
      sprintf(key, tenDWMRINexKeyFmt, dwiIdx);
      val = nrrdKeyValueGet(ndwi, key);
      if (val) {
        biffAddf(TEN, "%s: can't have NEX of skipped DWI %u", me, skipIdx);
        airMopError(mop); return 1;
      }
      nexNum = 1; /* for "info +=" below */
    } else {
      /* we probably do have sensible gradient or B-matrix info */
      parsedNum = airParseStrD(info, val, AIR_WHITESPACE, valNum);
      if (valNum != parsedNum) {
        biffAddf(TEN, "%s: couldn't parse %d floats in value \"%s\" "
                 "for key \"%s\" (only got %d)",
                 me, valNum, val, key, parsedNum);
        airMopError(mop); return 1;
      }
      val = (char *)airFree(val);
      sprintf(key, tenDWMRINexKeyFmt, dwiIdx);
      val = nrrdKeyValueGet(ndwi, key);
      if (!val) {
        /* there is no NEX indicated */
        nexNum = 1;
      } else {
        if (1 != sscanf(val, "%u", &nexNum)) {
          biffAddf(TEN, "%s: couldn't parse integer in value \"%s\" "
                   "for key \"%s\"", me, val, key);
          airMopError(mop); return 1;
        }
        val = (char *)airFree(val);
        if (!( nexNum >= 1 )) {
          biffAddf(TEN, "%s: NEX (%d) for DWI %d not >= 1",
                   me, nexNum, dwiIdx);
          airMopError(mop); return 1;
        }
        if (!( dwiIdx + nexNum - 1 < dwiNum )) {
          biffAddf(TEN, "%s: NEX %d for DWI %d implies %d DWI > real # DWI %d",
                   me, nexNum, dwiIdx, dwiIdx + nexNum, dwiNum);
          airMopError(mop); return 1;
        }
        for (nexIdx=1; nexIdx<nexNum; nexIdx++) {
          sprintf(key, keyFmt, dwiIdx+nexIdx);
          val = nrrdKeyValueGet(ndwi, key);
          if (val) {
            val = (char *)airFree(val);
            biffAddf(TEN, "%s: shouldn't have key \"%s\" with "
                     "NEX %d for DWI %d", me, key, nexNum, dwiIdx);
            airMopError(mop); return 1;
          }
          for (valIdx=0; valIdx<valNum; valIdx++) {
            info[valIdx + valNum*nexIdx] = info[valIdx];
          }
        }
        dwiIdx += nexNum-1;
      }
    }
    info += valNum*nexNum;
  }

  /* perhaps too paranoid: see if there are extra keys,
     which probably implies confusion/mismatch between number of
     gradients and number of values */
  sprintf(key, keyFmt, dwiIdx);
  val = nrrdKeyValueGet(ndwi, key);
  if (val) {
    biffAddf(TEN, "%s: saw \"%s\" key, more than required %u keys, "
             "likely mismatch between keys and actual gradients",
             me, key, dwiIdx);
    airMopError(mop); return 1;
  }

  /* second pass: see which ones are skipped, even though gradient/B-matrix
     information has been specified */
  for (dwiIdx=0; dwiIdx<dwiNum; dwiIdx++) {
    sprintf(key, tenDWMRISkipKeyFmt, dwiIdx);
    val = nrrdKeyValueGet(ndwi, key);
    if (val) {
      airToLower(val);
      if (!strncmp("true", val + strspn(val, AIR_WHITESPACE),
                   strlen("true"))) {
        skipIdx = airArrayLenIncr(skipArr, 1);
        (*skipP)[skipIdx] = dwiIdx;
        skipLut[dwiIdx] = AIR_TRUE;
      }
    }
  }

  /* normalize so that maximal norm is 1.0 */
  /* Thu Dec 20 03:25:20 CST 2012 this rescaling IS in fact what is
     causing the small discrepancy between ngrad before and after
     saving to KVPs. The problem is not related to how the gradient
     vector coefficients are recovered from the string-based
     representation; that is likely bit-for-bit correct. The problem
     is when everything is rescaled by 1.0/normMax: a "normalized"
     vector will not have *exactly* length 1.0. So what can be done
     to prevent pointlessly altering the lengths of vectors that were
     close enough to unit-length? Is there some more 754-savvy
     way of doing this normalization? */
  normMax = 0;
  info = (double *)(ninfo->data);
  for (dwiIdx=0; dwiIdx<dwiNum; dwiIdx++) {
    if (!skipLut[dwiIdx]) {
      if (3 == valNum) {
        norm = ELL_3V_LEN(info);
      } else {
        norm = sqrt(info[0]*info[0] + 2*info[1]*info[1] + 2*info[2]*info[2]
                    + info[3]*info[3] + 2*info[4]*info[4]
                    + info[5]*info[5]);
      }
      normMax = AIR_MAX(normMax, norm);
    }
    info += valNum;
  }
  info = (double *)(ninfo->data);
  for (dwiIdx=0; dwiIdx<dwiNum; dwiIdx++) {
    if (!skipLut[dwiIdx]) {
      if (3 == valNum) {
        ELL_3V_SCALE(info, 1.0/normMax, info);
      } else {
        ELL_6V_SCALE(info, 1.0/normMax, info);
      }
    }
    info += valNum;
  }

  airMopOkay(mop);
  return 0;
}
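
/*
** A minimal usage sketch (not part of the library; the variable names and
** abbreviated error handling are hypothetical), assuming ndwi was read
** from a file whose header carries the key/value pairs described above:
**
**   Nrrd *ngrad = NULL, *nbmat = NULL;
**   double bval;
**   unsigned int *skip = NULL, skipNum;
**   if (tenDWMRIKeyValueParse(&ngrad, &nbmat, &bval,
**                             &skip, &skipNum, ndwi)) {
**     char *err = biffGetDone(TEN);
**     fprintf(stderr, "parsing failed: %s", err);
**     airFree(err);
**   }
**
** On success exactly one of ngrad (3-by-N) or nbmat (6-by-N) is non-NULL;
** the caller owns it, along with the skip array.
*/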

/*
******** tenBMatrixCalc
**
** given a list of gradient directions (arbitrary type), constructs the
** B-matrix that records how each coefficient of the diffusion tensor
** is weighted in the diffusion weighted images. Matrix will be a
** 6-by-N 2D array of doubles.
**
** NOTE 1: The ordering of the elements in each row is (like the ordering
** of the tensor elements in all of ten):
**
**         Bxx Bxy Bxz Byy Byz Bzz
**
** NOTE 2: The off-diagonal elements are NOT pre-multiplied by two.
*/
int
tenBMatrixCalc(Nrrd *nbmat, const Nrrd *_ngrad) {
  static const char me[]="tenBMatrixCalc";
  Nrrd *ngrad;
  double *bmat, *G;
  int DD, dd;
  airArray *mop;

  if (!(nbmat && _ngrad && !tenGradientCheck(_ngrad, nrrdTypeDefault, 1))) {
    biffAddf(TEN, "%s: got NULL pointer or invalid arg", me);
    return 1;
  }
  mop = airMopNew();
  airMopAdd(mop, ngrad=nrrdNew(), (airMopper)nrrdNuke, airMopAlways);
  if (nrrdConvert(ngrad, _ngrad, nrrdTypeDouble)
      || nrrdMaybeAlloc_va(nbmat, nrrdTypeDouble, 2,
                           AIR_CAST(size_t, 6), ngrad->axis[1].size)) {
    biffMovef(TEN, NRRD, "%s: trouble", me);
    airMopError(mop); return 1;
  }

  DD = ngrad->axis[1].size;
  G = (double*)(ngrad->data);
  bmat = (double*)(nbmat->data);
  for (dd=0; dd<DD; dd++) {
    ELL_6V_SET(bmat,
               G[0]*G[0], G[0]*G[1], G[0]*G[2],
               G[1]*G[1], G[1]*G[2],
               G[2]*G[2]);
    G += 3;
    bmat += 6;
  }
  nbmat->axis[0].kind = nrrdKind3DSymMatrix;

  airMopOkay(mop);
  return 0;
}
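
/*
** As a small worked example of the above (purely illustrative): a single
** gradient g = (gx, gy, gz) contributes the row
**
**   gx*gx  gx*gy  gx*gz  gy*gy  gy*gz  gz*gz
**
** so g = (0, 0, 1) yields the row 0 0 0 0 0 1; per NOTE 2 above, the
** off-diagonal entries are left un-doubled here (the doubling happens
** in tenEMatrixCalc).
*/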

/*
******** tenEMatrixCalc
**
** computes the "estimation matrix" nemat from the given B-matrix _nbmat:
** the off-diagonal entries of the B-matrix are doubled, a 7th column of
** -1s is appended when knownB0 is false (for estimating B0), and the
** result is pseudo-inverted into nemat.
*/
int
tenEMatrixCalc(Nrrd *nemat, const Nrrd *_nbmat, int knownB0) {
  static const char me[]="tenEMatrixCalc";
  Nrrd *nbmat, *ntmp;
  airArray *mop;
  ptrdiff_t padmin[2], padmax[2];
  unsigned int ri;
  double *bmat;

  if (!(nemat && _nbmat)) {
    biffAddf(TEN, "%s: got NULL pointer", me);
    return 1;
  }
  if (tenBMatrixCheck(_nbmat, nrrdTypeDefault, 6)) {
    biffAddf(TEN, "%s: problem with B matrix", me);
    return 1;
  }
  mop = airMopNew();
  airMopAdd(mop, nbmat=nrrdNew(), (airMopper)nrrdNuke, airMopAlways);
  if (knownB0) {
    if (nrrdConvert(nbmat, _nbmat, nrrdTypeDouble)) {
      biffMovef(TEN, NRRD, "%s: couldn't convert given bmat to doubles", me);
      airMopError(mop); return 1;
    }
  } else {
    airMopAdd(mop, ntmp=nrrdNew(), (airMopper)nrrdNuke, airMopAlways);
    if (nrrdConvert(ntmp, _nbmat, nrrdTypeDouble)) {
      biffMovef(TEN, NRRD, "%s: couldn't convert given bmat to doubles", me);
      airMopError(mop); return 1;
    }
    ELL_2V_SET(padmin, 0, 0);
    ELL_2V_SET(padmax, 6, _nbmat->axis[1].size-1);
    if (nrrdPad_nva(nbmat, ntmp, padmin, padmax, nrrdBoundaryPad, -1)) {
      biffMovef(TEN, NRRD, "%s: couldn't pad given bmat", me);
      airMopError(mop); return 1;
    }
  }
  bmat = (double*)(nbmat->data);
  /* HERE is where the off-diagonal elements get multiplied by 2 */
  for (ri=0; ri<nbmat->axis[1].size; ri++) {
    bmat[1] *= 2;
    bmat[2] *= 2;
    bmat[4] *= 2;
    bmat += nbmat->axis[0].size;
  }
  if (ell_Nm_pseudo_inv(nemat, nbmat)) {
    biffMovef(TEN, ELL, "%s: trouble pseudo-inverting B-matrix", me);
    airMopError(mop); return 1;
  }
  airMopOkay(mop);
  return 0;
}

/*
******** tenEstimateLinearSingle_d
**
** estimate one single tensor
**
** !! requires being passed a pre-allocated double array "vbuf" which is
** !! used for intermediate calculations (details below)
**
** DD is always the length of the dwi[] array
**
** -------------- IF knownB0 -------------------------
** input:
** dwi[0] is the B0 image, dwi[1]..dwi[DD-1] are the (DD-1) DWI values,
** emat is the (DD-1)-by-6 estimation matrix, which is the pseudo-inverse
** of the B-matrix (after the off-diagonals have been multiplied by 2).
** vbuf[] is allocated for (at least) DD-1 doubles (DD is fine)
**
** output:
** ten[0]..ten[6] will be the confidence value followed by the tensor
** if B0P, then *B0P is set to the B0 value used in calcs: max(b0,1)
** -------------- IF !knownB0 -------------------------
** input:
** dwi[0]..dwi[DD-1] are the DD DWI values, emat is the DD-by-7 estimation
** matrix. The 7th column is for estimating the B0 image.
** vbuf[] is allocated for DD doubles
**
** output:
** ten[0]..ten[6] will be the confidence value followed by the tensor
** if B0P, then *B0P is set to estimated B0 value.
** ----------------------------------------------------
*/
void
tenEstimateLinearSingle_d(double *ten, double *B0P,              /* output */
                          const double *dwi, const double *emat, /* input .. */
                          double *vbuf, unsigned int DD, int knownB0,
                          double thresh, double soft, double b) {
  double logB0, tmp, mean;
  unsigned int ii, jj;
  /* static const char me[]="tenEstimateLinearSingle_d"; */

  if (knownB0) {
    if (B0P) {
      /* we save this as a courtesy */
      *B0P = AIR_MAX(dwi[0], 1);
    }
    logB0 = log(AIR_MAX(dwi[0], 1));
    mean = 0;
    for (ii=1; ii<DD; ii++) {
      tmp = AIR_MAX(dwi[ii], 1);
      mean += tmp;
      vbuf[ii-1] = (logB0 - log(tmp))/b;
      /* if (tenVerbose) {
        fprintf(stderr, "vbuf[%d] = %f\n", ii-1, vbuf[ii-1]);
      } */
    }
    mean /= DD-1;
    if (soft) {
      ten[0] = AIR_AFFINE(-1, airErf((mean - thresh)/(soft + 0.000001)), 1,
                          0, 1);
    } else {
      ten[0] = mean > thresh;
    }
    for (jj=0; jj<6; jj++) {
      tmp = 0;
      for (ii=0; ii<DD-1; ii++) {
        tmp += emat[ii + (DD-1)*jj]*vbuf[ii];
      }
      ten[jj+1] = tmp;
    }
  } else {
    /* !knownB0 */
    mean = 0;
    for (ii=0; ii<DD; ii++) {
      tmp = AIR_MAX(dwi[ii], 1);
      mean += tmp;
      vbuf[ii] = -log(tmp)/b;
    }
    mean /= DD;
    if (soft) {
      ten[0] = AIR_AFFINE(-1, airErf((mean - thresh)/(soft + 0.000001)), 1,
                          0, 1);
    } else {
      ten[0] = mean > thresh;
    }
    for (jj=0; jj<7; jj++) {
      tmp = 0;
      for (ii=0; ii<DD; ii++) {
        tmp += emat[ii + DD*jj]*vbuf[ii];
      }
      if (jj < 6) {
        ten[jj+1] = tmp;
      } else {
        /* we're on seventh row, for finding B0 */
        if (B0P) {
          *B0P = exp(b*tmp);
        }
      }
    }
  }
  return;
}
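
/*
** A sketch of the math implemented above, for the knownB0 case: each DWI
** value contributes
**
**   vbuf[ii-1] = (log(max(dwi[0],1)) - log(max(dwi[ii],1))) / b
**
** and each tensor coefficient is the dot product of vbuf with a row of
** the estimation matrix:
**
**   ten[jj+1] = sum_ii emat[ii + (DD-1)*jj] * vbuf[ii]
**
** i.e. the usual log-linear least-squares fit of the model
** dwi[ii] = B0*exp(-b*(B-matrix row ii).D); the confidence ten[0] is a
** (possibly soft) threshold on the mean DWI value.
*/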

void
tenEstimateLinearSingle_f(float *_ten, float *_B0P,              /* output */
                          const float *_dwi, const double *emat, /* input .. */
                          double *vbuf, unsigned int DD, int knownB0,
                          float thresh, float soft, float b) {
  static const char me[]="tenEstimateLinearSingle_f";
#define DWI_NUM_MAX 256
  double dwi[DWI_NUM_MAX], ten[7], B0;
  unsigned int dwiIdx;

  /* HEY: this is somewhat inelegant .. */
  if (DD > DWI_NUM_MAX) {
    fprintf(stderr, "%s: PANIC: sorry, DD=%u > compile-time DWI_NUM_MAX=%u\n",
            me, DD, DWI_NUM_MAX);
    exit(1);
  }
  for (dwiIdx=0; dwiIdx<DD; dwiIdx++) {
    dwi[dwiIdx] = _dwi[dwiIdx];
  }
  tenEstimateLinearSingle_d(ten, _B0P ? &B0 : NULL,
                            dwi, emat,
                            vbuf, DD, knownB0,
                            thresh, soft, b);
  TEN_T_COPY_TT(_ten, float, ten);
  if (_B0P) {
    *_B0P = AIR_CAST(float, B0);
  }
  return;
}

/*
******** tenEstimateLinear3D
**
** takes an array of DWIs (starting with the B=0 image), joins them up,
** and passes it all off to tenEstimateLinear4D
**
** Note: this will copy per-axis peripheral information from _ndwi[0]
*/
int
tenEstimateLinear3D(Nrrd *nten, Nrrd **nterrP, Nrrd **nB0P,
                    const Nrrd *const *_ndwi, unsigned int dwiLen,
                    const Nrrd *_nbmat, int knownB0,
                    double thresh, double soft, double b) {
  static const char me[]="tenEstimateLinear3D";
  Nrrd *ndwi;
  airArray *mop;
  int amap[4] = {-1, 0, 1, 2};

  if (!(_ndwi)) {
    biffAddf(TEN, "%s: got NULL pointer", me);
    return 1;
  }
  mop = airMopNew();
  ndwi = nrrdNew();
  airMopAdd(mop, ndwi, (airMopper)nrrdNuke, airMopAlways);
  if (nrrdJoin(ndwi, (const Nrrd*const*)_ndwi, dwiLen, 0, AIR_TRUE)) {
    biffMovef(TEN, NRRD, "%s: trouble joining inputs", me);
    airMopError(mop); return 1;
  }

  nrrdAxisInfoCopy(ndwi, _ndwi[0], amap, NRRD_AXIS_INFO_NONE);
  if (tenEstimateLinear4D(nten, nterrP, nB0P,
                          ndwi, _nbmat, knownB0, thresh, soft, b)) {
    biffAddf(TEN, "%s: trouble", me);
    airMopError(mop); return 1;
  }

  airMopOkay(mop);
  return 0;
}

/*
******** tenEstimateLinear4D
**
** given a stack of DWI volumes (ndwi) and the imaging B-matrix used
** for acquisition (_nbmat), computes and stores diffusion tensors in
** nten.
**
** The mean of the diffusion-weighted images is thresholded at "thresh" with
** softness parameter "soft".
**
** This takes the B-matrix (weighting matrix), such as formed by
** tenBMatrixCalc, or from a more complete account of the gradients present
** in an imaging sequence, and then does the pseudo-inverse to get the
** estimation matrix.
*/
int
tenEstimateLinear4D(Nrrd *nten, Nrrd **nterrP, Nrrd **nB0P,
                    const Nrrd *ndwi, const Nrrd *_nbmat, int knownB0,
                    double thresh, double soft, double b) {
  static const char me[]="tenEstimateLinear4D";
  Nrrd *nemat, *nbmat, *ncrop, *nhist;
  airArray *mop;
  size_t cmin[4], cmax[4], sx, sy, sz, II, d, DD;
  int E, amap[4];
  float *ten, *dwi1, *dwi2, *terr,
    _B0, *B0, (*lup)(const void *, size_t);
  double *emat, *bmat, *vbuf;
  NrrdRange *range;
  float te, d1, d2;
  char stmp[2][AIR_STRLEN_SMALL];

  if (!(nten && ndwi && _nbmat)) {
    /* nterrP and nB0P can be NULL */
    biffAddf(TEN, "%s: got NULL pointer", me);
    return 1;
  }
  if (!( 4 == ndwi->dim && 7 <= ndwi->axis[0].size )) {
    biffAddf(TEN, "%s: dwi should be 4-D array with axis 0 size >= 7", me);
    return 1;
  }
  if (tenBMatrixCheck(_nbmat, nrrdTypeDefault, 6)) {
    biffAddf(TEN, "%s: problem with B matrix", me);
    return 1;
  }
  if (knownB0) {
    if (!( ndwi->axis[0].size == 1 + _nbmat->axis[1].size )) {
      biffAddf(TEN, "%s: (knownB0 == true) # input images (%s) "
               "!= 1 + # B matrix rows (1+%s)", me,
               airSprintSize_t(stmp[0], ndwi->axis[0].size),
               airSprintSize_t(stmp[1], _nbmat->axis[1].size));
      return 1;
    }
  } else {
    if (!( ndwi->axis[0].size == _nbmat->axis[1].size )) {
      biffAddf(TEN, "%s: (knownB0 == false) # dwi (%s) "
               "!= # B matrix rows (%s)", me,
               airSprintSize_t(stmp[0], ndwi->axis[0].size),
               airSprintSize_t(stmp[1], _nbmat->axis[1].size));
      return 1;
    }
  }

  DD = ndwi->axis[0].size;
  sx = ndwi->axis[1].size;
  sy = ndwi->axis[2].size;
  sz = ndwi->axis[3].size;
  mop = airMopNew();
  airMopAdd(mop, nbmat=nrrdNew(), (airMopper)nrrdNuke, airMopAlways);
  if (nrrdConvert(nbmat, _nbmat, nrrdTypeDouble)) {
    biffMovef(TEN, NRRD, "%s: trouble converting to doubles", me);
    airMopError(mop); return 1;
  }
  airMopAdd(mop, nemat=nrrdNew(), (airMopper)nrrdNuke, airMopAlways);
  if (tenEMatrixCalc(nemat, nbmat, knownB0)) {
    biffAddf(TEN, "%s: trouble computing estimation matrix", me);
    airMopError(mop); return 1;
  }
  vbuf = AIR_CALLOC(knownB0 ? DD-1 : DD, double);
  dwi1 = AIR_CALLOC(DD, float);
  dwi2 = AIR_CALLOC(knownB0 ? DD : DD+1, float);
  airMopAdd(mop, vbuf, airFree, airMopAlways);
  airMopAdd(mop, dwi1, airFree, airMopAlways);
  airMopAdd(mop, dwi2, airFree, airMopAlways);
  if (!(vbuf && dwi1 && dwi2)) {
    biffAddf(TEN, "%s: couldn't allocate temp buffers", me);
    airMopError(mop); return 1;
  }
  if (!AIR_EXISTS(thresh)) {
    airMopAdd(mop, ncrop=nrrdNew(), (airMopper)nrrdNuke, airMopAlways);
    airMopAdd(mop, nhist=nrrdNew(), (airMopper)nrrdNuke, airMopAlways);
    ELL_4V_SET(cmin, knownB0 ? 1 : 0, 0, 0, 0);
    ELL_4V_SET(cmax, DD-1, sx-1, sy-1, sz-1);
    E = 0;
    if (!E) E |= nrrdCrop(ncrop, ndwi, cmin, cmax);
    if (!E) range = nrrdRangeNewSet(ncrop, nrrdBlind8BitRangeState);
    if (!E) airMopAdd(mop, range, (airMopper)nrrdRangeNix, airMopAlways);
    if (!E) E |= nrrdHisto(nhist, ncrop, range, NULL,
                           (int)AIR_MIN(1024, range->max - range->min + 1),
                           nrrdTypeFloat);
    if (E) {
      biffMovef(TEN, NRRD,
                "%s: trouble histograming to find DW threshold", me);
      airMopError(mop); return 1;
    }
    if (_tenFindValley(&thresh, nhist, 0.75, AIR_FALSE)) {
      biffAddf(TEN, "%s: problem finding DW histogram valley", me);
      airMopError(mop); return 1;
    }
    fprintf(stderr, "%s: using %g for DW confidence threshold\n", me, thresh);
  }
  if (nrrdMaybeAlloc_va(nten, nrrdTypeFloat, 4,
                        AIR_CAST(size_t, 7), sx, sy, sz)) {
    biffMovef(TEN, NRRD, "%s: couldn't allocate output", me);
    airMopError(mop); return 1;
  }
  if (nterrP) {
    if (!(*nterrP)) {
      *nterrP = nrrdNew();
    }
    if (nrrdMaybeAlloc_va(*nterrP, nrrdTypeFloat, 3,
                          sx, sy, sz)) {
      biffAddf(NRRD, "%s: couldn't allocate error output", me);
      airMopError(mop); return 1;
    }
    airMopAdd(mop, nterrP, (airMopper)airSetNull, airMopOnError);
    airMopAdd(mop, *nterrP, (airMopper)nrrdNuke, airMopOnError);
    terr = (float*)((*nterrP)->data);
  } else {
    terr = NULL;
  }
  if (nB0P) {
    if (!(*nB0P)) {
      *nB0P = nrrdNew();
    }
    if (nrrdMaybeAlloc_va(*nB0P, nrrdTypeFloat, 3,
                          sx, sy, sz)) {
      biffAddf(NRRD, "%s: couldn't allocate B0 output", me);
      airMopError(mop); return 1;
    }
    airMopAdd(mop, nB0P, (airMopper)airSetNull, airMopOnError);
    airMopAdd(mop, *nB0P, (airMopper)nrrdNuke, airMopOnError);
    B0 = (float*)((*nB0P)->data);
  } else {
    B0 = NULL;
  }
  bmat = (double*)(nbmat->data);
  emat = (double*)(nemat->data);
  ten = (float*)(nten->data);
  lup = nrrdFLookup[ndwi->type];
  for (II=0; II<sx*sy*sz; II++) {
    /* tenVerbose = (II == 42 + 190*(96 + 196*0)); */
    for (d=0; d<DD; d++) {
      dwi1[d] = lup(ndwi->data, d + DD*II);
      /* if (tenVerbose) {
        fprintf(stderr, "%s: input dwi1[%d] = %g\n", me, d, dwi1[d]);
      } */
    }
    tenEstimateLinearSingle_f(ten, &_B0, dwi1, emat,
                              vbuf, DD, knownB0,
                              AIR_CAST(float, thresh),
                              AIR_CAST(float, soft),
                              AIR_CAST(float, b));
    if (nB0P) {
      *B0 = _B0;
    }
    /* if (tenVerbose) {
      fprintf(stderr, "%s: output ten = (%g) %g,%g,%g %g,%g %g\n", me,
              ten[0], ten[1], ten[2], ten[3], ten[4], ten[5], ten[6]);
    } */
    if (nterrP) {
      te = 0;
      if (knownB0) {
        tenSimulateSingle_f(dwi2, _B0, ten, bmat, DD, AIR_CAST(float, b));
        for (d=1; d<DD; d++) {
          d1 = AIR_MAX(dwi1[d], 1);
          d2 = AIR_MAX(dwi2[d], 1);
          te += (d1 - d2)*(d1 - d2);
        }
        te /= (DD-1);
      } else {
        tenSimulateSingle_f(dwi2, _B0, ten, bmat, DD+1, AIR_CAST(float, b));
        for (d=0; d<DD; d++) {
          d1 = AIR_MAX(dwi1[d], 1);
          /* tenSimulateSingle_f always puts the B0 in the beginning of
             the dwi vector, but in this case we didn't have it in
             the input dwi vector */
          d2 = AIR_MAX(dwi2[d+1], 1);
          te += (d1 - d2)*(d1 - d2);
        }
        te /= DD;
      }
      *terr = AIR_CAST(float, sqrt(te));
      terr += 1;
    }
    ten += 7;
    if (B0) {
      B0 += 1;
    }
  }
  /* not our job: tenEigenvalueClamp(nten, nten, 0, AIR_NAN); */

  ELL_4V_SET(amap, -1, 1, 2, 3);
  nrrdAxisInfoCopy(nten, ndwi, amap, NRRD_AXIS_INFO_NONE);
  nten->axis[0].kind = nrrdKind3DMaskedSymMatrix;
  if (nrrdBasicInfoCopy(nten, ndwi,
                        NRRD_BASIC_INFO_ALL ^ NRRD_BASIC_INFO_SPACE)) {
    biffAddf(NRRD, "%s:", me);
    return 1;
  }

  airMopOkay(mop);
  return 0;
}
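
/*
** A minimal usage sketch (hypothetical names and parameter values; error
** handling abbreviated), assuming ndwi is the 4-D DWI volume (B0 first
** along axis 0) and nbmat the matching 6-by-N B-matrix, e.g. from
** tenBMatrixCalc, acquired with b = 1000:
**
**   Nrrd *nten = nrrdNew();
**   Nrrd *nterr = NULL, *nB0 = NULL;
**   if (tenEstimateLinear4D(nten, &nterr, &nB0, ndwi, nbmat,
**                           AIR_TRUE, 100, 1, 1000)) {
**     ... report biffGetDone(TEN) ...
**   }
**
** Passing AIR_NAN for thresh triggers the histogram-based threshold
** selection seen above.
*/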

/*
******** tenSimulateSingle_f
**
** given a tensor, simulate the set of diffusion weighted measurements
** represented by the given B matrix
**
** NOTE: the mindset of this function is very much "knownB0==true":
** B0 is required as an argument (and it's always copied to dwi[0]),
** and the given bmat is assumed to have DD-1 rows (similar to how
** tenEstimateLinearSingle_f() is set up), and dwi[1] through dwi[DD-1]
** are set to the calculated DWIs.
**
** So: dwi must be allocated for DD values total
*/
void
tenSimulateSingle_f(float *dwi,
                    float B0, const float *ten, const double *bmat,
                    unsigned int DD, float b) {
  double vv;
  /* this is how we multiply the off-diagonal entries by 2 */
  double matwght[6] = {1, 2, 2, 1, 2, 1};
  unsigned int ii, jj;

  dwi[0] = B0;
  /* if (tenVerbose) {
    fprintf(stderr, "ten = %g,%g,%g %g,%g %g\n",
            ten[1], ten[2], ten[3], ten[4], ten[5], ten[6]);
  } */
  for (ii=0; ii<DD-1; ii++) {
    vv = 0;
    for (jj=0; jj<6; jj++) {
      vv += matwght[jj]*bmat[jj + 6*ii]*ten[jj+1];
    }
    dwi[ii+1] = AIR_CAST(float, AIR_MAX(B0, 1)*exp(-b*vv));
    /* if (tenVerbose) {
      fprintf(stderr, "v[%d] = %g --> dwi = %g\n", ii, vv, dwi[ii+1]);
    } */
  }

  return;
}
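
/*
** As a sketch of the model being simulated: for row ii of the given
** B-matrix,
**
**   dwi[ii+1] = max(B0,1)*exp(-b * sum_jj matwght[jj]*bmat[jj+6*ii]*ten[jj+1])
**
** where matwght = {1,2,2,1,2,1} doubles the off-diagonal contributions,
** i.e. the usual mono-exponential attenuation exp(-b g^T D g) when the
** B-matrix rows come from tenBMatrixCalc.
*/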

int
tenSimulate(Nrrd *ndwi, const Nrrd *nT2, const Nrrd *nten,
            const Nrrd *_nbmat, double b) {
  static const char me[]="tenSimulate";
  size_t II;
  Nrrd *nbmat;
  size_t DD, sx, sy, sz;
  airArray *mop;
  double *bmat;
  float *dwi, *ten, (*lup)(const void *, size_t I);
  char stmp[6][AIR_STRLEN_SMALL];

  if (!ndwi || !nT2 || !nten || !_nbmat
      || tenTensorCheck(nten, nrrdTypeFloat, AIR_TRUE, AIR_TRUE)
      || tenBMatrixCheck(_nbmat, nrrdTypeDefault, 6)) {
    biffAddf(TEN, "%s: got NULL pointer or invalid args", me);
    return 1;
  }
  mop = airMopNew();
  airMopAdd(mop, nbmat=nrrdNew(), (airMopper)nrrdNuke, airMopAlways);
  if (nrrdConvert(nbmat, _nbmat, nrrdTypeDouble)) {
    biffMovef(TEN, NRRD, "%s: couldn't convert B matrix", me);
    airMopError(mop); return 1;
  }

  DD = nbmat->axis[1].size+1;
  sx = nT2->axis[0].size;
  sy = nT2->axis[1].size;
  sz = nT2->axis[2].size;
  if (!(3 == nT2->dim
        && sx == nten->axis[1].size
        && sy == nten->axis[2].size
        && sz == nten->axis[3].size)) {
    biffAddf(TEN, "%s: dimensions of %u-D T2 volume (%s,%s,%s) "
             "don't match tensor volume (%s,%s,%s)", me, nT2->dim,
             airSprintSize_t(stmp[0], sx),
             airSprintSize_t(stmp[1], sy),
             airSprintSize_t(stmp[2], sz),
             airSprintSize_t(stmp[3], nten->axis[1].size),
             airSprintSize_t(stmp[4], nten->axis[2].size),
             airSprintSize_t(stmp[5], nten->axis[3].size));
    airMopError(mop); return 1;
  }
  if (nrrdMaybeAlloc_va(ndwi, nrrdTypeFloat, 4,
                        AIR_CAST(size_t, DD), sx, sy, sz)) {
    biffMovef(TEN, NRRD, "%s: couldn't allocate output", me);
    airMopError(mop); return 1;
  }
  dwi = (float*)(ndwi->data);
  ten = (float*)(nten->data);
  bmat = (double*)(nbmat->data);
  lup = nrrdFLookup[nT2->type];
  for (II=0; II<(size_t)(sx*sy*sz); II++) {
    /* tenVerbose = (II == 42 + 190*(96 + 196*0)); */
    tenSimulateSingle_f(dwi, lup(nT2->data, II), ten, bmat, DD,
                        AIR_CAST(float, b));
    dwi += DD;
    ten += 7;
  }

  airMopOkay(mop);
  return 0;
}


/* old stuff, prior to tenEstimationMatrix .. */

/*
******** tenCalcOneTensor1
**
** make one diffusion tensor from the measurements at one voxel, based
** on the gradient directions used by Andy Alexander
*/
void
tenCalcOneTensor1(float tens[7], float chan[7],
                  float thresh, float slope, float b) {
  double c[7], sum, d1, d2, d3, d4, d5, d6;

  c[0] = AIR_MAX(chan[0], 1);
  c[1] = AIR_MAX(chan[1], 1);
  c[2] = AIR_MAX(chan[2], 1);
  c[3] = AIR_MAX(chan[3], 1);
  c[4] = AIR_MAX(chan[4], 1);
  c[5] = AIR_MAX(chan[5], 1);
  c[6] = AIR_MAX(chan[6], 1);
  sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6];
  tens[0] = AIR_CAST(float, (1 + airErf(slope*(sum - thresh)))/2.0);
  d1 = (log(c[0]) - log(c[1]))/b;
  d2 = (log(c[0]) - log(c[2]))/b;
  d3 = (log(c[0]) - log(c[3]))/b;
  d4 = (log(c[0]) - log(c[4]))/b;
  d5 = (log(c[0]) - log(c[5]))/b;
  d6 = (log(c[0]) - log(c[6]))/b;
  tens[1] = AIR_CAST(float,  d1 + d2 - d3 - d4 + d5 + d6);  /* Dxx */
  tens[2] = AIR_CAST(float,  d5 - d6);                      /* Dxy */
  tens[3] = AIR_CAST(float,  d1 - d2);                      /* Dxz */
  tens[4] = AIR_CAST(float, -d1 - d2 + d3 + d4 + d5 + d6);  /* Dyy */
  tens[5] = AIR_CAST(float,  d3 - d4);                      /* Dyz */
  tens[6] = AIR_CAST(float,  d1 + d2 + d3 + d4 - d5 - d6);  /* Dzz */
  return;
}

/*
******** tenCalcOneTensor2
**
** using gradient directions used by EK
*/
void
tenCalcOneTensor2(float tens[7], float chan[7],
                  float thresh, float slope, float b) {
  double c[7], sum, d1, d2, d3, d4, d5, d6;

  c[0] = AIR_MAX(chan[0], 1);
  c[1] = AIR_MAX(chan[1], 1);
  c[2] = AIR_MAX(chan[2], 1);
  c[3] = AIR_MAX(chan[3], 1);
  c[4] = AIR_MAX(chan[4], 1);
  c[5] = AIR_MAX(chan[5], 1);
  c[6] = AIR_MAX(chan[6], 1);
  sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6];
  tens[0] = AIR_CAST(float, (1 + airErf(slope*(sum - thresh)))/2.0);
  d1 = (log(c[0]) - log(c[1]))/b;
  d2 = (log(c[0]) - log(c[2]))/b;
  d3 = (log(c[0]) - log(c[3]))/b;
  d4 = (log(c[0]) - log(c[4]))/b;
  d5 = (log(c[0]) - log(c[5]))/b;
  d6 = (log(c[0]) - log(c[6]))/b;
  tens[1] = AIR_CAST(float, d1);                  /* Dxx */
  tens[2] = AIR_CAST(float, d6 - (d1 + d2)/2);    /* Dxy */
  tens[3] = AIR_CAST(float, d5 - (d1 + d3)/2);    /* Dxz */
  tens[4] = AIR_CAST(float, d2);                  /* Dyy */
  tens[5] = AIR_CAST(float, d4 - (d2 + d3)/2);    /* Dyz */
  tens[6] = AIR_CAST(float, d3);                  /* Dzz */
  return;
}

/*
******** tenCalcTensor
**
** Calculate a volume of tensors from measured data
*/
int
tenCalcTensor(Nrrd *nout, Nrrd *nin, int version,
              float thresh, float slope, float b) {
  static const char me[] = "tenCalcTensor";
  char cmt[128];
  float *out, tens[7], chan[7];
  size_t I, sx, sy, sz;
  void (*calcten)(float tens[7], float chan[7],
                  float thresh, float slope, float b);

  if (!(nout && nin)) {
    biffAddf(TEN, "%s: got NULL pointer", me);
    return 1;
  }
  if (!( 1 == version || 2 == version )) {
    biffAddf(TEN, "%s: version should be 1 or 2, not %d", me, version);
    return 1;
  }
  switch (version) {
  case 1:
    calcten = tenCalcOneTensor1;
    break;
  case 2:
    calcten = tenCalcOneTensor2;
    break;
  default:
    biffAddf(TEN, "%s: PANIC, version = %d not handled", me, version);
    return 1;
    break;
  }
  if (tenTensorCheck(nin, nrrdTypeDefault, AIR_TRUE, AIR_TRUE)) {
    biffAddf(TEN, "%s: wasn't given valid tensor nrrd", me);
    return 1;
  }
  sx = nin->axis[1].size;
  sy = nin->axis[2].size;
  sz = nin->axis[3].size;
  if (nrrdMaybeAlloc_va(nout, nrrdTypeFloat, 4,
                        AIR_CAST(size_t, 7), sx, sy, sz)) {
    biffMovef(TEN, NRRD, "%s: couldn't alloc output", me);
    return 1;
  }
  nout->axis[0].label = airStrdup("c,Dxx,Dxy,Dxz,Dyy,Dyz,Dzz");
  nout->axis[1].label = airStrdup("x");
  nout->axis[2].label = airStrdup("y");
  nout->axis[3].label = airStrdup("z");
  nout->axis[0].spacing = AIR_NAN;
  if (AIR_EXISTS(nin->axis[1].spacing) &&
      AIR_EXISTS(nin->axis[2].spacing) &&
      AIR_EXISTS(nin->axis[3].spacing)) {
    nout->axis[1].spacing = nin->axis[1].spacing;
    nout->axis[2].spacing = nin->axis[2].spacing;
    nout->axis[3].spacing = nin->axis[3].spacing;
  } else {
    nout->axis[1].spacing = 1.0;
    nout->axis[2].spacing = 1.0;
    nout->axis[3].spacing = 1.0;
  }
  sprintf(cmt, "%s: using thresh = %g, slope = %g, b = %g\n",
          me, thresh, slope, b);
  nrrdCommentAdd(nout, cmt);
  out = (float *)nout->data;
  for (I=0; I<(size_t)(sx*sy*sz); I++) {
    if (tenVerbose && !(I % (sx*sy))) {
      fprintf(stderr, "%s: z = %d of %d\n", me, (int)(I/(sx*sy)), (int)sz-1);
    }
    chan[0] = nrrdFLookup[nin->type](nin->data, 0 + 7*I);
    chan[1] = nrrdFLookup[nin->type](nin->data, 1 + 7*I);
    chan[2] = nrrdFLookup[nin->type](nin->data, 2 + 7*I);
    chan[3] = nrrdFLookup[nin->type](nin->data, 3 + 7*I);
    chan[4] = nrrdFLookup[nin->type](nin->data, 4 + 7*I);
    chan[5] = nrrdFLookup[nin->type](nin->data, 5 + 7*I);
    chan[6] = nrrdFLookup[nin->type](nin->data, 6 + 7*I);
    calcten(tens, chan, thresh, slope, b);
    out[0 + 7*I] = tens[0];
    out[1 + 7*I] = tens[1];
    out[2 + 7*I] = tens[2];
    out[3 + 7*I] = tens[3];
    out[4 + 7*I] = tens[4];
    out[5 + 7*I] = tens[5];
    out[6 + 7*I] = tens[6];
  }
  return 0;
}