File: | src/nrrd/resampleNrrd.c |
Location: | line 824, column 5 |
Description: | Value stored to 'inVec' is never read |
1 | /* |
2 | Teem: Tools to process and visualize scientific data and images. |
3 | Copyright (C) 2013, 2012, 2011, 2010, 2009 University of Chicago |
4 | Copyright (C) 2008, 2007, 2006, 2005 Gordon Kindlmann |
5 | Copyright (C) 2004, 2003, 2002, 2001, 2000, 1999, 1998 University of Utah |
6 | |
7 | This library is free software; you can redistribute it and/or |
8 | modify it under the terms of the GNU Lesser General Public License |
9 | (LGPL) as published by the Free Software Foundation; either |
10 | version 2.1 of the License, or (at your option) any later version. |
11 | The terms of redistributing and/or modifying this software also |
12 | include exceptions to the LGPL that facilitate static linking. |
13 | |
14 | This library is distributed in the hope that it will be useful, |
15 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
16 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
17 | Lesser General Public License for more details. |
18 | |
19 | You should have received a copy of the GNU Lesser General Public License |
20 | along with this library; if not, write to Free Software Foundation, Inc., |
21 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
22 | */ |
23 | |
24 | #include "nrrd.h" |
25 | #include "privateNrrd.h" |
26 | |
27 | /* |
28 | (this was written before airMopSub ... ) |
29 | learned: if you start using airMop stuff, and you register a free, but |
30 | then you free the memory yourself, YOU HAVE GOT TO register a NULL in |
31 | place of the original free. The next malloc may end up at the same |
32 | address as what you just freed, and if you want this memory to NOT be |
33 | mopped up, then you'll be confused when the original registered free |
34 | goes into effect and mops it up for you, even though YOU NEVER |
35 | REGISTERED a free for the second malloc. If you want simple stupid |
36 | tools, you have to treat them accordingly (be extremely careful with |
37 | fire). |
38 | |
39 | learned: well, duh. The reason to use: |
40 | |
41 | for (I=0; I<numOut; I++) { |
42 | |
43 | instead of |
44 | |
45 | for (I=0; I<=numOut-1; I++) { |
46 | |
47 | is that if numOut is of an unsigned type and has value 0, then these |
48 | two will have very different results! |
49 | |
50 | */ |
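
A minimal standalone sketch of the unsigned-loop hazard described in the comment above (not part of resampleNrrd.c; numOut here is just a stand-in local variable):

  #include <stdio.h>
  #include <stddef.h>

  int
  main(void) {
    size_t numOut = 0, I, count = 0;

    /* "I < numOut" is false immediately when numOut is 0: zero iterations */
    for (I=0; I<numOut; I++) {
      count++;
    }
    printf("I<numOut form ran %zu times\n", count);

    /* "I <= numOut-1" wraps: with numOut == 0, numOut-1 is SIZE_MAX, so this
       loop would run (SIZE_MAX+1) times; break after one trip to stay finite */
    for (I=0; I<=numOut-1; I++) {
      printf("I<=numOut-1 form entered with bound %zu\n", numOut-1);
      break;
    }
    return 0;
  }
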
51 | |
52 | int |
53 | nrrdSimpleResample(Nrrd *nout, const Nrrd *nin, |
54 | const NrrdKernel *kernel, const double *parm, |
55 | const size_t *samples, const double *scalings) { |
56 | static const char me[]="nrrdSimpleResample"; |
57 | NrrdResampleInfo *info; |
58 | int p, np, center; |
59 | unsigned ai; |
60 | |
61 | if (!(nout && nin && kernel && (samples || scalings))) { |
62 | biffAddf(NRRD, "%s: not NULL pointer", me); |
63 | return 1; |
64 | } |
65 | if (!(info = nrrdResampleInfoNew())) { |
66 | biffAddf(NRRD, "%s: can't allocate resample info struct", me); |
67 | return 1; |
68 | } |
69 | |
70 | np = kernel->numParm; |
71 | for (ai=0; ai<nin->dim; ai++) { |
72 | double axmin, axmax; |
73 | info->kernel[ai] = kernel; |
74 | if (samples) { |
75 | info->samples[ai] = samples[ai]; |
76 | } else { |
77 | center = _nrrdCenter(nin->axis[ai].center); |
78 | if (nrrdCenterCell == center) { |
79 | info->samples[ai] = (size_t)(nin->axis[ai].size*scalings[ai]); |
80 | } else { |
81 | info->samples[ai] = (size_t)((nin->axis[ai].size - 1) |
82 | *scalings[ai]) + 1; |
83 | } |
84 | } |
85 | for (p=0; p<np; p++) |
86 | info->parm[ai][p] = parm[p]; |
87 | /* set the min/max for this axis if not already set to something */ |
88 | if (!( AIR_EXISTS(nin->axis[ai].min) && AIR_EXISTS(nin->axis[ai].max) )) { |
89 | /* HEY: started as copy/paste of body of nrrdAxisInfoMinMaxSet, |
90 | because we wanted to enable const-correctness, and this |
91 | function had previously been setting per-axis min/max in |
92 | *input* when it hasn't been already set */ |
93 | double spacing; |
94 | center = _nrrdCenter2(nin->axis[ai].center, nrrdDefaultCenter); |
95 | spacing = nin->axis[ai].spacing; |
96 | if (!AIR_EXISTS(spacing)) |
97 | spacing = nrrdDefaultSpacing; |
98 | if (nrrdCenterCell == center) { |
99 | axmin = 0; |
100 | axmax = spacing*nin->axis[ai].size; |
101 | } else { |
102 | axmin = 0; |
103 | axmax = spacing*(nin->axis[ai].size - 1); |
104 | } |
105 | } else { |
106 | axmin = nin->axis[ai].min; |
107 | axmax = nin->axis[ai].max; |
108 | } |
109 | info->min[ai] = axmin; |
110 | info->max[ai] = axmax; |
111 | } |
112 | /* we go with the defaults (established by _nrrdResampleInfoInit()) |
113 | for all the remaining fields */ |
114 | |
115 | if (nrrdSpatialResample(nout, nin, info)) { |
116 | biffAddf(NRRD, "%s:", me); |
117 | return 1; |
118 | } |
119 | |
120 | info = nrrdResampleInfoNix(info); |
121 | return 0; |
122 | } |
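
A hedged usage sketch of nrrdSimpleResample() as defined above (not from this file): it assumes a 3-D input volume, the installed Teem header, and the stock nrrdKernelTent kernel; the function name resampleTo128 and the 128-sample target are illustrative only.

  #include <stdio.h>
  #include <stdlib.h>
  #include <teem/nrrd.h>

  int
  resampleTo128(const char *inName, const char *outName) {
    Nrrd *nin = nrrdNew(), *nout = nrrdNew();
    double parm[NRRD_KERNEL_PARMS_NUM] = {1.0};  /* tent kernel: unit support scale */
    size_t samples[3] = {128, 128, 128};         /* one entry per (assumed 3) axes */
    int ret = 0;

    /* scalings may be NULL because samples[] is given (see the check above) */
    if (nrrdLoad(nin, inName, NULL)
        || nrrdSimpleResample(nout, nin, nrrdKernelTent, parm, samples, NULL)
        || nrrdSave(outName, nout, NULL)) {
      char *err = biffGetDone(NRRD);
      fprintf(stderr, "trouble: %s\n", err);
      free(err);
      ret = 1;
    }
    nrrdNuke(nin);
    nrrdNuke(nout);
    return ret;
  }
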
123 | |
124 | /* |
125 | ** _nrrdResampleCheckInfo() |
126 | ** |
127 | ** checks validity of given NrrdResampleInfo *info: |
128 | ** - all required parameters exist |
129 | ** - both min[d] and max[d] for all axes d |
130 | */ |
131 | int |
132 | _nrrdResampleCheckInfo(const Nrrd *nin, const NrrdResampleInfo *info) { |
133 | static const char me[] = "_nrrdResampleCheckInfo"; |
134 | const NrrdKernel *k; |
135 | int center, p, np; |
136 | unsigned int ai, minsmp; |
137 | char stmp[2][AIR_STRLEN_SMALL]; |
138 | |
139 | if (nrrdTypeBlock == nin->type || nrrdTypeBlock == info->type) { |
140 | biffAddf(NRRD, "%s: can't resample to or from type %s", me, |
141 | airEnumStr(nrrdType, nrrdTypeBlock)); |
142 | return 1; |
143 | } |
144 | if (nrrdBoundaryUnknown == info->boundary) { |
145 | biffAddf(NRRD, "%s: didn't set boundary behavior\n", me); |
146 | return 1; |
147 | } |
148 | if (nrrdBoundaryPad == info->boundary && !AIR_EXISTS(info->padValue)) { |
149 | biffAddf(NRRD, |
150 | "%s: asked for boundary padding, but no pad value set\n", me); |
151 | return 1; |
152 | } |
153 | for (ai=0; ai<nin->dim; ai++) { |
154 | k = info->kernel[ai]; |
155 | /* we only care about the axes being resampled */ |
156 | if (!k) |
157 | continue; |
158 | if (!(info->samples[ai] > 0)) { |
159 | biffAddf(NRRD, "%s: axis %d # samples (%s) invalid", me, ai, |
160 | airSprintSize_t(stmp[0], info->samples[ai])); |
161 | return 1; |
162 | } |
163 | if (!( AIR_EXISTS(nin->axis[ai].min) && AIR_EXISTS(nin->axis[ai].max) )) { |
164 | biffAddf(NRRD, "%s: input nrrd's axis %d min,max have not " |
165 | "both been set", me, ai); |
166 | return 1; |
167 | } |
168 | if (!( AIR_EXISTS(info->min[ai]) && AIR_EXISTS(info->max[ai]) )) { |
169 | biffAddf(NRRD, "%s: info's axis %d min,max not both set", me, ai); |
170 | return 1; |
171 | } |
172 | np = k->numParm; |
173 | for (p=0; p<np; p++) { |
174 | if (!AIR_EXISTS(info->parm[ai][p])) { |
175 | biffAddf(NRRD, "%s: didn't set parameter %d (of %d) for axis %d\n", |
176 | me, p, np, ai); |
177 | return 1; |
178 | } |
179 | } |
180 | center = _nrrdCenter(nin->axis[ai].center); |
181 | minsmp = nrrdCenterCell == center ? 1 : 2; |
182 | if (!( nin->axis[ai].size >= minsmp && info->samples[ai] >= minsmp )) { |
183 | biffAddf(NRRD, "%s: axis %d # input samples (%s) or output samples (%s) " |
184 | " invalid for %s centering", me, ai, |
185 | airSprintSize_t(stmp[0], nin->axis[ai].size), |
186 | airSprintSize_t(stmp[1], info->samples[ai]), |
187 | airEnumStr(nrrdCenter, center)); |
188 | return 1; |
189 | } |
190 | } |
191 | return 0; |
192 | } |
193 | |
194 | /* |
195 | ** _nrrdResampleComputePermute() |
196 | ** |
197 | ** figures out information related to how the axes in a nrrd are |
198 | ** permuted during resampling: permute, topRax, botRax, passes, ax[][], sz[][] |
199 | */ |
200 | void |
201 | _nrrdResampleComputePermute(unsigned int permute[], |
202 | unsigned int ax[NRRD_DIM_MAX][NRRD_DIM_MAX], |
203 | size_t sz[NRRD_DIM_MAX][NRRD_DIM_MAX], |
204 | int *topRax, |
205 | int *botRax, |
206 | unsigned int *passes, |
207 | const Nrrd *nin, |
208 | const NrrdResampleInfo *info) { |
209 | /* char me[]="_nrrdResampleComputePermute"; */ |
210 | unsigned int bi, ai, pi; |
211 | |
212 | /* what are the first (top) and last (bottom) axes being resampled? */ |
213 | *topRax = *botRax = -1; |
214 | for (ai=0; ai<nin->dim; ai++) { |
215 | if (info->kernel[ai]) { |
216 | if (*topRax < 0) { |
217 | *topRax = ai; |
218 | } |
219 | *botRax = ai; |
220 | } |
221 | } |
222 | |
223 | /* figure out total number of passes needed, and construct the |
224 | permute[] array. permute[i] = j means that the axis in position |
225 | i of the old array will be in position j of the new one |
226 | (permute[] answers "where do I put this", not "what do I put here"). |
227 | */ |
228 | *passes = bi = 0; |
229 | for (ai=0; ai<nin->dim; ai++) { |
230 | if (info->kernel[ai]) { |
231 | do { |
232 | bi = AIR_MOD((int)bi+1, (int)nin->dim); /* HEY scrutinize casts */ |
233 | } while (!info->kernel[bi]); |
234 | permute[bi] = ai; |
235 | *passes += 1; |
236 | } else { |
237 | permute[ai] = ai; |
238 | bi += bi == ai; |
239 | } |
240 | } |
241 | permute[nin->dim] = nin->dim; |
242 | if (!*passes) { |
243 | /* none of the kernels was non-NULL */ |
244 | return; |
245 | } |
246 | |
247 | /* |
248 | fprintf(stderr, "%s: permute:\n", me); |
249 | for (d=0; d<nin->dim; d++) { |
250 | fprintf(stderr, " permute[%d] = %d\n", d, permute[d]); |
251 | } |
252 | */ |
253 | |
254 | /* create array of how the axes will be arranged in each pass ("ax"), |
255 | and create array of how big each axes is in each pass ("sz"). |
256 | The input to pass i will have axis layout described in ax[i] and |
257 | axis sizes described in sz[i] */ |
258 | for (ai=0; ai<nin->dim; ai++) { |
259 | ax[0][ai] = ai; |
260 | sz[0][ai] = nin->axis[ai].size; |
261 | } |
262 | for (pi=0; pi<*passes; pi++) { |
263 | for (ai=0; ai<nin->dim; ai++) { |
264 | ax[pi+1][permute[ai]] = ax[pi][ai]; |
265 | if (ai == (unsigned int)*topRax) { /* HEY scrutinize casts */ |
266 | /* this is the axis which is getting resampled, |
267 | so the number of samples is potentially changing */ |
268 | sz[pi+1][permute[ai]] = (info->kernel[ax[pi][ai]] |
269 | ? info->samples[ax[pi][ai]] |
270 | : sz[pi][ai]); |
271 | } else { |
272 | /* this axis is just a shuffled version of the |
273 | previous axis; no resampling this pass. |
274 | Note: this case also includes axes which aren't |
275 | getting resampled whatsoever */ |
276 | sz[pi+1][permute[ai]] = sz[pi][ai]; |
277 | } |
278 | } |
279 | } |
280 | |
281 | return; |
282 | } |
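
A small standalone sketch (not part of resampleNrrd.c) of the permute[] convention documented above: permute[i] = j means the entry at position i of the old ordering lands at position j of the new one, which is exactly how sz[pi+1] is built from sz[pi].

  #include <stdio.h>

  int
  main(void) {
    size_t oldSz[3] = {100, 200, 300}, newSz[3];
    unsigned int permute[3] = {1, 2, 0};   /* axis 0 -> slot 1, 1 -> 2, 2 -> 0 */
    unsigned int ai;

    for (ai=0; ai<3; ai++) {
      newSz[permute[ai]] = oldSz[ai];      /* "where do I put this" */
    }
    for (ai=0; ai<3; ai++) {
      printf("newSz[%u] = %u\n", ai, (unsigned int)newSz[ai]);  /* 300, 100, 200 */
    }
    return 0;
  }
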
283 | |
284 | /* |
285 | ** _nrrdResampleMakeWeightIndex() |
286 | ** |
287 | ** _allocate_ and fill the arrays of indices and weights that are |
288 | ** needed to process all the scanlines along a given axis; also |
289 | ** be so kind as to set the sampling ratio (<1: downsampling, |
290 | ** new sample spacing larger, >1: upsampling, new sample spacing smaller) |
291 | ** |
292 | ** returns "dotLen", the number of input samples which are required |
293 | ** for resampling this axis, or 0 if there was an error. Uses biff. |
294 | */ |
295 | int |
296 | _nrrdResampleMakeWeightIndex(nrrdResample_t **weightP, |
297 | int **indexP, double *ratioP, |
298 | const Nrrd *nin, const NrrdResampleInfo *info, |
299 | unsigned int ai) { |
300 | static const char me[]="_nrrdResampleMakeWeightIndex"; |
301 | int sizeIn, sizeOut, center, dotLen, halfLen, *indx, base, idx; |
302 | nrrdResample_t minIn, maxIn, minOut, maxOut, spcIn, spcOut, |
303 | ratio, support, integral, pos, idxD, wght; |
304 | nrrdResample_t *weight; |
305 | double parm[NRRD_KERNEL_PARMS_NUM]; |
306 | |
307 | int e, i; |
308 | |
309 | if (!(info->kernel[ai])) { |
310 | biffAddf(NRRD, "%s: don't see a kernel for dimension %d", me, ai); |
311 | *weightP = NULL; *indexP = NULL; return 0; |
312 | } |
313 | |
314 | center = _nrrdCenter(nin->axis[ai].center); |
315 | sizeIn = AIR_CAST(int, nin->axis[ai].size); |
316 | sizeOut = AIR_CAST(int, info->samples[ai]); |
317 | minIn = AIR_CAST(nrrdResample_t, nin->axis[ai].min); |
318 | maxIn = AIR_CAST(nrrdResample_t, nin->axis[ai].max); |
319 | minOut = AIR_CAST(nrrdResample_t, info->min[ai]); |
320 | maxOut = AIR_CAST(nrrdResample_t, info->max[ai]); |
321 | spcIn = NRRD_SPACING(center, minIn, maxIn, sizeIn); |
322 | spcOut = NRRD_SPACING(center, minOut, maxOut, sizeOut); |
323 | *ratioP = ratio = spcIn/spcOut; |
324 | support = AIR_CAST(nrrdResample_t, |
325 | info->kernel[ai]->support(info->parm[ai])); |
326 | integral = AIR_CAST(nrrdResample_t, |
327 | info->kernel[ai]->integral(info->parm[ai])); |
328 | /* |
329 | fprintf(stderr, |
330 | "!%s(%d): size{In,Out} = %d, %d, support = %f; ratio = %f\n", |
331 | me, d, sizeIn, sizeOut, support, ratio); |
332 | */ |
333 | if (ratio > 1) { |
334 | /* if upsampling, we need only as many samples as needed for |
335 | interpolation with the given kernel */ |
336 | dotLen = (int)(2*ceil(support)); |
337 | } else { |
338 | /* if downsampling, we need to use all the samples covered by |
339 | the stretched out version of the kernel */ |
340 | if (info->cheap) { |
341 | dotLen = (int)(2*ceil(support)); |
342 | } else { |
343 | dotLen = (int)(2*ceil(support/ratio)); |
344 | } |
345 | } |
346 | /* |
347 | fprintf(stderr, "!%s(%d): dotLen = %d\n", me, d, dotLen); |
348 | */ |
349 | |
350 | weight = AIR_CALLOC(sizeOut*dotLen, nrrdResample_t); |
351 | if (!weight) { |
352 | biffAddf(NRRD, "%s: can't allocate weight array", me); |
353 | *weightP = NULL; *indexP = NULL; return 0; |
354 | } |
355 | indx = AIR_CALLOC(sizeOut*dotLen, int); |
356 | if (!indx) { |
357 | biffAddf(NRRD, "%s: can't allocate index arrays", me); |
358 | *weightP = NULL; *indexP = NULL; return 0; |
359 | } |
360 | |
361 | /* calculate sample locations and do first pass on indices */ |
362 | halfLen = dotLen/2; |
363 | for (i=0; i<sizeOut; i++) { |
364 | pos = AIR_CAST(nrrdResample_t, |
365 | NRRD_POS(center, minOut, maxOut, sizeOut, i)); |
366 | idxD = AIR_CAST(nrrdResample_t, |
367 | NRRD_IDX(center, minIn, maxIn, sizeIn, pos)); |
368 | base = (int)floor(idxD) - halfLen + 1; |
369 | for (e=0; e<dotLen; e++) { |
370 | indx[e + dotLen*i] = base + e; |
371 | weight[e + dotLen*i] = idxD - indx[e + dotLen*i]; |
372 | } |
373 | /* ******** |
374 | if (!i) { |
375 | fprintf(stderr, "%s: sample locations:\n", me); |
376 | } |
377 | fprintf(stderr, "%s: %d (sample locations)\n ", me, i); |
378 | for (e=0; e<dotLen; e++) { |
379 | fprintf(stderr, "%d/%g ", indx[e + dotLen*i], weight[e + dotLen*i]); |
380 | } |
381 | fprintf(stderr, "\n"); |
382 | ******** */ |
383 | } |
384 | |
385 | /* figure out what to do with the out-of-range indices */ |
386 | for (i=0; i<dotLen*sizeOut; i++) { |
387 | idx = indx[i]; |
388 | if (!AIR_IN_CL(0, idx, sizeIn-1)) { |
389 | switch(info->boundary) { |
390 | case nrrdBoundaryPad: |
391 | case nrrdBoundaryWeight: /* this will be further handled later */ |
392 | idx = sizeIn; |
393 | break; |
394 | case nrrdBoundaryBleed: |
395 | idx = AIR_CLAMP(0, idx, sizeIn-1); |
396 | break; |
397 | case nrrdBoundaryWrap: |
398 | idx = AIR_MOD(idx, sizeIn); |
399 | break; |
400 | case nrrdBoundaryMirror: |
401 | idx = _nrrdMirror_32(sizeIn, idx); |
402 | break; |
403 | default: |
404 | biffAddf(NRRD, "%s: boundary behavior %d unknown/unimplemented", |
405 | me, info->boundary); |
406 | *weightP = NULL; *indexP = NULL; return 0; |
407 | } |
408 | indx[i] = idx; |
409 | } |
410 | } |
411 | |
412 | /* run the sample locations through the chosen kernel. We play a |
413 | sneaky trick on the kernel parameter 0 in case of downsampling |
414 | to create the blurring of the old index space, but only if !cheap */ |
415 | memcpy(parm, info->parm[ai], NRRD_KERNEL_PARMS_NUM*sizeof(double)); |
416 | if (ratio < 1 && !(info->cheap)) { |
417 | parm[0] /= ratio; |
418 | } |
419 | info->kernel[ai]->EVALN(weight, weight, dotLen*sizeOut, parm); |
420 | |
421 | /* ******** |
422 | for (i=0; i<sizeOut; i++) { |
423 | fprintf(stderr, "%s: %d (sample weights)\n ", me, i); |
424 | for (e=0; e<dotLen; e++) { |
425 | fprintf(stderr, "%d/%g ", indx[e + dotLen*i], weight[e + dotLen*i]); |
426 | } |
427 | fprintf(stderr, "\n"); |
428 | } |
429 | ******** */ |
430 | |
431 | if (nrrdBoundaryWeight == info->boundary) { |
432 | if (integral) { |
433 | /* above, we set to sizeIn all the indices that were out of |
434 | range. We now use that to determine the sum of the weights |
435 | for the indices that were in-range */ |
436 | for (i=0; i<sizeOut; i++) { |
437 | wght = 0; |
438 | for (e=0; e<dotLen; e++) { |
439 | if (sizeIn != indx[e + dotLen*i]) { |
440 | wght += weight[e + dotLen*i]; |
441 | } |
442 | } |
443 | for (e=0; e<dotLen; e++) { |
444 | idx = indx[e + dotLen*i]; |
445 | if (sizeIn != idx) { |
446 | weight[e + dotLen*i] *= integral/wght; |
447 | } else { |
448 | weight[e + dotLen*i] = 0; |
449 | } |
450 | } |
451 | } |
452 | } |
453 | } else { |
454 | /* try to remove ripple/grating on downsampling */ |
455 | /* if (ratio < 1 && info->renormalize && integral) { */ |
456 | if (info->renormalize && integral) { |
457 | for (i=0; i<sizeOut; i++) { |
458 | wght = 0; |
459 | for (e=0; e<dotLen; e++) { |
460 | wght += weight[e + dotLen*i]; |
461 | } |
462 | if (wght) { |
463 | for (e=0; e<dotLen; e++) { |
464 | /* this used to normalize the weights so that they summed |
465 | to integral ("*= integral/wght"), which meant that if |
466 | you use a very truncated Gaussian, then your over-all |
467 | image brightness goes down. This seems very contrary |
468 | to the whole point of renormalization. */ |
469 | weight[e + dotLen*i] *= AIR_CAST(nrrdResample_t, 1.0/wght); |
470 | } |
471 | } |
472 | } |
473 | } |
474 | } |
475 | /* ******** |
476 | fprintf(stderr, "%s: sample weights:\n", me); |
477 | for (i=0; i<sizeOut; i++) { |
478 | fprintf(stderr, "%s: %d\n ", me, i); |
479 | wght = 0; |
480 | for (e=0; e<dotLen; e++) { |
481 | fprintf(stderr, "%d/%g ", indx[e + dotLen*i], weight[e + dotLen*i]); |
482 | wght += weight[e + dotLen*i]; |
483 | } |
484 | fprintf(stderr, " (sum = %g)\n", wght); |
485 | } |
486 | ******** */ |
487 | |
488 | *weightP = weight; |
489 | *indexP = indx; |
490 | /* |
491 | fprintf(stderr, "!%s: dotLen = %d\n", me, dotLen); |
492 | */ |
493 | return dotLen; |
494 | } |
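
A standalone sketch (not part of this file) of how the weight[] and indx[] arrays built by _nrrdResampleMakeWeightIndex() are laid out and consumed: for output sample i, the dotLen weights starting at weight[dotLen*i] are dotted with the input samples gathered through indx[dotLen*i], which is what the scanline loop in nrrdSpatialResample() below does. The 2-tap linear weights here are made up for illustration.

  #include <stdio.h>

  static void
  applyWeights(double *out, const double *in,
               const double *weight, const int *indx,
               int sizeOut, int dotLen) {
    int i, e;
    for (i=0; i<sizeOut; i++) {
      double sum = 0.0;
      for (e=0; e<dotLen; e++) {
        sum += in[indx[e + dotLen*i]]*weight[e + dotLen*i];
      }
      out[i] = sum;
    }
  }

  int
  main(void) {
    double in[4] = {0.0, 10.0, 20.0, 30.0}, out[3];
    int indx[6] = {0,1, 1,2, 2,3};                  /* dotLen=2 input indices per output */
    double weight[6] = {0.5,0.5, 0.5,0.5, 0.5,0.5}; /* midway linear interpolation */
    applyWeights(out, in, weight, indx, 3, 2);
    printf("%g %g %g\n", out[0], out[1], out[2]);   /* prints: 5 15 25 */
    return 0;
  }
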
495 | |
496 | /* |
497 | ******** nrrdSpatialResample() |
498 | ** |
499 | ** general-purpose array-resampler: resamples a nrrd of any type |
500 | ** (except block) and any dimension along any or all of its axes, with |
501 | ** any combination of up- or down-sampling along the axes, with any |
502 | ** kernel (specified by callback), with potentially a different kernel |
503 | ** for each axis. Whether or not to resample along axis d is |
504 | ** controlled by the non-NULL-ity of info->kernel[ai]. Where to sample |
505 | ** on the axis is controlled by info->min[ai] and info->max[ai]; these |
506 | ** specify a range of "positions" aka "world space" positions, as |
507 | ** determined by the per-axis min and max of the input nrrd, which must |
508 | ** be set for every resampled axis. |
509 | ** |
510 | ** we cyclically permute those axes being resampled, and never touch |
511 | ** the position (in axis ordering) of axes along which we are not |
512 | ** resampling. This strategy is certainly not the most intelligent |
513 | ** one possible, but it does mean that the axis along which we're |
514 | ** currently resampling-- the one along which we'll have to look at |
515 | multiple adjacent samples-- is that resampling axis which is |
516 | ** currently most contiguous in memory. It may make sense to precede |
517 | ** the resampling with an axis permutation which bubbles all the |
518 | ** resampled axes to the front (most contiguous) end of the axis list, |
519 | ** and then puts them back in place afterwards, depending on the cost |
520 | ** of such axis permutation overhead. |
521 | */ |
522 | int |
523 | nrrdSpatialResample(Nrrd *nout, const Nrrd *nin, |
524 | const NrrdResampleInfo *info) { |
525 | static const char me[]="nrrdSpatialResample", func[]="resample"; |
526 | nrrdResample_t |
527 | *array[NRRD_DIM_MAX], /* intermediate copies of the input data |
528 | undergoing resampling; we don't need a full- |
529 | fledged nrrd for these. Only about two of |
530 | these arrays will be allocated at a time; |
531 | intermediate results will be free()d when not |
532 | needed */ |
533 | *_inVec, /* current input vector being resampled; |
534 | not necessarily contiguous in memory |
535 | (if strideIn != 1) */ |
536 | *inVec, /* buffer for input vector; contiguous */ |
537 | *_outVec; /* output vector in context of volume; |
538 | never contiguous */ |
539 | double tmpF; |
540 | double ratio, /* factor by which we up- or downsample */ |
541 | ratios[NRRD_DIM_MAX]; /* record of "ratio" for all resampled axes, |
542 | used to compute new spacing in output */ |
543 | |
544 | Nrrd *floatNin; /* if the input nrrd type is not nrrdResample_t, |
545 | then we convert it and keep it here */ |
546 | unsigned int ai, |
547 | pi, /* current pass */ |
548 | topLax, |
549 | permute[NRRD_DIM_MAX], /* how to permute axes of last pass to get |
550 | axes for current pass */ |
551 | ax[NRRD_DIM_MAX+1][NRRD_DIM_MAX], /* axis ordering on each pass */ |
552 | passes; /* # of passes needed to resample all axes */ |
553 | int i, s, e, |
554 | topRax, /* the lowest index of an axis which is |
555 | resampled. If all axes are being resampled, |
556 | then this is 0. If for some reason the |
557 | "x" axis (fastest stride) is not being |
558 | resampled, but "y" is, then topRax is 1 */ |
559 | botRax, /* index of highest axis being resampled */ |
560 | typeIn, typeOut; /* types of input and output of resampling */ |
561 | size_t sz[NRRD_DIM_MAX+1][NRRD_DIM_MAX]; |
562 | /* how many samples along each |
563 | axis, changing on each pass */ |
564 | |
565 | /* all these variables have to do with the spacing of elements in |
566 | memory for the current pass of resampling, and they (except |
567 | strideIn) are re-set at the beginning of each pass */ |
568 | nrrdResample_t |
569 | *weight; /* sample weights */ |
570 | unsigned int ci[NRRD_DIM_MAX+1], |
571 | co[NRRD_DIM_MAX+1]; |
572 | int |
573 | sizeIn, sizeOut, /* lengths of input and output vectors */ |
574 | dotLen, /* # input samples to dot with weights to get |
575 | one output sample */ |
576 | doRound, /* actually do rounding on output: we DO NOT |
577 | round when info->round but the output |
578 | type is not integral */ |
579 | *indx; /* dotLen*sizeOut 2D array of input indices */ |
580 | size_t |
581 | I, /* swiss-army int */ |
582 | strideIn, /* the stride between samples in the input |
583 | "scanline" being resampled */ |
584 | strideOut, /* stride between samples in output |
585 | "scanline" from resampling */ |
586 | L, LI, LO, numLines, /* top secret */ |
587 | numOut; /* # of _samples_, total, in output volume; |
588 | this is for allocating the output */ |
589 | airArray *mop; /* for cleaning up */ |
590 | |
591 | if (!(nout && nin && info)) { |
592 | biffAddf(NRRD, "%s: got NULL pointer", me); |
593 | return 1; |
594 | } |
595 | if (nrrdBoundaryUnknown == info->boundary) { |
596 | biffAddf(NRRD, "%s: need to specify a boundary behavior", me); |
597 | return 1; |
598 | } |
599 | |
600 | typeIn = nin->type; |
601 | typeOut = nrrdTypeDefault == info->type ? typeIn : info->type; |
602 | |
603 | if (_nrrdResampleCheckInfo(nin, info)) { |
604 | biffAddf(NRRD, "%s: problem with arguments", me); |
605 | return 1; |
606 | } |
607 | |
608 | _nrrdResampleComputePermute(permute, ax, sz, |
609 | &topRax, &botRax, &passes, |
610 | nin, info); |
611 | topLax = topRax ? 0 : 1; |
612 | |
613 | /* not sure where else to put this: |
614 | (want to put it before 0 == passes branch) |
615 | We have to assume some centering when doing resampling, and it would |
616 | be stupid to not record it in the outgoing nrrd, since the value of |
617 | nrrdDefaultCenter could always change. */ |
618 | for (ai=0; ai<nin->dim; ai++) { |
619 | if (info->kernel[ai]) { |
620 | nout->axis[ai].center = _nrrdCenter(nin->axis[ai].center); |
621 | } |
622 | } |
623 | |
624 | if (0 == passes) { |
625 | /* actually, no resampling was desired. Copy input to output, |
626 | but with the clamping that we normally do at the end of resampling */ |
627 | nrrdAxisInfoGet_nva(nin, nrrdAxisInfoSize, sz[0]); |
628 | if (nrrdMaybeAlloc_nva(nout, typeOut, nin->dim, sz[0])) { |
629 | biffAddf(NRRD, "%s: couldn't allocate output", me); |
630 | return 1; |
631 | } |
632 | numOut = nrrdElementNumber(nout); |
633 | for (I=0; I<numOut; I++) { |
634 | tmpF = nrrdDLookup[nin->type](nin->data, I); |
635 | tmpF = nrrdDClamp[typeOut](tmpF); |
636 | nrrdDInsert[typeOut](nout->data, I, tmpF); |
637 | } |
638 | nrrdAxisInfoCopy(nout, nin, NULL, NRRD_AXIS_INFO_NONE); |
639 | /* HEY: need to create textual representation of resampling parameters */ |
640 | if (nrrdContentSet_va(nout, func, nin, "")) { |
641 | biffAddf(NRRD, "%s:", me); |
642 | return 1; |
643 | } |
644 | if (nrrdBasicInfoCopy(nout, nin, |
645 | NRRD_BASIC_INFO_DATA_BIT |
646 | | NRRD_BASIC_INFO_TYPE_BIT |
647 | | NRRD_BASIC_INFO_BLOCKSIZE_BIT |
648 | | NRRD_BASIC_INFO_DIMENSION_BIT |
649 | | NRRD_BASIC_INFO_CONTENT_BIT |
650 | | NRRD_BASIC_INFO_COMMENTS_BIT |
651 | | (nrrdStateKeyValuePairsPropagate |
652 | ? 0 |
653 | : NRRD_BASIC_INFO_KEYVALUEPAIRS_BIT))) { |
654 | biffAddf(NRRD, "%s:", me); |
655 | return 1; |
656 | } |
657 | return 0; |
658 | } |
659 | |
660 | mop = airMopNew(); |
661 | /* convert input nrrd to nrrdResample_t if necessary */ |
662 | if (nrrdResample_nrrdType != typeIn) { |
663 | if (nrrdConvert(floatNin = nrrdNew(), nin, nrrdResample_nrrdType)) { |
664 | biffAddf(NRRD, "%s: couldn't create float copy of input", me); |
665 | airMopError(mop); return 1; |
666 | } |
667 | array[0] = (nrrdResample_t*)floatNin->data; |
668 | airMopAdd(mop, floatNin, (airMopper)nrrdNuke, airMopAlways); |
669 | } else { |
670 | floatNin = NULL; |
671 | array[0] = (nrrdResample_t*)nin->data; |
672 | } |
673 | |
674 | /* compute strideIn; this is actually the same for every pass |
675 | because (strictly speaking) in every pass we are resampling |
676 | the same axis, and axes with lower indices are constant length */ |
677 | strideIn = 1; |
678 | for (ai=0; ai<(unsigned int)topRax; ai++) { /* HEY scrutinize casts */ |
679 | strideIn *= nin->axis[ai].size; |
680 | } |
681 | /* printf("%s: strideIn = " _AIR_SIZE_T_CNV "\n", me, strideIn); */ |
682 | |
683 | /* go! */ |
684 | for (pi=0; pi<passes; pi++) { |
685 | /* |
686 | printf("%s: --- pass %d --- \n", me, pi); |
687 | */ |
688 | numLines = strideOut = 1; |
689 | for (ai=0; ai<nin->dim; ai++) { |
690 | if (ai < (unsigned int)botRax) { /* HEY scrutinize cast */ |
691 | strideOut *= sz[pi+1][ai]; |
692 | } |
693 | if (ai != (unsigned int)topRax) { /* HEY scrutinize cast */ |
694 | numLines *= sz[pi][ai]; |
695 | } |
696 | } |
697 | sizeIn = AIR_CAST(int, sz[pi][topRax]); |
698 | sizeOut = AIR_CAST(int, sz[pi+1][botRax]); |
699 | numOut = numLines*sizeOut; |
700 | /* for the rest of the loop body, d is the original "dimension" |
701 | for the axis being resampled */ |
702 | ai = ax[pi][topRax]; |
703 | /* printf("%s(%d): numOut = " _AIR_SIZE_T_CNV "\n", me, pi, numOut); */ |
704 | /* printf("%s(%d): numLines = " _AIR_SIZE_T_CNV "\n", me, pi, numLines); */ |
705 | /* printf("%s(%d): stride: In=%d, Out=%d\n", me, pi, */ |
706 | /* (int)strideIn, (int)strideOut); */ |
707 | /* printf("%s(%d): sizeIn = %d\n", me, pi, sizeIn); */ |
708 | /* printf("%s(%d): sizeOut = %d\n", me, pi, sizeOut); */ |
709 | |
710 | /* we can free the input to the previous pass |
711 | (if it's not the given data) */ |
712 | if (pi > 0) { |
713 | if (pi == 1) { |
714 | if (array[0] != nin->data) { |
715 | airMopSub(mop, floatNin, (airMopper)nrrdNuke); |
716 | floatNin = nrrdNuke(floatNin); |
717 | array[0] = NULL; |
718 | /* |
719 | printf("%s: pi %d: freeing array[0]\n", me, pi); |
720 | */ |
721 | } |
722 | } else { |
723 | airMopSub(mop, array[pi-1], airFree); |
724 | array[pi-1] = (nrrdResample_t*)airFree(array[pi-1]); |
725 | /* |
726 | printf("%s: pi %d: freeing array[%d]\n", me, pi, pi-1); |
727 | */ |
728 | } |
729 | } |
730 | |
731 | /* allocate output volume */ |
732 | array[pi+1] = (nrrdResample_t*)calloc(numOut, sizeof(nrrdResample_t)); |
733 | if (!array[pi+1]) { |
734 | char stmp[AIR_STRLEN_SMALL]; |
735 | biffAddf(NRRD, "%s: couldn't create array of %s nrrdResample_t's for " |
736 | "output of pass %d", me, airSprintSize_t(stmp, numOut), pi); |
737 | airMopError(mop); return 1; |
738 | } |
739 | airMopAdd(mop, array[pi+1], airFree, airMopAlways); |
740 | /* |
741 | printf("%s: allocated array[%d]\n", me, pi+1); |
742 | */ |
743 | |
744 | /* allocate contiguous input scanline buffer, we alloc one more |
745 | than needed to provide a place for the pad value. That is, in |
746 | fact, the over-riding reason to copy a scanline to a local |
747 | array: so that there is a simple consistent (non-branchy) way |
748 | to incorporate the pad values */ |
749 | inVec = (nrrdResample_t *)calloc(sizeIn+1, sizeof(nrrdResample_t)); |
750 | airMopAdd(mop, inVec, airFree, airMopAlways); |
751 | inVec[sizeIn] = AIR_CAST(nrrdResample_t, info->padValue); |
752 | |
753 | dotLen = _nrrdResampleMakeWeightIndex(&weight, &indx, &ratio, |
754 | nin, info, ai); |
755 | if (!dotLen) { |
756 | biffAddf(NRRD, "%s: trouble creating weight and index vector arrays", |
757 | me); |
758 | airMopError(mop); return 1; |
759 | } |
760 | ratios[ai] = ratio; |
761 | airMopAdd(mop, weight, airFree, airMopAlways); |
762 | airMopAdd(mop, indx, airFree, airMopAlways); |
763 | |
764 | /* the skinny: resample all the scanlines */ |
765 | _inVec = array[pi]; |
766 | _outVec = array[pi+1]; |
767 | memset(ci, 0, (NRRD_DIM_MAX+1)*sizeof(int)); |
768 | memset(co, 0, (NRRD_DIM_MAX+1)*sizeof(int)); |
769 | for (L=0; L<numLines; L++) { |
770 | /* calculate the index to get to input and output scanlines, |
771 | according the coordinates of the start of the scanline */ |
772 | NRRD_INDEX_GEN(LI, ci, sz[pi], nin->dim); |
773 | NRRD_INDEX_GEN(LO, co, sz[pi+1], nin->dim); |
774 | _inVec = array[pi] + LI; |
775 | _outVec = array[pi+1] + LO; |
776 | |
777 | /* read input scanline into contiguous array */ |
778 | for (i=0; i<sizeIn; i++) { |
779 | inVec[i] = _inVec[i*strideIn]; |
780 | } |
781 | |
782 | /* do the weighting */ |
783 | for (i=0; i<sizeOut; i++) { |
784 | tmpF = 0.0; |
785 | /* |
786 | fprintf(stderr, "%s: i = %d (tmpF=0)\n", me, (int)i); |
787 | */ |
788 | for (s=0; s<dotLen; s++) { |
789 | tmpF += inVec[indx[s + dotLen*i]]*weight[s + dotLen*i]; |
790 | /* |
791 | fprintf(stderr, " tmpF += %g*%g == %g\n", |
792 | inVec[indx[s + dotLen*i]], weight[s + dotLen*i], tmpF); |
793 | */ |
794 | } |
795 | _outVec[i*strideOut] = tmpF; |
796 | /* |
797 | fprintf(stderr, "--> out[%d] = %g\n", |
798 | i*strideOut, _outVec[i*strideOut]); |
799 | */ |
800 | } |
801 | |
802 | /* update the coordinates for the scanline starts. We don't |
803 | use the usual NRRD_COORD macros because we're subject to |
804 | the unusual constraint that ci[topRax] and co[permute[topRax]] |
805 | must stay exactly zero */ |
806 | e = topLax; |
807 | ci[e]++; |
808 | co[permute[e]]++; |
809 | while (L < numLines-1 && ci[e] == sz[pi][e]) { |
810 | ci[e] = co[permute[e]] = 0; |
811 | e++; |
812 | e += e == topRax; |
813 | ci[e]++; |
814 | co[permute[e]]++; |
815 | } |
816 | } |
817 | |
818 | /* pass-specific clean up */ |
819 | airMopSub(mop, weight, airFree); |
820 | airMopSub(mop, indx, airFree); |
821 | airMopSub(mop, inVec, airFree); |
822 | weight = (nrrdResample_t*)airFree(weight); |
823 | indx = (int*)airFree(indx); |
824 | inVec = (nrrdResample_t*)airFree(inVec); |
Value stored to 'inVec' is never read | |
825 | } |
826 | |
827 | /* clean up second-to-last array and scanline buffers */ |
828 | if (passes > 1) { |
829 | airMopSub(mop, array[passes-1], airFree); |
830 | array[passes-1] = (nrrdResample_t*)airFree(array[passes-1]); |
831 | /* |
832 | printf("%s: now freeing array[%d]\n", me, passes-1); |
833 | */ |
834 | } else if (array[passes-1] != nin->data) { |
835 | airMopSub(mop, floatNin, (airMopper)nrrdNuke); |
836 | floatNin = nrrdNuke(floatNin); |
837 | } |
838 | array[passes-1] = NULL; |
839 | |
840 | /* create output nrrd and set axis info */ |
841 | if (nrrdMaybeAlloc_nva(nout, typeOut, nin->dim, sz[passes])) { |
842 | biffAddf(NRRD, "%s: couldn't allocate final output nrrd", me); |
843 | airMopError(mop); return 1; |
844 | } |
845 | airMopAdd(mop, nout, (airMopper)nrrdNuke, airMopOnError); |
846 | nrrdAxisInfoCopy(nout, nin, NULL, |
847 | (NRRD_AXIS_INFO_SIZE_BIT |
848 | | NRRD_AXIS_INFO_MIN_BIT |
849 | | NRRD_AXIS_INFO_MAX_BIT |
850 | | NRRD_AXIS_INFO_SPACING_BIT |
851 | | NRRD_AXIS_INFO_SPACEDIRECTION_BIT /* see below */ |
852 | | NRRD_AXIS_INFO_THICKNESS_BIT |
853 | | NRRD_AXIS_INFO_KIND_BIT)); |
854 | for (ai=0; ai<nin->dim; ai++) { |
855 | if (info->kernel[ai]) { |
856 | /* we do resample this axis */ |
857 | nout->axis[ai].spacing = nin->axis[ai].spacing/ratios[ai]; |
858 | /* no way to usefully update thickness: we could be doing blurring |
859 | but maintaining the number of samples: thickness increases, or |
860 | we could be downsampling, in which the relationship between the |
861 | sampled and the skipped regions of space becomes complicated: |
862 | no single scalar can represent it, or we could be upsampling, |
863 | in which the notion of "skip" could be rendered meaningless */ |
864 | nout->axis[ai].thickness = AIR_NAN; |
865 | nout->axis[ai].min = info->min[ai]; |
866 | nout->axis[ai].max = info->max[ai]; |
867 | /* |
868 | HEY: this is currently a bug: all this code was written long |
869 | before there were space directions, so min/max are always |
870 | set, regardless of whether there are incoming space directions |
871 | which then disallows output space directions on the same axes |
872 | _nrrdSpaceVecScale(nout->axis[ai].spaceDirection, |
873 | 1.0/ratios[ai], nin->axis[ai].spaceDirection); |
874 | */ |
875 | nout->axis[ai].kind = _nrrdKindAltered(nin->axis[ai].kind, AIR_TRUE); |
876 | } else { |
877 | /* this axis remains untouched */ |
878 | nout->axis[ai].min = nin->axis[ai].min; |
879 | nout->axis[ai].max = nin->axis[ai].max; |
880 | nout->axis[ai].spacing = nin->axis[ai].spacing; |
881 | nout->axis[ai].thickness = nin->axis[ai].thickness; |
882 | nout->axis[ai].kind = nin->axis[ai].kind; |
883 | } |
884 | } |
885 | /* HEY: need to create textual representation of resampling parameters */ |
886 | if (nrrdContentSet_va(nout, func, nin, "")) { |
887 | biffAddf(NRRD, "%s:", me); |
888 | return 1; |
889 | } |
890 | |
891 | /* copy the resampling final result into the output nrrd, maybe |
892 | rounding as we go to make sure that 254.9999 is saved as 255 |
893 | in uchar output, and maybe clamping as we go to insure that |
894 | integral results don't have unexpected wrap-around. */ |
895 | if (info->round) { |
896 | if (nrrdTypeInt == typeOut || |
897 | nrrdTypeUInt == typeOut || |
898 | nrrdTypeLLong == typeOut || |
899 | nrrdTypeULLong == typeOut) { |
900 | fprintf(stderr, "%s: WARNING: possible erroneous output with " |
901 | "rounding of %s output type due to int-based implementation " |
902 | "of rounding\n", me, airEnumStr(nrrdType, typeOut)); |
903 | } |
904 | doRound = nrrdTypeIsIntegral[typeOut]; |
905 | } else { |
906 | doRound = AIR_FALSE; |
907 | } |
908 | numOut = nrrdElementNumber(nout); |
909 | for (I=0; I<numOut; I++) { |
910 | tmpF = array[passes][I]; |
911 | if (doRound) { |
912 | tmpF = AIR_CAST(nrrdResample_t, AIR_ROUNDUP(tmpF)); |
913 | } |
914 | if (info->clamp) { |
915 | tmpF = nrrdDClamp[typeOut](tmpF); |
916 | } |
917 | nrrdDInsert[typeOut](nout->data, I, tmpF); |
918 | } |
919 | |
920 | if (nrrdBasicInfoCopy(nout, nin, |
921 | NRRD_BASIC_INFO_DATA_BIT |
922 | | NRRD_BASIC_INFO_TYPE_BIT |
923 | | NRRD_BASIC_INFO_BLOCKSIZE_BIT |
924 | | NRRD_BASIC_INFO_DIMENSION_BIT |
925 | | NRRD_BASIC_INFO_CONTENT_BIT |
926 | | NRRD_BASIC_INFO_COMMENTS_BIT |
927 | | (nrrdStateKeyValuePairsPropagate |
928 | ? 0 |
929 | : NRRD_BASIC_INFO_KEYVALUEPAIRS_BIT))) { |
930 | biffAddf(NRRD, "%s:", me); |
931 | return 1; |
932 | } |
933 | |
934 | /* enough already */ |
935 | airMopOkay(mop); |
936 | return 0; |
937 | } |
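
About the flagged line 824: inVec is a function-local pointer that is either re-pointed by the calloc() at line 749 on the next pass or goes out of scope after the last pass, so the NULL that airFree() returns and line 824 stores back into inVec is never read. The free itself is still needed, since the airMopSub() at line 821 removed the buffer from the mop. If silencing the dead-store warning were desired, one possible (unverified) minimal change would be to keep the free but drop the unused assignment:

  /* hypothetical replacement for line 824; the buffer is still released,
     only the never-read NULL store goes away */
  airFree(inVec);
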