Bug Summary

File: src/nrrd/reorder.c
Location: line 771, column 3
Description: Value stored to 'outnum' is never read
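
The diagnostic refers to the accumulation of 'outnum' inside nrrdJoin() (report lines 763-771): the product of the output sizes is computed, but nothing reads it afterwards, since nrrdMaybeAlloc_nva() derives the element count from size[] on its own. Below is a minimal, self-contained sketch of the pattern being flagged and of the usual remedy (delete the dead store, or actually consume the value); it is illustration only, not Teem code, and the names are hypothetical.

#include <assert.h>
#include <stddef.h>

/* element_count() mirrors the bookkeeping around reorder.c lines 763-771:
   a running product over the first dim-1 sizes, then one more multiply by
   the length of the slowest axis.  Returning (i.e. reading) the value is
   what keeps the final "outnum *= ..." from being a dead store. */
static size_t
element_count(const size_t *size, unsigned int dim, size_t lastLen) {
  size_t outnum = 1;
  unsigned int ai;

  for (ai = 0; ai + 1 < dim; ai++) {
    outnum *= size[ai];
  }
  outnum *= lastLen;   /* if outnum were never read after this point, clang
                          would report "Value stored to 'outnum' is never
                          read", as at reorder.c:771 */
  return outnum;
}

int
main(void) {
  size_t size[3] = {4, 5, 0};
  assert(element_count(size, 3, 6) == 4*5*6);
  return 0;
}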

Annotated Source Code

1/*
2 Teem: Tools to process and visualize scientific data and images.
3 Copyright (C) 2013, 2012, 2011, 2010, 2009 University of Chicago
4 Copyright (C) 2008, 2007, 2006, 2005 Gordon Kindlmann
5 Copyright (C) 2004, 2003, 2002, 2001, 2000, 1999, 1998 University of Utah
6
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public License
9 (LGPL) as published by the Free Software Foundation; either
10 version 2.1 of the License, or (at your option) any later version.
11 The terms of redistributing and/or modifying this software also
12 include exceptions to the LGPL that facilitate static linking.
13
14 This library is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 Lesser General Public License for more details.
18
19 You should have received a copy of the GNU Lesser General Public License
20 along with this library; if not, write to Free Software Foundation, Inc.,
21 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22*/
23
24#include "nrrd.h"
25#include "privateNrrd.h"
26
27/*
28******** nrrdInvertPerm()
29**
30** given an array (p) which represents a permutation of n elements,
31** compute the inverse permutation ip. The value of this function
32** is not so much its core functionality as all the error checking it
33** provides.
34*/
35int
36nrrdInvertPerm(unsigned int *invp, const unsigned int *pp, unsigned int nn) {
37 static const char me[]="nrrdInvertPerm";
38 int problem;
39 unsigned int ii;
40
41 if (!(invp && pp && nn > 0)) {
42 biffAddf(NRRDnrrdBiffKey, "%s: got NULL pointer or non-positive nn (%d)", me, nn);
43 return 1;
44 }
45
46 /* use the given array "invp" as a temp buffer for validity checking */
47 memset(invp, 0, nn*sizeof(unsigned int));
48 for (ii=0; ii<nn; ii++) {
49 if (!( pp[ii] <= nn-1)) {
50 biffAddf(NRRDnrrdBiffKey,
51 "%s: permutation element #%d == %d out of bounds [0,%d]",
52 me, ii, pp[ii], nn-1);
53 return 1;
54 }
55 invp[pp[ii]]++;
56 }
57 /* for some reason when this code was written (revision 2700 Sun Jul
58 3 04:18:33 2005 UTC) it was decided that all problems with the
59 permutation would be reported with a pile of error messages in
60 biff, rather than bailing at the first problem. Not clear if
61 this is a good idea. */
62 problem = AIR_FALSE0;
63 for (ii=0; ii<nn; ii++) {
64 if (1 != invp[ii]) {
65 biffAddf(NRRDnrrdBiffKey, "%s: element #%d mapped to %d times (should be once)",
66 me, ii, invp[ii]);
67 problem = AIR_TRUE1;
68 }
69 }
70 if (problem) {
71 return 1;
72 }
73
74 /* the skinny */
75 for (ii=0; ii<nn; ii++) {
76 invp[pp[ii]] = ii;
77 }
78
79 return 0;
80}
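
As a concrete illustration of the contract checked above (a hedged sketch, not part of the analyzed file; the <teem/nrrd.h> include path depends on how Teem is installed):

#include <teem/nrrd.h>

/* For p = {2, 0, 1}, where entry i names the input axis that lands at
   output position i, the inverse satisfies invp[p[i]] == i, giving
   invp = {1, 2, 0}.  A non-permutation input makes the call fail with
   the biff messages generated in the function above. */
static void
invert_example(void) {
  unsigned int p[3] = {2, 0, 1}, invp[3];
  if (nrrdInvertPerm(invp, p, 3)) {
    return;  /* error details are on the NRRD biff stack */
  }
  /* here invp[0] == 1, invp[1] == 2, invp[2] == 0 */
}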
81
82/*
83******** nrrdAxesInsert
84**
85** like reshape, but preserves axis information on old axes, and
86** this is only for adding a "stub" axis with length 1. All other
87** axis attributes are initialized as usual.
88*/
89int
90nrrdAxesInsert(Nrrd *nout, const Nrrd *nin, unsigned int axis) {
91 static const char me[]="nrrdAxesInsert", func[]="axinsert";
92 unsigned int ai;
93
94 if (!(nout && nin)) {
95 biffAddf(NRRDnrrdBiffKey, "%s: got NULL pointer", me);
96 return 1;
97 }
98 if (!( axis <= nin->dim )) {
99 biffAddf(NRRDnrrdBiffKey, "%s: given axis (%d) outside valid range [0, %d]",
100 me, axis, nin->dim);
101 return 1;
102 }
103 if (NRRD_DIM_MAX16 == nin->dim) {
104 biffAddf(NRRDnrrdBiffKey, "%s: given nrrd already at NRRD_DIM_MAX (%d)",
105 me, NRRD_DIM_MAX16);
106 return 1;
107 }
108 if (nout != nin) {
109 if (_nrrdCopy(nout, nin, (NRRD_BASIC_INFO_COMMENTS_BIT(1<<14)
110 | (nrrdStateKeyValuePairsPropagate
111 ? 0
112 : NRRD_BASIC_INFO_KEYVALUEPAIRS_BIT(1<<15))))) {
113 biffAddf(NRRDnrrdBiffKey, "%s:", me);
114 return 1;
115 }
116 }
117 nout->dim = 1 + nin->dim;
118 for (ai=nin->dim; ai>axis; ai--) {
119 _nrrdAxisInfoCopy(&(nout->axis[ai]), &(nin->axis[ai-1]),
120 NRRD_AXIS_INFO_NONE0);
121 }
122 /* the ONLY thing we can say about the new axis is its size */
123 _nrrdAxisInfoInit(&(nout->axis[axis]));
124 if (!nrrdStateKindNoop) {
125 /* except maybe the kind */
126 nout->axis[axis].kind = nrrdKindStub;
127 }
128 nout->axis[axis].size = 1;
129 if (nrrdContentSet_va(nout, func, nin, "%d", axis)) {
130 biffAddf(NRRDnrrdBiffKey, "%s:", me);
131 return 1;
132 }
133 /* all basic info has already been copied by nrrdCopy() above */
134 return 0;
135}
136
137/*
138******** nrrdAxesPermute
139**
140** changes the scanline ordering of the data in a nrrd
141**
142** The basic means by which data is moved around is with memcpy().
143** The goal is to call memcpy() as few times as possible, on memory
144** segments as large as possible. Currently, this is done by
145** detecting how many of the low-index axes are left untouched by
146** the permutation; this constitutes a "scanline" which can be
147** copied around as a unit. For permuting the y and z axes of a
148** matrix-x-y-z order matrix volume, this optimization produced a
149** factor of 5 speed up (exhaustive multi-platform tests, of course).
150**
151** The axes[] array determines the permutation of the axes.
152** axes[i] = j means: axis i in the output will be the input's axis j
153** (axes[i] answers: "what do I put here", from the standpoint of the output,
154** not "where do I put this", from the standpoint of the input)
155*/
156int
157nrrdAxesPermute(Nrrd *nout, const Nrrd *nin, const unsigned int *axes) {
158 static const char me[]="nrrdAxesPermute", func[]="permute";
159 char buff1[NRRD_DIM_MAX16*30], buff2[AIR_STRLEN_SMALL(128+1)];
160 size_t idxOut, idxInA=0, /* indices for input and output scanlines */
161 lineSize, /* size of block of memory which can be
162 moved contiguously from input to output,
163 thought of as a "scanline" */
164 numLines, /* how many "scanlines" there are to permute */
165 szIn[NRRD_DIM_MAX16], *lszIn,
166 szOut[NRRD_DIM_MAX16], *lszOut,
167 cIn[NRRD_DIM_MAX16],
168 cOut[NRRD_DIM_MAX16];
169 char *dataIn, *dataOut;
170 int axmap[NRRD_DIM_MAX16];
171 unsigned int
172 ai, /* running index along dimensions */
173 lowPax, /* lowest axis which is "p"ermutated */
174 ldim, /* nin->dim - lowPax */
175 ip[NRRD_DIM_MAX16+1], /* inverse of permutation in "axes" */
176 laxes[NRRD_DIM_MAX16+1]; /* copy of axes[], but shifted down by lowPax
177 elements, to remove i such that i == axes[i] */
178 airArray *mop;
179
180 mop = airMopNew();
181 if (!(nin && nout && axes)) {
182 biffAddf(NRRDnrrdBiffKey, "%s: got NULL pointer", me);
183 airMopError(mop); return 1;
184 }
185 /* we don't actually need ip[], computing it is for error checking */
186 if (nrrdInvertPerm(ip, axes, nin->dim)) {
187 biffAddf(NRRDnrrdBiffKey, "%s: couldn't compute axis permutation inverse", me);
188 airMopError(mop); return 1;
189 }
190 /* this shouldn't actually be necessary .. */
191 if (!nrrdElementSize(nin)) {
192 biffAddf(NRRDnrrdBiffKey, "%s: nrrd reports zero element size!", me);
193 airMopError(mop); return 1;
194 }
195
196 for (ai=0; ai<nin->dim && axes[ai] == ai; ai++)
197 ;
198 lowPax = ai;
199
200 /* allocate output by initial copy */
201 if (nout != nin) {
202 if (nrrdCopy(nout, nin)) {
203 biffAddf(NRRDnrrdBiffKey, "%s: trouble copying input", me);
204 airMopError(mop); return 1;
205 }
206 dataIn = (char*)nin->data;
207 } else {
208 dataIn = (char*)calloc(nrrdElementNumber(nin), nrrdElementSize(nin));
209 if (!dataIn) {
210 biffAddf(NRRDnrrdBiffKey, "%s: couldn't create local copy of data", me);
211 airMopError(mop); return 1;
212 }
213 airMopAdd(mop, dataIn, airFree, airMopAlways);
214 memcpy(dataIn, nin->data, nrrdElementNumber(nin)*nrrdElementSize(nin));
215 }
216 if (lowPax < nin->dim) {
217 /* if lowPax == nin->dim, then we were given the identity permutation, so
218 there's nothing to do other than the copy already done. Otherwise,
219 here we are (actually, lowPax < nin->dim-1) */
220 for (ai=0; ai<nin->dim; ai++) {
221 axmap[ai] = AIR_INT(axes[ai]);
222 }
223 nrrdAxisInfoGet_nva(nin, nrrdAxisInfoSize, szIn);
224 if (nrrdAxisInfoCopy(nout, nin, axmap, NRRD_AXIS_INFO_NONE0)) {
225 biffAddf(NRRDnrrdBiffKey, "%s:", me);
226 airMopError(mop); return 1;
227 }
228 nrrdAxisInfoGet_nva(nout, nrrdAxisInfoSize, szOut);
229 /* the skinny */
230 lineSize = 1;
231 for (ai=0; ai<lowPax; ai++) {
232 lineSize *= szIn[ai];
233 }
234 numLines = nrrdElementNumber(nin)/lineSize;
235 lineSize *= nrrdElementSize(nin);
236 lszIn = szIn + lowPax;
237 lszOut = szOut + lowPax;
238 ldim = nin->dim - lowPax;
239 memset(laxes, 0, sizeof(laxes));
240 for (ai=0; ai<ldim; ai++) {
241 laxes[ai] = axes[ai+lowPax]-lowPax;
242 }
243 dataOut = AIR_CAST(char *, nout->data);
244 memset(cIn, 0, sizeof(cIn));
245 memset(cOut, 0, sizeof(cOut));
246 for (idxOut=0; idxOut<numLines; idxOut++) {
247 /* in our representation of the coordinates of the start of the
248 scanlines that we're copying, we are not even storing all the
249 zeros in the coordinates prior to lowPax, and when we go to
250 a linear index for the memcpy(), we multiply by lineSize */
251 for (ai=0; ai<ldim; ai++) {
252 cIn[laxes[ai]] = cOut[ai];
253 }
254 NRRD_INDEX_GEN(idxInA, cIn, lszIn, ldim);
255 memcpy(dataOut + idxOut*lineSize, dataIn + idxInA*lineSize, lineSize);
256 NRRD_COORD_INCR(cOut, lszOut, ldim, 0);
257 }
258 /* set content */
259 strcpy(buff1, "");
260 for (ai=0; ai<nin->dim; ai++) {
261 sprintf(buff2, "%s%d", (ai ? "," : ""), axes[ai]);
262 strcat(buff1, buff2);
263 }
264 if (nrrdContentSet_va(nout, func, nin, "%s", buff1)) {
265 biffAddf(NRRDnrrdBiffKey, "%s:", me);
266 airMopError(mop); return 1;
267 }
268 if (nout != nin) {
269 if (nrrdBasicInfoCopy(nout, nin,
270 NRRD_BASIC_INFO_DATA_BIT(1<< 1)
271 | NRRD_BASIC_INFO_TYPE_BIT(1<< 2)
272 | NRRD_BASIC_INFO_BLOCKSIZE_BIT(1<< 3)
273 | NRRD_BASIC_INFO_DIMENSION_BIT(1<< 4)
274 | NRRD_BASIC_INFO_CONTENT_BIT(1<< 5)
275 | NRRD_BASIC_INFO_COMMENTS_BIT(1<<14)
276 | (nrrdStateKeyValuePairsPropagate
277 ? 0
278 : NRRD_BASIC_INFO_KEYVALUEPAIRS_BIT(1<<15)))) {
279 biffAddf(NRRDnrrdBiffKey, "%s:", me);
280 airMopError(mop); return 1;
281 }
282 }
283 }
284 airMopOkay(mop);
285 return 0;
286}
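
A short usage sketch of the axes[] convention documented above ("what do I put here", from the output's standpoint); the 3-D input and the installed <teem/nrrd.h> header are assumptions of the example:

#include <teem/nrrd.h>

/* Swap the Y and Z axes of a 3-D volume: output axis 1 takes input
   axis 2 and vice versa, while axis 0 (the lowest, untouched axis)
   becomes the "scanline" that nrrdAxesPermute() memcpy()s as a unit. */
static int
permute_example(Nrrd *nout, const Nrrd *nin) {
  unsigned int axes[3] = {0, 2, 1};
  return nrrdAxesPermute(nout, nin, axes);
}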
287
288/*
289******** nrrdShuffle
290**
291** rearranges hyperslices of a nrrd along a given axis according to
292** given permutation. This could be used on a 4D array,
293** representing a 3D volume of vectors, to re-order the vector
294** components.
295**
296** the given permutation array must be allocated to at least the length
297** of the input nrrd along the chosen axis. perm[j] = i means that the
298** value at position j in the _new_ array should come from position i
299** in the _old_ array. The standpoint is from the new, looking at
300** where to find the values amid the old array (perm answers "what do
301** I put here", not "where do I put this"). This allows multiple
302** positions in the new array to copy from the same old position, and
303** ensures that there is a source for all positions along the new
304** array.
305*/
306int
307nrrdShuffle(Nrrd *nout, const Nrrd *nin, unsigned int axis,
308 const size_t *perm) {
309 static const char me[]="nrrdShuffle", func[]="shuffle";
310 char buff2[AIR_STRLEN_SMALL(128+1)];
311 /* Sun Feb 8 13:13:58 CST 2009: There was a memory bug here caused
312 by using the same buff1[NRRD_DIM_MAX*30] declaration that had
313 worked fine for nrrdAxesPermute and nrrdReshape, but does NOT
314 work here because now samples along an axis are re-ordered, not
315 axes, so it's often not allocated long enough to hold the
316 string that's printed to it. Ideally there'd be another argument
317 that says whether to document the shuffle in the content string,
318 which would mean an API change. Or, we can use a secret
319 heuristic (or maybe later a nrrdState variable) for determining
320 when an axis is short enough to make documenting the shuffle
321 interesting. This is useful since functions like nrrdFlip()
322 probably do *not* need the shuffle (the sample reversal) to be
323 documented for long axes */
324#define LONGEST_INTERESTING_AXIS 42
325 char buff1[LONGEST_INTERESTING_AXIS*30];
326 unsigned int ai, ldim, len;
327 size_t idxInB=0, idxOut, lineSize, numLines, size[NRRD_DIM_MAX16], *lsize,
328 cIn[NRRD_DIM_MAX16+1], cOut[NRRD_DIM_MAX16+1];
329 char *dataIn, *dataOut;
330
331 if (!(nin && nout && perm)) {
332 biffAddf(NRRDnrrdBiffKey, "%s: got NULL pointer", me);
333 return 1;
334 }
335 if (nout == nin) {
336 biffAddf(NRRDnrrdBiffKey, "%s: nout==nin disallowed", me);
337 return 1;
338 }
339 if (!( axis < nin->dim )) {
340 biffAddf(NRRDnrrdBiffKey, "%s: axis %d outside valid range [0,%d]",
341 me, axis, nin->dim-1);
342 return 1;
343 }
344 len = AIR_CAST(unsigned int, nin->axis[axis].size);
345 for (ai=0; ai<len; ai++) {
346 if (!( perm[ai] < len )) {
347 char stmp[AIR_STRLEN_SMALL(128+1)];
348 biffAddf(NRRDnrrdBiffKey, "%s: perm[%d] (%s) outside valid range [0,%d]", me, ai,
349 airSprintSize_t(stmp, perm[ai]), len-1);
350 return 1;
351 }
352 }
353 /* this shouldn't actually be necessary .. */
354 if (!nrrdElementSize(nin)) {
355 biffAddf(NRRDnrrdBiffKey, "%s: nrrd reports zero element size!", me);
356 return 1;
357 }
358 /* set information in new volume */
359 nout->blockSize = nin->blockSize;
360 nrrdAxisInfoGet_nva(nin, nrrdAxisInfoSize, size);
361 if (nrrdMaybeAlloc_nva(nout, nin->type, nin->dim, size)) {
362 biffAddf(NRRDnrrdBiffKey, "%s: failed to allocate output", me);
363 return 1;
364 }
365 if (nrrdAxisInfoCopy(nout, nin, NULL, NRRD_AXIS_INFO_NONE0)) {
366 biffAddf(NRRDnrrdBiffKey, "%s:", me);
367 return 1;
368 }
369 /* the min and max along the shuffled axis are now meaningless */
370 nout->axis[axis].min = nout->axis[axis].max = AIR_NAN;
371 /* do the safe thing first */
372 nout->axis[axis].kind = _nrrdKindAltered(nin->axis[axis].kind, AIR_FALSE0);
373 /* try cleverness */
374 if (!nrrdStateKindNoop) {
375 if (0 == nrrdKindSize(nin->axis[axis].kind)
376 || nrrdKindStub == nin->axis[axis].kind
377 || nrrdKindScalar == nin->axis[axis].kind
378 || nrrdKind2Vector == nin->axis[axis].kind
379 || nrrdKind3Color == nin->axis[axis].kind
380 || nrrdKind4Color == nin->axis[axis].kind
381 || nrrdKind3Vector == nin->axis[axis].kind
382 || nrrdKind3Gradient == nin->axis[axis].kind
383 || nrrdKind3Normal == nin->axis[axis].kind
384 || nrrdKind4Vector == nin->axis[axis].kind) {
385 /* these kinds have no intrinsic ordering */
386 nout->axis[axis].kind = nin->axis[axis].kind;
387 }
388 }
389 /* the skinny */
390 lineSize = 1;
391 for (ai=0; ai<axis; ai++) {
392 lineSize *= nin->axis[ai].size;
393 }
394 numLines = nrrdElementNumber(nin)/lineSize;
395 lineSize *= nrrdElementSize(nin);
396 lsize = size + axis;
397 ldim = nin->dim - axis;
398 dataIn = AIR_CAST(char *, nin->data);
399 dataOut = AIR_CAST(char *, nout->data);
400 memset(cIn, 0, sizeof(cIn));
401 memset(cOut, 0, sizeof(cOut));
402 for (idxOut=0; idxOut<numLines; idxOut++) {
403 memcpy(cIn, cOut, sizeof(cIn));
404 cIn[0] = perm[cOut[0]];
405 NRRD_INDEX_GEN(idxInB, cIn, lsize, ldim);
406 NRRD_INDEX_GEN(idxOut, cOut, lsize, ldim);
407 memcpy(dataOut + idxOut*lineSize, dataIn + idxInB*lineSize, lineSize);
408 NRRD_COORD_INCR(cOut, lsize, ldim, 0);
409 }
410 /* Set content. The LONGEST_INTERESTING_AXIS hack avoids the
411 previous array out-of-bounds bug */
412 if (len <= LONGEST_INTERESTING_AXIS) {
413 strcpy(buff1, "");
414 for (ai=0; ai<len; ai++) {
415 char stmp[AIR_STRLEN_SMALL(128+1)];
416 sprintf(buff2, "%s%s", (ai ? "," : ""), airSprintSize_t(stmp, perm[ai]));
417 strcat(buff1, buff2);
418 }
419 if (nrrdContentSet_va(nout, func, nin, "%s", buff1)) {
420 biffAddf(NRRDnrrdBiffKey, "%s:", me);
421 return 1;
422 }
423 } else {
424 if (nrrdContentSet_va(nout, func, nin, "")) {
425 biffAddf(NRRDnrrdBiffKey, "%s:", me);
426 return 1;
427 }
428 }
429 if (nrrdBasicInfoCopy(nout, nin,
430 NRRD_BASIC_INFO_DATA_BIT(1<< 1)
431 | NRRD_BASIC_INFO_TYPE_BIT(1<< 2)
432 | NRRD_BASIC_INFO_BLOCKSIZE_BIT(1<< 3)
433 | NRRD_BASIC_INFO_DIMENSION_BIT(1<< 4)
434 | NRRD_BASIC_INFO_CONTENT_BIT(1<< 5)
435 | NRRD_BASIC_INFO_COMMENTS_BIT(1<<14)
436 | (nrrdStateKeyValuePairsPropagate
437 ? 0
438 : NRRD_BASIC_INFO_KEYVALUEPAIRS_BIT(1<<15)))) {
439 biffAddf(NRRDnrrdBiffKey, "%s:", me);
440 return 1;
441 }
442
443 return 0;
444#undef LONGEST_INTERESTING_AXIS
445}
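
A sketch of the perm[] convention described in the comment above nrrdShuffle() (perm answers "what do I put here"); the 3-component axis-0 layout is an assumption of the example:

#include <teem/nrrd.h>

/* Reverse the 3 values stored along axis 0 (e.g. RGB -> BGR): output
   position 0 is filled from input position 2, and so on. */
static int
shuffle_example(Nrrd *nout, const Nrrd *nin) {
  size_t perm[3] = {2, 1, 0};
  return nrrdShuffle(nout, nin, 0 /* axis */, perm);
}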
446
447/* ---- BEGIN non-NrrdIO */
448
449
450/*
451******** nrrdAxesSwap()
452**
453** for when you just want to switch the order of two axes, without
454** going through the trouble of creating the permutation array
455** needed to call nrrdAxesPermute()
456*/
457int
458nrrdAxesSwap(Nrrd *nout, const Nrrd *nin, unsigned int ax1, unsigned int ax2) {
459 static const char me[]="nrrdAxesSwap", func[]="swap";
460 unsigned int ai, axmap[NRRD_DIM_MAX16];
461
462 if (!(nout && nin)) {
463 biffAddf(NRRDnrrdBiffKey, "%s: got NULL pointer", me);
464 return 1;
465 }
466 if (!( ax1 < nin->dim && ax2 < nin->dim )) {
467 biffAddf(NRRDnrrdBiffKey, "%s: ax1 (%d) or ax2 (%d) out of bounds [0,%d]",
468 me, ax1, ax2, nin->dim-1);
469 return 1;
470 }
471
472 for (ai=0; ai<nin->dim; ai++) {
473 axmap[ai] = ai;
474 }
475 axmap[ax2] = ax1;
476 axmap[ax1] = ax2;
477 if (nrrdAxesPermute(nout, nin, axmap)
478 || nrrdContentSet_va(nout, func, nin, "%d,%d", ax1, ax2)) {
479 biffAddf(NRRDnrrdBiffKey, "%s:", me);
480 return 1;
481 }
482 /* basic info already copied by nrrdAxesPermute */
483 return 0;
484}
485
486/*
487******** nrrdFlip()
488**
489** reverse the order of slices along the given axis.
490** Actually, just a wrapper around nrrdShuffle() (with some
491** extra setting of axis info)
492*/
493int
494nrrdFlip(Nrrd *nout, const Nrrd *nin, unsigned int axis) {
495 static const char me[]="nrrdFlip", func[]="flip";
496 size_t *perm, si;
497 airArray *mop;
498 unsigned int axisIdx;
499
500 mop = airMopNew();
501 if (!(nout && nin)) {
502 biffAddf(NRRDnrrdBiffKey, "%s: got NULL pointer", me);
503 airMopError(mop); return 1;
504 }
505 if (!( axis < nin->dim )) {
506 biffAddf(NRRDnrrdBiffKey, "%s: given axis (%d) is outside valid range ([0,%d])",
507 me, axis, nin->dim-1);
508 airMopError(mop); return 1;
509 }
510 if (!(perm = (size_t*)calloc(nin->axis[axis].size, sizeof(size_t)))) {
511 biffAddf(NRRDnrrdBiffKey, "%s: couldn't alloc permutation array", me);
512 airMopError(mop); return 1;
513 }
514 airMopAdd(mop, perm, airFree, airMopAlways);
515 for (si=0; si<nin->axis[axis].size; si++) {
516 perm[si] = nin->axis[axis].size-1-si;
517 }
518 /* nrrdBasicInfoCopy called by nrrdShuffle() */
519 if (nrrdShuffle(nout, nin, axis, perm)
520 || nrrdContentSet_va(nout, func, nin, "%d", axis)) {
521 biffAddf(NRRDnrrdBiffKey, "%s:", me);
522 airMopError(mop); return 1;
523 }
524 _nrrdAxisInfoCopy(&(nout->axis[axis]), &(nin->axis[axis]),
525 NRRD_AXIS_INFO_SIZE_BIT(1<< 1)
526 | NRRD_AXIS_INFO_KIND_BIT(1<< 8));
527 /* HEY: (Tue Jan 18 00:28:26 EST 2005) there's a basic question to
528 be answered here: do we want to keep the "location" of the
529 samples fixed, while changing their ordering, or do want to flip
530 the location of the samples? In the former, the position
531 information has to be flipped to cancel the flipping of the
532 sample order, so that samples maintain location. In the latter,
533 the position information is copied verbatim from the original. */
534 /* (Tue Sep 13 09:59:12 EDT 2005) answer: we keep the "location" of
535 the samples fixed, while changing their ordering. This is the
536 low-level thing to do, so for a nrrd function, it's the right thing
537 to do. You don't need a nrrd function to simply manipulate
538 per-axis meta-information */
539 nout->axis[axis].min = nin->axis[axis].max;
540 nout->axis[axis].max = nin->axis[axis].min;
541 /* HEY: Fri Jan 14 02:53:30 EST 2005: isn't spacing supposed to be
542 the step from one sample to the next? So it's a signed quantity.
543 If min and max can be flipped (so min > max), then spacing can
544 be negative, right? */
545 nout->axis[axis].spacing = -nin->axis[axis].spacing;
546 /* HEY: Fri Jan 14 02:53:30 EST 2005: but not thickness */
547 nout->axis[axis].thickness = nin->axis[axis].thickness;
548 /* need to set general orientation info too */
549 for (axisIdx=0; axisIdx<NRRD_SPACE_DIM_MAX8; axisIdx++) {
550 nout->axis[axis].spaceDirection[axisIdx] =
551 -nin->axis[axis].spaceDirection[axisIdx];
552 }
553 /* modify origin only if we flipped a spatial axis */
554 if (AIR_EXISTS(nin->axis[axis].spaceDirection[0])) {
555 nrrdSpaceVecScaleAdd2(nout->spaceOrigin,
556 1.0,
557 nin->spaceOrigin,
558 AIR_CAST(double, nin->axis[axis].size-1),
559 nin->axis[axis].spaceDirection);
560 } else {
561 nrrdSpaceVecCopy(nout->spaceOrigin, nin->spaceOrigin);
562 }
563 airMopOkay(mop);
564 return 0;
565}
566
567/*
568**
569** NOTE: this seems to destroy all space/orientation info. What
570** should be done?
571*/
572int
573nrrdJoin(Nrrd *nout, const Nrrd *const *nin, unsigned int ninNum,
574 unsigned int axis, int incrDim) {
575 static const char me[]="nrrdJoin";
576 unsigned int ni, ai, mindim, maxdim, outdim,
577 permute[NRRD_DIM_MAX16], ipermute[NRRD_DIM_MAX16];
578 int diffdim, axmap[NRRD_DIM_MAX16];
579 size_t outlen, outnum, chunksize, size[NRRD_DIM_MAX16];
580 char *dataPerm;
581 Nrrd *ntmpperm, /* axis-permuted version of output */
582 **ninperm;
583 airArray *mop;
584 char stmp[2][AIR_STRLEN_SMALL(128+1)];
585
586 /* error checking */
587 if (!(nout && nin)) {
588 biffAddf(NRRDnrrdBiffKey, "%s: got NULL pointer", me);
589 return 1;
590 }
591 if (!(ninNum >= 1)) {
592 biffAddf(NRRDnrrdBiffKey, "%s: ninNum (%d) must be >= 1", me, ninNum);
593 return 1;
594 }
595 for (ni=0; ni<ninNum; ni++) {
596 if (!(nin[ni])) {
597 biffAddf(NRRDnrrdBiffKey, "%s: input nrrd #%d NULL", me, ni);
598 return 1;
599 }
600 if (nout==nin[ni]) {
601 biffAddf(NRRDnrrdBiffKey, "%s: nout==nin[%d] disallowed", me, ni);
602 return 1;
603 }
604 }
605
606 mop = airMopNew();
607 ninperm = AIR_CALLOC(ninNum, Nrrd *);
608 if (!(ninperm)) {
609 biffAddf(NRRDnrrdBiffKey, "%s: couldn't calloc() temp nrrd array", me);
610 airMopError(mop); return 1;
611 }
612 airMopAdd(mop, ninperm, airFree, airMopAlways);
613
614 maxdim = mindim = nin[0]->dim;
615 for (ni=0; ni<ninNum; ni++) {
616 mindim = AIR_MIN(mindim, nin[ni]->dim);
617 maxdim = AIR_MAX(maxdim, nin[ni]->dim);
618 }
619 diffdim = maxdim - mindim;
620 if (diffdim > 1) {
621 biffAddf(NRRDnrrdBiffKey, "%s: will only reshape up one dimension (not %d)",
622 me, diffdim);
623 airMopError(mop); return 1;
624 }
625 if (axis > maxdim) {
626 biffAddf(NRRDnrrdBiffKey, "%s: can't join along axis %d with highest input dim = %d",
627 me, axis, maxdim);
628 airMopError(mop); return 1;
629 }
630
631 /* figure out dimension of output (outdim) */
632 if (diffdim) {
633 /* case A: (example) 2D slices and 3D slabs are being joined
634 together to make a bigger 3D volume */
635 outdim = maxdim;
636 } else {
637 /* diffdim == 0 */
638 if (axis == maxdim) {
639 /* case B: this is like the old "stitch": a bunch of equal-sized
640 slices of dimension N are being stacked together to make an
641 N+1 dimensional volume, which is essentially just the result of
642 concatenating the memory of individual inputs */
643 outdim = maxdim + 1;
644 } else {
645 /* case C: axis < maxdim; maxdim == mindim */
646 /* case C1 (!incrDim): a bunch of N-D slabs are being joined
647 together to make a bigger N-D volume. The axis along which
648 they are being joined could be any of existing axes (from 0
649 to maxdim-1) */
650 /* case C2 (incrDim): this is also a "stitch", but the new axis
651 created by the stitching is inserted into the existing
652 axes. (ex: stitch 3 PGMs (R, G, B) together into a PPM (with
653 color on axis zero)) */
654 outdim = maxdim + !!incrDim;
655 }
656 }
657 if (outdim > NRRD_DIM_MAX16) {
658 biffAddf(NRRDnrrdBiffKey, "%s: output dimension (%d) exceeds NRRD_DIM_MAX (%d)",
659 me, outdim, NRRD_DIM_MAX16);
660 airMopError(mop); return 1;
661 }
662
663 /* do tacit reshaping, and possibly permuting, as needed */
664 for (ai=0; ai<outdim; ai++) {
665 permute[ai] = (ai < axis
666 ? ai
667 : (ai < outdim-1
668 ? ai + 1
669 : axis));
670 /* fprintf(stderr, "!%s: 1st permute[%d] = %d\n", me, ai, permute[ai]); */
671 }
672 for (ni=0; ni<ninNum; ni++) {
673 ninperm[ni] = nrrdNew();
674 diffdim = outdim - nin[ni]->dim;
675 /* fprintf(stderr, "!%s: ni = %d ---> diffdim = %d\n", me, ni, diffdim); */
676 if (diffdim) {
677 /* we do a tacit reshaping, which actually includes
678 a tacit permuting, so we don't have to call permute
679 on the parts that don't actually need it */
680 /* NB: we register nrrdNix, not nrrdNuke */
681 /* fprintf(stderr, "!%s: %d: tacit reshape/permute\n", me, ni); */
682 airMopAdd(mop, ninperm[ni], (airMopper)nrrdNix, airMopAlways);
683 nrrdAxisInfoGet_nva(nin[ni], nrrdAxisInfoSize, size);
684 for (ai=nin[ni]->dim-1; ai>=mindim+1; ai--) {
685 size[ai] = size[ai-1];
686 }
687 size[mindim] = 1;
688 /* this may be done needlessly often */
689 for (ai=0; ai<=nin[ni]->dim; ai++) {
690 if (ai < mindim) {
691 axmap[ai] = ai;
692 } else if (ai > mindim) {
693 axmap[ai] = ai-1;
694 } else {
695 axmap[ai] = -1;
696 }
697 }
698 /* we don't have to actually call nrrdReshape(): we just nrrdWrap()
699 the input data with the reshaped size array */
700 if (nrrdWrap_nva(ninperm[ni], nin[ni]->data, nin[ni]->type,
701 nin[ni]->dim+1, size)) {
702 biffAddf(NRRDnrrdBiffKey, "%s: trouble creating interm. version of nrrd %d",
703 me, ni);
704 airMopError(mop); return 1;
705 }
706 nrrdAxisInfoCopy(ninperm[ni], nin[ni], axmap,
707 (NRRD_AXIS_INFO_SIZE_BIT(1<< 1)
708 /* HEY: this is being nixed because I can't think
709 of a sane way of keeping it consistent */
710 | NRRD_AXIS_INFO_SPACEDIRECTION_BIT(1<< 6)));
711 } else {
712 /* on this part, we permute (no need for a reshape) */
713 airMopAdd(mop, ninperm[ni], (airMopper)nrrdNuke, airMopAlways);
714 if (nrrdAxesPermute(ninperm[ni], nin[ni], permute)) {
715 biffAddf(NRRDnrrdBiffKey, "%s: trouble permuting input part %d", me, ni);
716 airMopError(mop); return 1;
717 }
718 }
719 }
720
721 /* make sure all parts are compatible in type and shape,
722 determine length of final output along axis (outlen) */
723 outlen = 0;
724 for (ni=0; ni<ninNum; ni++) {
725 if (ninperm[ni]->type != ninperm[0]->type) {
726 biffAddf(NRRDnrrdBiffKey, "%s: type (%s) of part %d unlike first's (%s)",
727 me, airEnumStr(nrrdType, ninperm[ni]->type),
728 ni, airEnumStr(nrrdType, ninperm[0]->type));
729 airMopError(mop); return 1;
730 }
731 if (nrrdTypeBlock == ninperm[0]->type) {
732 if (ninperm[ni]->blockSize != ninperm[0]->blockSize) {
733 biffAddf(NRRDnrrdBiffKey, "%s: blockSize (%s) of part %d != first's (%s)", me,
734 airSprintSize_t(stmp[0], ninperm[ni]->blockSize), ni,
735 airSprintSize_t(stmp[1], ninperm[0]->blockSize));
736 airMopError(mop); return 1;
737 }
738 }
739 if (!nrrdElementSize(ninperm[ni])) {
740 biffAddf(NRRDnrrdBiffKey, "%s: got wacky elements size (%s) for part %d", me,
741 airSprintSize_t(stmp[0], nrrdElementSize(ninperm[ni])), ni);
742 airMopError(mop); return 1;
743 }
744
745 /* fprintf(stderr, "!%s: part %03d shape: ", me, ni); */
746 for (ai=0; ai<outdim-1; ai++) {
747 /* fprintf(stderr, "%03u ", (unsigned int)ninperm[ni]->axis[ai].size);*/
748 if (ninperm[ni]->axis[ai].size != ninperm[0]->axis[ai].size) {
749 biffAddf(NRRDnrrdBiffKey, "%s: axis %d size (%s) of part %d != first's (%s)", me,
750 ai, airSprintSize_t(stmp[0], ninperm[ni]->axis[ai].size),
751 ni, airSprintSize_t(stmp[1], ninperm[0]->axis[ai].size));
752 airMopError(mop); return 1;
753 }
754 }
755 /*
756 fprintf(stderr, "%03u\n", (unsigned int)ninperm[ni]->axis[outdim-1].size);
757 */
758 outlen += ninperm[ni]->axis[outdim-1].size;
759 }
760 /* fprintf(stderr, "!%s: outlen = %u\n", me, (unsigned int)outlen); */
761
762 /* allocate temporary nrrd and concat input into it */
763 outnum = 1;
764 if (outdim > 1) {
765 for (ai=0; ai<outdim-1; ai++) {
766 size[ai] = ninperm[0]->axis[ai].size;
767 outnum *= size[ai];
768 }
769 }
770 size[outdim-1] = outlen;
771 outnum *= size[outdim-1];
Value stored to 'outnum' is never read
772 if (nrrdMaybeAlloc_nva(ntmpperm = nrrdNew(), ninperm[0]->type,
773 outdim, size)) {
774 biffAddf(NRRDnrrdBiffKey, "%s: trouble allocating permutation nrrd", me);
775 airMopError(mop); return 1;
776 }
777 airMopAdd(mop, ntmpperm, (airMopper)nrrdNuke, airMopAlways);
778 dataPerm = AIR_CAST(char *, ntmpperm->data);
779 for (ni=0; ni<ninNum; ni++) {
780 /* here is where the actual joining happens */
781 chunksize = nrrdElementNumber(ninperm[ni])*nrrdElementSize(ninperm[ni]);
782 memcpy(dataPerm, ninperm[ni]->data, chunksize);
783 dataPerm += chunksize;
784 }
785
786 /* copy other axis-specific fields from nin[0] to ntmpperm */
787 for (ai=0; ai<outdim-1; ai++) {
788 axmap[ai] = ai;
789 }
790 axmap[outdim-1] = -1;
791 nrrdAxisInfoCopy(ntmpperm, ninperm[0], axmap,
792 (NRRD_AXIS_INFO_NONE0
793 /* HEY: this is being nixed because I can't think
794 of a sane way of keeping it consistent */
795 | NRRD_AXIS_INFO_SPACEDIRECTION_BIT(1<< 6)));
796 ntmpperm->axis[outdim-1].size = outlen;
797
798 /* do the permutation required to get output in right order */
799 if (nrrdInvertPerm(ipermute, permute, outdim)
800 || nrrdAxesPermute(nout, ntmpperm, ipermute)) {
801 biffAddf(NRRDnrrdBiffKey, "%s: error permuting temporary nrrd", me);
802 airMopError(mop); return 1;
803 }
804 /* basic info is either already set or invalidated by joining */
805
806 /* HEY: set content on output! */
807
808 airMopOkay(mop);
809 return 0;
810}
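
A sketch of the "case B" stitch described in the comments above: stacking equally-sized 2-D slices along a new slowest axis to make a 3-D volume. The slice array and its 2-D shape are assumptions of the example:

#include <teem/nrrd.h>

/* Join sliceNum 2-D slices along axis 2; because the join axis equals the
   input dimension, the output is 3-D and incrDim plays no role here. */
static int
join_example(Nrrd *nout, const Nrrd *const *slice, unsigned int sliceNum) {
  return nrrdJoin(nout, slice, sliceNum, 2 /* axis */, AIR_FALSE /* incrDim */);
}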
811
812/*
813******** nrrdAxesSplit
814**
815** like reshape, but only for splitting one axis into a fast and slow part.
816*/
817int
818nrrdAxesSplit(Nrrd *nout, const Nrrd *nin,
819 unsigned int saxi, size_t sizeFast, size_t sizeSlow) {
820 static const char me[]="nrrdAxesSplit", func[]="axsplit";
821 unsigned int ai;
822
823 if (!(nout && nin)) {
824 biffAddf(NRRDnrrdBiffKey, "%s: got NULL pointer", me);
825 return 1;
826 }
827 if (!( saxi <= nin->dim-1 )) {
828 biffAddf(NRRDnrrdBiffKey, "%s: given axis (%d) outside valid range [0, %d]",
829 me, saxi, nin->dim-1);
830 return 1;
831 }
832 if (NRRD_DIM_MAX16 == nin->dim) {
833 biffAddf(NRRDnrrdBiffKey, "%s: given nrrd already at NRRD_DIM_MAX (%d)",
834 me, NRRD_DIM_MAX16);
835 return 1;
836 }
837 if (!(sizeFast*sizeSlow == nin->axis[saxi].size)) {
838 char stmp[4][AIR_STRLEN_SMALL(128+1)];
839 biffAddf(NRRDnrrdBiffKey, "%s: # samples along axis %d (%s) != "
840 "product of fast and slow sizes (%s * %s = %s)", me, saxi,
841 airSprintSize_t(stmp[0], nin->axis[saxi].size),
842 airSprintSize_t(stmp[1], sizeFast),
843 airSprintSize_t(stmp[2], sizeSlow),
844 airSprintSize_t(stmp[3], sizeFast*sizeSlow));
845 return 1;
846 }
847 if (nout != nin) {
848 if (_nrrdCopy(nout, nin, (NRRD_BASIC_INFO_COMMENTS_BIT(1<<14)
849 | (nrrdStateKeyValuePairsPropagate
850 ? 0
851 : NRRD_BASIC_INFO_KEYVALUEPAIRS_BIT(1<<15))))) {
852 biffAddf(NRRDnrrdBiffKey, "%s:", me);
853 return 1;
854 }
855 }
856 nout->dim = 1 + nin->dim;
857 for (ai=nin->dim-1; ai>=saxi+1; ai--) {
858 _nrrdAxisInfoCopy(&(nout->axis[ai+1]), &(nin->axis[ai]),
859 NRRD_AXIS_INFO_NONE0);
860 }
861 /* the ONLY thing we can say about the new axes are their sizes */
862 _nrrdAxisInfoInit(&(nout->axis[saxi]));
863 _nrrdAxisInfoInit(&(nout->axis[saxi+1]));
864 nout->axis[saxi].size = sizeFast;
865 nout->axis[saxi+1].size = sizeSlow;
866 if (nrrdContentSet_va(nout, func, nin, "%d,%d,%d",
867 saxi, sizeFast, sizeSlow)) {
868 biffAddf(NRRDnrrdBiffKey, "%s:", me);
869 return 1;
870 }
871 /* all basic information already copied by nrrdCopy */
872 return 0;
873}
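
A sketch of the split: an axis of length 12 becomes a fast length-3 part followed by a slow length-4 part (the two sizes must multiply to the original length, as checked above); the input shape is an assumption of the example:

#include <teem/nrrd.h>

/* Split axis 0 (assumed length 12) into fast=3 and slow=4; axis 0 keeps
   the fast part and a new axis 1 holds the slow part. */
static int
split_example(Nrrd *nout, const Nrrd *nin) {
  return nrrdAxesSplit(nout, nin, 0 /* saxi */, (size_t)3, (size_t)4);
}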
874
875/*
876******** nrrdAxesDelete
877**
878** like reshape, but preserves axis information on old axes, and
879** this is only for removing a "stub" axis with length 1.
880*/
881int
882nrrdAxesDelete(Nrrd *nout, const Nrrd *nin, unsigned int daxi) {
883 static const char me[]="nrrdAxesDelete", func[]="axdelete";
884 unsigned int ai;
885
886 if (!(nout && nin)) {
887 biffAddf(NRRDnrrdBiffKey, "%s: got NULL pointer", me);
888 return 1;
889 }
890 if (!( daxi < nin->dim )) {
891 biffAddf(NRRDnrrdBiffKey, "%s: given axis (%d) outside valid range [0, %d]",
892 me, daxi, nin->dim-1);
893 return 1;
894 }
895 if (1 == nin->dim) {
896 biffAddf(NRRDnrrdBiffKey, "%s: given nrrd already at lowest dimension (1)", me);
897 return 1;
898 }
899 if (1 != nin->axis[daxi].size) {
900 char stmp[AIR_STRLEN_SMALL(128+1)];
901 biffAddf(NRRDnrrdBiffKey, "%s: size along axis %d is %s, not 1", me, daxi,
902 airSprintSize_t(stmp, nin->axis[daxi].size));
903 return 1;
904 }
905 if (nout != nin) {
906 if (_nrrdCopy(nout, nin, (NRRD_BASIC_INFO_COMMENTS_BIT(1<<14)
907 | (nrrdStateKeyValuePairsPropagate
908 ? 0
909 : NRRD_BASIC_INFO_KEYVALUEPAIRS_BIT(1<<15))))) {
910 biffAddf(NRRDnrrdBiffKey, "%s:", me);
911 return 1;
912 }
913 }
914 for (ai=daxi; ai<nin->dim-1; ai++) {
915 _nrrdAxisInfoCopy(&(nout->axis[ai]), &(nin->axis[ai+1]),
916 NRRD_AXIS_INFO_NONE0);
917 }
918 nout->dim = nin->dim - 1;
919 if (nrrdContentSet_va(nout, func, nin, "%d", daxi)) {
920 biffAddf(NRRDnrrdBiffKey, "%s:", me);
921 return 1;
922 }
923 /* all basic information already copied by nrrdCopy */
924 return 0;
925}
926
927/*
928******** nrrdAxesMerge
929**
930** like reshape, but preserves axis information on old axes
931** merges axis ax and ax+1 into one
932*/
933int
934nrrdAxesMerge(Nrrd *nout, const Nrrd *nin, unsigned int maxi) {
935 static const char me[]="nrrdAxesMerge", func[]="axmerge";
936 unsigned int ai;
937 size_t sizeFast, sizeSlow;
938
939 if (!(nout && nin)) {
940 biffAddf(NRRDnrrdBiffKey, "%s: got NULL pointer", me);
941 return 1;
942 }
943 if (!( maxi < nin->dim-1 )) {
944 biffAddf(NRRDnrrdBiffKey, "%s: given axis (%d) outside valid range [0, %d]",
945 me, maxi, nin->dim-2);
946 return 1;
947 }
948 if (1 == nin->dim) {
949 biffAddf(NRRDnrrdBiffKey, "%s: given nrrd already at lowest dimension (1)", me);
950 return 1;
951 }
952 if (nout != nin) {
953 if (_nrrdCopy(nout, nin, (NRRD_BASIC_INFO_COMMENTS_BIT(1<<14)
954 | (nrrdStateKeyValuePairsPropagate
955 ? 0
956 : NRRD_BASIC_INFO_KEYVALUEPAIRS_BIT(1<<15))))) {
957 biffAddf(NRRDnrrdBiffKey, "%s:", me);
958 return 1;
959 }
960 }
961 sizeFast = nin->axis[maxi].size;
962 sizeSlow = nin->axis[maxi+1].size;
963 nout->dim = nin->dim - 1;
964 for (ai=maxi+1; ai<nout->dim; ai++) {
965 _nrrdAxisInfoCopy(&(nout->axis[ai]), &(nin->axis[ai+1]),
966 NRRD_AXIS_INFO_NONE0);
967 }
968 /* the ONLY thing we can say about the new axis is its size */
969 _nrrdAxisInfoInit(&(nout->axis[maxi]));
970 nout->axis[maxi].size = sizeFast*sizeSlow;
971 if (nrrdContentSet_va(nout, func, nin, "%d", maxi)) {
972 biffAddf(NRRDnrrdBiffKey, "%s:", me);
973 return 1;
974 }
975 /* all basic information already copied by nrrdCopy */
976 return 0;
977}
978
979/*
980******** nrrdReshape_nva()
981**
982*/
983int
984nrrdReshape_nva(Nrrd *nout, const Nrrd *nin,
985 unsigned int dim, const size_t *size) {
986 static const char me[]="nrrdReshape_nva", func[]="reshape";
987 char buff1[NRRD_DIM_MAX16*30], buff2[AIR_STRLEN_SMALL(128+1)];
988 size_t numOut;
989 unsigned int ai;
990 char stmp[2][AIR_STRLEN_SMALL(128+1)];
991
992 if (!(nout && nin && size)) {
993 biffAddf(NRRDnrrdBiffKey, "%s: got NULL pointer", me);
994 return 1;
995 }
996 if (!(AIR_IN_CL(1, dim, NRRD_DIM_MAX))) {
997 biffAddf(NRRDnrrdBiffKey, "%s: given dimension (%d) outside valid range [1,%d]",
998 me, dim, NRRD_DIM_MAX16);
999 return 1;
1000 }
1001 if (_nrrdSizeCheck(size, dim, AIR_TRUE1)) {
1002 biffAddf(NRRDnrrdBiffKey, "%s:", me);
1003 return 1;
1004 }
1005 numOut = 1;
1006 for (ai=0; ai<dim; ai++) {
1007 numOut *= size[ai];
1008 }
1009 if (numOut != nrrdElementNumber(nin)) {
1010 biffAddf(NRRDnrrdBiffKey, "%s: new sizes product (%s) != # elements (%s)", me,
1011 airSprintSize_t(stmp[0], numOut),
1012 airSprintSize_t(stmp[1], nrrdElementNumber(nin)));
1013 return 1;
1014 }
1015
1016 if (nout != nin) {
1017 if (_nrrdCopy(nout, nin, (NRRD_BASIC_INFO_COMMENTS_BIT(1<<14)
1018 | (nrrdStateKeyValuePairsPropagate
1019 ? 0
1020 : NRRD_BASIC_INFO_KEYVALUEPAIRS_BIT(1<<15))))) {
1021 biffAddf(NRRDnrrdBiffKey, "%s:", me);
1022 return 1;
1023 }
1024 }
1025 nout->dim = dim;
1026 for (ai=0; ai<dim; ai++) {
1027 /* the ONLY thing we can say about the axes is the size */
1028 _nrrdAxisInfoInit(&(nout->axis[ai]));
1029 nout->axis[ai].size = size[ai];
1030 }
1031
1032 strcpy(buff1, "");
1033 for (ai=0; ai<dim; ai++) {
1034 sprintf(buff2, "%s%s", (ai ? "x" : ""),
1035 airSprintSize_t(stmp[0], size[ai]));
1036 strcat(buff1, buff2);
1037 }
1038 /* basic info copied by _nrrdCopy, but probably more than we
1039 want- perhaps space dimension and origin should be nixed? */
1040 if (nrrdContentSet_va(nout, func, nin, "%s", buff1)) {
1041 biffAddf(NRRDnrrdBiffKey, "%s:", me);
1042 return 1;
1043 }
1044 return 0;
1045}
1046
1047/*
1048******** nrrdReshape_va()
1049**
1050** var-args version of nrrdReshape_nva()
1051*/
1052int
1053nrrdReshape_va(Nrrd *nout, const Nrrd *nin, unsigned int dim, ...) {
1054 static const char me[]="nrrdReshape_va";
1055 unsigned int ai;
1056 size_t size[NRRD_DIM_MAX16];
1057 va_list ap;
1058
1059 if (!(nout && nin)) {
1060 biffAddf(NRRDnrrdBiffKey, "%s: got NULL pointer", me);
1061 return 1;
1062 }
1063 if (!(AIR_IN_CL(1, dim, NRRD_DIM_MAX))) {
1064 biffAddf(NRRDnrrdBiffKey, "%s: given dimension (%d) outside valid range [1,%d]",
1065 me, dim, NRRD_DIM_MAX16);
1066 return 1;
1067 }
1068 va_start(ap, dim);
1069 for (ai=0; ai<dim; ai++) {
1070 size[ai] = va_arg(ap, size_t);
1071 }
1072 va_end(ap);
1073 /* basic info copied (indirectly) by nrrdReshape_nva() */
1074 if (nrrdReshape_nva(nout, nin, dim, size)) {
1075 biffAddf(NRRDnrrdBiffKey, "%s:", me);
1076 return 1;
1077 }
1078
1079 return 0;
1080}
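
A sketch of the var-args reshape; the element count must match the input (checked in nrrdReshape_nva() above), and the sizes are cast to size_t because they travel through varargs. The 24-element input is an assumption of the example:

#include <teem/nrrd.h>

/* Reinterpret a flat array of 24 elements as a 2x3x4 volume; only the
   shape changes, not the data. */
static int
reshape_example(Nrrd *nout, const Nrrd *nin) {
  return nrrdReshape_va(nout, nin, 3, (size_t)2, (size_t)3, (size_t)4);
}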
1081
1082/*
1083******** nrrdBlock()
1084**
1085** collapse the first axis (axis 0) of the nrrd into a block, making
1086** an output nrrd of type nrrdTypeBlock. The input type can be block.
1087** All information for other axes is shifted down one axis.
1088*/
1089int
1090nrrdBlock(Nrrd *nout, const Nrrd *nin) {
1091 static const char me[]="nrrdBlock", func[]="block";
1092 unsigned int ai;
1093 size_t numEl, size[NRRD_DIM_MAX16];
1094 int map[NRRD_DIM_MAX16];
1095
1096 if (!(nout && nin)) {
1097 biffAddf(NRRDnrrdBiffKey, "%s: got NULL pointer", me);
1098 return 1;
1099 }
1100 if (nout == nin) {
1101 biffAddf(NRRDnrrdBiffKey, "%s: due to laziness, nout==nin disallowed", me);
1102 return 1;
1103 }
1104 if (1 == nin->dim) {
1105 biffAddf(NRRDnrrdBiffKey, "%s: can't blockify 1-D nrrd", me);
1106 return 1;
1107 }
1108 /* this shouldn't actually be necessary .. */
1109 if (!nrrdElementSize(nin)) {
1110 biffAddf(NRRDnrrdBiffKey, "%s: nrrd reports zero element size!", me);
1111 return 1;
1112 }
1113
1114 numEl = nin->axis[0].size;
1115 nout->blockSize = numEl*nrrdElementSize(nin);
1116 /*
1117 fprintf(stderr, "!%s: nout->blockSize = %d * %d = %d\n", me,
1118 numEl, nrrdElementSize(nin), nout->blockSize);
1119 */
1120 for (ai=0; ai<nin->dim-1; ai++) {
1121 map[ai] = ai+1;
1122 size[ai] = nin->axis[map[ai]].size;
1123 }
1124
1125 /* nout->blockSize set above */
1126 if (nrrdMaybeAlloc_nva(nout, nrrdTypeBlock, nin->dim-1, size)) {
1127 biffAddf(NRRDnrrdBiffKey, "%s: failed to allocate output", me);
1128 return 1;
1129 }
1130 memcpy(nout->data, nin->data, nrrdElementNumber(nin)*nrrdElementSize(nin));
1131 if (nrrdAxisInfoCopy(nout, nin, map, NRRD_AXIS_INFO_NONE0)) {
1132 biffAddf(NRRDnrrdBiffKey, "%s: failed to copy axes", me);
1133 return 1;
1134 }
1135 if (nrrdContentSet_va(nout, func, nin, "")) {
1136 biffAddf(NRRDnrrdBiffKey, "%s:", me);
1137 return 1;
1138 }
1139 if (nrrdBasicInfoCopy(nout, nin,
1140 NRRD_BASIC_INFO_DATA_BIT(1<< 1)
1141 | NRRD_BASIC_INFO_TYPE_BIT(1<< 2)
1142 | NRRD_BASIC_INFO_BLOCKSIZE_BIT(1<< 3)
1143 | NRRD_BASIC_INFO_DIMENSION_BIT(1<< 4)
1144 | NRRD_BASIC_INFO_CONTENT_BIT(1<< 5)
1145 | NRRD_BASIC_INFO_COMMENTS_BIT(1<<14)
1146 | (nrrdStateKeyValuePairsPropagate
1147 ? 0
1148 : NRRD_BASIC_INFO_KEYVALUEPAIRS_BIT(1<<15)))) {
1149 biffAddf(NRRDnrrdBiffKey, "%s:", me);
1150 return 1;
1151 }
1152 return 0;
1153}
1154
1155/*
1156******** nrrdUnblock()
1157**
1158** takes a nrrdTypeBlock nrrd and breaks the blocks into elements of
1159** type "type", and shifts other axis information up by one axis
1160*/
1161int
1162nrrdUnblock(Nrrd *nout, const Nrrd *nin, int type) {
1163 static const char me[]="nrrdUnblock", func[]="unblock";
1164 unsigned int dim;
1165 size_t size[NRRD_DIM_MAX16], outElSz;
1166 int map[NRRD_DIM_MAX16];
1167 char stmp[2][AIR_STRLEN_SMALL(128+1)];
1168
1169 if (!(nout && nin)) {
1170 biffAddf(NRRDnrrdBiffKey, "%s: got NULL pointer", me);
1171 return 1;
1172 }
1173 if (nout == nin) {
1174 biffAddf(NRRDnrrdBiffKey, "%s: due to laziness, nout==nin disallowed", me);
1175 return 1;
1176 }
1177 if (nrrdTypeBlock != nin->type) {
1178 biffAddf(NRRDnrrdBiffKey, "%s: need input nrrd type %s", me,
1179 airEnumStr(nrrdType, nrrdTypeBlock));
1180 return 1;
1181 }
1182 if (NRRD_DIM_MAX16 == nin->dim) {
1183 biffAddf(NRRDnrrdBiffKey, "%s: input nrrd already at dimension limit (%d)",
1184 me, NRRD_DIM_MAX16);
1185 return 1;
1186 }
1187 if (airEnumValCheck(nrrdType, type)) {
1188 biffAddf(NRRDnrrdBiffKey, "%s: invalid requested type %d", me, type);
1189 return 1;
1190 }
1191 if (nrrdTypeBlock == type && (!(0 < nout->blockSize))) {
1192 biffAddf(NRRDnrrdBiffKey, "%s: for %s type, need nout->blockSize set", me,
1193 airEnumStr(nrrdType, nrrdTypeBlock));
1194 return 1;
1195 }
1196 /* this shouldn't actually be necessary .. */
1197 if (!(nrrdElementSize(nin))) {
1198 biffAddf(NRRDnrrdBiffKey, "%s: nin or nout reports zero element size!", me);
1199 return 1;
1200 }
1201
1202 nout->type = type;
1203 outElSz = nrrdElementSize(nout);
1204 if (nin->blockSize % outElSz) {
1205 biffAddf(NRRDnrrdBiffKey, "%s: input blockSize (%s) not multiple of output "
1206 "element size (%s)", me,
1207 airSprintSize_t(stmp[0], nin->blockSize),
1208 airSprintSize_t(stmp[1], outElSz));
1209 return 1;
1210 }
1211 for (dim=0; dim<=nin->dim; dim++) {
1212 map[dim] = !dim ? -1 : (int)dim-1;
1213 size[dim] = !dim ? nin->blockSize / outElSz : nin->axis[map[dim]].size;
1214 }
1215 /* if nout->blockSize is needed, we've checked that its set */
1216 if (nrrdMaybeAlloc_nva(nout, type, nin->dim+1, size)) {
1217 biffAddf(NRRDnrrdBiffKey, "%s: failed to allocate output", me);
1218 return 1;
1219 }
1220 memcpy(nout->data, nin->data, nrrdElementNumber(nin)*nrrdElementSize(nin));
1221 if (nrrdAxisInfoCopy(nout, nin, map, NRRD_AXIS_INFO_NONE0)) {
1222 biffAddf(NRRDnrrdBiffKey, "%s: failed to copy axes", me);
1223 return 1;
1224 }
1225 if (nrrdContentSet_va(nout, func, nin, "")) {
1226 biffAddf(NRRDnrrdBiffKey, "%s:", me);
1227 return 1;
1228 }
1229 if (nrrdBasicInfoCopy(nout, nin,
1230 NRRD_BASIC_INFO_DATA_BIT(1<< 1)
1231 | NRRD_BASIC_INFO_TYPE_BIT(1<< 2)
1232 | NRRD_BASIC_INFO_BLOCKSIZE_BIT(1<< 3)
1233 | NRRD_BASIC_INFO_DIMENSION_BIT(1<< 4)
1234 | NRRD_BASIC_INFO_CONTENT_BIT(1<< 5)
1235 | NRRD_BASIC_INFO_COMMENTS_BIT(1<<14)
1236 | (nrrdStateKeyValuePairsPropagate
1237 ? 0
1238 : NRRD_BASIC_INFO_KEYVALUEPAIRS_BIT(1<<15)))) {
1239 biffAddf(NRRDnrrdBiffKey, "%s:", me);
1240 return 1;
1241 }
1242 return 0;
1243}
1244
1245/* for nrrdTile ..
1246
1247will require that # slices be <= number of images: won't crop for you,
1248 but will happily pad with black. This will be handled in another
1249function. Probably unu tile.
1250
1251*/
1252
1253/*
1254******** nrrdTile2D()
1255**
1256** Splits axis axSplit into two pieces of size sizeFast and sizeSlow.
1257** The data from the fast partition is juxtaposed following ax0, the
1258** slow after ax1. nrrdAxesMerge is then called to join ax0 and ax1
1259** with their respective newly permuted data. There should be one
1260 fewer dimension in the output nrrd than in the input nrrd.
1261*/
1262int
1263nrrdTile2D(Nrrd *nout, const Nrrd *nin, unsigned int ax0, unsigned int ax1,
1264 unsigned int axSplit, size_t sizeFast, size_t sizeSlow) {
1265 static const char me[]="nrrdTile2D";
1266 int E, /* error flag */
1267 insAxis[2*NRRD_DIM_MAX16], /* array for inserting the two axes resulting
1268 from the initial split amongst the other
1269 axes: inserted axes go in odd slots,
1270 other axes go in even slots */
1271 mapIdx, /* index for filling map[] */
1272 merge[2], /* two axes to be merged post-permute */
1273 mergeIdx; /* index for filling merge[] */
1274 unsigned int ii,
1275 map[NRRD_DIM_MAX16]; /* axis map for axis permute */
1276
1277 if (!(nout && nin)) {
1278 biffAddf(NRRDnrrdBiffKey, "%s: got NULL pointer", me);
1279 return 1;
1280 }
1281
1282 /* at least for now, axSplit, ax0, and ax1 need to be distinct */
1283 if (!( axSplit != ax0
1284 && axSplit != ax1
1285 && ax0 != ax1 )) {
1286 biffAddf(NRRDnrrdBiffKey, "%s: axSplit, ax0, ax1 (%d,%d,%d) must be distinct",
1287 me, axSplit, ax0, ax1);
1288 return 1;
1289 }
1290 if (!( ax0 < nin->dim
1291 && ax1 < nin->dim
1292 && axSplit < nin->dim )) {
1293 biffAddf(NRRDnrrdBiffKey, "%s: axSplit, ax0, ax1 (%d,%d,%d) must be in range [0,%d]",
1294 me, axSplit, ax0, ax1, nin->dim-1);
1295 return 1;
1296 }
1297
1298 if (nout != nin) {
1299 if (_nrrdCopy(nout, nin, (NRRD_BASIC_INFO_COMMENTS_BIT(1<<14)
1300 | (nrrdStateKeyValuePairsPropagate
1301 ? 0
1302 : NRRD_BASIC_INFO_KEYVALUEPAIRS_BIT(1<<15))))) {
1303 biffAddf(NRRDnrrdBiffKey, "%s:", me);
1304 return 1;
1305 }
1306 }
1307
1308 /* increment ax0 and ax1 if they're above axSplit, since the
1309 initial axis split will bump up the corresponding axes */
1310 ax0 += (axSplit < ax0);
1311 ax1 += (axSplit < ax1);
1312 /* initialize insAxis to all invalid (blank) values */
1313 for (ii=0; ii<2*(nout->dim+1); ii++) {
1314 insAxis[ii] = -1;
1315 }
1316 /* run through post-split axes, inserting axSplit and axSplit+1
1317 into the slots after ax0 and ax1 respectively, otherwise
1318 set the identity map */
1319 for (ii=0; ii<(nout->dim+1); ii++) {
1320 if (axSplit == ii) {
1321 insAxis[2*ax0 + 1] = axSplit;
1322 } else if (axSplit+1 == ii) {
1323 insAxis[2*ax1 + 1] = axSplit+1;
1324 } else {
1325 insAxis[2*ii + 0] = ii;
1326 }
1327 }
1328 /* settle the values from insAxis[] into map[] by removing the -1's */
1329 mergeIdx = mapIdx = 0;
1330 for (ii=0; ii<2*(nout->dim+1); ii++) {
1331 if (insAxis[ii] != -1) {
1332 if (1 == ii % 2) {
1333 /* it's an odd entry in insAxis[], so the previous axis is to be
1334 merged. Using mapIdx-1 is legit because we disallow
1335 axSplit == ax{0,1} */
1336 merge[mergeIdx++] = mapIdx-1;
1337 }
1338 map[mapIdx++] = insAxis[ii];
1339 }
1340 }
1341
1342 E = AIR_FALSE0;
1343 if (!E) E |= nrrdAxesSplit(nout, nout, axSplit, sizeFast, sizeSlow);
1344 if (!E) E |= nrrdAxesPermute(nout, nout, map);
1345 if (!E) E |= nrrdAxesMerge(nout, nout, merge[1]);
1346 if (!E) E |= nrrdAxesMerge(nout, nout, merge[0]);
1347 if (E) {
1348 biffAddf(NRRDnrrdBiffKey, "%s: trouble", me);
1349 return 1;
1350 }
1351 /* HEY: set content */
1352 if (nrrdBasicInfoCopy(nout, nin,
1353 NRRD_BASIC_INFO_DATA_BIT(1<< 1)
1354 | NRRD_BASIC_INFO_TYPE_BIT(1<< 2)
1355 | NRRD_BASIC_INFO_BLOCKSIZE_BIT(1<< 3)
1356 | NRRD_BASIC_INFO_DIMENSION_BIT(1<< 4)
1357 | NRRD_BASIC_INFO_CONTENT_BIT(1<< 5)
1358 | NRRD_BASIC_INFO_COMMENTS_BIT(1<<14)
1359 | (nrrdStateKeyValuePairsPropagate
1360 ? 0
1361 : NRRD_BASIC_INFO_KEYVALUEPAIRS_BIT(1<<15)))) {
1362 biffAddf(NRRDnrrdBiffKey, "%s:", me);
1363 return 1;
1364 }
1365 return 0;
1366}
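
A sketch of tiling as described above, assuming an X-by-Y-by-Z input volume with Z == cols*rows: the fast part of the split Z axis is juxtaposed along ax0 and the slow part along ax1, producing a 2-D (X*cols)-by-(Y*rows) montage:

#include <teem/nrrd.h>

/* Lay out the Z slices of a 3-D volume as a cols-by-rows montage. */
static int
tile_example(Nrrd *nout, const Nrrd *nin, size_t cols, size_t rows) {
  return nrrdTile2D(nout, nin, 0 /* ax0 */, 1 /* ax1 */, 2 /* axSplit */,
                    cols /* sizeFast */, rows /* sizeSlow */);
}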
1367
1368/*
1369******** nrrdUntile2D()
1370**
1371** This will split ax0 into nin->axis[ax0].size/sizeFast and sizeFast
1372** sizes. ax1 will then be split into nin->axis[ax1].size/sizeSlow
1373** and sizeSlow sizes. The axes corresponding to sizeFast and
1374** sizeSlow will be permuted and merged such that
1375** nout->axis[axMerge].size == sizeFast*sizeSlow.
1376**
1377** The thing to be careful of is that axMerge identifies an axis
1378** in the array set *after* the two axis splits, not before. This
1379** is in contrast to the axSplit (and ax0 and ax1) argument of nrrdTile2D
1380** which identifies axes in the original nrrd.
1381*/
1382int nrrdUntile2D(Nrrd *nout, const Nrrd *nin,
1383 unsigned int ax0, unsigned int ax1,
1384 unsigned int axMerge, size_t sizeFast, size_t sizeSlow) {
1385 static const char me[]="nrrdUntile2D";
1386 int E;
1387 unsigned int ii, mapIdx, map[NRRD_DIM_MAX16];
1388 char stmp[2][AIR_STRLEN_SMALL(128+1)];
1389
1390 if (!(nout && nin)) {
1391 biffAddf(NRRDnrrdBiffKey, "%s: got NULL pointer", me);
1392 return 1;
1393 }
1394 if (ax0 == ax1) {
1395 biffAddf(NRRDnrrdBiffKey, "%s: ax0 (%d) and ax1 (%d) must be distinct",
1396 me, ax0, ax1);
1397 return 1;
1398 }
1399 if (!( ax0 < nin->dim && ax1 < nin->dim )) {
1400 biffAddf(NRRDnrrdBiffKey, "%s: ax0, ax1 (%d,%d) must be in range [0,%d]",
1401 me, ax0, ax1, nin->dim-1);
1402 return 1;
1403 }
1404 if (!( axMerge <= nin->dim )) {
1405 biffAddf(NRRDnrrdBiffKey, "%s: axMerge (%d) must be in range [0,%d]",
1406 me, axMerge, nin->dim);
1407 return 1;
1408 }
1409 if (nin->axis[ax0].size != sizeFast*(nin->axis[ax0].size/sizeFast)) {
1410 biffAddf(NRRDnrrdBiffKey, "%s: sizeFast (%s) doesn't divide into axis %d size (%s)",
1411 me, airSprintSize_t(stmp[0], sizeFast),
1412 ax0, airSprintSize_t(stmp[1], nin->axis[ax0].size));
1413 return 1;
1414 }
1415 if (nin->axis[ax1].size != sizeSlow*(nin->axis[ax1].size/sizeSlow)) {
1416 biffAddf(NRRDnrrdBiffKey, "%s: sizeSlow (%s) doesn't divide into axis %d size (%s)",
1417 me, airSprintSize_t(stmp[0], sizeSlow),
1418 ax1, airSprintSize_t(stmp[1], nin->axis[ax1].size));
1419 return 1;
1420 }
1421
1422 if (nout != nin) {
1423 if (_nrrdCopy(nout, nin, (NRRD_BASIC_INFO_COMMENTS_BIT(1<<14)
1424 | (nrrdStateKeyValuePairsPropagate
1425 ? 0
1426 : NRRD_BASIC_INFO_KEYVALUEPAIRS_BIT(1<<15))))) {
1427 biffAddf(NRRDnrrdBiffKey, "%s:", me);
1428 return 1;
1429 }
1430 }
1431
1432 /* Split the larger (slower) axis first. */
1433 E = AIR_FALSE0;
1434 if (ax0 < ax1) {
1435 if (!E) E |= nrrdAxesSplit(nout, nout, ax1,
1436 nin->axis[ax1].size/sizeSlow, sizeSlow);
1437 if (!E) E |= nrrdAxesSplit(nout, nout, ax0,
1438 nin->axis[ax0].size/sizeFast, sizeFast);
1439 /* Increment the larger value as it will get shifted by the lower
1440 split. */
1441 ax1++;
1442 } else {
1443 if (!E) E |= nrrdAxesSplit(nout, nout, ax0,
1444 nin->axis[ax0].size/sizeFast, sizeFast);
1445 if (!E) E |= nrrdAxesSplit(nout, nout, ax1,
1446 nin->axis[ax1].size/sizeSlow, sizeSlow);
1447 ax0++;
1448 }
1449 if (E) {
1450 biffAddf(NRRDnrrdBiffKey, "%s: trouble with initial splitting", me);
1451 return 1;
1452 }
1453
1454 /* Determine the axis permutation map */
1455 mapIdx = 0;
1456 for (ii=0; ii<nout->dim; ii++) {
1457 if (mapIdx == axMerge) {
1458 /* Insert the slow parts of the axes that have been split */
1459 map[mapIdx++] = ax0+1;
1460 map[mapIdx++] = ax1+1;
1461 }
1462 if (ii == ax0+1 || ii == ax1+1) {
1463 /* These are handled by the logic above */
1464 } else {
1465 /* Otherwise use the identity map */
1466 map[mapIdx++] = ii;
1467 }
1468 }
1469
1470 /*
1471 fprintf(stderr, "%s: map =", me);
1472 for (ii=0; ii<nout->dim; ii++) {
1473 fprintf(stderr, " %d", map[ii]);
1474 }
1475 fprintf(stderr, "; axMerge = %d\n", axMerge);
1476 */
1477
1478 E = AIR_FALSE0;
1479 if (!E) E |= nrrdAxesPermute(nout, nout, map);
1480 if (!E) E |= nrrdAxesMerge(nout, nout, axMerge);
1481 if (E) {
1482 biffAddf(NRRDnrrdBiffKey, "%s: trouble", me);
1483 return 1;
1484 }
1485
1486 if (nrrdBasicInfoCopy(nout, nin,
1487 NRRD_BASIC_INFO_DATA_BIT(1<< 1)
1488 | NRRD_BASIC_INFO_TYPE_BIT(1<< 2)
1489 | NRRD_BASIC_INFO_BLOCKSIZE_BIT(1<< 3)
1490 | NRRD_BASIC_INFO_DIMENSION_BIT(1<< 4)
1491 | NRRD_BASIC_INFO_CONTENT_BIT(1<< 5)
1492 | NRRD_BASIC_INFO_COMMENTS_BIT(1<<14)
1493 | (nrrdStateKeyValuePairsPropagate
1494 ? 0
1495 : NRRD_BASIC_INFO_KEYVALUEPAIRS_BIT(1<<15)))) {
1496 biffAddf(NRRDnrrdBiffKey, "%s:", me);
1497 return 1;
1498 }
1499 return 0;
1500}
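
And the inverse sketch, assuming the montage produced by the tiling example above: each of ax0 and ax1 is split, and the slow parts are merged at axMerge (an axis index in the post-split array, per the comment above), recovering an X-by-Y-by-(cols*rows) volume:

#include <teem/nrrd.h>

/* Undo the cols-by-rows montage back into a stack of slices. */
static int
untile_example(Nrrd *nout, const Nrrd *nin, size_t cols, size_t rows) {
  return nrrdUntile2D(nout, nin, 0 /* ax0 */, 1 /* ax1 */, 2 /* axMerge */,
                      cols /* sizeFast */, rows /* sizeSlow */);
}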
1501
1502#if 0
1503int
1504nrrdShift(Nrrd *nout, const Nrrd *nin, const ptrdiff_t *offset,
1505 int boundary, double padValue) {
1506 static const char me[]="nrrdShift", func[] = "shift";
1507
1508 if (!(nout && nin && offset)) {
1509 biffAddf(NRRDnrrdBiffKey, "%s: got NULL pointer", me);
1510 return 1;
1511 }
1512 if (nout == nin) {
1513 biffAddf(NRRDnrrdBiffKey, "%s: nout==nin disallowed", me);
1514 return 1;
1515 }
1516
1517 return 0;
1518}
1519#endif
1520
1521/* ---- END non-NrrdIO */