Skip to content

Commit

Permalink
BUG: Fix reporting of memory error inside sorting
Browse files Browse the repository at this point in the history
PyDataMem_NEW was not checked for errors, and the occurrence of
errors was not returned correctly in new_sort. Also, PyArray_LexSort
and new_argsort should now raise MemoryError correctly.
This is done by setting PyErr_NoMemory() when no error is already
present, as the low level sorting can only fail for this reason.
  • Loading branch information
seberg committed Feb 10, 2013
1 parent 7ccf530 commit 2b76da3
Show file tree
Hide file tree
Showing 2 changed files with 28 additions and 6 deletions.
29 changes: 28 additions & 1 deletion numpy/core/src/multiarray/item_selection.c
Original file line number Diff line number Diff line change
Expand Up @@ -750,6 +750,9 @@ _new_sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
(astride != (npy_intp) elsize) || swap;
if (needcopy) {
char *buffer = PyDataMem_NEW(N*elsize);
if (buffer == NULL) {
goto fail;
}

while (size--) {
_unaligned_strided_byte_copy(buffer, (npy_intp) elsize, it->dataptr,
Expand Down Expand Up @@ -783,9 +786,11 @@ _new_sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
return 0;

fail:
/* Out of memory during sorting or buffer creation */
NPY_END_THREADS;
PyErr_NoMemory();
Py_DECREF(it);
return 0;
return -1;
}

static PyObject*
Expand Down Expand Up @@ -832,7 +837,14 @@ _new_argsort(PyArrayObject *op, int axis, NPY_SORTKIND which)
char *valbuffer, *indbuffer;

valbuffer = PyDataMem_NEW(N*elsize);
if (valbuffer == NULL) {
goto fail;
}
indbuffer = PyDataMem_NEW(N*sizeof(npy_intp));
if (indbuffer == NULL) {
PyDataMem_FREE(valbuffer);
goto fail;
}
while (size--) {
_unaligned_strided_byte_copy(valbuffer, (npy_intp) elsize, it->dataptr,
astride, N, elsize);
Expand Down Expand Up @@ -878,6 +890,10 @@ _new_argsort(PyArrayObject *op, int axis, NPY_SORTKIND which)

fail:
NPY_END_THREADS;
if (!PyErr_Occurred()) {
/* Out of memory during sorting or buffer creation */
PyErr_NoMemory();
}
Py_DECREF(ret);
Py_XDECREF(it);
Py_XDECREF(rit);
Expand Down Expand Up @@ -1331,7 +1347,14 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
int *swaps;

valbuffer = PyDataMem_NEW(N*maxelsize);
if (valbuffer == NULL) {
goto fail;
}
indbuffer = PyDataMem_NEW(N*sizeof(npy_intp));
if (indbuffer == NULL) {
PyDataMem_FREE(valbuffer);
goto fail;
}
swaps = malloc(n*sizeof(int));
for (j = 0; j < n; j++) {
swaps[j] = PyArray_ISBYTESWAPPED(mps[j]);
Expand Down Expand Up @@ -1400,6 +1423,10 @@ PyArray_LexSort(PyObject *sort_keys, int axis)

fail:
NPY_END_THREADS;
if (!PyErr_Occurred()) {
/* Out of memory during sorting or buffer creation */
PyErr_NoMemory();
}
Py_XDECREF(rit);
Py_XDECREF(ret);
for (i = 0; i < n; i++) {
Expand Down
5 changes: 0 additions & 5 deletions numpy/core/src/npysort/mergesort.c.src
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,6 @@ mergesort_@suff@(@type@ *start, npy_intp num, void *NOT_USED)
pr = pl + num;
pw = (@type@ *) malloc((num/2) * sizeof(@type@));
if (pw == NULL) {
PyErr_NoMemory();
return -NPY_ENOMEM;
}
mergesort0_@suff@(pl, pr, pw);
Expand Down Expand Up @@ -176,7 +175,6 @@ amergesort_@suff@(@type@ *v, npy_intp *tosort, npy_intp num, void *NOT_USED)
pr = pl + num;
pw = (npy_intp *) malloc((num/2) * sizeof(npy_intp));
if (pw == NULL) {
PyErr_NoMemory();
return -NPY_ENOMEM;
}
amergesort0_@suff@(pl, pr, v, pw);
Expand Down Expand Up @@ -259,13 +257,11 @@ mergesort_@suff@(@type@ *start, npy_intp num, PyArrayObject *arr)
pr = pl + num*len;
pw = (@type@ *) malloc((num/2) * elsize);
if (pw == NULL) {
PyErr_NoMemory();
err = -NPY_ENOMEM;
goto fail_0;
}
vp = (@type@ *) malloc(elsize);
if (vp == NULL) {
PyErr_NoMemory();
err = -NPY_ENOMEM;
goto fail_1;
}
Expand Down Expand Up @@ -335,7 +331,6 @@ amergesort_@suff@(@type@ *v, npy_intp *tosort, npy_intp num, PyArrayObject *arr)
pr = pl + num;
pw = (npy_intp *) malloc((num/2) * sizeof(npy_intp));
if (pw == NULL) {
PyErr_NoMemory();
return -NPY_ENOMEM;
}
amergesort0_@suff@(pl, pr, v, pw, len);
Expand Down

0 comments on commit 2b76da3

Please sign in to comment.