cpython/Objects/memoryobject.c
Stefan Krah 1649c1b33a Issue #14181: Preserve backwards compatibility for getbufferprocs that a) do
not adhere to the new documentation and b) manage to clobber view->obj before
returning failure.
2012-03-05 17:45:17 +01:00

2626 lines
74 KiB
C

/* Memoryview object implementation */
#include "Python.h"
#include <stddef.h>
/****************************************************************************/
/* ManagedBuffer Object */
/****************************************************************************/
/*
ManagedBuffer Object:
---------------------
The purpose of this object is to facilitate the handling of chained
memoryviews that have the same underlying exporting object. PEP-3118
allows the underlying object to change while a view is exported. This
could lead to unexpected results when constructing a new memoryview
from an existing memoryview.
Rather than repeatedly redirecting buffer requests to the original base
object, all chained memoryviews use a single buffer snapshot. This
snapshot is generated by the constructor _PyManagedBuffer_FromObject().
Ownership rules:
----------------
The master buffer inside a managed buffer is filled in by the original
base object. shape, strides, suboffsets and format are read-only for
all consumers.
A memoryview's buffer is a private copy of the exporter's buffer. shape,
strides and suboffsets belong to the memoryview and are thus writable.
If a memoryview itself exports several buffers via memory_getbuf(), all
buffer copies share shape, strides and suboffsets. In this case, the
arrays are NOT writable.
Reference count assumptions:
----------------------------
The 'obj' member of a Py_buffer must either be NULL or refer to the
exporting base object. In the Python codebase, all getbufferprocs
return a new reference to view.obj (example: bytes_buffer_getbuffer()).
PyBuffer_Release() decrements view.obj (if non-NULL), so the
releasebufferprocs must NOT decrement view.obj.
*/
/* Two-level expansion so that macro arguments (e.g. PyBUF_MAX_NDIM) are
   expanded to their values before being stringized. */
#define XSTRINGIZE(v) #v
#define STRINGIZE(v) XSTRINGIZE(v)
/* Guard: raise ValueError and return NULL if the managed buffer has been
   released. Only usable in functions returning PyObject *. */
#define CHECK_MBUF_RELEASED(mbuf) \
    if (((_PyManagedBufferObject *)mbuf)->flags&_Py_MANAGED_BUFFER_RELEASED) { \
        PyErr_SetString(PyExc_ValueError, \
            "operation forbidden on released memoryview object"); \
        return NULL; \
    }
/* Allocate and GC-track a fresh managed buffer. The master buffer is left
   unfilled (master.obj == NULL); the caller must complete it. */
Py_LOCAL_INLINE(_PyManagedBufferObject *)
mbuf_alloc(void)
{
    _PyManagedBufferObject *self =
        (_PyManagedBufferObject *)PyObject_GC_New(_PyManagedBufferObject,
                                                  &_PyManagedBuffer_Type);
    if (self == NULL)
        return NULL;
    self->flags = 0;
    self->exports = 0;
    self->master.obj = NULL;
    _PyObject_GC_TRACK(self);
    return self;
}
/* Create a managed buffer whose master buffer is a full (PyBUF_FULL_RO)
   snapshot of 'base'. Returns a new reference, or NULL with an exception
   set on failure. */
static PyObject *
_PyManagedBuffer_FromObject(PyObject *base)
{
    _PyManagedBufferObject *mbuf;
    mbuf = mbuf_alloc();
    if (mbuf == NULL)
        return NULL;
    if (PyObject_GetBuffer(base, &mbuf->master, PyBUF_FULL_RO) < 0) {
        /* Some getbufferprocs clobber view->obj even when they return
           failure; reset it so that mbuf_dealloc()/mbuf_release() does
           not decref an object whose buffer was never acquired. */
        mbuf->master.obj = NULL;
        Py_DECREF(mbuf);
        return NULL;
    }
    return (PyObject *)mbuf;
}
/* Release the master buffer exactly once and mark the managed buffer as
   inaccessible for all remaining consumers. Idempotent. */
static void
mbuf_release(_PyManagedBufferObject *self)
{
    if (self->flags&_Py_MANAGED_BUFFER_RELEASED)
        return;
    /* NOTE: at this point self->exports can still be > 0 if this function
       is called from mbuf_clear() to break up a reference cycle. */
    self->flags |= _Py_MANAGED_BUFFER_RELEASED;
    /* PyBuffer_Release() decrements master->obj and sets it to NULL. */
    _PyObject_GC_UNTRACK(self);
    PyBuffer_Release(&self->master);
}
/* Deallocate a managed buffer. All exported memoryviews must be gone.
   Frees the private format copy if mbuf_copy_format() installed one
   (indicated by _Py_MANAGED_BUFFER_FREE_FORMAT). */
static void
mbuf_dealloc(_PyManagedBufferObject *self)
{
    assert(self->exports == 0);
    mbuf_release(self);
    if (self->flags&_Py_MANAGED_BUFFER_FREE_FORMAT)
        PyMem_Free(self->master.format);
    PyObject_GC_Del(self);
}
/* GC traversal: the only owned reference is the exporting base object
   held in master.obj. Py_VISIT may return from this function. */
static int
mbuf_traverse(_PyManagedBufferObject *self, visitproc visit, void *arg)
{
    Py_VISIT(self->master.obj);
    return 0;
}
/* GC clear: force-release the master buffer to break reference cycles,
   even if views are still registered (see mbuf_release()). */
static int
mbuf_clear(_PyManagedBufferObject *self)
{
    assert(self->exports >= 0);
    mbuf_release(self);
    return 0;
}
/* Internal type backing the shared buffer snapshot of chained memoryviews.
   It participates in GC but is never exposed to Python code. */
PyTypeObject _PyManagedBuffer_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "managedbuffer",
    sizeof(_PyManagedBufferObject),
    0,
    (destructor)mbuf_dealloc,                /* tp_dealloc */
    0,                                       /* tp_print */
    0,                                       /* tp_getattr */
    0,                                       /* tp_setattr */
    0,                                       /* tp_reserved */
    0,                                       /* tp_repr */
    0,                                       /* tp_as_number */
    0,                                       /* tp_as_sequence */
    0,                                       /* tp_as_mapping */
    0,                                       /* tp_hash */
    0,                                       /* tp_call */
    0,                                       /* tp_str */
    PyObject_GenericGetAttr,                 /* tp_getattro */
    0,                                       /* tp_setattro */
    0,                                       /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
    0,                                       /* tp_doc */
    (traverseproc)mbuf_traverse,             /* tp_traverse */
    (inquiry)mbuf_clear                      /* tp_clear */
};
/****************************************************************************/
/* MemoryView Object */
/****************************************************************************/
/* In the process of breaking reference cycles mbuf_release() can be
   called before memory_release(). */
#define BASE_INACCESSIBLE(mv) \
    (((PyMemoryViewObject *)mv)->flags&_Py_MEMORYVIEW_RELEASED || \
     ((PyMemoryViewObject *)mv)->mbuf->flags&_Py_MANAGED_BUFFER_RELEASED)
/* Guards: raise ValueError if the view or its managed buffer has been
   released. The _INT variant is for functions returning int. */
#define CHECK_RELEASED(mv) \
    if (BASE_INACCESSIBLE(mv)) { \
        PyErr_SetString(PyExc_ValueError, \
            "operation forbidden on released memoryview object"); \
        return NULL; \
    }
#define CHECK_RELEASED_INT(mv) \
    if (BASE_INACCESSIBLE(mv)) { \
        PyErr_SetString(PyExc_ValueError, \
            "operation forbidden on released memoryview object"); \
        return -1; \
    }
/* Type guard for sequence arguments; stringizes the argument name into
   the error message. */
#define CHECK_LIST_OR_TUPLE(v) \
    if (!PyList_Check(v) && !PyTuple_Check(v)) { \
        PyErr_SetString(PyExc_TypeError, \
            #v " must be a list or a tuple"); \
        return NULL; \
    }
#define VIEW_ADDR(mv) (&((PyMemoryViewObject *)mv)->view)
/* Check for the presence of suboffsets in the first dimension. */
#define HAVE_PTR(suboffsets) (suboffsets && suboffsets[0] >= 0)
/* Adjust ptr if suboffsets are present. */
#define ADJUST_PTR(ptr, suboffsets) \
    (HAVE_PTR(suboffsets) ? *((char**)ptr) + suboffsets[0] : ptr)
/* Memoryview buffer properties (a scalar counts as both C and Fortran
   contiguous). */
#define MV_C_CONTIGUOUS(flags) (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C))
#define MV_F_CONTIGUOUS(flags) \
    (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_FORTRAN))
#define MV_ANY_CONTIGUOUS(flags) \
    (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN))
/* Fast contiguity test. Caller must ensure suboffsets==NULL and ndim==1. */
#define MV_CONTIGUOUS_NDIM1(view) \
    ((view)->shape[0] == 1 || (view)->strides[0] == (view)->itemsize)
/* getbuffer() requests: did the consumer ask for the given feature? */
#define REQ_INDIRECT(flags) ((flags&PyBUF_INDIRECT) == PyBUF_INDIRECT)
#define REQ_C_CONTIGUOUS(flags) ((flags&PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS)
#define REQ_F_CONTIGUOUS(flags) ((flags&PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS)
#define REQ_ANY_CONTIGUOUS(flags) ((flags&PyBUF_ANY_CONTIGUOUS) == PyBUF_ANY_CONTIGUOUS)
#define REQ_STRIDES(flags) ((flags&PyBUF_STRIDES) == PyBUF_STRIDES)
#define REQ_SHAPE(flags) ((flags&PyBUF_ND) == PyBUF_ND)
#define REQ_WRITABLE(flags) (flags&PyBUF_WRITABLE)
#define REQ_FORMAT(flags) (flags&PyBUF_FORMAT)
PyDoc_STRVAR(memory_doc,
"memoryview(object)\n\
\n\
Create a new memoryview object which references the given object.");
/**************************************************************************/
/*                       Copy memoryview buffers                          */
/**************************************************************************/
/* The functions in this section take a source and a destination buffer
   with the same logical structure: format, itemsize, ndim and shape
   are identical, with ndim > 0.
   NOTE: All buffers are assumed to have PyBUF_FULL information, which
   is the case for memoryviews! */
/* Assumptions: ndim >= 1. The macro tests for a corner case that should
   perhaps be explicitly forbidden in the PEP. NB: it indexes with
   dest->ndim-1 regardless of its argument, so it relies on 'dest' being
   in scope with dest->ndim == view->ndim. */
#define HAVE_SUBOFFSETS_IN_LAST_DIM(view) \
    (view->suboffsets && view->suboffsets[dest->ndim-1] >= 0)
/* True if both buffers can be copied row-wise with memcpy: no suboffsets
   in the last dimension and unit strides (itemsize) there. Callers
   guarantee dest->ndim == src->ndim (checked by cmp_structure()), which
   HAVE_SUBOFFSETS_IN_LAST_DIM depends on. */
Py_LOCAL_INLINE(int)
last_dim_is_contiguous(Py_buffer *dest, Py_buffer *src)
{
    assert(dest->ndim > 0 && src->ndim > 0);
    return (!HAVE_SUBOFFSETS_IN_LAST_DIM(dest) &&
            !HAVE_SUBOFFSETS_IN_LAST_DIM(src) &&
            dest->strides[dest->ndim-1] == dest->itemsize &&
            src->strides[src->ndim-1] == src->itemsize);
}
/* Check that the logical structure of the destination and source buffers
   is identical: same format (ignoring a leading '@'), itemsize, ndim and
   shape. A zero in the shape makes the remaining dimensions irrelevant.
   Returns 0 on success, -1 with ValueError set on mismatch. */
static int
cmp_structure(Py_buffer *dest, Py_buffer *src)
{
    const char *dfmt, *sfmt;
    int i;

    assert(dest->format && src->format);
    /* '@' only selects native byte order; strip it before comparing. */
    dfmt = dest->format[0] == '@' ? dest->format+1 : dest->format;
    sfmt = src->format[0] == '@' ? src->format+1 : src->format;

    if (strcmp(dfmt, sfmt) != 0 ||
        dest->itemsize != src->itemsize ||
        dest->ndim != src->ndim) {
        PyErr_SetString(PyExc_ValueError,
            "ndarray assignment: lvalue and rvalue have different structures");
        return -1;
    }

    for (i = 0; i < dest->ndim; i++) {
        if (dest->shape[i] != src->shape[i]) {
            PyErr_SetString(PyExc_ValueError,
                "ndarray assignment: lvalue and rvalue have different structures");
            return -1;
        }
        if (dest->shape[i] == 0) {
            /* empty array: nothing will be copied */
            break;
        }
    }

    return 0;
}
/* Base case for recursive multi-dimensional copying. Contiguous arrays are
   copied with very little overhead. Assumptions: ndim == 1, mem == NULL or
   sizeof(mem) == shape[0] * itemsize. */
static void
copy_base(const Py_ssize_t *shape, Py_ssize_t itemsize,
          char *dptr, const Py_ssize_t *dstrides, const Py_ssize_t *dsuboffsets,
          char *sptr, const Py_ssize_t *sstrides, const Py_ssize_t *ssuboffsets,
          char *mem)
{
    if (mem == NULL) { /* contiguous */
        Py_ssize_t size = shape[0] * itemsize;
        if (dptr + size < sptr || sptr + size < dptr)
            memcpy(dptr, sptr, size); /* no overlapping */
        else
            memmove(dptr, sptr, size);
    }
    else {
        char *p;
        Py_ssize_t i;
        /* Two passes through the scratch buffer 'mem': copy all source
           items out first, then into the destination, so overlapping
           source/destination regions are handled safely. */
        for (i=0, p=mem; i < shape[0]; p+=itemsize, sptr+=sstrides[0], i++) {
            char *xsptr = ADJUST_PTR(sptr, ssuboffsets);
            memcpy(p, xsptr, itemsize);
        }
        for (i=0, p=mem; i < shape[0]; p+=itemsize, dptr+=dstrides[0], i++) {
            char *xdptr = ADJUST_PTR(dptr, dsuboffsets);
            memcpy(xdptr, p, itemsize);
        }
    }
}
/* Recursively copy a source buffer to a destination buffer. The two buffers
   have the same ndim, shape and itemsize. 'mem' is an optional scratch row
   buffer passed through to copy_base() for the non-contiguous case. */
static void
copy_rec(const Py_ssize_t *shape, Py_ssize_t ndim, Py_ssize_t itemsize,
         char *dptr, const Py_ssize_t *dstrides, const Py_ssize_t *dsuboffsets,
         char *sptr, const Py_ssize_t *sstrides, const Py_ssize_t *ssuboffsets,
         char *mem)
{
    Py_ssize_t i;
    assert(ndim >= 1);
    if (ndim == 1) {
        copy_base(shape, itemsize,
                  dptr, dstrides, dsuboffsets,
                  sptr, sstrides, ssuboffsets,
                  mem);
        return;
    }
    /* recurse over the first dimension, advancing both pointers by their
       respective strides and dereferencing suboffsets where present */
    for (i = 0; i < shape[0]; dptr+=dstrides[0], sptr+=sstrides[0], i++) {
        char *xdptr = ADJUST_PTR(dptr, dsuboffsets);
        char *xsptr = ADJUST_PTR(sptr, ssuboffsets);
        copy_rec(shape+1, ndim-1, itemsize,
                 xdptr, dstrides+1, dsuboffsets ? dsuboffsets+1 : NULL,
                 xsptr, sstrides+1, ssuboffsets ? ssuboffsets+1 : NULL,
                 mem);
    }
}
/* Faster copying of one-dimensional arrays. Returns 0 on success, -1
   (with an exception set) on structure mismatch or allocation failure. */
static int
copy_single(Py_buffer *dest, Py_buffer *src)
{
    char *tmp = NULL;

    assert(dest->ndim == 1);

    if (cmp_structure(dest, src) < 0)
        return -1;

    if (!last_dim_is_contiguous(dest, src)) {
        /* scratch buffer for one full row; makes overlap handling safe */
        tmp = PyMem_Malloc(dest->shape[0] * dest->itemsize);
        if (tmp == NULL) {
            PyErr_NoMemory();
            return -1;
        }
    }

    copy_base(dest->shape, dest->itemsize,
              dest->buf, dest->strides, dest->suboffsets,
              src->buf, src->strides, src->suboffsets,
              tmp);

    if (tmp)
        PyMem_Free(tmp);

    return 0;
}
/* Recursively copy src to dest. Both buffers must have the same basic
   structure. Copying is atomic, the function never fails with a partial
   copy. Returns 0 on success, -1 with an exception set otherwise. */
static int
copy_buffer(Py_buffer *dest, Py_buffer *src)
{
    char *tmp = NULL;

    assert(dest->ndim > 0);

    if (cmp_structure(dest, src) < 0)
        return -1;

    if (!last_dim_is_contiguous(dest, src)) {
        /* scratch buffer for one row of the innermost dimension */
        tmp = PyMem_Malloc(dest->shape[dest->ndim-1] * dest->itemsize);
        if (tmp == NULL) {
            PyErr_NoMemory();
            return -1;
        }
    }

    copy_rec(dest->shape, dest->ndim, dest->itemsize,
             dest->buf, dest->strides, dest->suboffsets,
             src->buf, src->strides, src->suboffsets,
             tmp);

    if (tmp)
        PyMem_Free(tmp);

    return 0;
}
/* Initialize strides for a C-contiguous array: the last dimension varies
   fastest, so accumulate the stride from the innermost dimension out. */
Py_LOCAL_INLINE(void)
init_strides_from_shape(Py_buffer *view)
{
    Py_ssize_t i, stride;

    assert(view->ndim > 0);

    stride = view->itemsize;
    for (i = view->ndim-1; i >= 0; i--) {
        view->strides[i] = stride;
        stride *= view->shape[i];
    }
}
/* Initialize strides for a Fortran-contiguous array: the first dimension
   varies fastest, so accumulate the stride from dimension 0 upward. */
Py_LOCAL_INLINE(void)
init_fortran_strides_from_shape(Py_buffer *view)
{
    Py_ssize_t i, stride;

    assert(view->ndim > 0);

    stride = view->itemsize;
    for (i = 0; i < view->ndim; i++) {
        view->strides[i] = stride;
        stride *= view->shape[i];
    }
}
/* Copy src to a C-contiguous representation. Assumptions:
   len(mem) == src->len. Returns 0 on success, -1 with an exception set
   otherwise. */
static int
buffer_to_c_contiguous(char *mem, Py_buffer *src)
{
    Py_buffer dest;
    Py_ssize_t *cstrides;
    int ret;

    assert(src->shape != NULL);
    assert(src->strides != NULL);

    cstrides = PyMem_Malloc(src->ndim * (sizeof *cstrides));
    if (cstrides == NULL) {
        PyErr_NoMemory();
        return -1;
    }

    /* dest: same logical structure as src, but C-contiguous in 'mem' */
    dest = *src;
    dest.buf = mem;
    /* shape is constant and shared */
    dest.strides = cstrides;
    init_strides_from_shape(&dest);
    dest.suboffsets = NULL;

    ret = copy_buffer(&dest, src);

    PyMem_Free(cstrides);
    return ret;
}
/****************************************************************************/
/* Constructors */
/****************************************************************************/
/* Initialize values that are shared with the managed buffer. A missing
   format defaults to "B" (unsigned bytes). */
Py_LOCAL_INLINE(void)
init_shared_values(Py_buffer *dest, const Py_buffer *src)
{
    dest->obj = src->obj;
    dest->buf = src->buf;
    dest->len = src->len;
    dest->itemsize = src->itemsize;
    dest->readonly = src->readonly;
    dest->format = src->format ? src->format : "B";
    dest->internal = src->internal;
}
/* Copy shape and strides. Reconstruct missing values: a NULL shape for
   ndim==1 implies len/itemsize items; NULL strides imply C-contiguity. */
static void
init_shape_strides(Py_buffer *dest, const Py_buffer *src)
{
    Py_ssize_t i;
    if (src->ndim == 0) {
        /* scalar view: no shape/strides arrays */
        dest->shape = NULL;
        dest->strides = NULL;
        return;
    }
    if (src->ndim == 1) {
        dest->shape[0] = src->shape ? src->shape[0] : src->len / src->itemsize;
        dest->strides[0] = src->strides ? src->strides[0] : src->itemsize;
        return;
    }
    for (i = 0; i < src->ndim; i++)
        dest->shape[i] = src->shape[i];
    if (src->strides) {
        for (i = 0; i < src->ndim; i++)
            dest->strides[i] = src->strides[i];
    }
    else {
        /* missing strides for ndim > 1 imply a C-contiguous layout */
        init_strides_from_shape(dest);
    }
}
/* Copy the suboffsets array, or mark it absent if the source has none. */
Py_LOCAL_INLINE(void)
init_suboffsets(Py_buffer *dest, const Py_buffer *src)
{
    if (src->suboffsets == NULL) {
        dest->suboffsets = NULL;
    }
    else {
        Py_ssize_t dim;
        for (dim = 0; dim < src->ndim; dim++)
            dest->suboffsets[dim] = src->suboffsets[dim];
    }
}
/* len = product(shape) * itemsize */
Py_LOCAL_INLINE(void)
init_len(Py_buffer *view)
{
    Py_ssize_t i, product;

    product = view->itemsize;
    for (i = 0; i < view->ndim; i++)
        product *= view->shape[i];

    view->len = product;
}
/* Initialize memoryview buffer properties (contiguity flags). Must be
   called whenever shape/strides/suboffsets change. */
static void
init_flags(PyMemoryViewObject *mv)
{
    const Py_buffer *view = &mv->view;
    int flags = 0;
    switch (view->ndim) {
    case 0:
        /* a scalar is trivially both C- and Fortran-contiguous */
        flags |= (_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C|
                  _Py_MEMORYVIEW_FORTRAN);
        break;
    case 1:
        /* fast path; requires suboffsets==NULL, ruled out below if set */
        if (MV_CONTIGUOUS_NDIM1(view))
            flags |= (_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN);
        break;
    default:
        if (PyBuffer_IsContiguous(view, 'C'))
            flags |= _Py_MEMORYVIEW_C;
        if (PyBuffer_IsContiguous(view, 'F'))
            flags |= _Py_MEMORYVIEW_FORTRAN;
        break;
    }
    if (view->suboffsets) {
        /* PIL-style indirect buffers are never plain-contiguous */
        flags |= _Py_MEMORYVIEW_PIL;
        flags &= ~(_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN);
    }
    mv->flags = flags;
}
/* Allocate a new memoryview and perform basic initialization. New memoryviews
   are exclusively created through the mbuf_add functions.

   The variable-size tail ob_array holds 3*ndim Py_ssize_t slots that are
   partitioned into the shape, strides and suboffsets arrays. */
Py_LOCAL_INLINE(PyMemoryViewObject *)
memory_alloc(int ndim)
{
    PyMemoryViewObject *mv;
    mv = (PyMemoryViewObject *)
        PyObject_GC_NewVar(PyMemoryViewObject, &PyMemoryView_Type, 3*ndim);
    if (mv == NULL)
        return NULL;
    mv->mbuf = NULL;
    mv->hash = -1;   /* hash not computed yet */
    mv->flags = 0;
    mv->exports = 0;
    mv->view.ndim = ndim;
    mv->view.shape = mv->ob_array;
    mv->view.strides = mv->ob_array + ndim;
    mv->view.suboffsets = mv->ob_array + 2 * ndim;
    _PyObject_GC_TRACK(mv);
    return mv;
}
/*
   Return a new memoryview that is registered with mbuf. If src is NULL,
   use mbuf->master as the underlying buffer. Otherwise, use src.
   The new memoryview has full buffer information: shape and strides
   are always present, suboffsets as needed. Arrays are copied to
   the memoryview's ob_array field.
*/
static PyObject *
mbuf_add_view(_PyManagedBufferObject *mbuf, const Py_buffer *src)
{
    PyMemoryViewObject *mv;
    Py_buffer *dest;
    if (src == NULL)
        src = &mbuf->master;
    if (src->ndim > PyBUF_MAX_NDIM) {
        PyErr_SetString(PyExc_ValueError,
            "memoryview: number of dimensions must not exceed "
            STRINGIZE(PyBUF_MAX_NDIM));
        return NULL;
    }
    mv = memory_alloc(src->ndim);
    if (mv == NULL)
        return NULL;
    dest = &mv->view;
    init_shared_values(dest, src);
    init_shape_strides(dest, src);
    init_suboffsets(dest, src);
    init_flags(mv);
    /* register the view with the managed buffer */
    mv->mbuf = mbuf;
    Py_INCREF(mbuf);
    mbuf->exports++;
    return (PyObject *)mv;
}
/* Register an incomplete view: shape, strides, suboffsets and flags still
   need to be initialized. Use 'ndim' instead of src->ndim to determine the
   size of the memoryview's ob_array.
   Assumption: ndim <= PyBUF_MAX_NDIM. */
static PyObject *
mbuf_add_incomplete_view(_PyManagedBufferObject *mbuf, const Py_buffer *src,
                         int ndim)
{
    PyMemoryViewObject *mv;
    Py_buffer *dest;
    if (src == NULL)
        src = &mbuf->master;
    assert(ndim <= PyBUF_MAX_NDIM);
    mv = memory_alloc(ndim);
    if (mv == NULL)
        return NULL;
    dest = &mv->view;
    /* only the shared scalar fields are filled in; the caller must set
       shape/strides/suboffsets and then call init_flags() */
    init_shared_values(dest, src);
    mv->mbuf = mbuf;
    Py_INCREF(mbuf);
    mbuf->exports++;
    return (PyObject *)mv;
}
/* Expose a raw memory area as a view of contiguous bytes. flags can be
   PyBUF_READ or PyBUF_WRITE. view->format is set to "B" (unsigned bytes).
   The memoryview has complete buffer information. */
PyObject *
PyMemoryView_FromMemory(char *mem, Py_ssize_t size, int flags)
{
    _PyManagedBufferObject *mbuf;
    PyObject *mv;
    int readonly;
    assert(mem != NULL);
    assert(flags == PyBUF_READ || flags == PyBUF_WRITE);
    mbuf = mbuf_alloc();
    if (mbuf == NULL)
        return NULL;
    readonly = (flags == PyBUF_WRITE) ? 0 : 1;
    /* cannot fail: obj is NULL and the request is PyBUF_FULL_RO */
    (void)PyBuffer_FillInfo(&mbuf->master, NULL, mem, size, readonly,
                            PyBUF_FULL_RO);
    mv = mbuf_add_view(mbuf, NULL);
    Py_DECREF(mbuf);  /* the view (if any) holds the only reference now */
    return mv;
}
/* Create a memoryview from a given Py_buffer. For simple byte views,
   PyMemoryView_FromMemory() should be used instead.
   This function is the only entry point that can create a master buffer
   without full information. Because of this fact init_shape_strides()
   must be able to reconstruct missing values. */
PyObject *
PyMemoryView_FromBuffer(Py_buffer *info)
{
    _PyManagedBufferObject *mbuf;
    PyObject *mv;
    if (info->buf == NULL) {
        PyErr_SetString(PyExc_ValueError,
            "PyMemoryView_FromBuffer(): info->buf must not be NULL");
        return NULL;
    }
    mbuf = mbuf_alloc();
    if (mbuf == NULL)
        return NULL;
    /* info->obj is either NULL or a borrowed reference. This reference
       should not be decremented in PyBuffer_Release(). */
    mbuf->master = *info;
    mbuf->master.obj = NULL;
    mv = mbuf_add_view(mbuf, NULL);
    Py_DECREF(mbuf);
    return mv;
}
/* Create a memoryview from an object that implements the buffer protocol.
   If the object is a memoryview, the new memoryview must be registered
   with the same managed buffer. Otherwise, a new managed buffer is created. */
PyObject *
PyMemoryView_FromObject(PyObject *v)
{
    _PyManagedBufferObject *mbuf;
    if (PyMemoryView_Check(v)) {
        /* chain off the existing snapshot; CHECK_RELEASED may return NULL */
        PyMemoryViewObject *mv = (PyMemoryViewObject *)v;
        CHECK_RELEASED(mv);
        return mbuf_add_view(mv->mbuf, &mv->view);
    }
    else if (PyObject_CheckBuffer(v)) {
        PyObject *ret;
        mbuf = (_PyManagedBufferObject *)_PyManagedBuffer_FromObject(v);
        if (mbuf == NULL)
            return NULL;
        ret = mbuf_add_view(mbuf, NULL);
        Py_DECREF(mbuf);
        return ret;
    }
    PyErr_Format(PyExc_TypeError,
        "memoryview: %.200s object does not have the buffer interface",
        Py_TYPE(v)->tp_name);
    return NULL;
}
/* Copy the format string from a base object that might vanish. The copy
   is owned by the managed buffer and freed in mbuf_dealloc(). A NULL fmt
   is a no-op. Returns 0 on success, -1 with MemoryError set on failure. */
static int
mbuf_copy_format(_PyManagedBufferObject *mbuf, const char *fmt)
{
    char *cp;

    if (fmt == NULL)
        return 0;

    cp = PyMem_Malloc(strlen(fmt)+1);
    if (cp == NULL) {
        PyErr_NoMemory();
        return -1;
    }
    strcpy(cp, fmt);

    mbuf->master.format = cp;
    mbuf->flags |= _Py_MANAGED_BUFFER_FREE_FORMAT;
    return 0;
}
/*
   Return a memoryview that is based on a contiguous copy of src.
   Assumptions: src has PyBUF_FULL_RO information, src->ndim > 0.

   Ownership rules:
     1) As usual, the returned memoryview has a private copy
        of src->shape, src->strides and src->suboffsets.
     2) src->format is copied to the master buffer and released
        in mbuf_dealloc(). The releasebufferproc of the bytes
        object is NULL, so it does not matter that mbuf_release()
        passes the altered format pointer to PyBuffer_Release().
*/
static PyObject *
memory_from_contiguous_copy(Py_buffer *src, char order)
{
    _PyManagedBufferObject *mbuf;
    PyMemoryViewObject *mv;
    PyObject *bytes;
    Py_buffer *dest;
    int i;
    assert(src->ndim > 0);
    assert(src->shape != NULL);
    /* the bytes object provides the backing storage for the copy */
    bytes = PyBytes_FromStringAndSize(NULL, src->len);
    if (bytes == NULL)
        return NULL;
    mbuf = (_PyManagedBufferObject *)_PyManagedBuffer_FromObject(bytes);
    Py_DECREF(bytes);  /* mbuf->master.obj keeps bytes alive */
    if (mbuf == NULL)
        return NULL;
    if (mbuf_copy_format(mbuf, src->format) < 0) {
        Py_DECREF(mbuf);
        return NULL;
    }
    mv = (PyMemoryViewObject *)mbuf_add_incomplete_view(mbuf, NULL, src->ndim);
    Py_DECREF(mbuf);
    if (mv == NULL)
        return NULL;
    dest = &mv->view;
    /* shared values are initialized correctly except for itemsize */
    dest->itemsize = src->itemsize;
    /* shape and strides */
    for (i = 0; i < src->ndim; i++) {
        dest->shape[i] = src->shape[i];
    }
    if (order == 'C' || order == 'A') {
        init_strides_from_shape(dest);
    }
    else {
        init_fortran_strides_from_shape(dest);
    }
    /* suboffsets */
    dest->suboffsets = NULL;
    /* flags */
    init_flags(mv);
    if (copy_buffer(dest, src) < 0) {
        Py_DECREF(mv);
        return NULL;
    }
    return (PyObject *)mv;
}
/*
   Return a new memoryview object based on a contiguous exporter with
   buffertype={PyBUF_READ, PyBUF_WRITE} and order={'C', 'F'ortran, or 'A'ny}.
   The logical structure of the input and output buffers is the same
   (i.e. tolist(input) == tolist(output)), but the physical layout in
   memory can be explicitly chosen.

   As usual, if buffertype=PyBUF_WRITE, the exporter's buffer must be writable,
   otherwise it may be writable or read-only.

   If the exporter is already contiguous with the desired target order,
   the memoryview will be directly based on the exporter.

   Otherwise, if the buffertype is PyBUF_READ, the memoryview will be
   based on a new bytes object. If order={'C', 'A'ny}, use 'C' order,
   'F'ortran order otherwise.
*/
PyObject *
PyMemoryView_GetContiguous(PyObject *obj, int buffertype, char order)
{
    PyMemoryViewObject *mv;
    PyObject *ret;
    Py_buffer *view;
    assert(buffertype == PyBUF_READ || buffertype == PyBUF_WRITE);
    assert(order == 'C' || order == 'F' || order == 'A');
    mv = (PyMemoryViewObject *)PyMemoryView_FromObject(obj);
    if (mv == NULL)
        return NULL;
    view = &mv->view;
    if (buffertype == PyBUF_WRITE && view->readonly) {
        PyErr_SetString(PyExc_BufferError,
            "underlying buffer is not writable");
        Py_DECREF(mv);
        return NULL;
    }
    if (PyBuffer_IsContiguous(view, order))
        return (PyObject *)mv;  /* already contiguous: reuse directly */
    if (buffertype == PyBUF_WRITE) {
        /* a writable copy would not write back to the exporter */
        PyErr_SetString(PyExc_BufferError,
            "writable contiguous buffer requested "
            "for a non-contiguous object.");
        Py_DECREF(mv);
        return NULL;
    }
    ret = memory_from_contiguous_copy(view, order);
    Py_DECREF(mv);
    return ret;
}
/* tp_new for memoryview: memoryview(object). */
static PyObject *
memory_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
{
    static char *kwlist[] = {"object", NULL};
    PyObject *obj;

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O:memoryview", kwlist,
                                     &obj))
        return NULL;

    return PyMemoryView_FromObject(obj);
}
/****************************************************************************/
/* Release/GC management */
/****************************************************************************/
/* Inform the managed buffer that this particular memoryview will not access
   the underlying buffer again. If no other memoryviews are registered with
   the managed buffer, the underlying buffer is released instantly and
   marked as inaccessible for both the memoryview and the managed buffer.

   This function fails if the memoryview itself has exported buffers.
   Returns 0 on success (idempotent), -1 with BufferError set otherwise. */
static int
_memory_release(PyMemoryViewObject *self)
{
    if (self->flags & _Py_MEMORYVIEW_RELEASED)
        return 0;
    if (self->exports == 0) {
        self->flags |= _Py_MEMORYVIEW_RELEASED;
        assert(self->mbuf->exports > 0);
        /* the last registered view triggers release of the master buffer */
        if (--self->mbuf->exports == 0)
            mbuf_release(self->mbuf);
        return 0;
    }
    if (self->exports > 0) {
        PyErr_Format(PyExc_BufferError,
            "memoryview has %zd exported buffer%s", self->exports,
            self->exports==1 ? "" : "s");
        return -1;
    }
    Py_FatalError("_memory_release(): negative export count");
    return -1;
}
/* memoryview.release(): explicitly release the underlying buffer. */
static PyObject *
memory_release(PyMemoryViewObject *self)
{
    if (_memory_release(self) == 0)
        Py_RETURN_NONE;
    return NULL;
}
/* Deallocate the memoryview. The release cannot fail here because
   self->exports == 0 is asserted, so the return value is discarded. */
static void
memory_dealloc(PyMemoryViewObject *self)
{
    assert(self->exports == 0);
    _PyObject_GC_UNTRACK(self);
    (void)_memory_release(self);
    Py_CLEAR(self->mbuf);
    PyObject_GC_Del(self);
}
/* GC traversal: the only owned reference is the managed buffer. */
static int
memory_traverse(PyMemoryViewObject *self, visitproc visit, void *arg)
{
    Py_VISIT(self->mbuf);
    return 0;
}
/* GC clear: best-effort release (errors ignored while breaking cycles),
   then drop the managed-buffer reference. */
static int
memory_clear(PyMemoryViewObject *self)
{
    (void)_memory_release(self);
    Py_CLEAR(self->mbuf);
    return 0;
}
/* __enter__: a released view cannot be used as a context manager.
   CHECK_RELEASED may return NULL with ValueError set. */
static PyObject *
memory_enter(PyObject *self, PyObject *args)
{
    CHECK_RELEASED(self);
    Py_INCREF(self);
    return self;
}
/* __exit__: release the buffer regardless of the exception state
   (args carries exc_type/exc_value/traceback, all ignored). */
static PyObject *
memory_exit(PyObject *self, PyObject *args)
{
    return memory_release((PyMemoryViewObject *)self);
}
/****************************************************************************/
/* Casting format and shape */
/****************************************************************************/
/* Formats whose itemsize is one byte; casts require one side to be such. */
#define IS_BYTE_FORMAT(f) (f == 'b' || f == 'B' || f == 'c')
/* If fmt is exactly one native format character (with an optional '@'
   prefix), store that character in *result and return its size in bytes.
   Return -1 for anything else (no exception is set here). */
Py_LOCAL_INLINE(Py_ssize_t)
get_native_fmtchar(char *result, const char *fmt)
{
    Py_ssize_t size = -1;
    if (fmt[0] == '@') fmt++;
    switch (fmt[0]) {
    case 'c': case 'b': case 'B': size = sizeof(char); break;
    case 'h': case 'H': size = sizeof(short); break;
    case 'i': case 'I': size = sizeof(int); break;
    case 'l': case 'L': size = sizeof(long); break;
    #ifdef HAVE_LONG_LONG
    case 'q': case 'Q': size = sizeof(PY_LONG_LONG); break;
    #endif
    case 'n': case 'N': size = sizeof(Py_ssize_t); break;
    case 'f': size = sizeof(float); break;
    case 'd': size = sizeof(double); break;
    #ifdef HAVE_C99_BOOL
    case '?': size = sizeof(_Bool); break;
    #else
    case '?': size = sizeof(char); break;
    #endif
    case 'P': size = sizeof(void *); break;
    }
    /* the format must be a single character (plus optional '@') */
    if (size > 0 && fmt[1] == '\0') {
        *result = fmt[0];
        return size;
    }
    return -1;
}
/* Cast a memoryview's data type to 'format'. The input array must be
   C-contiguous. At least one of input-format, output-format must have
   byte size. The output array is 1-D, with the same byte length as the
   input array. Thus, view->len must be a multiple of the new itemsize.
   Returns 0 on success, -1 with an exception set otherwise. */
static int
cast_to_1D(PyMemoryViewObject *mv, PyObject *format)
{
    Py_buffer *view = &mv->view;
    PyObject *asciifmt;
    char srcchar, destchar;
    Py_ssize_t itemsize;
    int ret = -1;
    /* mv came from mbuf_add_incomplete_view(): the ob_array partition
       still matches the original ndim */
    assert(view->ndim >= 1);
    assert(Py_SIZE(mv) == 3*view->ndim);
    assert(view->shape == mv->ob_array);
    assert(view->strides == mv->ob_array + view->ndim);
    assert(view->suboffsets == mv->ob_array + 2*view->ndim);
    if (get_native_fmtchar(&srcchar, view->format) < 0) {
        PyErr_SetString(PyExc_ValueError,
            "memoryview: source format must be a native single character "
            "format prefixed with an optional '@'");
        return ret;
    }
    asciifmt = PyUnicode_AsASCIIString(format);
    if (asciifmt == NULL)
        return ret;
    itemsize = get_native_fmtchar(&destchar, PyBytes_AS_STRING(asciifmt));
    if (itemsize < 0) {
        PyErr_SetString(PyExc_ValueError,
            "memoryview: destination format must be a native single "
            "character format prefixed with an optional '@'");
        goto out;
    }
    if (!IS_BYTE_FORMAT(srcchar) && !IS_BYTE_FORMAT(destchar)) {
        PyErr_SetString(PyExc_TypeError,
            "memoryview: cannot cast between two non-byte formats");
        goto out;
    }
    if (view->len % itemsize) {
        PyErr_SetString(PyExc_TypeError,
            "memoryview: length is not a multiple of itemsize");
        goto out;
    }
    /* store the new format in the view's private buffer, NUL-terminated */
    strncpy(mv->format, PyBytes_AS_STRING(asciifmt),
            _Py_MEMORYVIEW_MAX_FORMAT);
    mv->format[_Py_MEMORYVIEW_MAX_FORMAT-1] = '\0';
    view->format = mv->format;
    view->itemsize = itemsize;
    /* collapse to a 1-D C-contiguous view of the same byte length */
    view->ndim = 1;
    view->shape[0] = view->len / view->itemsize;
    view->strides[0] = view->itemsize;
    view->suboffsets = NULL;
    init_flags(mv);
    ret = 0;
out:
    Py_DECREF(asciifmt);
    return ret;
}
/* Copy a shape sequence into 'shape', validating each element. Returns
   product(shape) * itemsize, or -1 with an exception set on bad input
   or overflow. The memoryview must have space for 3*len(seq) elements. */
static Py_ssize_t
copy_shape(Py_ssize_t *shape, const PyObject *seq, Py_ssize_t ndim,
           Py_ssize_t itemsize)
{
    Py_ssize_t x, i;
    Py_ssize_t len = itemsize;
    for (i = 0; i < ndim; i++) {
        PyObject *tmp = PySequence_Fast_GET_ITEM(seq, i);
        if (!PyLong_Check(tmp)) {
            PyErr_SetString(PyExc_TypeError,
                "memoryview.cast(): elements of shape must be integers");
            return -1;
        }
        x = PyLong_AsSsize_t(tmp);
        if (x == -1 && PyErr_Occurred()) {
            return -1;
        }
        if (x <= 0) {
            /* In general elements of shape may be 0, but not for casting. */
            PyErr_Format(PyExc_ValueError,
                "memoryview.cast(): elements of shape must be integers > 0");
            return -1;
        }
        /* overflow check before multiplying the running product */
        if (x > PY_SSIZE_T_MAX / len) {
            PyErr_Format(PyExc_ValueError,
                "memoryview.cast(): product(shape) > SSIZE_MAX");
            return -1;
        }
        len *= x;
        shape[i] = x;
    }
    return len;
}
/* Cast a 1-D array to a new shape. The result array will be C-contiguous.
   If the result array does not have exactly the same byte length as the
   input array, raise ValueError.
   Returns 0 on success, -1 with an exception set otherwise. */
static int
cast_to_ND(PyMemoryViewObject *mv, const PyObject *shape, int ndim)
{
    Py_buffer *view = &mv->view;
    Py_ssize_t len;
    assert(view->ndim == 1); /* ndim from cast_to_1D() */
    assert(Py_SIZE(mv) == 3*(ndim==0?1:ndim)); /* ndim of result array */
    assert(view->shape == mv->ob_array);
    assert(view->strides == mv->ob_array + (ndim==0?1:ndim));
    assert(view->suboffsets == NULL);
    view->ndim = ndim;
    if (view->ndim == 0) {
        /* scalar view: single item, no shape/strides arrays */
        view->shape = NULL;
        view->strides = NULL;
        len = view->itemsize;
    }
    else {
        len = copy_shape(view->shape, shape, ndim, view->itemsize);
        if (len < 0)
            return -1;
        init_strides_from_shape(view);
    }
    /* the cast must preserve the exact byte length */
    if (view->len != len) {
        PyErr_SetString(PyExc_TypeError,
            "memoryview: product(shape) * itemsize != buffer size");
        return -1;
    }
    init_flags(mv);
    return 0;
}
/* Return 1 if any dimension of the view's shape is zero, else 0. */
static int
zero_in_shape(PyMemoryViewObject *mv)
{
    const Py_buffer *view = &mv->view;
    Py_ssize_t dim;

    for (dim = 0; dim < view->ndim; dim++) {
        if (view->shape[dim] == 0)
            return 1;
    }
    return 0;
}
/*
   Cast a copy of 'self' to a different view. The input view must
   be C-contiguous. The function always casts the input view to a
   1-D output according to 'format'. At least one of input-format,
   output-format must have byte size.

   If 'shape' is given, the 1-D view from the previous step will
   be cast to a C-contiguous view with new shape and strides.

   All casts must result in views that will have the exact byte
   size of the original input. Otherwise, an error is raised.
*/
static PyObject *
memory_cast(PyMemoryViewObject *self, PyObject *args, PyObject *kwds)
{
    static char *kwlist[] = {"format", "shape", NULL};
    PyMemoryViewObject *mv = NULL;
    PyObject *shape = NULL;
    PyObject *format;
    Py_ssize_t ndim = 1;
    CHECK_RELEASED(self);
    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O", kwlist,
                                     &format, &shape)) {
        return NULL;
    }
    if (!PyUnicode_Check(format)) {
        PyErr_SetString(PyExc_TypeError,
            "memoryview: format argument must be a string");
        return NULL;
    }
    if (!MV_C_CONTIGUOUS(self->flags)) {
        PyErr_SetString(PyExc_TypeError,
            "memoryview: casts are restricted to C-contiguous views");
        return NULL;
    }
    if (zero_in_shape(self)) {
        PyErr_SetString(PyExc_TypeError,
            "memoryview: cannot cast view with zeros in shape or strides");
        return NULL;
    }
    if (shape) {
        CHECK_LIST_OR_TUPLE(shape)
        ndim = PySequence_Fast_GET_SIZE(shape);
        if (ndim > PyBUF_MAX_NDIM) {
            PyErr_SetString(PyExc_ValueError,
                "memoryview: number of dimensions must not exceed "
                STRINGIZE(PyBUF_MAX_NDIM));
            return NULL;
        }
        if (self->view.ndim != 1 && ndim != 1) {
            PyErr_SetString(PyExc_TypeError,
                "memoryview: cast must be 1D -> ND or ND -> 1D");
            return NULL;
        }
    }
    /* ndim == 0 still needs one slot group in ob_array (see cast_to_ND) */
    mv = (PyMemoryViewObject *)
        mbuf_add_incomplete_view(self->mbuf, &self->view, ndim==0 ? 1 : (int)ndim);
    if (mv == NULL)
        return NULL;
    /* first collapse to 1-D with the new format, then reshape if requested */
    if (cast_to_1D(mv, format) < 0)
        goto error;
    if (shape && cast_to_ND(mv, shape, (int)ndim) < 0)
        goto error;
    return (PyObject *)mv;
error:
    Py_DECREF(mv);
    return NULL;
}
/**************************************************************************/
/* getbuffer */
/**************************************************************************/
/* getbufferproc for memoryview itself: fill '*view' from the snapshot in
   self->view, restricted according to the consumer's 'flags'.  On success,
   view->obj holds a new reference to self and self->exports is bumped;
   on failure -1 is returned with a BufferError set and view->obj is NULL. */
static int
memory_getbuf(PyMemoryViewObject *self, Py_buffer *view, int flags)
{
    Py_buffer *base = &self->view;
    int baseflags = self->flags;
    CHECK_RELEASED_INT(self);
    /* start with complete information */
    *view = *base;
    view->obj = NULL;
    if (REQ_WRITABLE(flags) && base->readonly) {
        PyErr_SetString(PyExc_BufferError,
            "memoryview: underlying buffer is not writable");
        return -1;
    }
    if (!REQ_FORMAT(flags)) {
        /* NULL indicates that the buffer's data type has been cast to 'B'.
           view->itemsize is the _previous_ itemsize. If shape is present,
           the equality product(shape) * itemsize = len still holds at this
           point. The equality calcsize(format) = itemsize does _not_ hold
           from here on! */
        view->format = NULL;
    }
    if (REQ_C_CONTIGUOUS(flags) && !MV_C_CONTIGUOUS(baseflags)) {
        PyErr_SetString(PyExc_BufferError,
            "memoryview: underlying buffer is not C-contiguous");
        return -1;
    }
    if (REQ_F_CONTIGUOUS(flags) && !MV_F_CONTIGUOUS(baseflags)) {
        PyErr_SetString(PyExc_BufferError,
            "memoryview: underlying buffer is not Fortran contiguous");
        return -1;
    }
    if (REQ_ANY_CONTIGUOUS(flags) && !MV_ANY_CONTIGUOUS(baseflags)) {
        PyErr_SetString(PyExc_BufferError,
            "memoryview: underlying buffer is not contiguous");
        return -1;
    }
    /* PIL-style (suboffset-using) buffers cannot be flattened away. */
    if (!REQ_INDIRECT(flags) && (baseflags & _Py_MEMORYVIEW_PIL)) {
        PyErr_SetString(PyExc_BufferError,
            "memoryview: underlying buffer requires suboffsets");
        return -1;
    }
    if (!REQ_STRIDES(flags)) {
        if (!MV_C_CONTIGUOUS(baseflags)) {
            PyErr_SetString(PyExc_BufferError,
                "memoryview: underlying buffer is not C-contiguous");
            return -1;
        }
        view->strides = NULL;
    }
    if (!REQ_SHAPE(flags)) {
        /* PyBUF_SIMPLE or PyBUF_WRITABLE: at this point buf is C-contiguous,
           so base->buf = ndbuf->data. */
        if (view->format != NULL) {
            /* PyBUF_SIMPLE|PyBUF_FORMAT and PyBUF_WRITABLE|PyBUF_FORMAT do
               not make sense. */
            PyErr_Format(PyExc_BufferError,
                "ndarray: cannot cast to unsigned bytes if the format flag "
                "is present");
            return -1;
        }
        /* product(shape) * itemsize = len and calcsize(format) = itemsize
           do _not_ hold from here on! */
        view->ndim = 1;
        view->shape = NULL;
    }
    /* Success: self is the exporting object of this buffer. */
    view->obj = (PyObject *)self;
    Py_INCREF(view->obj);
    self->exports++;
    return 0;
}
/* releasebufferproc: drop one export.  PyBuffer_Release() itself decrements
   view->obj after this callback returns, so nothing else is done here. */
static void
memory_releasebuf(PyMemoryViewObject *self, Py_buffer *view)
{
    self->exports--;
}
/* Buffer methods */
/* memoryview re-exports the snapshot held in self->view; see
   memory_getbuf() for the per-request restrictions. */
static PyBufferProcs memory_as_buffer = {
    (getbufferproc)memory_getbuf,         /* bf_getbuffer */
    (releasebufferproc)memory_releasebuf, /* bf_releasebuffer */
};
/****************************************************************************/
/* Optimized pack/unpack for all native format specifiers */
/****************************************************************************/
/*
Fix exceptions:
1) Include format string in the error message.
2) OverflowError -> ValueError.
3) The error message from PyNumber_Index() is not ideal.
*/
/* Raise a TypeError that names the offending struct-module format
   character; always returns -1 so callers can 'return type_error_int(fmt)'. */
static int
type_error_int(const char *fmt)
{
    PyErr_Format(PyExc_TypeError,
                 "memoryview: invalid type for format '%s'", fmt);
    return -1;
}
/* Raise a ValueError that names the offending struct-module format
   character; always returns -1 for convenient tail calls. */
static int
value_error_int(const char *fmt)
{
    PyErr_Format(PyExc_ValueError,
                 "memoryview: invalid value for format '%s'", fmt);
    return -1;
}
/* Normalize an exception raised during integer packing: TypeError is
   re-raised with the format character included, and OverflowError or
   ValueError collapse to a ValueError naming the format.  Any other
   exception is left untouched.  Always returns -1. */
static int
fix_error_int(const char *fmt)
{
    assert(PyErr_Occurred());

    if (PyErr_ExceptionMatches(PyExc_TypeError)) {
        PyErr_Clear();
        return type_error_int(fmt);
    }
    if (PyErr_ExceptionMatches(PyExc_OverflowError) ||
        PyErr_ExceptionMatches(PyExc_ValueError)) {
        PyErr_Clear();
        return value_error_int(fmt);
    }

    return -1;
}
/* Convert 'item' (an int or any object with __index__) to a C long.
   Returns -1 with an exception set on failure; callers must check
   PyErr_Occurred() since -1 is also a valid result. */
static long
pylong_as_ld(PyObject *item)
{
    long result = -1;
    PyObject *index = PyNumber_Index(item);

    if (index != NULL) {
        result = PyLong_AsLong(index);
        Py_DECREF(index);
    }
    return result;
}
/* Convert 'item' (an int or any object with __index__) to an unsigned long.
   Returns (unsigned long)-1 with an exception set on failure. */
static unsigned long
pylong_as_lu(PyObject *item)
{
    unsigned long result = (unsigned long)-1;
    PyObject *index = PyNumber_Index(item);

    if (index != NULL) {
        result = PyLong_AsUnsignedLong(index);
        Py_DECREF(index);
    }
    return result;
}
#ifdef HAVE_LONG_LONG
/* 64-bit variants, only compiled when the platform has long long. */

/* Convert 'item' via __index__ to a signed long long; -1 + exception on
   failure. */
static PY_LONG_LONG
pylong_as_lld(PyObject *item)
{
    PY_LONG_LONG result = -1;
    PyObject *index = PyNumber_Index(item);

    if (index != NULL) {
        result = PyLong_AsLongLong(index);
        Py_DECREF(index);
    }
    return result;
}

/* Convert 'item' via __index__ to an unsigned long long;
   (unsigned PY_LONG_LONG)-1 + exception on failure. */
static unsigned PY_LONG_LONG
pylong_as_llu(PyObject *item)
{
    unsigned PY_LONG_LONG result = (unsigned PY_LONG_LONG)-1;
    PyObject *index = PyNumber_Index(item);

    if (index != NULL) {
        result = PyLong_AsUnsignedLongLong(index);
        Py_DECREF(index);
    }
    return result;
}
#endif
/* Convert 'item' via __index__ to a Py_ssize_t; -1 + exception on failure. */
static Py_ssize_t
pylong_as_zd(PyObject *item)
{
    Py_ssize_t result = -1;
    PyObject *index = PyNumber_Index(item);

    if (index != NULL) {
        result = PyLong_AsSsize_t(index);
        Py_DECREF(index);
    }
    return result;
}
/* Convert 'item' via __index__ to a size_t; (size_t)-1 + exception on
   failure. */
static size_t
pylong_as_zu(PyObject *item)
{
    size_t result = (size_t)-1;
    PyObject *index = PyNumber_Index(item);

    if (index != NULL) {
        result = PyLong_AsSize_t(index);
        Py_DECREF(index);
    }
    return result;
}
/* Timings with the ndarray from _testbuffer.c indicate that using the
   struct module is around 15x slower than the two functions below. */
/* Copy sizeof(type) raw bytes from 'ptr' (which may be unaligned) into a
   correctly typed temporary via memcpy, then assign it to 'dest'. */
#define UNPACK_SINGLE(dest, ptr, type) \
    do {                                   \
        type x;                            \
        memcpy((char *)&x, ptr, sizeof x); \
        dest = x;                          \
    } while (0)
/* Unpack a single item. 'fmt' can be any native format character in struct
   module syntax. This function is very sensitive to small changes. With this
   layout gcc automatically generates a fast jump table. */
/* Returns a new reference on success, or NULL with NotImplementedError set
   for unsupported formats. */
Py_LOCAL_INLINE(PyObject *)
unpack_single(const char *ptr, const char *fmt)
{
    unsigned PY_LONG_LONG llu;
    unsigned long lu;
    size_t zu;
    PY_LONG_LONG lld;
    long ld;
    Py_ssize_t zd;
    double d;
    unsigned char uc;
    void *p;
    switch (fmt[0]) {
    /* signed integers and fast path for 'B' */
    case 'B': uc = *((unsigned char *)ptr); goto convert_uc;
    case 'b': ld =   *((signed char *)ptr); goto convert_ld;
    case 'h': UNPACK_SINGLE(ld, ptr, short); goto convert_ld;
    case 'i': UNPACK_SINGLE(ld, ptr, int); goto convert_ld;
    case 'l': UNPACK_SINGLE(ld, ptr, long); goto convert_ld;
    /* boolean */
    #ifdef HAVE_C99_BOOL
    case '?': UNPACK_SINGLE(ld, ptr, _Bool); goto convert_bool;
    #else
    case '?': UNPACK_SINGLE(ld, ptr, char); goto convert_bool;
    #endif
    /* unsigned integers */
    case 'H': UNPACK_SINGLE(lu, ptr, unsigned short); goto convert_lu;
    case 'I': UNPACK_SINGLE(lu, ptr, unsigned int); goto convert_lu;
    case 'L': UNPACK_SINGLE(lu, ptr, unsigned long); goto convert_lu;
    /* native 64-bit */
    #ifdef HAVE_LONG_LONG
    case 'q': UNPACK_SINGLE(lld, ptr, PY_LONG_LONG); goto convert_lld;
    case 'Q': UNPACK_SINGLE(llu, ptr, unsigned PY_LONG_LONG); goto convert_llu;
    #endif
    /* ssize_t and size_t */
    case 'n': UNPACK_SINGLE(zd, ptr, Py_ssize_t); goto convert_zd;
    case 'N': UNPACK_SINGLE(zu, ptr, size_t); goto convert_zu;
    /* floats */
    case 'f': UNPACK_SINGLE(d, ptr, float); goto convert_double;
    case 'd': UNPACK_SINGLE(d, ptr, double); goto convert_double;
    /* bytes object */
    case 'c': goto convert_bytes;
    /* pointer */
    case 'P': UNPACK_SINGLE(p, ptr, void *); goto convert_pointer;
    /* default */
    default: goto err_format;
    }
convert_uc:
    /* PyLong_FromUnsignedLong() is slower */
    return PyLong_FromLong(uc);
convert_ld:
    return PyLong_FromLong(ld);
convert_lu:
    return PyLong_FromUnsignedLong(lu);
convert_lld:
    return PyLong_FromLongLong(lld);
convert_llu:
    return PyLong_FromUnsignedLongLong(llu);
convert_zd:
    return PyLong_FromSsize_t(zd);
convert_zu:
    return PyLong_FromSize_t(zu);
convert_double:
    return PyFloat_FromDouble(d);
convert_bool:
    return PyBool_FromLong(ld);
convert_bytes:
    return PyBytes_FromStringAndSize(ptr, 1);
convert_pointer:
    return PyLong_FromVoidPtr(p);
err_format:
    PyErr_Format(PyExc_NotImplementedError,
        "memoryview: format %s not supported", fmt);
    return NULL;
}
/* Convert 'src' to 'type' and memcpy its object representation to 'ptr',
   which may be unaligned. */
#define PACK_SINGLE(ptr, src, type) \
    do {                                     \
        type x;                              \
        x = (type)src;                       \
        memcpy(ptr, (char *)&x, sizeof x);   \
    } while (0)
/* Pack a single item. 'fmt' can be any native format character in
   struct module syntax. */
/* Returns 0 on success, -1 with an exception set on error.  Out-of-range
   integers raise ValueError; wrong argument types raise TypeError (both via
   the *_error_int()/fix_error_int() helpers above). */
static int
pack_single(char *ptr, PyObject *item, const char *fmt)
{
    unsigned PY_LONG_LONG llu;
    unsigned long lu;
    size_t zu;
    PY_LONG_LONG lld;
    long ld;
    Py_ssize_t zd;
    double d;
    void *p;
    switch (fmt[0]) {
    /* signed integers */
    case 'b': case 'h': case 'i': case 'l':
        ld = pylong_as_ld(item);
        if (ld == -1 && PyErr_Occurred())
            goto err_occurred;
        /* Explicit range checks for the sub-long types. */
        switch (fmt[0]) {
        case 'b':
            if (ld < SCHAR_MIN || ld > SCHAR_MAX) goto err_range;
            *((signed char *)ptr) = (signed char)ld; break;
        case 'h':
            if (ld < SHRT_MIN || ld > SHRT_MAX) goto err_range;
            PACK_SINGLE(ptr, ld, short); break;
        case 'i':
            if (ld < INT_MIN || ld > INT_MAX) goto err_range;
            PACK_SINGLE(ptr, ld, int); break;
        default: /* 'l' */
            PACK_SINGLE(ptr, ld, long); break;
        }
        break;
    /* unsigned integers */
    case 'B': case 'H': case 'I': case 'L':
        lu = pylong_as_lu(item);
        if (lu == (unsigned long)-1 && PyErr_Occurred())
            goto err_occurred;
        switch (fmt[0]) {
        case 'B':
            if (lu > UCHAR_MAX) goto err_range;
            *((unsigned char *)ptr) = (unsigned char)lu; break;
        case 'H':
            if (lu > USHRT_MAX) goto err_range;
            PACK_SINGLE(ptr, lu, unsigned short); break;
        case 'I':
            if (lu > UINT_MAX) goto err_range;
            PACK_SINGLE(ptr, lu, unsigned int); break;
        default: /* 'L' */
            PACK_SINGLE(ptr, lu, unsigned long); break;
        }
        break;
    /* native 64-bit */
    #ifdef HAVE_LONG_LONG
    case 'q':
        lld = pylong_as_lld(item);
        if (lld == -1 && PyErr_Occurred())
            goto err_occurred;
        PACK_SINGLE(ptr, lld, PY_LONG_LONG);
        break;
    case 'Q':
        llu = pylong_as_llu(item);
        if (llu == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())
            goto err_occurred;
        PACK_SINGLE(ptr, llu, unsigned PY_LONG_LONG);
        break;
    #endif
    /* ssize_t and size_t */
    case 'n':
        zd = pylong_as_zd(item);
        if (zd == -1 && PyErr_Occurred())
            goto err_occurred;
        PACK_SINGLE(ptr, zd, Py_ssize_t);
        break;
    case 'N':
        zu = pylong_as_zu(item);
        if (zu == (size_t)-1 && PyErr_Occurred())
            goto err_occurred;
        PACK_SINGLE(ptr, zu, size_t);
        break;
    /* floats */
    case 'f': case 'd':
        d = PyFloat_AsDouble(item);
        if (d == -1.0 && PyErr_Occurred())
            goto err_occurred;
        if (fmt[0] == 'f') {
            PACK_SINGLE(ptr, d, float);
        }
        else {
            PACK_SINGLE(ptr, d, double);
        }
        break;
    /* bool */
    case '?':
        ld = PyObject_IsTrue(item);
        if (ld < 0)
            return -1; /* preserve original error */
        #ifdef HAVE_C99_BOOL
        PACK_SINGLE(ptr, ld, _Bool);
        #else
        PACK_SINGLE(ptr, ld, char);
        #endif
        break;
    /* bytes object */
    case 'c':
        /* item must be a bytes object of length exactly 1 */
        if (!PyBytes_Check(item))
            return type_error_int(fmt);
        if (PyBytes_GET_SIZE(item) != 1)
            return value_error_int(fmt);
        *ptr = PyBytes_AS_STRING(item)[0];
        break;
    /* pointer */
    case 'P':
        p = PyLong_AsVoidPtr(item);
        if (p == NULL && PyErr_Occurred())
            goto err_occurred;
        PACK_SINGLE(ptr, p, void *);
        break;
    /* default */
    default: goto err_format;
    }
    return 0;
err_occurred:
    return fix_error_int(fmt);
err_range:
    return value_error_int(fmt);
err_format:
    PyErr_Format(PyExc_NotImplementedError,
        "memoryview: format %s not supported", fmt);
    return -1;
}
/****************************************************************************/
/* Representations */
/****************************************************************************/
/* Accept the explicit '@' prefix for native formats; reject anything that
   is not a single-character format string.  Returns the (possibly advanced)
   format pointer, or NULL with NotImplementedError set. */
Py_LOCAL_INLINE(const char *)
adjust_fmt(const Py_buffer *view)
{
    const char *fmt = view->format;

    if (fmt[0] == '@')
        fmt++;
    if (fmt[0] != '\0' && fmt[1] == '\0')
        return fmt;

    PyErr_Format(PyExc_NotImplementedError,
        "memoryview: unsupported format %s", view->format);
    return NULL;
}
/* Unpack a one-dimensional array into a new list (base case of the
   multi-dimensional unpacking).  Assumption: ndim == 1. */
static PyObject *
tolist_base(const char *ptr, const Py_ssize_t *shape,
            const Py_ssize_t *strides, const Py_ssize_t *suboffsets,
            const char *fmt)
{
    const Py_ssize_t nitems = shape[0];
    PyObject *lst = PyList_New(nitems);
    Py_ssize_t idx;

    if (lst == NULL)
        return NULL;

    for (idx = 0; idx < nitems; idx++) {
        const char *xptr = ADJUST_PTR(ptr, suboffsets);
        PyObject *item = unpack_single(xptr, fmt);
        if (item == NULL) {
            Py_DECREF(lst);
            return NULL;
        }
        PyList_SET_ITEM(lst, idx, item);
        ptr += strides[0];
    }
    return lst;
}
/* Unpack a multi-dimensional array into a nested list, recursing one
   dimension at a time.  Assumption: ndim >= 1. */
static PyObject *
tolist_rec(const char *ptr, Py_ssize_t ndim, const Py_ssize_t *shape,
           const Py_ssize_t *strides, const Py_ssize_t *suboffsets,
           const char *fmt)
{
    PyObject *lst;
    Py_ssize_t i;

    assert(ndim >= 1);
    assert(shape != NULL);
    assert(strides != NULL);

    if (ndim == 1)
        return tolist_base(ptr, shape, strides, suboffsets, fmt);

    lst = PyList_New(shape[0]);
    if (lst == NULL)
        return NULL;

    for (i = 0; i < shape[0]; i++) {
        const char *xptr = ADJUST_PTR(ptr, suboffsets);
        PyObject *sub = tolist_rec(xptr, ndim-1, shape+1,
                                   strides+1,
                                   suboffsets ? suboffsets+1 : NULL,
                                   fmt);
        if (sub == NULL) {
            Py_DECREF(lst);
            return NULL;
        }
        PyList_SET_ITEM(lst, i, sub);
        ptr += strides[0];
    }
    return lst;
}
/* Return a (possibly nested) list representation of the memoryview.
   Currently only buffers with native single-character format strings are
   supported.  A 0-d view yields a single unpacked object. */
static PyObject *
memory_tolist(PyMemoryViewObject *mv, PyObject *noargs)
{
    const Py_buffer *view = &(mv->view);
    const char *fmt;

    CHECK_RELEASED(mv);
    fmt = adjust_fmt(view);
    if (fmt == NULL)
        return NULL;

    switch (view->ndim) {
    case 0:
        return unpack_single(view->buf, fmt);
    case 1:
        return tolist_base(view->buf, view->shape,
                           view->strides, view->suboffsets, fmt);
    default:
        return tolist_rec(view->buf, view->ndim, view->shape,
                          view->strides, view->suboffsets, fmt);
    }
}
/* memoryview.tobytes(): return the buffer's contents as a bytes object.
   Non-contiguous views are flattened into C-contiguous order first. */
static PyObject *
memory_tobytes(PyMemoryViewObject *self, PyObject *dummy)
{
    Py_buffer *src = VIEW_ADDR(self);
    PyObject *result;

    CHECK_RELEASED(self);

    /* Fast path: the bytes can be copied out directly. */
    if (MV_C_CONTIGUOUS(self->flags))
        return PyBytes_FromStringAndSize(src->buf, src->len);

    result = PyBytes_FromStringAndSize(NULL, src->len);
    if (result != NULL &&
        buffer_to_c_contiguous(PyBytes_AS_STRING(result), src) < 0) {
        Py_CLEAR(result);
    }
    return result;
}
/* repr(): distinguish released views from live ones, showing the address. */
static PyObject *
memory_repr(PyMemoryViewObject *self)
{
    const char *templ = (self->flags & _Py_MEMORYVIEW_RELEASED)
                        ? "<released memory at %p>"
                        : "<memory at %p>";
    return PyUnicode_FromFormat(templ, self);
}
/**************************************************************************/
/* Indexing and slicing */
/**************************************************************************/
/* Return a pointer to the item at 'index' in the first dimension, applying
   negative-index wraparound and bounds checking.  Returns NULL with an
   IndexError set when the index is out of range. */
static char *
ptr_from_index(Py_buffer *view, Py_ssize_t index)
{
    Py_ssize_t nitems;
    char *ptr;

    assert(view->shape);
    assert(view->strides);

    nitems = view->shape[0];
    if (index < 0)
        index += nitems;
    if (index < 0 || index >= nitems) {
        PyErr_SetString(PyExc_IndexError, "index out of bounds");
        return NULL;
    }

    ptr = (char *)view->buf + view->strides[0] * index;
    return ADJUST_PTR(ptr, view->suboffsets);
}
/* Return the item at index. In a one-dimensional view, this is an object
   with the type specified by view->format.  0-d views cannot be indexed and
   multi-dimensional sub-views are not implemented.  Used by
   memory_subscript() and memory_as_sequence. */
static PyObject *
memory_item(PyMemoryViewObject *self, Py_ssize_t index)
{
    Py_buffer *view = &(self->view);
    const char *fmt;

    CHECK_RELEASED(self);

    fmt = adjust_fmt(view);
    if (fmt == NULL)
        return NULL;

    switch (view->ndim) {
    case 0:
        PyErr_SetString(PyExc_TypeError, "invalid indexing of 0-dim memory");
        return NULL;
    case 1: {
        char *ptr = ptr_from_index(view, index);
        return ptr == NULL ? NULL : unpack_single(ptr, fmt);
    }
    default:
        PyErr_SetString(PyExc_NotImplementedError,
            "multi-dimensional sub-views are not implemented");
        return NULL;
    }
}
/* Apply the slice 'key' to dimension 'dim' of 'base' in place: move the
   start offset into buf (or, for PIL-style buffers, into the last preceding
   non-negative suboffset), then set the dimension's shape to the slice
   length and scale its stride by the step.  Returns 0 on success or -1
   with an exception set. */
Py_LOCAL_INLINE(int)
init_slice(Py_buffer *base, PyObject *key, int dim)
{
    Py_ssize_t start, stop, step, slicelength;
    if (PySlice_GetIndicesEx(key, base->shape[dim],
                             &start, &stop, &step, &slicelength) < 0) {
        return -1;
    }
    if (base->suboffsets == NULL || dim == 0) {
    adjust_buf:
        /* Either the base pointer itself can be adjusted ... */
        base->buf = (char *)base->buf + base->strides[dim] * start;
    }
    else {
        Py_ssize_t n = dim-1;
        while (n >= 0 && base->suboffsets[n] < 0)
            n--;
        if (n < 0)
            goto adjust_buf; /* all suboffsets are negative */
        /* ... or the start offset must be folded into the suboffset applied
           after the last indirection before this dimension. */
        base->suboffsets[n] = base->suboffsets[n] + base->strides[dim] * start;
    }
    base->shape[dim] = slicelength;
    base->strides[dim] = base->strides[dim] * step;
    return 0;
}
/* Return 1 iff 'key' is a non-empty tuple consisting entirely of slice
   objects (used only to give a better NotImplementedError message). */
static int
is_multislice(PyObject *key)
{
    Py_ssize_t n, i;

    if (!PyTuple_Check(key))
        return 0;
    n = PyTuple_GET_SIZE(key);
    if (n == 0)
        return 0;

    for (i = 0; i < n; i++) {
        if (!PySlice_Check(PyTuple_GET_ITEM(key, i)))
            return 0;
    }
    return 1;
}
/* mv[obj] returns an object holding the data for one element if obj
   fully indexes the memoryview or another memoryview object if it
   does not.
   0-d memoryview objects can be referenced using mv[...] or mv[()]
   but not with anything else. */
static PyObject *
memory_subscript(PyMemoryViewObject *self, PyObject *key)
{
    Py_buffer *view;
    view = &(self->view);
    CHECK_RELEASED(self);
    if (view->ndim == 0) {
        /* mv[()] unpacks the single element ... */
        if (PyTuple_Check(key) && PyTuple_GET_SIZE(key) == 0) {
            const char *fmt = adjust_fmt(view);
            if (fmt == NULL)
                return NULL;
            return unpack_single(view->buf, fmt);
        }
        /* ... while mv[...] is the view itself. */
        else if (key == Py_Ellipsis) {
            Py_INCREF(self);
            return (PyObject *)self;
        }
        else {
            PyErr_SetString(PyExc_TypeError,
                "invalid indexing of 0-dim memory");
            return NULL;
        }
    }
    if (PyIndex_Check(key)) {
        Py_ssize_t index;
        index = PyNumber_AsSsize_t(key, PyExc_IndexError);
        if (index == -1 && PyErr_Occurred())
            return NULL;
        return memory_item(self, index);
    }
    else if (PySlice_Check(key)) {
        /* Slicing produces a new memoryview sharing the managed buffer;
           only the first dimension of the private copy is adjusted. */
        PyMemoryViewObject *sliced;
        sliced = (PyMemoryViewObject *)mbuf_add_view(self->mbuf, view);
        if (sliced == NULL)
            return NULL;
        if (init_slice(&sliced->view, key, 0) < 0) {
            Py_DECREF(sliced);
            return NULL;
        }
        init_len(&sliced->view);
        init_flags(sliced);
        return (PyObject *)sliced;
    }
    else if (is_multislice(key)) {
        PyErr_SetString(PyExc_NotImplementedError,
            "multi-dimensional slicing is not implemented");
        return NULL;
    }
    PyErr_SetString(PyExc_TypeError, "memoryview: invalid slice key");
    return NULL;
}
/* mv[key] = value: assign to a writable memoryview.  Supported forms are
   scalar assignment to a 0-d view (mv[...] / mv[()]), single-item
   assignment by index, and slice assignment from another buffer exporter
   (ndim == 1 only).  Deleting items is not possible.  Returns 0 on success,
   -1 with an exception set on error. */
static int
memory_ass_sub(PyMemoryViewObject *self, PyObject *key, PyObject *value)
{
    Py_buffer *view = &(self->view);
    Py_buffer src;
    const char *fmt;
    char *ptr;
    CHECK_RELEASED_INT(self);
    fmt = adjust_fmt(view);
    if (fmt == NULL)
        return -1;
    if (view->readonly) {
        PyErr_SetString(PyExc_TypeError, "cannot modify read-only memory");
        return -1;
    }
    if (value == NULL) {
        PyErr_SetString(PyExc_TypeError, "cannot delete memory");
        return -1;
    }
    if (view->ndim == 0) {
        if (key == Py_Ellipsis ||
            (PyTuple_Check(key) && PyTuple_GET_SIZE(key)==0)) {
            ptr = (char *)view->buf;
            return pack_single(ptr, value, fmt);
        }
        else {
            PyErr_SetString(PyExc_TypeError,
                "invalid indexing of 0-dim memory");
            return -1;
        }
    }
    if (view->ndim != 1) {
        PyErr_SetString(PyExc_NotImplementedError,
            "memoryview assignments are currently restricted to ndim = 1");
        return -1;
    }
    if (PyIndex_Check(key)) {
        Py_ssize_t index = PyNumber_AsSsize_t(key, PyExc_IndexError);
        if (index == -1 && PyErr_Occurred())
            return -1;
        ptr = ptr_from_index(view, index);
        if (ptr == NULL)
            return -1;
        return pack_single(ptr, value, fmt);
    }
    /* one-dimensional: fast path */
    if (PySlice_Check(key) && view->ndim == 1) {
        Py_buffer dest; /* sliced view */
        Py_ssize_t arrays[3];
        int ret = -1;
        /* rvalue must be an exporter */
        if (PyObject_GetBuffer(value, &src, PyBUF_FULL_RO) < 0)
            return ret;
        /* Build a throwaway copy of the view whose shape/strides/suboffsets
           live on the stack, so init_slice() does not clobber self->view. */
        dest = *view;
        dest.shape = &arrays[0]; dest.shape[0] = view->shape[0];
        dest.strides = &arrays[1]; dest.strides[0] = view->strides[0];
        if (view->suboffsets) {
            dest.suboffsets = &arrays[2]; dest.suboffsets[0] = view->suboffsets[0];
        }
        if (init_slice(&dest, key, 0) < 0)
            goto end_block;
        dest.len = dest.shape[0] * dest.itemsize;
        ret = copy_single(&dest, &src);
    end_block:
        PyBuffer_Release(&src);
        return ret;
    }
    else if (PySlice_Check(key) || is_multislice(key)) {
        /* Call memory_subscript() to produce a sliced lvalue, then copy
           rvalue into lvalue. This is already implemented in _testbuffer.c. */
        PyErr_SetString(PyExc_NotImplementedError,
            "memoryview slice assignments are currently restricted "
            "to ndim = 1");
        return -1;
    }
    PyErr_SetString(PyExc_TypeError, "memoryview: invalid slice key");
    return -1;
}
/* len(mv): a 0-d view behaves as a scalar of length 1; otherwise the
   length is the extent of the first dimension. */
static Py_ssize_t
memory_length(PyMemoryViewObject *self)
{
    CHECK_RELEASED_INT(self);
    if (self->view.ndim == 0)
        return 1;
    return self->view.shape[0];
}
/* As mapping */
/* Supports len(mv), mv[key] and mv[key] = value. */
static PyMappingMethods memory_as_mapping = {
    (lenfunc)memory_length,               /* mp_length */
    (binaryfunc)memory_subscript,         /* mp_subscript */
    (objobjargproc)memory_ass_sub,        /* mp_ass_subscript */
};
/* As sequence */
/* Only sq_item is provided, so that iteration over a memoryview works;
   length and slicing go through the mapping protocol above. */
static PySequenceMethods memory_as_sequence = {
    0,                                    /* sq_length */
    0,                                    /* sq_concat */
    0,                                    /* sq_repeat */
    (ssizeargfunc)memory_item,            /* sq_item */
};
/**************************************************************************/
/* Comparisons */
/**************************************************************************/
/* Compare the 'type'-typed values at 'p' and 'q' (possibly unaligned) by
   memcpy-ing them into properly typed temporaries; sets the local variable
   'equal' in the enclosing scope. */
#define CMP_SINGLE(p, q, type) \
    do {                                 \
        type x;                          \
        type y;                          \
        memcpy((char *)&x, p, sizeof x); \
        memcpy((char *)&y, q, sizeof y); \
        equal = (x == y);                \
    } while (0)
/* Compare the single item at 'p' with the single item at 'q', both having
   native format character 'fmt'.  Returns 1 (equal), 0 (not equal) or -1
   for an unrecognized format (mapped to Py_NotImplemented by the caller). */
Py_LOCAL_INLINE(int)
unpack_cmp(const char *p, const char *q, const char *fmt)
{
    int equal;
    switch (fmt[0]) {
    /* signed integers and fast path for 'B' */
    case 'B': return *((unsigned char *)p) == *((unsigned char *)q);
    case 'b': return *((signed char *)p) == *((signed char *)q);
    case 'h': CMP_SINGLE(p, q, short); return equal;
    case 'i': CMP_SINGLE(p, q, int); return equal;
    case 'l': CMP_SINGLE(p, q, long); return equal;
    /* boolean */
    #ifdef HAVE_C99_BOOL
    case '?': CMP_SINGLE(p, q, _Bool); return equal;
    #else
    case '?': CMP_SINGLE(p, q, char); return equal;
    #endif
    /* unsigned integers */
    case 'H': CMP_SINGLE(p, q, unsigned short); return equal;
    case 'I': CMP_SINGLE(p, q, unsigned int); return equal;
    case 'L': CMP_SINGLE(p, q, unsigned long); return equal;
    /* native 64-bit */
    #ifdef HAVE_LONG_LONG
    case 'q': CMP_SINGLE(p, q, PY_LONG_LONG); return equal;
    case 'Q': CMP_SINGLE(p, q, unsigned PY_LONG_LONG); return equal;
    #endif
    /* ssize_t and size_t */
    case 'n': CMP_SINGLE(p, q, Py_ssize_t); return equal;
    case 'N': CMP_SINGLE(p, q, size_t); return equal;
    /* floats */
    /* XXX DBL_EPSILON? */
    case 'f': CMP_SINGLE(p, q, float); return equal;
    case 'd': CMP_SINGLE(p, q, double); return equal;
    /* bytes object */
    case 'c': return *p == *q;
    /* pointer */
    case 'P': CMP_SINGLE(p, q, void *); return equal;
    /* Py_NotImplemented */
    default: return -1;
    }
}
/* Element-wise comparison of two one-dimensional arrays with identical
   logical structure (base case of the recursion).  Returns 1 (equal),
   0 (not equal) or -1 (unsupported format).  Assumption: ndim == 1. */
static int
cmp_base(const char *p, const char *q, const Py_ssize_t *shape,
         const Py_ssize_t *pstrides, const Py_ssize_t *psuboffsets,
         const Py_ssize_t *qstrides, const Py_ssize_t *qsuboffsets,
         const char *fmt)
{
    Py_ssize_t i;

    for (i = 0; i < shape[0]; i++) {
        const char *xp = ADJUST_PTR(p, psuboffsets);
        const char *xq = ADJUST_PTR(q, qsuboffsets);
        int equal = unpack_cmp(xp, xq, fmt);
        if (equal <= 0)
            return equal;
        p += pstrides[0];
        q += qstrides[0];
    }
    return 1;
}
/* Recursively compare two multi-dimensional arrays that have the same
   logical structure, one dimension at a time.  Returns 1 (equal),
   0 (not equal) or -1 (unsupported format).  Assumption: ndim >= 1. */
static int
cmp_rec(const char *p, const char *q,
        Py_ssize_t ndim, const Py_ssize_t *shape,
        const Py_ssize_t *pstrides, const Py_ssize_t *psuboffsets,
        const Py_ssize_t *qstrides, const Py_ssize_t *qsuboffsets,
        const char *fmt)
{
    Py_ssize_t i;

    assert(ndim >= 1);
    assert(shape != NULL);
    assert(pstrides != NULL);
    assert(qstrides != NULL);

    if (ndim == 1) {
        return cmp_base(p, q, shape,
                        pstrides, psuboffsets,
                        qstrides, qsuboffsets,
                        fmt);
    }

    for (i = 0; i < shape[0]; i++) {
        const char *xp = ADJUST_PTR(p, psuboffsets);
        const char *xq = ADJUST_PTR(q, qsuboffsets);
        int equal = cmp_rec(xp, xq, ndim-1, shape+1,
                            pstrides+1, psuboffsets ? psuboffsets+1 : NULL,
                            qstrides+1, qsuboffsets ? qsuboffsets+1 : NULL,
                            fmt);
        if (equal <= 0)
            return equal;
        p += pstrides[0];
        q += qstrides[0];
    }
    return 1;
}
/* Rich comparison for memoryview: only == and != are defined.  Released
   views compare equal only to themselves.  The other operand may be any
   buffer exporter; structural mismatch means "not equal", while an
   unsupported format or operation yields Py_NotImplemented (equal == -1). */
static PyObject *
memory_richcompare(PyObject *v, PyObject *w, int op)
{
    PyObject *res;
    Py_buffer wbuf, *vv, *ww = NULL;
    const char *vfmt, *wfmt;
    int equal = -1; /* Py_NotImplemented */
    if (op != Py_EQ && op != Py_NE)
        goto result; /* Py_NotImplemented */
    assert(PyMemoryView_Check(v));
    if (BASE_INACCESSIBLE(v)) {
        equal = (v == w);
        goto result;
    }
    vv = VIEW_ADDR(v);
    if (PyMemoryView_Check(w)) {
        if (BASE_INACCESSIBLE(w)) {
            equal = (v == w);
            goto result;
        }
        ww = VIEW_ADDR(w);
    }
    else {
        /* Non-memoryview operand: acquire a temporary buffer.  ww == &wbuf
           marks that it must be released before returning. */
        if (PyObject_GetBuffer(w, &wbuf, PyBUF_FULL_RO) < 0) {
            PyErr_Clear();
            goto result; /* Py_NotImplemented */
        }
        ww = &wbuf;
    }
    vfmt = adjust_fmt(vv);
    wfmt = adjust_fmt(ww);
    if (vfmt == NULL || wfmt == NULL) {
        PyErr_Clear();
        goto result; /* Py_NotImplemented */
    }
    /* Different logical structure (shape/format) means "not equal". */
    if (cmp_structure(vv, ww) < 0) {
        PyErr_Clear();
        equal = 0;
        goto result;
    }
    if (vv->ndim == 0) {
        equal = unpack_cmp(vv->buf, ww->buf, vfmt);
    }
    else if (vv->ndim == 1) {
        equal = cmp_base(vv->buf, ww->buf, vv->shape,
                         vv->strides, vv->suboffsets,
                         ww->strides, ww->suboffsets,
                         vfmt);
    }
    else {
        equal = cmp_rec(vv->buf, ww->buf, vv->ndim, vv->shape,
                        vv->strides, vv->suboffsets,
                        ww->strides, ww->suboffsets,
                        vfmt);
    }
result:
    if (equal < 0)
        res = Py_NotImplemented;
    else if ((equal && op == Py_EQ) || (!equal && op == Py_NE))
        res = Py_True;
    else
        res = Py_False;
    if (ww == &wbuf)
        PyBuffer_Release(ww);
    Py_INCREF(res);
    return res;
}
/**************************************************************************/
/* Hash */
/**************************************************************************/
/* hash(mv): the hash of a read-only memoryview is the hash of its flattened
   (C-contiguous) byte representation, computed lazily and cached in
   self->hash.  Writable views and views over unhashable base objects
   raise.  Returns -1 with an exception set on error.

   Fix: the original assigned 'mem = view->buf' before CHECK_RELEASED_INT()
   had validated the view; read the buffer pointer only after the released
   check passes. */
static Py_hash_t
memory_hash(PyMemoryViewObject *self)
{
    if (self->hash == -1) {
        Py_buffer *view = &self->view;
        char *mem;
        CHECK_RELEASED_INT(self);
        mem = view->buf;
        if (!view->readonly) {
            PyErr_SetString(PyExc_ValueError,
                "cannot hash writable memoryview object");
            return -1;
        }
        /* The base object must itself be hashable; hashing a view over
           mutable memory would break the hash invariant. */
        if (view->obj != NULL && PyObject_Hash(view->obj) == -1) {
            /* Keep the original error message */
            return -1;
        }
        if (!MV_C_CONTIGUOUS(self->flags)) {
            /* Flatten into a temporary buffer first. */
            mem = PyMem_Malloc(view->len);
            if (mem == NULL) {
                PyErr_NoMemory();
                return -1;
            }
            if (buffer_to_c_contiguous(mem, view) < 0) {
                PyMem_Free(mem);
                return -1;
            }
        }
        /* Can't fail */
        self->hash = _Py_HashBytes((unsigned char *)mem, view->len);
        if (mem != view->buf)
            PyMem_Free(mem);
    }
    return self->hash;
}
/**************************************************************************/
/* getters */
/**************************************************************************/
/* Build a tuple of Python ints from 'len' Py_ssize_t values.  A NULL
   array maps to the empty tuple (used for missing shape/strides arrays). */
static PyObject *
_IntTupleFromSsizet(int len, Py_ssize_t *vals)
{
    PyObject *tup;
    int i;

    if (vals == NULL)
        return PyTuple_New(0);

    tup = PyTuple_New(len);
    if (tup == NULL)
        return NULL;
    for (i = 0; i < len; i++) {
        PyObject *num = PyLong_FromSsize_t(vals[i]);
        if (num == NULL) {
            Py_DECREF(tup);
            return NULL;
        }
        PyTuple_SET_ITEM(tup, i, num);
    }
    return tup;
}
/* mv.obj: the underlying exporting object, or None when there is none. */
static PyObject *
memory_obj_get(PyMemoryViewObject *self)
{
    PyObject *obj;

    CHECK_RELEASED(self);
    obj = self->view.obj;
    if (obj == NULL)
        Py_RETURN_NONE;
    Py_INCREF(obj);
    return obj;
}
/* mv.nbytes: total size of the buffer in bytes (view.len). */
static PyObject *
memory_nbytes_get(PyMemoryViewObject *self)
{
    Py_ssize_t nbytes;
    CHECK_RELEASED(self);
    nbytes = self->view.len;
    return PyLong_FromSsize_t(nbytes);
}
/* mv.format: the struct-module format string of the view. */
static PyObject *
memory_format_get(PyMemoryViewObject *self)
{
    const char *fmt;
    CHECK_RELEASED(self);
    fmt = self->view.format;
    return PyUnicode_FromString(fmt);
}
/* mv.itemsize: size in bytes of a single element. */
static PyObject *
memory_itemsize_get(PyMemoryViewObject *self)
{
    Py_ssize_t itemsize;
    CHECK_RELEASED(self);
    itemsize = self->view.itemsize;
    return PyLong_FromSsize_t(itemsize);
}
/* mv.shape: per-dimension extents as a tuple of ints. */
static PyObject *
memory_shape_get(PyMemoryViewObject *self)
{
    Py_buffer *view = &self->view;
    CHECK_RELEASED(self);
    return _IntTupleFromSsizet(view->ndim, view->shape);
}
/* mv.strides: per-dimension byte strides as a tuple of ints. */
static PyObject *
memory_strides_get(PyMemoryViewObject *self)
{
    Py_buffer *view = &self->view;
    CHECK_RELEASED(self);
    return _IntTupleFromSsizet(view->ndim, view->strides);
}
/* mv.suboffsets: per-dimension suboffsets (empty tuple when absent). */
static PyObject *
memory_suboffsets_get(PyMemoryViewObject *self)
{
    Py_buffer *view = &self->view;
    CHECK_RELEASED(self);
    return _IntTupleFromSsizet(view->ndim, view->suboffsets);
}
/* mv.readonly: True when the underlying buffer cannot be written. */
static PyObject *
memory_readonly_get(PyMemoryViewObject *self)
{
    int readonly;
    CHECK_RELEASED(self);
    readonly = self->view.readonly;
    return PyBool_FromLong(readonly);
}
/* mv.ndim: the number of dimensions of the view. */
static PyObject *
memory_ndim_get(PyMemoryViewObject *self)
{
    int ndim;
    CHECK_RELEASED(self);
    ndim = self->view.ndim;
    return PyLong_FromLong(ndim);
}
/* mv.c_contiguous: True when the view is C-contiguous. */
static PyObject *
memory_c_contiguous(PyMemoryViewObject *self, PyObject *dummy)
{
    long flag;
    CHECK_RELEASED(self);
    flag = MV_C_CONTIGUOUS(self->flags);
    return PyBool_FromLong(flag);
}
/* mv.f_contiguous: True when the view is Fortran contiguous. */
static PyObject *
memory_f_contiguous(PyMemoryViewObject *self, PyObject *dummy)
{
    long flag;
    CHECK_RELEASED(self);
    flag = MV_F_CONTIGUOUS(self->flags);
    return PyBool_FromLong(flag);
}
/* mv.contiguous: True when the view is contiguous in either order. */
static PyObject *
memory_contiguous(PyMemoryViewObject *self, PyObject *dummy)
{
    long flag;
    CHECK_RELEASED(self);
    flag = MV_ANY_CONTIGUOUS(self->flags);
    return PyBool_FromLong(flag);
}
/* Read-only attribute table exposing the Py_buffer fields and the
   contiguity flags. */
static PyGetSetDef memory_getsetlist[] = {
    {"obj",             (getter)memory_obj_get,        NULL, NULL},
    {"nbytes",          (getter)memory_nbytes_get,     NULL, NULL},
    {"readonly",        (getter)memory_readonly_get,   NULL, NULL},
    {"itemsize",        (getter)memory_itemsize_get,   NULL, NULL},
    {"format",          (getter)memory_format_get,     NULL, NULL},
    {"ndim",            (getter)memory_ndim_get,       NULL, NULL},
    {"shape",           (getter)memory_shape_get,      NULL, NULL},
    {"strides",         (getter)memory_strides_get,    NULL, NULL},
    {"suboffsets",      (getter)memory_suboffsets_get, NULL, NULL},
    {"c_contiguous",    (getter)memory_c_contiguous,   NULL, NULL},
    {"f_contiguous",    (getter)memory_f_contiguous,   NULL, NULL},
    {"contiguous",      (getter)memory_contiguous,     NULL, NULL},
    {NULL, NULL, NULL, NULL},
};
/* Method table.  __enter__/__exit__ (memory_enter/memory_exit, defined
   elsewhere in this file) provide the context manager protocol. */
static PyMethodDef memory_methods[] = {
    {"release", (PyCFunction)memory_release, METH_NOARGS},
    {"tobytes", (PyCFunction)memory_tobytes, METH_NOARGS, NULL},
    {"tolist", (PyCFunction)memory_tolist, METH_NOARGS, NULL},
    {"cast", (PyCFunction)memory_cast, METH_VARARGS|METH_KEYWORDS, NULL},
    {"__enter__", memory_enter, METH_NOARGS},
    {"__exit__", memory_exit, METH_VARARGS},
    {NULL, NULL}
};
/* The memoryview type object.  Variable-size: the shape/strides/suboffsets
   arrays are allocated inline after the object header (ob_array), hence the
   tp_basicsize/tp_itemsize pair. */
PyTypeObject PyMemoryView_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "memoryview",                             /* tp_name */
    offsetof(PyMemoryViewObject, ob_array),   /* tp_basicsize */
    sizeof(Py_ssize_t),                       /* tp_itemsize */
    (destructor)memory_dealloc,               /* tp_dealloc */
    0,                                        /* tp_print */
    0,                                        /* tp_getattr */
    0,                                        /* tp_setattr */
    0,                                        /* tp_reserved */
    (reprfunc)memory_repr,                    /* tp_repr */
    0,                                        /* tp_as_number */
    &memory_as_sequence,                      /* tp_as_sequence */
    &memory_as_mapping,                       /* tp_as_mapping */
    (hashfunc)memory_hash,                    /* tp_hash */
    0,                                        /* tp_call */
    0,                                        /* tp_str */
    PyObject_GenericGetAttr,                  /* tp_getattro */
    0,                                        /* tp_setattro */
    &memory_as_buffer,                        /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,  /* tp_flags */
    memory_doc,                               /* tp_doc */
    (traverseproc)memory_traverse,            /* tp_traverse */
    (inquiry)memory_clear,                    /* tp_clear */
    memory_richcompare,                       /* tp_richcompare */
    0,                                        /* tp_weaklistoffset */
    0,                                        /* tp_iter */
    0,                                        /* tp_iternext */
    memory_methods,                           /* tp_methods */
    0,                                        /* tp_members */
    memory_getsetlist,                        /* tp_getset */
    0,                                        /* tp_base */
    0,                                        /* tp_dict */
    0,                                        /* tp_descr_get */
    0,                                        /* tp_descr_set */
    0,                                        /* tp_dictoffset */
    0,                                        /* tp_init */
    0,                                        /* tp_alloc */
    memory_new,                               /* tp_new */
};