cpython/Objects/tupleobject.c

/* Tuple object implementation */
#include "Python.h"
/* Speed optimization to avoid frequent malloc/free of small tuples */
#ifndef MAXSAVESIZE
#define MAXSAVESIZE 20 /* Largest tuple to save on free list */
#endif
#ifndef MAXSAVEDTUPLES
#define MAXSAVEDTUPLES 2000 /* Maximum number of tuples of each size to save */
#endif
#if MAXSAVESIZE > 0
/* Entries 1 up to MAXSAVESIZE are free lists, entry 0 is the empty
tuple () of which at most one instance will be allocated.
*/
static PyTupleObject *free_tuples[MAXSAVESIZE];
static int num_free_tuples[MAXSAVESIZE];
#endif
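/* For illustration: free tuples of a given size form a singly linked list
   threaded through their ob_item[0] slot.  tupledealloc() pushes a freed
   tuple of length len with

       op->ob_item[0] = (PyObject *) free_tuples[len];
       num_free_tuples[len]++;
       free_tuples[len] = op;

   and PyTuple_New() pops one again by reversing these steps.  Entry 0 holds
   the one shared empty tuple, which is handed out with an extra reference
   instead of being reallocated. */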
#ifdef COUNT_ALLOCS
int fast_tuple_allocs;
int tuple_zero_allocs;
#endif
PyObject *
PyTuple_New(register Py_ssize_t size)
{
register PyTupleObject *op;
Py_ssize_t i;
if (size < 0) {
PyErr_BadInternalCall();
return NULL;
}
#if MAXSAVESIZE > 0
if (size == 0 && free_tuples[0]) {
op = free_tuples[0];
Py_INCREF(op);
#ifdef COUNT_ALLOCS
tuple_zero_allocs++;
#endif
return (PyObject *) op;
}
if (size < MAXSAVESIZE && (op = free_tuples[size]) != NULL) {
free_tuples[size] = (PyTupleObject *) op->ob_item[0];
num_free_tuples[size]--;
#ifdef COUNT_ALLOCS
fast_tuple_allocs++;
#endif
/* Inline PyObject_InitVar */
#ifdef Py_TRACE_REFS
op->ob_size = size;
op->ob_type = &PyTuple_Type;
#endif
_Py_NewReference((PyObject *)op);
}
else
#endif
{
Py_ssize_t nbytes = size * sizeof(PyObject *);
/* Check for overflow */
if (nbytes / sizeof(PyObject *) != (size_t)size ||
(nbytes += sizeof(PyTupleObject) - sizeof(PyObject *))
<= 0)
{
return PyErr_NoMemory();
}
op = PyObject_GC_NewVar(PyTupleObject, &PyTuple_Type, size);
if (op == NULL)
return NULL;
}
for (i=0; i < size; i++)
op->ob_item[i] = NULL;
#if MAXSAVESIZE > 0
if (size == 0) {
free_tuples[0] = op;
++num_free_tuples[0];
Py_INCREF(op); /* extra INCREF so that this is never freed */
}
#endif
_PyObject_GC_TRACK(op);
return (PyObject *) op;
}
Py_ssize_t
PyTuple_Size(register PyObject *op)
{
if (!PyTuple_Check(op)) {
PyErr_BadInternalCall();
return -1;
}
else
return ((PyTupleObject *)op)->ob_size;
}
PyObject *
PyTuple_GetItem(register PyObject *op, register Py_ssize_t i)
{
if (!PyTuple_Check(op)) {
PyErr_BadInternalCall();
return NULL;
}
if (i < 0 || i >= ((PyTupleObject *)op) -> ob_size) {
PyErr_SetString(PyExc_IndexError, "tuple index out of range");
return NULL;
}
return ((PyTupleObject *)op) -> ob_item[i];
}
int
PyTuple_SetItem(register PyObject *op, register Py_ssize_t i, PyObject *newitem)
{
register PyObject *olditem;
register PyObject **p;
if (!PyTuple_Check(op) || op->ob_refcnt != 1) {
Py_XDECREF(newitem);
PyErr_BadInternalCall();
return -1;
}
if (i < 0 || i >= ((PyTupleObject *)op) -> ob_size) {
Py_XDECREF(newitem);
PyErr_SetString(PyExc_IndexError,
"tuple assignment index out of range");
return -1;
}
p = ((PyTupleObject *)op) -> ob_item + i;
olditem = *p;
*p = newitem;
Py_XDECREF(olditem);
return 0;
}
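/* Typical C construction pattern (a sketch; error checks elided).  Note
   that PyTuple_SetItem steals a reference to its argument and may only be
   used while the tuple is not yet shared (ob_refcnt == 1):

       PyObject *t = PyTuple_New(2);
       PyTuple_SetItem(t, 0, PyInt_FromLong(1L));
       PyTuple_SetItem(t, 1, PyString_FromString("two"));
*/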
PyObject *
PyTuple_Pack(Py_ssize_t n, ...)
{
Py_ssize_t i;
PyObject *o;
PyObject *result;
PyObject **items;
va_list vargs;
va_start(vargs, n);
result = PyTuple_New(n);
if (result == NULL)
return NULL;
items = ((PyTupleObject *)result)->ob_item;
for (i = 0; i < n; i++) {
o = va_arg(vargs, PyObject *);
Py_INCREF(o);
items[i] = o;
}
va_end(vargs);
return result;
}
/* Methods */
static void
tupledealloc(register PyTupleObject *op)
{
register Py_ssize_t i;
register Py_ssize_t len = op->ob_size;
PyObject_GC_UnTrack(op);
Py_TRASHCAN_SAFE_BEGIN(op)
if (len > 0) {
i = len;
while (--i >= 0)
Py_XDECREF(op->ob_item[i]);
#if MAXSAVESIZE > 0
if (len < MAXSAVESIZE &&
num_free_tuples[len] < MAXSAVEDTUPLES &&
op->ob_type == &PyTuple_Type)
{
op->ob_item[0] = (PyObject *) free_tuples[len];
num_free_tuples[len]++;
free_tuples[len] = op;
goto done; /* return */
}
#endif
}
op->ob_type->tp_free((PyObject *)op);
done:
Py_TRASHCAN_SAFE_END(op)
}
static int
tupleprint(PyTupleObject *op, FILE *fp, int flags)
{
Py_ssize_t i;
fprintf(fp, "(");
for (i = 0; i < op->ob_size; i++) {
if (i > 0)
fprintf(fp, ", ");
if (PyObject_Print(op->ob_item[i], fp, 0) != 0)
return -1;
}
if (op->ob_size == 1)
fprintf(fp, ",");
fprintf(fp, ")");
return 0;
}
static PyObject *
tuplerepr(PyTupleObject *v)
{
Py_ssize_t i, n;
PyObject *s, *temp;
PyObject *pieces, *result = NULL;
n = v->ob_size;
if (n == 0)
return PyString_FromString("()");
pieces = PyTuple_New(n);
if (pieces == NULL)
return NULL;
/* Do repr() on each element. */
for (i = 0; i < n; ++i) {
s = PyObject_Repr(v->ob_item[i]);
if (s == NULL)
goto Done;
PyTuple_SET_ITEM(pieces, i, s);
}
/* Add "()" decorations to the first and last items. */
assert(n > 0);
s = PyString_FromString("(");
if (s == NULL)
goto Done;
temp = PyTuple_GET_ITEM(pieces, 0);
PyString_ConcatAndDel(&s, temp);
PyTuple_SET_ITEM(pieces, 0, s);
if (s == NULL)
goto Done;
s = PyString_FromString(n == 1 ? ",)" : ")");
if (s == NULL)
goto Done;
temp = PyTuple_GET_ITEM(pieces, n-1);
PyString_ConcatAndDel(&temp, s);
PyTuple_SET_ITEM(pieces, n-1, temp);
if (temp == NULL)
goto Done;
/* Paste them all together with ", " between. */
s = PyString_FromString(", ");
if (s == NULL)
goto Done;
result = _PyString_Join(s, pieces);
Py_DECREF(s);
Done:
Py_DECREF(pieces);
return result;
}
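/* E.g. repr(()) yields "()", repr((1,)) yields "(1,)" and repr((1, 2))
   yields "(1, 2)"; the trailing comma emitted for n == 1 is what lets a
   one-tuple be read back as a tuple. */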
/* The addend 82520 was selected from the range(0, 1000000) for
   generating the greatest number of prime multipliers for tuples
   up to length eight:
1082527, 1165049, 1082531, 1165057, 1247581, 1330103, 1082533,
1330111, 1412633, 1165069, 1247599, 1495177, 1577699
*/
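/* Equivalently, for a tuple v of length n the function below computes

       x(0) = 0x345678,  m(0) = 1000003
       x(k+1) = (x(k) ^ hash(v[k])) * m(k)
       m(k+1) = m(k) + 82520 + 2*(n - k - 1)

   and returns x(n) + 97531, with a result of -1 remapped to -2 because
   -1 is reserved for signalling errors. */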
static long
tuplehash(PyTupleObject *v)
{
register long x, y;
register Py_ssize_t len = v->ob_size;
register PyObject **p;
long mult = 1000003L;
x = 0x345678L;
p = v->ob_item;
while (--len >= 0) {
y = PyObject_Hash(*p++);
if (y == -1)
return -1;
x = (x ^ y) * mult;
/* the cast might truncate len; that doesn't change hash stability */
mult += (long)(82520L + len + len);
}
x += 97531L;
if (x == -1)
x = -2;
return x;
}
static Py_ssize_t
tuplelength(PyTupleObject *a)
{
return a->ob_size;
}
static int
tuplecontains(PyTupleObject *a, PyObject *el)
{
Py_ssize_t i;
int cmp;
for (i = 0, cmp = 0 ; cmp == 0 && i < a->ob_size; ++i)
cmp = PyObject_RichCompareBool(el, PyTuple_GET_ITEM(a, i),
Py_EQ);
return cmp;
}
static PyObject *
tupleitem(register PyTupleObject *a, register Py_ssize_t i)
{
if (i < 0 || i >= a->ob_size) {
PyErr_SetString(PyExc_IndexError, "tuple index out of range");
return NULL;
}
Py_INCREF(a->ob_item[i]);
return a->ob_item[i];
}
static PyObject *
tupleslice(register PyTupleObject *a, register Py_ssize_t ilow,
register Py_ssize_t ihigh)
{
register PyTupleObject *np;
PyObject **src, **dest;
register Py_ssize_t i;
Py_ssize_t len;
if (ilow < 0)
ilow = 0;
if (ihigh > a->ob_size)
ihigh = a->ob_size;
if (ihigh < ilow)
ihigh = ilow;
if (ilow == 0 && ihigh == a->ob_size && PyTuple_CheckExact(a)) {
Py_INCREF(a);
return (PyObject *)a;
}
len = ihigh - ilow;
np = (PyTupleObject *)PyTuple_New(len);
if (np == NULL)
return NULL;
src = a->ob_item + ilow;
dest = np->ob_item;
for (i = 0; i < len; i++) {
PyObject *v = src[i];
Py_INCREF(v);
dest[i] = v;
}
return (PyObject *)np;
}
PyObject *
PyTuple_GetSlice(PyObject *op, Py_ssize_t i, Py_ssize_t j)
{
if (op == NULL || !PyTuple_Check(op)) {
PyErr_BadInternalCall();
return NULL;
}
return tupleslice((PyTupleObject *)op, i, j);
}
static PyObject *
tupleconcat(register PyTupleObject *a, register PyObject *bb)
{
register Py_ssize_t size;
register Py_ssize_t i;
PyObject **src, **dest;
PyTupleObject *np;
if (!PyTuple_Check(bb)) {
PyErr_Format(PyExc_TypeError,
"can only concatenate tuple (not \"%.200s\") to tuple",
bb->ob_type->tp_name);
1990-10-14 20:07:46 +08:00
return NULL;
}
#define b ((PyTupleObject *)bb)
size = a->ob_size + b->ob_size;
if (size < 0)
return PyErr_NoMemory();
np = (PyTupleObject *) PyTuple_New(size);
if (np == NULL) {
return NULL;
}
src = a->ob_item;
dest = np->ob_item;
for (i = 0; i < a->ob_size; i++) {
PyObject *v = src[i];
Py_INCREF(v);
dest[i] = v;
}
src = b->ob_item;
dest = np->ob_item + a->ob_size;
for (i = 0; i < b->ob_size; i++) {
PyObject *v = src[i];
Py_INCREF(v);
dest[i] = v;
}
return (PyObject *)np;
#undef b
}
static PyObject *
tuplerepeat(PyTupleObject *a, Py_ssize_t n)
{
Py_ssize_t i, j;
Py_ssize_t size;
PyTupleObject *np;
PyObject **p, **items;
if (n < 0)
n = 0;
if (a->ob_size == 0 || n == 1) {
if (PyTuple_CheckExact(a)) {
/* Since tuples are immutable, we can return a shared
copy in this case */
Py_INCREF(a);
return (PyObject *)a;
}
if (a->ob_size == 0)
return PyTuple_New(0);
}
size = a->ob_size * n;
if (size/a->ob_size != n)
return PyErr_NoMemory();
np = (PyTupleObject *) PyTuple_New(size);
if (np == NULL)
return NULL;
p = np->ob_item;
items = a->ob_item;
for (i = 0; i < n; i++) {
for (j = 0; j < a->ob_size; j++) {
*p = items[j];
Py_INCREF(*p);
p++;
}
}
return (PyObject *) np;
}
static int
tupletraverse(PyTupleObject *o, visitproc visit, void *arg)
{
Py_ssize_t i;
PyObject *x;
for (i = o->ob_size; --i >= 0; ) {
x = o->ob_item[i];
if (x != NULL) {
int err = visit(x, arg);
if (err)
return err;
}
}
return 0;
}
static PyObject *
tuplerichcompare(PyObject *v, PyObject *w, int op)
{
PyTupleObject *vt, *wt;
Py_ssize_t i;
Py_ssize_t vlen, wlen;
if (!PyTuple_Check(v) || !PyTuple_Check(w)) {
Py_INCREF(Py_NotImplemented);
return Py_NotImplemented;
}
vt = (PyTupleObject *)v;
wt = (PyTupleObject *)w;
vlen = vt->ob_size;
wlen = wt->ob_size;
/* Note: the corresponding code for lists has an "early out" test
* here when op is EQ or NE and the lengths differ. That pays there,
* but Tim was unable to find any real code where EQ/NE tuple
* compares don't have the same length, so testing for it here would
* have cost without benefit.
*/
/* Search for the first index where items are different.
* Note that because tuples are immutable, it's safe to reuse
* vlen and wlen across the comparison calls.
*/
for (i = 0; i < vlen && i < wlen; i++) {
int k = PyObject_RichCompareBool(vt->ob_item[i],
wt->ob_item[i], Py_EQ);
if (k < 0)
return NULL;
if (!k)
break;
}
if (i >= vlen || i >= wlen) {
/* No more items to compare -- compare sizes */
int cmp;
PyObject *res;
switch (op) {
case Py_LT: cmp = vlen < wlen; break;
case Py_LE: cmp = vlen <= wlen; break;
case Py_EQ: cmp = vlen == wlen; break;
case Py_NE: cmp = vlen != wlen; break;
case Py_GT: cmp = vlen > wlen; break;
case Py_GE: cmp = vlen >= wlen; break;
default: return NULL; /* cannot happen */
}
if (cmp)
res = Py_True;
else
res = Py_False;
Py_INCREF(res);
return res;
}
/* We have an item that differs -- shortcuts for EQ/NE */
if (op == Py_EQ) {
Py_INCREF(Py_False);
return Py_False;
}
if (op == Py_NE) {
Py_INCREF(Py_True);
return Py_True;
}
/* Compare the final item again using the proper operator */
return PyObject_RichCompare(vt->ob_item[i], wt->ob_item[i], op);
}
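/* The net effect is ordinary lexicographic comparison: the first unequal
   pair of items decides the outcome, and if one tuple is a prefix of the
   other the shorter one compares less, e.g. (1, 2, 3) < (1, 2, 4) and
   (1, 2) < (1, 2, 3). */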
static PyObject *
tuple_subtype_new(PyTypeObject *type, PyObject *args, PyObject *kwds);
static PyObject *
tuple_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
PyObject *arg = NULL;
static char *kwlist[] = {"sequence", 0};
if (type != &PyTuple_Type)
return tuple_subtype_new(type, args, kwds);
if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O:tuple", kwlist, &arg))
return NULL;
if (arg == NULL)
return PyTuple_New(0);
else
return PySequence_Tuple(arg);
}
static PyObject *
tuple_subtype_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
PyObject *tmp, *new, *item;
Py_ssize_t i, n;
assert(PyType_IsSubtype(type, &PyTuple_Type));
tmp = tuple_new(&PyTuple_Type, args, kwds);
if (tmp == NULL)
return NULL;
assert(PyTuple_Check(tmp));
new = type->tp_alloc(type, n = PyTuple_GET_SIZE(tmp));
if (new == NULL)
return NULL;
for (i = 0; i < n; i++) {
item = PyTuple_GET_ITEM(tmp, i);
Py_INCREF(item);
PyTuple_SET_ITEM(new, i, item);
}
Py_DECREF(tmp);
return new;
}
PyDoc_STRVAR(tuple_doc,
"tuple() -> an empty tuple\n"
"tuple(sequence) -> tuple initialized from sequence's items\n"
"\n"
"If the argument is a tuple, the return value is the same object.");
static PySequenceMethods tuple_as_sequence = {
(lenfunc)tuplelength, /* sq_length */
(binaryfunc)tupleconcat, /* sq_concat */
(ssizeargfunc)tuplerepeat, /* sq_repeat */
(ssizeargfunc)tupleitem, /* sq_item */
(ssizessizeargfunc)tupleslice, /* sq_slice */
0, /* sq_ass_item */
0, /* sq_ass_slice */
(objobjproc)tuplecontains, /* sq_contains */
};
#define HASINDEX(o) PyType_HasFeature((o)->ob_type, Py_TPFLAGS_HAVE_INDEX)
static PyObject*
tuplesubscript(PyTupleObject* self, PyObject* item)
{
PyNumberMethods *nb = item->ob_type->tp_as_number;
if (nb != NULL && HASINDEX(item) && nb->nb_index != NULL) {
Py_ssize_t i = nb->nb_index(item);
if (i == -1 && PyErr_Occurred())
return NULL;
if (i < 0)
i += PyTuple_GET_SIZE(self);
return tupleitem(self, i);
}
else if (PySlice_Check(item)) {
Py_ssize_t start, stop, step, slicelength, cur, i;
PyObject* result;
PyObject* it;
PyObject **src, **dest;
if (PySlice_GetIndicesEx((PySliceObject*)item,
PyTuple_GET_SIZE(self),
&start, &stop, &step, &slicelength) < 0) {
return NULL;
}
if (slicelength <= 0) {
return PyTuple_New(0);
}
else {
result = PyTuple_New(slicelength);
src = self->ob_item;
dest = ((PyTupleObject *)result)->ob_item;
for (cur = start, i = 0; i < slicelength;
cur += step, i++) {
it = src[cur];
Py_INCREF(it);
dest[i] = it;
}
return result;
}
}
else {
PyErr_SetString(PyExc_TypeError,
"tuple indices must be integers");
return NULL;
}
}
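/* This handles both plain indexing and extended slicing, e.g. (sketch)
   (0, 1, 2, 3, 4)[::2] == (0, 2, 4) and (0, 1, 2)[-1] == 2; negative
   indices are adjusted here rather than in tupleitem(). */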
static PyObject *
tuple_getnewargs(PyTupleObject *v)
{
return Py_BuildValue("(N)", tupleslice(v, 0, v->ob_size));
}
static PyMethodDef tuple_methods[] = {
{"__getnewargs__", (PyCFunction)tuple_getnewargs, METH_NOARGS},
{NULL, NULL} /* sentinel */
};
static PyMappingMethods tuple_as_mapping = {
(lenfunc)tuplelength,
(binaryfunc)tuplesubscript,
0
};
static PyObject *tuple_iter(PyObject *seq);
PyTypeObject PyTuple_Type = {
PyObject_HEAD_INIT(&PyType_Type)
0,
"tuple",
sizeof(PyTupleObject) - sizeof(PyObject *),
sizeof(PyObject *),
(destructor)tupledealloc, /* tp_dealloc */
(printfunc)tupleprint, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_compare */
(reprfunc)tuplerepr, /* tp_repr */
0, /* tp_as_number */
&tuple_as_sequence, /* tp_as_sequence */
&tuple_as_mapping, /* tp_as_mapping */
(hashfunc)tuplehash, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
PyObject_GenericGetAttr, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
Py_TPFLAGS_BASETYPE, /* tp_flags */
tuple_doc, /* tp_doc */
(traverseproc)tupletraverse, /* tp_traverse */
0, /* tp_clear */
tuplerichcompare, /* tp_richcompare */
0, /* tp_weaklistoffset */
tuple_iter, /* tp_iter */
0, /* tp_iternext */
tuple_methods, /* tp_methods */
0, /* tp_members */
0, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
0, /* tp_dictoffset */
0, /* tp_init */
0, /* tp_alloc */
tuple_new, /* tp_new */
PyObject_GC_Del, /* tp_free */
};
/* The following function breaks the notion that tuples are immutable:
it changes the size of a tuple. We get away with this only if there
is only one module referencing the object. You can also think of it
as creating a new tuple object and destroying the old one, only more
efficiently. In any case, don't use this if the tuple may already be
known to some other part of the code. */
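/* Caller pattern (sketch): on failure the old tuple has already been
   released and *pv is set to NULL, so a caller only needs to propagate
   the error:

       if (_PyTuple_Resize(&t, n) < 0)
           return NULL;    (t is already NULL at this point)
*/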
int
_PyTuple_Resize(PyObject **pv, Py_ssize_t newsize)
{
register PyTupleObject *v;
register PyTupleObject *sv;
Py_ssize_t i;
Py_ssize_t oldsize;
v = (PyTupleObject *) *pv;
if (v == NULL || v->ob_type != &PyTuple_Type ||
(v->ob_size != 0 && v->ob_refcnt != 1)) {
*pv = 0;
Py_XDECREF(v);
PyErr_BadInternalCall();
return -1;
}
oldsize = v->ob_size;
if (oldsize == newsize)
return 0;
if (oldsize == 0) {
/* Empty tuples are often shared, so we should never
resize them in-place even if we do own the only
(current) reference */
Py_DECREF(v);
*pv = PyTuple_New(newsize);
return *pv == NULL ? -1 : 0;
}
/* XXX UNREF/NEWREF interface should be more symmetrical */
_Py_DEC_REFTOTAL;
_PyObject_GC_UNTRACK(v);
_Py_ForgetReference((PyObject *) v);
/* DECREF items deleted by shrinkage */
for (i = newsize; i < oldsize; i++) {
Py_XDECREF(v->ob_item[i]);
v->ob_item[i] = NULL;
}
sv = PyObject_GC_Resize(PyTupleObject, v, newsize);
if (sv == NULL) {
*pv = NULL;
PyObject_GC_Del(v);
return -1;
}
_Py_NewReference((PyObject *) sv);
/* Zero out items added by growing */
if (newsize > oldsize)
memset(&sv->ob_item[oldsize], 0,
sizeof(*sv->ob_item) * (newsize - oldsize));
*pv = (PyObject *) sv;
_PyObject_GC_TRACK(sv);
return 0;
}
void
PyTuple_Fini(void)
{
#if MAXSAVESIZE > 0
int i;
Py_XDECREF(free_tuples[0]);
free_tuples[0] = NULL;
for (i = 1; i < MAXSAVESIZE; i++) {
PyTupleObject *p, *q;
p = free_tuples[i];
free_tuples[i] = NULL;
while (p) {
q = p;
p = (PyTupleObject *)(p->ob_item[0]);
PyObject_GC_Del(q);
}
}
#endif
}
/*********************** Tuple Iterator **************************/
typedef struct {
PyObject_HEAD
long it_index;
PyTupleObject *it_seq; /* Set to NULL when iterator is exhausted */
} tupleiterobject;
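/* C code normally reaches this iterator through the abstract protocol,
   e.g. (sketch, error handling elided):

       PyObject *it = PyObject_GetIter(tup);   (dispatches to tuple_iter below)
       PyObject *item;
       while ((item = PyIter_Next(it)) != NULL) {
           ... use item ...
           Py_DECREF(item);
       }
       Py_DECREF(it);
*/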
PyTypeObject PyTupleIter_Type;
static PyObject *
tuple_iter(PyObject *seq)
{
tupleiterobject *it;
if (!PyTuple_Check(seq)) {
PyErr_BadInternalCall();
return NULL;
}
it = PyObject_GC_New(tupleiterobject, &PyTupleIter_Type);
if (it == NULL)
return NULL;
it->it_index = 0;
Py_INCREF(seq);
it->it_seq = (PyTupleObject *)seq;
_PyObject_GC_TRACK(it);
return (PyObject *)it;
}
static void
tupleiter_dealloc(tupleiterobject *it)
{
_PyObject_GC_UNTRACK(it);
Py_XDECREF(it->it_seq);
PyObject_GC_Del(it);
}
static int
tupleiter_traverse(tupleiterobject *it, visitproc visit, void *arg)
{
if (it->it_seq == NULL)
return 0;
return visit((PyObject *)it->it_seq, arg);
}
static PyObject *
tupleiter_next(tupleiterobject *it)
{
PyTupleObject *seq;
PyObject *item;
assert(it != NULL);
seq = it->it_seq;
if (seq == NULL)
return NULL;
assert(PyTuple_Check(seq));
if (it->it_index < PyTuple_GET_SIZE(seq)) {
item = PyTuple_GET_ITEM(seq, it->it_index);
++it->it_index;
Py_INCREF(item);
return item;
}
Py_DECREF(seq);
it->it_seq = NULL;
return NULL;
}
static PyObject *
tupleiter_len(tupleiterobject *it)
{
Py_ssize_t len = 0;
if (it->it_seq)
len = PyTuple_GET_SIZE(it->it_seq) - it->it_index;
return PyInt_FromSsize_t(len);
}
PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it)).");
static PyMethodDef tupleiter_methods[] = {
{"__length_hint__", (PyCFunction)tupleiter_len, METH_NOARGS, length_hint_doc},
{NULL, NULL} /* sentinel */
};
PyTypeObject PyTupleIter_Type = {
PyObject_HEAD_INIT(&PyType_Type)
0, /* ob_size */
"tupleiterator", /* tp_name */
sizeof(tupleiterobject), /* tp_basicsize */
0, /* tp_itemsize */
/* methods */
(destructor)tupleiter_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_compare */
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
0, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
PyObject_GenericGetAttr, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */
0, /* tp_doc */
(traverseproc)tupleiter_traverse, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
PyObject_SelfIter, /* tp_iter */
(iternextfunc)tupleiter_next, /* tp_iternext */
tupleiter_methods, /* tp_methods */
0,
};