Mirror of https://github.com/python/cpython.git
Fix typos in docs, error messages and comments (#123336)
Co-authored-by: Bénédikt Tran <10796600+picnixz@users.noreply.github.com>
commit 9e108b8719
parent 4c6dca8292
@@ -426,7 +426,7 @@ Initializing and finalizing the interpreter
 loaded extension modules loaded by Python are not unloaded. Small amounts of
 memory allocated by the Python interpreter may not be freed (if you find a leak,
 please report it). Memory tied up in circular references between objects is not
-freed. Interned strings will all be deallocated regarldess of their reference count.
+freed. Interned strings will all be deallocated regardless of their reference count.
 Some memory allocated by extension modules may not be freed. Some extensions may not
 work properly if their initialization routine is called more than once; this can
 happen if an application calls :c:func:`Py_Initialize` and :c:func:`Py_FinalizeEx`
@@ -277,7 +277,7 @@ cause any runtime effects with ``from __future__ import annotations``.
 .. section: Core and Builtins

 :exc:`SyntaxError` exceptions raised by the interpreter will highlight the
-full error range of the expression that consistutes the syntax error itself,
+full error range of the expression that constitutes the syntax error itself,
 instead of just where the problem is detected. Patch by Pablo Galindo.

 ..
@@ -221,7 +221,7 @@ returns an invalid file descriptor.

 Also \ escape \s in the http.server BaseHTTPRequestHandler.log_message so
 that it is technically possible to parse the line and reconstruct what the
-original data was. Without this a \xHH is ambiguious as to if it is a hex
+original data was. Without this a \xHH is ambiguous as to if it is a hex
 replacement we put in or the characters r"\x" came through in the original
 request line.

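For context, here is a minimal sketch of the ambiguity that entry describes, assuming a simplified escaping scheme; the helper names below are illustrative and are not the http.server implementation:

    def escape_control_only(line: str) -> str:
        # Old behaviour: hex-escape control characters, leave "\" untouched.
        return "".join(f"\\x{ord(c):02x}" if ord(c) < 0x20 else c for c in line)

    def escape_backslash_too(line: str) -> str:
        # New behaviour: escape "\" as well, so a literal r"\x16" stays distinguishable.
        return "".join(
            "\\\\" if c == "\\" else (f"\\x{ord(c):02x}" if ord(c) < 0x20 else c)
            for c in line
        )

    with_control_byte = "GET /\x16 HTTP/1.1"   # request containing the byte 0x16
    with_literal_text = r"GET /\x16 HTTP/1.1"  # request containing the four characters \x16

    # Without backslash escaping, both requests produce the same log line.
    assert escape_control_only(with_control_byte) == escape_control_only(with_literal_text)
    # With it, the log line can be parsed back to the original data.
    assert escape_backslash_too(with_control_byte) != escape_backslash_too(with_literal_text)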
@@ -287,7 +287,7 @@ a positional argument would lead to a :exc:`TypeError`.
 .. section: Library

 Group-related variables of ``_posixsubprocess`` module are renamed to stress
-that supplimentary group affinity is added to a fork, not replace the
+that supplementary group affinity is added to a fork, not replace the
 inherited ones. Patch by Oleg Iarygin.

 ..
@@ -414,7 +414,7 @@ callback.
 .. section: Library

 Fix memory leak in asyncio.Queue, when the queue has limited size and it is
-full, the cancelation of queue.put() can cause a memory leak. Patch by: José
+full, the cancellation of queue.put() can cause a memory leak. Patch by: José
 Melero.

 ..
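For context, a minimal sketch of the scenario this entry refers to, using only the public asyncio API (not code from the patch):

    import asyncio

    async def main():
        queue = asyncio.Queue(maxsize=1)
        await queue.put("first")                           # the queue is now full

        # This put() cannot complete until an item is removed, so it waits.
        pending = asyncio.create_task(queue.put("second"))
        await asyncio.sleep(0)                             # let the put() start waiting
        pending.cancel()                                   # cancel the pending put()
        try:
            await pending
        except asyncio.CancelledError:
            pass
        # The fix ensures the cancelled waiter no longer lingers inside the
        # queue's internal structures, which previously leaked memory.

    asyncio.run(main())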
@@ -40,7 +40,7 @@
 # - struct_abi_kind: for `struct`, defines how much of the struct is exposed:
 # - 'full-abi': All of the struct is part of the ABI, including the size
 # (users may define arrays of these structs).
-# Typically used for initalization, rather than at runtime.
+# Typically used for initialization, rather than at runtime.
 # - 'opaque': No members are part of the ABI, nor is the size. The Limited
 # API only handles these via pointers. The C definition should be
 # incomplete (opaque).
@@ -211,8 +211,8 @@ _testfunc_array_in_struct3C_set_defaults(void)
 /*
 * Test3D struct tests the MAX_STRUCT_SIZE 64. Structs containing arrays of up
 * to eight floating-point types are passed in registers on PPC64LE platforms.
-* This struct is used for within bounds test on PPC64LE platfroms and for an
-* out-of-bounds tests for platfroms where MAX_STRUCT_SIZE is less than 64.
+* This struct is used for within bounds test on PPC64LE platforms and for an
+* out-of-bounds tests for platforms where MAX_STRUCT_SIZE is less than 64.
 * See gh-110190.
 */
 typedef struct {
@@ -1855,7 +1855,7 @@ wrap_strftime(PyObject *object, PyObject *format, PyObject *timetuple,
 /* Buffer of maximum size of formatted year permitted by long. */
 char buf[SIZEOF_LONG * 5 / 2 + 2
 #ifdef Py_STRFTIME_C99_SUPPORT
-/* Need 6 more to accomodate dashes, 2-digit month and day for %F. */
+/* Need 6 more to accommodate dashes, 2-digit month and day for %F. */
 + 6
 #endif
 ];
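As a side note on the buffer expression above, a rough sanity check of the arithmetic (my reading of the constants, assuming a 64-bit long; not part of the change):

    # SIZEOF_LONG * 5 / 2 over-approximates the decimal digits a C long can
    # need (each byte contributes about 2.41 digits), and +2 leaves room for
    # a sign and the terminating NUL; Py_STRFTIME_C99_SUPPORT adds 6 for %F.
    SIZEOF_LONG = 8                                        # assumed 64-bit long
    digits_needed = len(str(2 ** (8 * SIZEOF_LONG - 1)))   # 19 digits for LONG_MAX
    buffer_budget = SIZEOF_LONG * 5 // 2 + 2               # 22 bytes
    assert digits_needed + 2 <= buffer_budget              # sign + NUL still fit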
@@ -1719,7 +1719,7 @@ _io_TextIOWrapper_write_impl(textio *self, PyObject *text)
 bytes_len = PyBytes_GET_SIZE(b);
 }

-// We should avoid concatinating huge data.
+// We should avoid concatenating huge data.
 // Flush the buffer before adding b to the buffer if b is not small.
 // https://github.com/python/cpython/issues/87426
 if (bytes_len >= self->chunk_size) {
@@ -506,7 +506,7 @@ S_IWOTH: write by others\n\
 S_IXOTH: execute by others\n\
 \n"

-"UF_SETTABLE: mask of owner changable flags\n\
+"UF_SETTABLE: mask of owner changeable flags\n\
 UF_NODUMP: do not dump file\n\
 UF_IMMUTABLE: file may not be changed\n\
 UF_APPEND: file may only be appended to\n\
@@ -78,14 +78,14 @@ run_fileexflags(PyObject *mod, PyObject *pos_args)
 result = PyRun_FileExFlags(fp, filename, start, globals, locals, closeit, pflags);

 if (closeit && result && _Py_IsValidFD(fd)) {
-PyErr_SetString(PyExc_AssertionError, "File was not closed after excution");
+PyErr_SetString(PyExc_AssertionError, "File was not closed after execution");
 Py_DECREF(result);
 fclose(fp);
 return NULL;
 }

 if (!closeit && !_Py_IsValidFD(fd)) {
-PyErr_SetString(PyExc_AssertionError, "Bad file descriptor after excution");
+PyErr_SetString(PyExc_AssertionError, "Bad file descriptor after execution");
 Py_XDECREF(result);
 return NULL;
 }
@@ -9025,7 +9025,7 @@ os_getgrouplist_impl(PyObject *module, const char *user, gid_t basegid)

 /*
 * NGROUPS_MAX is defined by POSIX.1 as the maximum
-* number of supplimental groups a users can belong to.
+* number of supplemental groups a users can belong to.
 * We have to increment it by one because
 * getgrouplist() returns both the supplemental groups
 * and the primary group, i.e. all of the groups the
@@ -817,7 +817,7 @@ static int devpoll_flush(devpollObject *self)

 if (n < size) {
 /*
-** Data writed to /dev/poll is a binary data structure. It is not
+** Data written to /dev/poll is a binary data structure. It is not
 ** clear what to do if a partial write occurred. For now, raise
 ** an exception and see if we actually found this problem in
 ** the wild.
@@ -432,7 +432,7 @@ _SHAKE_digest(SHA3object *self, unsigned long digestlen, int hex)
 }

 /* Get the raw (binary) digest value. The HACL functions errors out if:
-* - the algorith is not shake -- not the case here
+* - the algorithm is not shake -- not the case here
 * - the output length is zero -- we follow the existing behavior and return
 * an empty digest, without raising an error */
 if (digestlen > 0) {
@@ -237,7 +237,7 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block
 if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 && // quick check: aligned pointer?
 (n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL?
 {
-// Suspicous: decoded value a in block is in the same page (or NULL) -- maybe a double free?
+// Suspicious: decoded value a in block is in the same page (or NULL) -- maybe a double free?
 // (continue in separate function to improve code generation)
 is_double_free = mi_check_is_double_freex(page, block);
 }
@@ -269,7 +269,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
 return p;
 }

-// allocate in a speficic arena
+// allocate in a specific arena
 static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment,
 bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
 {
@@ -493,7 +493,7 @@ static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx,
 size_t bitidx = startidx;
 bool all_purged = false;
 while (bitidx < endidx) {
-// count consequetive ones in the purge mask
+// count consecutive ones in the purge mask
 size_t count = 0;
 while (bitidx + count < endidx && (purge & ((size_t)1 << (bitidx + count))) != 0) {
 count++;
@@ -530,7 +530,7 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi
 if (purge != 0) {
 size_t bitidx = 0;
 while (bitidx < MI_BITMAP_FIELD_BITS) {
-// find consequetive range of ones in the purge mask
+// find consecutive range of ones in the purge mask
 size_t bitlen = 0;
 while (bitidx + bitlen < MI_BITMAP_FIELD_BITS && (purge & ((size_t)1 << (bitidx + bitlen))) != 0) {
 bitlen++;
@@ -7,7 +7,7 @@ terms of the MIT license. A copy of the license can be found in the file

 /* ----------------------------------------------------------------------------
 Concurrent bitmap that can set/reset sequences of bits atomically,
-represeted as an array of fields where each field is a machine word (`size_t`)
+represented as an array of fields where each field is a machine word (`size_t`)

 There are two api's; the standard one cannot have sequences that cross
 between the bitmap fields (and a sequence must be <= MI_BITMAP_FIELD_BITS).
@@ -269,7 +269,7 @@ static _Atomic(size_t) warning_count; // = 0; // when >= max_warning_count stop
 // (recursively) invoke malloc again to allocate space for the thread local
 // variables on demand. This is why we use a _mi_preloading test on such
 // platforms. However, C code generator may move the initial thread local address
-// load before the `if` and we therefore split it out in a separate funcion.
+// load before the `if` and we therefore split it out in a separate function.
 static mi_decl_thread bool recurse = false;

 static mi_decl_noinline bool mi_recurse_enter_prim(void) {
@@ -481,7 +481,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
 if (index < heap->page_retired_min) heap->page_retired_min = index;
 if (index > heap->page_retired_max) heap->page_retired_max = index;
 mi_assert_internal(mi_page_all_free(page));
-return; // dont't free after all
+return; // don't free after all
 }
 }
 _PyMem_mi_page_maybe_free(page, pq, false);
@@ -739,7 +739,7 @@ bool _mi_prim_getenv(const char* name, char* result, size_t result_size) {
 #endif
 bool _mi_prim_random_buf(void* buf, size_t buf_len) {
 #if defined(MAC_OS_X_VERSION_10_15) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_15
-// We prefere CCRandomGenerateBytes as it returns an error code while arc4random_buf
+// We prefer CCRandomGenerateBytes as it returns an error code while arc4random_buf
 // may fail silently on macOS. See PR #390, and <https://opensource.apple.com/source/Libc/Libc-1439.40.11/gen/FreeBSD/arc4random.c.auto.html>
 return (CCRandomGenerateBytes(buf, buf_len) == kCCSuccess);
 #else
@@ -718,7 +718,7 @@ static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_i
 // set slice back pointers for the first MI_MAX_SLICE_OFFSET entries
 size_t extra = slice_count-1;
 if (extra > MI_MAX_SLICE_OFFSET) extra = MI_MAX_SLICE_OFFSET;
-if (slice_index + extra >= segment->slice_entries) extra = segment->slice_entries - slice_index - 1; // huge objects may have more slices than avaiable entries in the segment->slices
+if (slice_index + extra >= segment->slice_entries) extra = segment->slice_entries - slice_index - 1; // huge objects may have more slices than available entries in the segment->slices

 mi_slice_t* slice_next = slice + 1;
 for (size_t i = 1; i <= extra; i++, slice_next++) {