The biggest difference between tuple and list in Python is that a tuple is semantically immutable, so people naturally assume that building an immutable thing should be faster than building a mutable one. Most of the time that is true, but in Python, owing to implementation details, immutability sometimes brings no measurable performance advantage, and occasionally comes with a significant disadvantage.
A quick check
$ uv run --python 3.13.11 python -m timeit "n=1000; [0]*n"
1000000 loops, best of 5: 324 nsec per loop
$ uv run --python 3.13.11 python -m timeit "n=1000; (0,)*n"
500000 loops, best of 5: 487 nsec per loop

This reproduces on an i7-11700, a U7-265KF, a Xeon 6226R, and an EPYC 7763. These CPUs cover the last few years, hybrid and uniform core layouts, server and consumer parts, Intel and AMD, and the gap starts to become noticeable once n > 100.
If you want the detailed results, see https://github.com/myuanz/tuple-list-benchmark
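If you want to watch the gap open up yourself, a minimal sweep over n is enough (a sketch of mine, not the methodology of the benchmark repo above):

# sweep n and compare list vs tuple repetition; rough numbers suffice
# to see where the curves separate
import timeit

for n in (10, 100, 1_000, 10_000):
    t_list = min(timeit.repeat(f"[0] * {n}", number=20_000, repeat=5))
    t_tuple = min(timeit.repeat(f"(0,) * {n}", number=20_000, repeat=5))
    print(f"n={n:>6}: list {t_list:.4f}s  tuple {t_tuple:.4f}s  ratio {t_tuple / t_list:.2f}")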
So where exactly does this difference come from?
Does the bytecode differ?
def tpl(n: int) -> tuple[int, ...]:
    return (0, ) * n

def tpl2(l: tuple, n: int) -> tuple[int, ...]:
    return l * n

def lst(n: int) -> list[int]:
    return [0] * n

def lst2(l: list, n: int) -> list[int]:
    return l * n

if __name__ == "__main__":
    import dis, timeit

    print('tpl')
    dis.dis(tpl)
    print('tpl2')
    dis.dis(tpl2)
    print('lst')
    dis.dis(lst)
    print('lst2')
    dis.dis(lst2)

    n = 1000
    print('tpl:', timeit.timeit('tpl(n)', globals=globals()))
    print('lst:', timeit.timeit('lst(n)', globals=globals()))

    l_tpl = (0, )
    l_lst = [0, ]
    print('tpl2:', timeit.timeit('tpl2(l_tpl, n)', globals=globals()))
    print('lst2:', timeit.timeit('lst2(l_lst, n)', globals=globals()))

Running this gives:
tpl
  1           RESUME                   0

  2           LOAD_CONST               1 ((0,))
              LOAD_FAST                0 (n)
              BINARY_OP                5 (*)
              RETURN_VALUE
tpl2
  4           RESUME                   0

  5           LOAD_FAST_LOAD_FAST      1 (l, n)
              BINARY_OP                5 (*)
              RETURN_VALUE
lst
  7           RESUME                   0

  8           LOAD_CONST               1 (0)
              BUILD_LIST               1
              LOAD_FAST                0 (n)
              BINARY_OP                5 (*)
              RETURN_VALUE
lst2
 10           RESUME                   0

 11           LOAD_FAST_LOAD_FAST      1 (l, n)
              BINARY_OP                5 (*)
              RETURN_VALUE
tpl: 0.5103932949714363
lst: 0.3303473610430956
tpl2: 0.5040466050268151
lst2: 0.3167809500009753
I built two groups of tests: the operand being repeated is either constructed in place or passed in from outside. The two groups take nearly the same time, and every other bytecode is identical, so the problem must be inside BINARY_OP.
In other words, the performance difference comes from the different ways tuple and list overload multiplication.
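A quick way to confirm that the gap follows the overloads themselves (my own check, assuming the method-call overhead is roughly equal for both types):

# call the two __mul__ overloads directly; the gap should move with the
# method, not with the surrounding bytecode
import timeit

t, l, n = (0,), [0], 1000
print('tuple.__mul__:', timeit.timeit('t.__mul__(n)', globals=globals(), number=200_000))
print('list.__mul__: ', timeit.timeit('l.__mul__(n)', globals=globals(), number=200_000))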
If you have never read the CPython source but want to find the relevant code: searching the Python 3.13.11 source for BinaryOps gives only 9 files with 31 hits. Looking at just the *.c hits, you quickly find a _PyEval_BinaryOps table that defines all the binary operators, only four of which involve multiplication:
const binaryfunc _PyEval_BinaryOps[] = {
    [NB_ADD] = PyNumber_Add,
    [NB_AND] = PyNumber_And,
    [NB_FLOOR_DIVIDE] = PyNumber_FloorDivide,
    [NB_LSHIFT] = PyNumber_Lshift,
    [NB_MATRIX_MULTIPLY] = PyNumber_MatrixMultiply,
    [NB_MULTIPLY] = PyNumber_Multiply,
    [NB_REMAINDER] = PyNumber_Remainder,
    [NB_OR] = PyNumber_Or,
    [NB_POWER] = _PyNumber_PowerNoMod,
    [NB_RSHIFT] = PyNumber_Rshift,
    [NB_SUBTRACT] = PyNumber_Subtract,
    [NB_TRUE_DIVIDE] = PyNumber_TrueDivide,
    [NB_XOR] = PyNumber_Xor,
    [NB_INPLACE_ADD] = PyNumber_InPlaceAdd,
    [NB_INPLACE_AND] = PyNumber_InPlaceAnd,
    [NB_INPLACE_FLOOR_DIVIDE] = PyNumber_InPlaceFloorDivide,
    [NB_INPLACE_LSHIFT] = PyNumber_InPlaceLshift,
    [NB_INPLACE_MATRIX_MULTIPLY] = PyNumber_InPlaceMatrixMultiply,
    [NB_INPLACE_MULTIPLY] = PyNumber_InPlaceMultiply,
    [NB_INPLACE_REMAINDER] = PyNumber_InPlaceRemainder,
    [NB_INPLACE_OR] = PyNumber_InPlaceOr,
    [NB_INPLACE_POWER] = _PyNumber_InPlacePowerNoMod,
    [NB_INPLACE_RSHIFT] = PyNumber_InPlaceRshift,
    [NB_INPLACE_SUBTRACT] = PyNumber_InPlaceSubtract,
    [NB_INPLACE_TRUE_DIVIDE] = PyNumber_InPlaceTrueDivide,
    [NB_INPLACE_XOR] = PyNumber_InPlaceXor,
};

We should clearly land in PyNumber_Multiply, since the code above multiplies by a plain integer. Jumping there:
PyObject *
PyNumber_Multiply(PyObject *v, PyObject *w)
{
    PyObject *result = BINARY_OP1(v, w, NB_SLOT(nb_multiply), "*");
    if (result == Py_NotImplemented) {
        PySequenceMethods *mv = Py_TYPE(v)->tp_as_sequence;
        PySequenceMethods *mw = Py_TYPE(w)->tp_as_sequence;
        Py_DECREF(result);
        if (mv && mv->sq_repeat) {
            return sequence_repeat(mv->sq_repeat, v, w);
        }
        else if (mw && mw->sq_repeat) {
            return sequence_repeat(mw->sq_repeat, w, v);
        }
        result = binop_type_error(v, w, "*");
    }
    return result;
}

This clearly hands the work to sequence_repeat.
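As an aside, those two branches are exactly why repetition works with the operands in either order. A tiny demonstration (my addition):

# int's nb_multiply returns NotImplemented when given a tuple, so
# PyNumber_Multiply falls through to sq_repeat in both cases
print((0,) * 3)   # mv->sq_repeat: the tuple is the left operand
print(3 * (0,))   # mw->sq_repeat: the tuple is on the right, operands get swapped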
Checking sequence_repeat, its first parameter is a function pointer, which means ->sq_repeat must itself be a function. A global search for sq_repeat shows at a glance that many of Python's core types define it:

Objects/setobject.c
2385: 0, /* sq_repeat */
Python/context.c
689: 0, /* sq_repeat */
Objects/bytesobject.c
1708: (ssizeargfunc)bytes_repeat, /*sq_repeat*/
Objects/memoryobject.c
2746: 0, /* sq_repeat */
Objects/typeobject.c
7608: COPYSEQ(sq_repeat);
10408: /* Heap types defining __add__/__mul__ have sq_concat/sq_repeat == NULL.
10415: SQSLOT(__mul__, sq_repeat, NULL, wrap_indexargfunc,
10417: SQSLOT(__rmul__, sq_repeat, NULL, wrap_indexargfunc,
Objects/listobject.c
3542: list_repeat, /* sq_repeat */
Doc/data/stable_abi.dat
998:macro,Py_sq_repeat,3.2,,
Objects/tupleobject.c
756: (ssizeargfunc)tuplerepeat, /* sq_repeat */
Objects/rangeobject.c
665: 0, /* sq_repeat */
Objects/weakrefobject.c
831: 0, /*sq_repeat*/

So Objects/listobject.c and Objects/tupleobject.c should hold the implementations we want. They turn out to be:
static PyObject *
tuplerepeat(PyTupleObject *a, Py_ssize_t n)
{
    const Py_ssize_t input_size = Py_SIZE(a);
    if (input_size == 0 || n == 1) {
        if (PyTuple_CheckExact(a)) {
            /* Since tuples are immutable, we can return a shared
               copy in this case */
            return Py_NewRef(a);
        }
    }
    if (input_size == 0 || n <= 0) {
        return tuple_get_empty();
    }
    assert(n>0);

    if (input_size > PY_SSIZE_T_MAX / n)
        return PyErr_NoMemory();
    Py_ssize_t output_size = input_size * n;

    PyTupleObject *np = tuple_alloc(output_size);
    if (np == NULL)
        return NULL;

    PyObject **dest = np->ob_item;
    if (input_size == 1) {
        PyObject *elem = a->ob_item[0];
        _Py_RefcntAdd(elem, n);
        PyObject **dest_end = dest + output_size;
        while (dest < dest_end) {
            *dest++ = elem;
        }
    }
    else {
        PyObject **src = a->ob_item;
        PyObject **src_end = src + input_size;
        while (src < src_end) {
            _Py_RefcntAdd(*src, n);
            *dest++ = *src++;
        }
        _Py_memory_repeat((char *)np->ob_item, sizeof(PyObject *)*output_size,
                          sizeof(PyObject *)*input_size);
    }
    _PyObject_GC_TRACK(np);
    return (PyObject *) np;
}
static PyObject *
list_repeat_lock_held(PyListObject *a, Py_ssize_t n)
{
    const Py_ssize_t input_size = Py_SIZE(a);
    if (input_size == 0 || n <= 0)
        return PyList_New(0);
    assert(n > 0);

    if (input_size > PY_SSIZE_T_MAX / n)
        return PyErr_NoMemory();
    Py_ssize_t output_size = input_size * n;

    PyListObject *np = (PyListObject *) list_new_prealloc(output_size);
    if (np == NULL)
        return NULL;

    PyObject **dest = np->ob_item;
    if (input_size == 1) {
        PyObject *elem = a->ob_item[0];
        _Py_RefcntAdd(elem, n);
        PyObject **dest_end = dest + output_size;
        while (dest < dest_end) {
            *dest++ = elem;
        }
    }
    else {
        PyObject **src = a->ob_item;
        PyObject **src_end = src + input_size;
        while (src < src_end) {
            _Py_RefcntAdd(*src, n);
            *dest++ = *src++;
        }
        _Py_memory_repeat((char *)np->ob_item, sizeof(PyObject *)*output_size,
                          sizeof(PyObject *)*input_size);
    }
    Py_SET_SIZE(np, output_size);
    return (PyObject *) np;
}

static PyObject *
list_repeat(PyObject *aa, Py_ssize_t n)
{
    PyObject *ret;
    PyListObject *a = (PyListObject *)aa;
    Py_BEGIN_CRITICAL_SECTION(a);
    ret = list_repeat_lock_held(a, n);
    Py_END_CRITICAL_SECTION();
    return ret;
}
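One difference jumps out right away (a side note of mine): because tuples are immutable, tuplerepeat can return the original object when n == 1, while list_repeat must always copy:

# tuplerepeat's shared-copy shortcut vs list_repeat's unconditional copy
t, l = (1, 2, 3), [1, 2, 3]
print(t * 1 is t)   # True: the Py_NewRef(a) branch above
print(l * 1 is l)   # False: a new list every time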
Setting aside that n == 1 shortcut, a close look shows the two functions are nearly identical: both compute the final length first, then allocate a single block, then branch on input_size, and the copy loops are exactly the same.
A natural guess is that the allocations differ in speed, because if you keep tracing downward, list_new_prealloc eventually zeroes the memory it gets, while tuple_alloc never does. Still, natural guesses need verification:
$ uv run --python 3.13.11 python -m timeit "[0, 0, 0, 0, 0]"
10000000 loops, best of 5: 20.8 nsec per loop
$ uv run --python 3.13.11 python -m timeit "(0, 0, 0, 0, 0)"
50000000 loops, best of 5: 6.03 nsec per loop

Building a small tuple is indeed faster, and a small list is much slower. So going by the natural guess alone, tuple repeat should in theory be the faster of the two, which completely contradicts the benchmark at the top of this article.
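One caveat worth flagging about this micro-benchmark (my addition): the tuple literal gets folded into a constant at compile time, so the tuple loop above mostly measures a LOAD_CONST rather than a fresh allocation, while the list literal really does build a new list on every pass:

# the tuple literal compiles to a single LOAD_CONST thanks to constant
# folding; the list literal allocates and fills a fresh list each time
import dis
dis.dis(compile("(0, 0, 0, 0, 0)", "<demo>", "eval"))
dis.dis(compile("[0, 0, 0, 0, 0]", "<demo>", "eval"))

Even allowing for that, the earlier zeroing observation still points the same way.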
Copying the opening benchmark back down here:

$ uv run --python 3.13.11 python -m timeit "n=1000; [0]*n"
1000000 loops, best of 5: 324 nsec per loop
$ uv run --python 3.13.11 python -m timeit "n=1000; (0,)*n"
500000 loops, best of 5: 487 nsec per loop

The fixed allocation cost favors tuple, yet the total time favors list, so we can infer that the copying accounts for the bulk of the time. In other words: the same copy code, running at different speeds.
Why does the same copy code run at different speeds?
At this point I suspected the memory layout. Here are the relevant definitions for list and tuple in 3.13.11:
// listobject.h
typedef struct {
    PyObject_VAR_HEAD
    /* Vector of pointers to list elements.  list[0] is ob_item[0], etc. */
    PyObject **ob_item;

    /* ob_item contains space for 'allocated' elements.  The number
     * currently in use is ob_size.
     * Invariants:
     *     0 <= ob_size <= allocated
     *     len(list) == ob_size
     *     ob_item == NULL implies ob_size == allocated == 0
     * list.sort() temporarily sets allocated to -1 to detect mutations.
     *
     * Items must normally not be NULL, except during construction when
     * the list is not yet visible outside the function that builds it.
     */
    Py_ssize_t allocated;
} PyListObject;

// tupleobject.h
typedef struct {
    PyObject_VAR_HEAD
    /* ob_item contains space for 'ob_size' elements.
       Items must normally not be NULL, except during construction when
       the tuple is not yet visible outside the function that builds it. */
    PyObject *ob_item[1];
} PyTupleObject;

The definitions behind PyObject_VAR_HEAD:
typedef struct _object {
    Py_ssize_t ob_refcnt;
    PyTypeObject *ob_type;
} PyObject;

typedef struct {
    PyObject ob_base;
    Py_ssize_t ob_size; /* Number of items in variable part */
} PyVarObject;

As you can see, writing a list's elements means first chasing ob_item to a separate allocation and storing there, while a tuple keeps its elements inline, immediately after PyObject_VAR_HEAD (a flexible array member). After all, a tuple's size is known at construction time, so there is no need for a second allocation to hold the elements.
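You can observe this layout difference from pure Python (a small sketch of mine; the concrete numbers assume a typical 64-bit, non-debug CPython 3.13 build):

# a tuple stores elements inline: __sizeof__ grows by 8 bytes per element;
# a list header stays fixed and the element buffer lives elsewhere
for n in range(4):
    print(n, ((0,) * n).__sizeof__())   # 24, 32, 40, 48 on this kind of build

lst = [0, 0, 0]
print(lst.__sizeof__())                 # header (40) + allocated * 8 = 64 here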
So the picture is clear: PyObject_VAR_HEAD occupies Py_ssize_t ob_refcnt + PyTypeObject *ob_type + Py_ssize_t ob_size = 24 bytes. That is not a great alignment for the element array, so adding a pad after PyObject_VAR_HEAD to push the elements up to a 32-byte boundary ought to be an optimization, right? Let's try.
Adding a pad to tuple
First clone github/cpython, check out 3.13.11, and build with LTO + PGO:
$ ./configure --enable-optimizations --with-lto
$ make -j"$(nproc)" profile-opt
This gives:
$ ./python -m timeit "n=1000; [0]*n"; ./python -m timeit "n=1000; (0,)*n";
1000000 loops, best of 5: 320 nsec per loop
500000 loops, best of 5: 483 nsec per loop
About the same as the prebuilt binaries uv ships. Now change the tuple definition to:
typedef struct {
    PyObject_VAR_HEAD
    char ob_item_pad[8];
    /* ob_item contains space for 'ob_size' elements.
       Items must normally not be NULL, except during construction when
       the tuple is not yet visible outside the function that builds it. */
    PyObject *ob_item[1];
} PyTupleObject;
This changes the ABI, so Tools/clinic/libclinic/parse_args.py needs a matching edit too; then make clinic regenerates the structs baked into all the internal interfaces.
Then the bad news: whether pad8 pushes ob_item's offset to 32 or pad40 pushes it to 64, the fill speed does not improve at all. My padded cpython lives at https://github.com/myuanz/cpython; with the pad applied, print(().__sizeof__()) prints 32 or 64, yet the performance is much the same as before.
An anticlimax
Let's look at perf:
$ perf stat \
-e task-clock,context-switches,cpu-migrations,page-faults \
-e cycles,instructions,branches,branch-misses \
-e cache-references,cache-misses \
-e L1-dcache-loads,L1-dcache-load-misses \
-e LLC-loads,LLC-load-misses \
-e dTLB-loads,dTLB-load-misses \
python -m timeit "n=1000; (0,)*n"
500000 loops, best of 5: 728 nsec per loop
Performance counter stats for 'python -m timeit n=1000; (0,)*n':
2,500,516,889 task-clock # 0.999 CPUs utilized
15 context-switches # 5.999 /sec
5 cpu-migrations # 2.000 /sec
1,277 page-faults # 510.694 /sec
11,670,698,367 cycles # 4.667 GHz (41.49%)
44,839,865,978 instructions # 3.84 insn per cycle (49.89%)
14,376,420,602 branches # 5.749 G/sec (58.24%)
7,291,428 branch-misses # 0.05% of all branches (66.63%)
1,710,740 cache-references # 684.155 K/sec (75.03%)
312,783 cache-misses # 18.28% of all cache refs (75.15%)
7,950,561,316 L1-dcache-loads # 3.180 G/sec (75.17%)
2,187,552 L1-dcache-load-misses # 0.03% of all L1-dcache accesses (75.17%)
464,852 LLC-loads # 185.902 K/sec (75.17%)
37,806 LLC-load-misses # 8.13% of all LL-cache accesses (75.06%)
8,011,622,746 dTLB-loads # 3.204 G/sec (33.11%)
1,292 dTLB-load-misses # 0.00% of all dTLB cache accesses (33.11%)
2.503313737 seconds time elapsed
2.494860000 seconds user
0.003301000 seconds sys
$ perf stat \
-e task-clock,context-switches,cpu-migrations,page-faults \
-e cycles,instructions,branches,branch-misses \
-e cache-references,cache-misses \
-e L1-dcache-loads,L1-dcache-load-misses \
-e LLC-loads,LLC-load-misses \
-e dTLB-loads,dTLB-load-misses \
python -m timeit "n=1000; [0]*n"
500000 loops, best of 5: 486 nsec per loop
Performance counter stats for 'python -m timeit n=1000; [0]*n':
1,689,509,169 task-clock # 0.999 CPUs utilized
9 context-switches # 5.327 /sec
5 cpu-migrations # 2.959 /sec
1,275 page-faults # 754.657 /sec
7,878,898,880 cycles # 4.663 GHz (41.59%)
43,420,150,609 instructions # 5.51 insn per cycle (49.93%)
12,064,926,239 branches # 7.141 G/sec (58.28%)
7,219,074 branch-misses # 0.06% of all branches (66.62%)
1,999,688 cache-references # 1.184 M/sec (74.97%)
306,104 cache-misses # 15.31% of all cache refs (74.96%)
11,662,103,497 L1-dcache-loads # 6.903 G/sec (74.96%)
2,204,498 L1-dcache-load-misses # 0.02% of all L1-dcache accesses (74.96%)
311,765 LLC-loads # 184.530 K/sec (75.02%)
24,007 LLC-load-misses # 7.70% of all LL-cache accesses (75.10%)
11,749,126,109 dTLB-loads # 6.954 G/sec (33.38%)
82,556 dTLB-load-misses # 0.00% of all dTLB cache accesses (33.33%)
1.690894205 seconds time elapsed
1.684841000 seconds user
0.003297000 seconds sys

To rule out hybrid-core effects, the results above were collected on a separate machine with uniform performance cores only.
You can see the two execute almost the same number of instructions (44G vs 43G), the cache and branch numbers are all comparable, and list even performs more memory loads, yet tuple's IPC is simply lower. At this point I can only suspect:
- list's second malloc happens to land on a cache-friendly block of memory
- PGO introduces some inscrutable optimization, since without PGO the two take about the same time
But a PGO build is painfully slow to run, which makes this hard to debug, and I have no deeper ideas for now. Moving tuple's storage out-of-line, list-style, is also a huge amount of work; I gave it a quick try and far too many things are involved. I hope an answer turns up eventually.
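If anyone wants to poke at the first suspicion, here is a rough probe (my sketch; it assumes a standard 64-bit, non-free-threaded CPython build, since it reads the internal PyListObject layout described earlier directly, and id() giving an address is a CPython detail):

# peek at where each container's element buffer actually lands in memory
import ctypes

class PyListObjectStruct(ctypes.Structure):
    _fields_ = [
        ("ob_refcnt", ctypes.c_ssize_t),   # PyObject_VAR_HEAD ...
        ("ob_type", ctypes.c_void_p),
        ("ob_size", ctypes.c_ssize_t),
        ("ob_item", ctypes.c_void_p),      # the external element buffer
        ("allocated", ctypes.c_ssize_t),
    ]

n = 1000
lst = [0] * n
tup = (0,) * n

lst_buf = PyListObjectStruct.from_address(id(lst)).ob_item
tup_buf = id(tup) + 24                     # tuple elements start right after the 24-byte header

print(f"list header at {id(lst):#x}, list buffer at {lst_buf:#x}")
print(f"tuple buffer at {tup_buf:#x} (inline)")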