diff --git a/Python-3.11.13.tar.xz b/Python-3.11.14.tar.xz similarity index 32% rename from Python-3.11.13.tar.xz rename to Python-3.11.14.tar.xz index 48243b4c2e9b3e6f524e2a0b04da1edff81b2249..3a72cd54c4e3746c6347bfe276b9208b2636ff6b 100644 --- a/Python-3.11.13.tar.xz +++ b/Python-3.11.14.tar.xz @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8fb5f9fbc7609fa822cb31549884575db7fd9657cbffb89510b5d7975963a83a -size 20117496 +oid sha256:8d3ed8ec5c88c1c95f5e558612a725450d2452813ddad5e58fdb1a53b1209b78 +size 20326860 diff --git a/backport-CVE-2025-6069.patch b/backport-CVE-2025-6069.patch deleted file mode 100644 index 7c16a573058be146b77a1e8708f8d9889acd1380..0000000000000000000000000000000000000000 --- a/backport-CVE-2025-6069.patch +++ /dev/null @@ -1,240 +0,0 @@ -From f3c6f882cddc8dc30320d2e73edf019e201394fc Mon Sep 17 00:00:00 2001 -From: Serhiy Storchaka -Date: Fri, 4 Jul 2025 00:05:46 +0300 -Subject: [PATCH] [3.11] gh-135462: Fix quadratic complexity in processing - special input in HTMLParser (GH-135464) (GH-135484) - -End-of-file errors are now handled according to the HTML5 specs -- -comments and declarations are automatically closed, tags are ignored. -(cherry picked from commit 6eb6c5dbfb528bd07d77b60fd71fd05d81d45c41) ---- - Lib/html/parser.py | 41 +++++--- - Lib/test/test_htmlparser.py | 95 ++++++++++++++++--- - ...-06-13-15-55-22.gh-issue-135462.KBeJpc.rst | 4 + - 3 files changed, 117 insertions(+), 23 deletions(-) - create mode 100644 Misc/NEWS.d/next/Security/2025-06-13-15-55-22.gh-issue-135462.KBeJpc.rst - -diff --git a/Lib/html/parser.py b/Lib/html/parser.py -index bef0f4fe4bf..9c38008bbfd 100644 ---- a/Lib/html/parser.py -+++ b/Lib/html/parser.py -@@ -25,6 +25,7 @@ - charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]') - - starttagopen = re.compile('<[a-zA-Z]') -+endtagopen = re.compile('') - commentclose = re.compile(r'--\s*>') - # Note: -@@ -176,7 +177,7 @@ def goahead(self, end): - k = self.parse_pi(i) - elif startswith("', i + 1) -- if k < 0: -- k = rawdata.find('<', i + 1) -- if k < 0: -- k = i + 1 -- else: -- k += 1 -- if self.convert_charrefs and not self.cdata_elem: -- self.handle_data(unescape(rawdata[i:k])) -+ if starttagopen.match(rawdata, i): # < + letter -+ pass -+ elif startswith("'), -- ('comment', '/img'), -- ('endtag', 'html<')]) -+ ('data', '\n')]) - - def test_starttag_junk_chars(self): -+ self._run_check("<", [('data', '<')]) -+ self._run_check("<>", [('data', '<>')]) -+ self._run_check("< >", [('data', '< >')]) -+ self._run_check("< ", [('data', '< ')]) - self._run_check("", []) -+ self._run_check("<$>", [('data', '<$>')]) - self._run_check("", [('comment', '$')]) - self._run_check("", [('endtag', 'a')]) -+ self._run_check("", [('starttag', 'a", [('endtag', 'a'", [('data', "'", []) -+ self._run_check("", [('starttag', 'a$b', [])]) - self._run_check("", [('startendtag', 'a$b', [])]) - self._run_check("", [('starttag', 'a$b', [])]) - self._run_check("", [('startendtag', 'a$b', [])]) -+ self._run_check("", [('endtag', 'a$b')]) - - def test_slashes_in_starttag(self): - self._run_check('', [('startendtag', 'a', [('foo', 'var')])]) -@@ -537,13 +545,56 @@ def test_EOF_in_charref(self): - for html, expected in data: - self._run_check(html, expected) - -- def test_broken_comments(self): -- html = ('' -+ def test_eof_in_comments(self): -+ data = [ -+ ('', [('comment', '-!>')]), -+ ('' - '' - '' - '') - expected = [ -+ ('comment', 'ELEMENT br EMPTY'), - ('comment', ' not really a comment '), - ('comment', ' not a comment either 
--'), - ('comment', ' -- close enough --'), -@@ -598,6 +649,26 @@ def test_convert_charrefs_dropped_text(self): - ('endtag', 'a'), ('data', ' bar & baz')] - ) - -+ @support.requires_resource('cpu') -+ def test_eof_no_quadratic_complexity(self): -+ # Each of these examples used to take about an hour. -+ # Now they take a fraction of a second. -+ def check(source): -+ parser = html.parser.HTMLParser() -+ parser.feed(source) -+ parser.close() -+ n = 120_000 -+ check(" -Date: Mon, 28 Jul 2025 17:37:26 +0200 -Subject: [PATCH] gh-130577: tarfile now validates archives to ensure member - offsets are non-negative (GH-137027) (cherry picked from commit - 7040aa54f14676938970e10c5f74ea93cd56aa38) - -Co-authored-by: Alexander Urieles -Co-authored-by: Gregory P. Smith ---- - Lib/tarfile.py | 3 + - Lib/test/test_tarfile.py | 156 ++++++++++++++++++ - ...-07-23-00-35-29.gh-issue-130577.c7EITy.rst | 3 + - 3 files changed, 162 insertions(+) - create mode 100644 Misc/NEWS.d/next/Library/2025-07-23-00-35-29.gh-issue-130577.c7EITy.rst - -diff --git a/Lib/tarfile.py b/Lib/tarfile.py -index 2423e14bc540d8..c04c576ea22d2d 100755 ---- a/Lib/tarfile.py -+++ b/Lib/tarfile.py -@@ -1614,6 +1614,9 @@ def _block(self, count): - """Round up a byte count by BLOCKSIZE and return it, - e.g. _block(834) => 1024. - """ -+ # Only non-negative offsets are allowed -+ if count < 0: -+ raise InvalidHeaderError("invalid offset") - blocks, remainder = divmod(count, BLOCKSIZE) - if remainder: - blocks += 1 -diff --git a/Lib/test/test_tarfile.py b/Lib/test/test_tarfile.py -index 7377acdf398622..366aac781df1e7 100644 ---- a/Lib/test/test_tarfile.py -+++ b/Lib/test/test_tarfile.py -@@ -50,6 +50,7 @@ def sha256sum(data): - xzname = os.path.join(TEMPDIR, "testtar.tar.xz") - tmpname = os.path.join(TEMPDIR, "tmp.tar") - dotlessname = os.path.join(TEMPDIR, "testtar") -+SPACE = b" " - - sha256_regtype = ( - "e09e4bc8b3c9d9177e77256353b36c159f5f040531bbd4b024a8f9b9196c71ce" -@@ -4386,6 +4387,161 @@ def extractall(self, ar): - ar.extractall(self.testdir, filter='fully_trusted') - - -+class OffsetValidationTests(unittest.TestCase): -+ tarname = tmpname -+ invalid_posix_header = ( -+ # name: 100 bytes -+ tarfile.NUL * tarfile.LENGTH_NAME -+ # mode, space, null terminator: 8 bytes -+ + b"000755" + SPACE + tarfile.NUL -+ # uid, space, null terminator: 8 bytes -+ + b"000001" + SPACE + tarfile.NUL -+ # gid, space, null terminator: 8 bytes -+ + b"000001" + SPACE + tarfile.NUL -+ # size, space: 12 bytes -+ + b"\xff" * 11 + SPACE -+ # mtime, space: 12 bytes -+ + tarfile.NUL * 11 + SPACE -+ # chksum: 8 bytes -+ + b"0011407" + tarfile.NUL -+ # type: 1 byte -+ + tarfile.REGTYPE -+ # linkname: 100 bytes -+ + tarfile.NUL * tarfile.LENGTH_LINK -+ # magic: 6 bytes, version: 2 bytes -+ + tarfile.POSIX_MAGIC -+ # uname: 32 bytes -+ + tarfile.NUL * 32 -+ # gname: 32 bytes -+ + tarfile.NUL * 32 -+ # devmajor, space, null terminator: 8 bytes -+ + tarfile.NUL * 6 + SPACE + tarfile.NUL -+ # devminor, space, null terminator: 8 bytes -+ + tarfile.NUL * 6 + SPACE + tarfile.NUL -+ # prefix: 155 bytes -+ + tarfile.NUL * tarfile.LENGTH_PREFIX -+ # padding: 12 bytes -+ + tarfile.NUL * 12 -+ ) -+ invalid_gnu_header = ( -+ # name: 100 bytes -+ tarfile.NUL * tarfile.LENGTH_NAME -+ # mode, null terminator: 8 bytes -+ + b"0000755" + tarfile.NUL -+ # uid, null terminator: 8 bytes -+ + b"0000001" + tarfile.NUL -+ # gid, space, null terminator: 8 bytes -+ + b"0000001" + tarfile.NUL -+ # size, space: 12 bytes -+ + b"\xff" * 11 + SPACE -+ # mtime, space: 12 bytes -+ + tarfile.NUL * 
11 + SPACE -+ # chksum: 8 bytes -+ + b"0011327" + tarfile.NUL -+ # type: 1 byte -+ + tarfile.REGTYPE -+ # linkname: 100 bytes -+ + tarfile.NUL * tarfile.LENGTH_LINK -+ # magic: 8 bytes -+ + tarfile.GNU_MAGIC -+ # uname: 32 bytes -+ + tarfile.NUL * 32 -+ # gname: 32 bytes -+ + tarfile.NUL * 32 -+ # devmajor, null terminator: 8 bytes -+ + tarfile.NUL * 8 -+ # devminor, null terminator: 8 bytes -+ + tarfile.NUL * 8 -+ # padding: 167 bytes -+ + tarfile.NUL * 167 -+ ) -+ invalid_v7_header = ( -+ # name: 100 bytes -+ tarfile.NUL * tarfile.LENGTH_NAME -+ # mode, space, null terminator: 8 bytes -+ + b"000755" + SPACE + tarfile.NUL -+ # uid, space, null terminator: 8 bytes -+ + b"000001" + SPACE + tarfile.NUL -+ # gid, space, null terminator: 8 bytes -+ + b"000001" + SPACE + tarfile.NUL -+ # size, space: 12 bytes -+ + b"\xff" * 11 + SPACE -+ # mtime, space: 12 bytes -+ + tarfile.NUL * 11 + SPACE -+ # chksum: 8 bytes -+ + b"0010070" + tarfile.NUL -+ # type: 1 byte -+ + tarfile.REGTYPE -+ # linkname: 100 bytes -+ + tarfile.NUL * tarfile.LENGTH_LINK -+ # padding: 255 bytes -+ + tarfile.NUL * 255 -+ ) -+ valid_gnu_header = tarfile.TarInfo("filename").tobuf(tarfile.GNU_FORMAT) -+ data_block = b"\xff" * tarfile.BLOCKSIZE -+ -+ def _write_buffer(self, buffer): -+ with open(self.tarname, "wb") as f: -+ f.write(buffer) -+ -+ def _get_members(self, ignore_zeros=None): -+ with open(self.tarname, "rb") as f: -+ with tarfile.open( -+ mode="r", fileobj=f, ignore_zeros=ignore_zeros -+ ) as tar: -+ return tar.getmembers() -+ -+ def _assert_raises_read_error_exception(self): -+ with self.assertRaisesRegex( -+ tarfile.ReadError, "file could not be opened successfully" -+ ): -+ self._get_members() -+ -+ def test_invalid_offset_header_validations(self): -+ for tar_format, invalid_header in ( -+ ("posix", self.invalid_posix_header), -+ ("gnu", self.invalid_gnu_header), -+ ("v7", self.invalid_v7_header), -+ ): -+ with self.subTest(format=tar_format): -+ self._write_buffer(invalid_header) -+ self._assert_raises_read_error_exception() -+ -+ def test_early_stop_at_invalid_offset_header(self): -+ buffer = self.valid_gnu_header + self.invalid_gnu_header + self.valid_gnu_header -+ self._write_buffer(buffer) -+ members = self._get_members() -+ self.assertEqual(len(members), 1) -+ self.assertEqual(members[0].name, "filename") -+ self.assertEqual(members[0].offset, 0) -+ -+ def test_ignore_invalid_archive(self): -+ # 3 invalid headers with their respective data -+ buffer = (self.invalid_gnu_header + self.data_block) * 3 -+ self._write_buffer(buffer) -+ members = self._get_members(ignore_zeros=True) -+ self.assertEqual(len(members), 0) -+ -+ def test_ignore_invalid_offset_headers(self): -+ for first_block, second_block, expected_offset in ( -+ ( -+ (self.valid_gnu_header), -+ (self.invalid_gnu_header + self.data_block), -+ 0, -+ ), -+ ( -+ (self.invalid_gnu_header + self.data_block), -+ (self.valid_gnu_header), -+ 1024, -+ ), -+ ): -+ self._write_buffer(first_block + second_block) -+ members = self._get_members(ignore_zeros=True) -+ self.assertEqual(len(members), 1) -+ self.assertEqual(members[0].name, "filename") -+ self.assertEqual(members[0].offset, expected_offset) -+ -+ - def setUpModule(): - os_helper.unlink(TEMPDIR) - os.makedirs(TEMPDIR) -diff --git a/Misc/NEWS.d/next/Library/2025-07-23-00-35-29.gh-issue-130577.c7EITy.rst b/Misc/NEWS.d/next/Library/2025-07-23-00-35-29.gh-issue-130577.c7EITy.rst -new file mode 100644 -index 00000000000000..342cabbc865dc4 ---- /dev/null -+++ 
b/Misc/NEWS.d/next/Library/2025-07-23-00-35-29.gh-issue-130577.c7EITy.rst -@@ -0,0 +1,3 @@ -+:mod:`tarfile` now validates archives to ensure member offsets are -+non-negative. (Contributed by Alexander Enrique Urieles Nieto in -+:gh:`130577`.) diff --git a/backport-CVE-2025-8291.patch b/backport-CVE-2025-8291.patch deleted file mode 100644 index 188c4136066f6e1da106db50e33a84fcb77799db..0000000000000000000000000000000000000000 --- a/backport-CVE-2025-8291.patch +++ /dev/null @@ -1,307 +0,0 @@ -From 1e1d79a9de506ceb3dabb1ba9114c7ddb555253e Mon Sep 17 00:00:00 2001 -From: Serhiy Storchaka -Date: Tue, 7 Oct 2025 20:55:44 +0300 -Subject: [PATCH] [3.13] gh-139700: Check consistency of the zip64 end of - central directory record (GH-139702) (GH-139708) (cherry picked from commit - 333d4a6f4967d3ace91492a39ededbcf3faa76a6) - -Co-authored-by: Serhiy Storchaka -Support records with "zip64 extensible data" if there are no bytes -prepended to the ZIP file. -(cherry picked from commit 162997bb70e067668c039700141770687bc8f267) ---- - Lib/test/test_zipfile.py | 82 ++++++++++++++++++- - Lib/zipfile.py | 51 +++++++----- - ...-10-07-19-31-34.gh-issue-139700.vNHU1O.rst | 3 + - 3 files changed, 113 insertions(+), 23 deletions(-) - create mode 100644 Misc/NEWS.d/next/Security/2025-10-07-19-31-34.gh-issue-139700.vNHU1O.rst - -diff --git a/Lib/test/test_zipfile.py b/Lib/test/test_zipfile.py -index 52831a7bd7ce00..a0bca62ab88ac3 100644 ---- a/Lib/test/test_zipfile.py -+++ b/Lib/test/test_zipfile.py -@@ -887,6 +887,8 @@ def make_zip64_file( - self, file_size_64_set=False, file_size_extra=False, - compress_size_64_set=False, compress_size_extra=False, - header_offset_64_set=False, header_offset_extra=False, -+ extensible_data=b'', -+ end_of_central_dir_size=None, offset_to_end_of_central_dir=None, - ): - """Generate bytes sequence for a zip with (incomplete) zip64 data. - -@@ -940,6 +942,12 @@ def make_zip64_file( - - central_dir_size = struct.pack(' 1: - raise BadZipFile("zipfiles that span multiple disks are not supported") - -- # Assume no 'zip64 extensible data' -- fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2) -+ offset -= sizeEndCentDir64 -+ if reloff > offset: -+ raise BadZipFile("Corrupt zip64 end of central directory locator") -+ # First, check the assumption that there is no prepended data. -+ fpin.seek(reloff) -+ extrasz = offset - reloff - data = fpin.read(sizeEndCentDir64) - if len(data) != sizeEndCentDir64: -- return endrec -+ raise OSError("Unknown I/O error") -+ if not data.startswith(stringEndArchive64) and reloff != offset: -+ # Since we already have seen the Zip64 EOCD Locator, it's -+ # possible we got here because there is prepended data. 
-+ # Assume no 'zip64 extensible data' -+ fpin.seek(offset) -+ extrasz = 0 -+ data = fpin.read(sizeEndCentDir64) -+ if len(data) != sizeEndCentDir64: -+ raise OSError("Unknown I/O error") -+ if not data.startswith(stringEndArchive64): -+ raise BadZipFile("Zip64 end of central directory record not found") -+ - sig, sz, create_version, read_version, disk_num, disk_dir, \ - dircount, dircount2, dirsize, diroffset = \ - struct.unpack(structEndArchive64, data) -- if sig != stringEndArchive64: -- return endrec -+ if (diroffset + dirsize != reloff or -+ sz + 12 != sizeEndCentDir64 + extrasz): -+ raise BadZipFile("Corrupt zip64 end of central directory record") - - # Update the original endrec using data from the ZIP64 record - endrec[_ECD_SIGNATURE] = sig -@@ -280,6 +296,7 @@ def _EndRecData64(fpin, offset, endrec): - endrec[_ECD_ENTRIES_TOTAL] = dircount2 - endrec[_ECD_SIZE] = dirsize - endrec[_ECD_OFFSET] = diroffset -+ endrec[_ECD_LOCATION] = offset - extrasz - return endrec - - -@@ -313,7 +330,7 @@ def _EndRecData(fpin): - endrec.append(filesize - sizeEndCentDir) - - # Try to read the "Zip64 end of central directory" structure -- return _EndRecData64(fpin, -sizeEndCentDir, endrec) -+ return _EndRecData64(fpin, filesize - sizeEndCentDir, endrec) - - # Either this is not a ZIP file, or it is a ZIP file with an archive - # comment. Search the end of the file for the "end of central directory" -@@ -337,8 +354,7 @@ def _EndRecData(fpin): - endrec.append(maxCommentStart + start) - - # Try to read the "Zip64 end of central directory" structure -- return _EndRecData64(fpin, maxCommentStart + start - filesize, -- endrec) -+ return _EndRecData64(fpin, maxCommentStart + start, endrec) - - # Unable to find a valid end of central directory structure - return None -@@ -1386,9 +1402,6 @@ def _RealGetContents(self): - - # "concat" is zero, unless zip was concatenated to another file - concat = endrec[_ECD_LOCATION] - size_cd - offset_cd -- if endrec[_ECD_SIGNATURE] == stringEndArchive64: -- # If Zip64 extension structures are present, account for them -- concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator) - - if self.debug > 2: - inferred = concat + offset_cd -@@ -1989,7 +2002,7 @@ def _write_end_record(self): - " would require ZIP64 extensions") - zip64endrec = struct.pack( - structEndArchive64, stringEndArchive64, -- 44, 45, 45, 0, 0, centDirCount, centDirCount, -+ sizeEndCentDir64 - 12, 45, 45, 0, 0, centDirCount, centDirCount, - centDirSize, centDirOffset) - self.fp.write(zip64endrec) - -diff --git a/Misc/NEWS.d/next/Security/2025-10-07-19-31-34.gh-issue-139700.vNHU1O.rst b/Misc/NEWS.d/next/Security/2025-10-07-19-31-34.gh-issue-139700.vNHU1O.rst -new file mode 100644 -index 00000000000000..a8e7a1f1878c6b ---- /dev/null -+++ b/Misc/NEWS.d/next/Security/2025-10-07-19-31-34.gh-issue-139700.vNHU1O.rst -@@ -0,0 +1,3 @@ -+Check consistency of the zip64 end of central directory record. Support -+records with "zip64 extensible data" if there are no bytes prepended to the -+ZIP file. 
diff --git a/python3.spec b/python3.spec
index fbb249db98c9f3ae285f7260627a4a23479e8d71..87b7b36327648acd18524776d4115f9fc34b78ff 100644
--- a/python3.spec
+++ b/python3.spec
@@ -5,8 +5,8 @@
 Name: python3
 Summary: Interpreter of the Python3 programming language
 URL: https://www.python.org/
-Version: 3.11.13
-Release: 5
+Version: 3.11.14
+Release: 1
 License: Python-2.0
 
 %global branchversion 3.11
@@ -95,10 +95,7 @@ Source1: pyconfig.h
 Patch1: 00001-rpath.patch
 Patch251: 00251-change-user-install-location.patch
 
-Patch6000: backport-CVE-2025-8194.patch
-Patch6001: backport-CVE-2025-6069.patch
-Patch6002: backport-CVE-2025-8291.patch
-Patch6003: backport-CVE-2025-6075.patch
+Patch6000: backport-CVE-2025-6075.patch
 
 Patch9000: add-the-sm3-method-for-obtaining-the-salt-value.patch
 Patch9001: 0001-add-loongarch64-support-for-python.patch
@@ -858,6 +855,9 @@ export BEP_GTDLIST="$BEP_GTDLIST_TMP"
 %{_mandir}/*/*
 
 %changelog
+* Mon Nov 17 2025 lipengyu - 3.11.14-1
+- update to 3.11.14
+
 * Wed Nov 05 2025 lipengyu - 3.11.13-5
 - Type:CVE
 - CVE:CVE-2025-6075