Package: release.debian.org
Severity: normal
Tags: trixie
X-Debbugs-Cc: [email protected], [email protected]
Control: affects -1 + src:python3.13
User: [email protected]
Usertags: pu

This update fixes various low-severity security issues in
python3.13; all changes are cherry-picks from the 3.13
upstream release branch and are covered by tests. Tests via
debusine all looked fine and were complemented by
manual tests on a trixie host. Debdiff below.

Cheers,
        Moritz

diff -Nru python3.13-3.13.5/debian/changelog python3.13-3.13.5/debian/changelog
--- python3.13-3.13.5/debian/changelog  2025-06-25 20:55:22.000000000 +0200
+++ python3.13-3.13.5/debian/changelog  2026-04-06 14:24:14.000000000 +0200
@@ -1,3 +1,22 @@
+python3.13 (3.13.5-2+deb13u1) trixie; urgency=medium
+
+  * CVE-2025-11468 (Closes: #1126787)
+  * CVE-2025-12084
+  * CVE-2025-13462
+  * CVE-2025-13836
+  * CVE-2025-13837
+  * CVE-2025-6069
+  * CVE-2025-6075
+  * CVE-2025-8194 (Closes: #1124764)
+  * CVE-2025-8291
+  * CVE-2025-15282 (Closes: #1126780)
+  * CVE-2026-0672 (Closes: #1126762)
+  * CVE-2026-0865 (Closes: #1126740)
+  * CVE-2026-1299 (Closes: #1126745)
+  * CVE-2026-2297
+
+ -- Moritz Mühlenhoff <[email protected]>  Mon, 06 Apr 2026 14:24:14 +0200
+
 python3.13 (3.13.5-2) unstable; urgency=medium
 
   * Patch: Revert disabling of logger while handling log record.
diff -Nru python3.13-3.13.5/debian/patches/CVE-2025-11468.patch 
python3.13-3.13.5/debian/patches/CVE-2025-11468.patch
--- python3.13-3.13.5/debian/patches/CVE-2025-11468.patch       1970-01-01 
01:00:00.000000000 +0100
+++ python3.13-3.13.5/debian/patches/CVE-2025-11468.patch       2026-04-06 
13:43:23.000000000 +0200
@@ -0,0 +1,77 @@
+From f738386838021c762efea6c9802c82de65e87796 Mon Sep 17 00:00:00 2001
+From: "Miss Islington (bot)"
+ <[email protected]>
+Date: Sun, 25 Jan 2026 18:09:53 +0100
+Subject: [PATCH] [3.13] gh-143935: Email preserve parens when folding comments
+ (GH-143936) (#144035)
+
+--- python3.13-3.13.5.orig/Lib/email/_header_value_parser.py
++++ python3.13-3.13.5/Lib/email/_header_value_parser.py
+@@ -101,6 +101,12 @@ def make_quoted_pairs(value):
+     return str(value).replace('\\', '\\\\').replace('"', '\\"')
+ 
+ 
++def make_parenthesis_pairs(value):
++    """Escape parenthesis and backslash for use within a comment."""
++    return str(value).replace('\\', '\\\\') \
++        .replace('(', '\\(').replace(')', '\\)')
++
++
+ def quote_string(value):
+     escaped = make_quoted_pairs(value)
+     return f'"{escaped}"'
+@@ -933,7 +939,7 @@ class WhiteSpaceTerminal(Terminal):
+         return ' '
+ 
+     def startswith_fws(self):
+-        return True
++        return self and self[0] in WSP
+ 
+ 
+ class ValueTerminal(Terminal):
+@@ -2924,6 +2930,13 @@ def _refold_parse_tree(parse_tree, *, po
+                     [ValueTerminal(make_quoted_pairs(p), 'ptext')
+                      for p in newparts] +
+                     [ValueTerminal('"', 'ptext')])
++            if part.token_type == 'comment':
++                newparts = (
++                    [ValueTerminal('(', 'ptext')] +
++                    [ValueTerminal(make_parenthesis_pairs(p), 'ptext')
++                     if p.token_type == 'ptext' else p
++                     for p in newparts] +
++                    [ValueTerminal(')', 'ptext')])
+             if not part.as_ew_allowed:
+                 wrap_as_ew_blocked += 1
+                 newparts.append(end_ew_not_allowed)
+--- python3.13-3.13.5.orig/Lib/test/test_email/test__header_value_parser.py
++++ python3.13-3.13.5/Lib/test/test_email/test__header_value_parser.py
+@@ -3219,6 +3219,29 @@ class TestFolding(TestEmailBase):
+             with self.subTest(to=to):
+                 self._test(parser.get_address_list(to)[0], folded, 
policy=policy)
+ 
++    def test_address_list_with_long_unwrapable_comment(self):
++        policy = self.policy.clone(max_line_length=40)
++        cases = [
++            # (to, folded)
++            ('(loremipsumdolorsitametconsecteturadipi)<[email protected]>',
++             '(loremipsumdolorsitametconsecteturadipi)<[email protected]>\n'),
++            ('<[email protected]>(loremipsumdolorsitametconsecteturadipi)',
++             '<[email protected]>(loremipsumdolorsitametconsecteturadipi)\n'),
++            ('(loremipsum dolorsitametconsecteturadipi)<[email protected]>',
++             '(loremipsum dolorsitametconsecteturadipi)<[email protected]>\n'),
++             ('<[email protected]>(loremipsum dolorsitametconsecteturadipi)',
++             '<[email protected]>(loremipsum\n 
dolorsitametconsecteturadipi)\n'),
++            ('(Escaped \\( \\) chars \\\\ in comments stay 
escaped)<[email protected]>',
++             '(Escaped \\( \\) chars \\\\ in comments stay\n 
escaped)<[email protected]>\n'),
++            
('((loremipsum)(loremipsum)(loremipsum)(loremipsum))<[email protected]>',
++             
'((loremipsum)(loremipsum)(loremipsum)(loremipsum))<[email protected]>\n'),
++            ('((loremipsum)(loremipsum)(loremipsum) 
(loremipsum))<[email protected]>',
++             '((loremipsum)(loremipsum)(loremipsum)\n 
(loremipsum))<[email protected]>\n'),
++        ]
++        for (to, folded) in cases:
++            with self.subTest(to=to):
++                self._test(parser.get_address_list(to)[0], folded, 
policy=policy)
++
+     # XXX Need tests with comments on various sides of a unicode token,
+     # and with unicode tokens in the comments.  Spaces inside the quotes
+     # currently don't do the right thing.
diff -Nru python3.13-3.13.5/debian/patches/CVE-2025-12084.patch 
python3.13-3.13.5/debian/patches/CVE-2025-12084.patch
--- python3.13-3.13.5/debian/patches/CVE-2025-12084.patch       1970-01-01 
01:00:00.000000000 +0100
+++ python3.13-3.13.5/debian/patches/CVE-2025-12084.patch       2026-04-06 
13:48:20.000000000 +0200
@@ -0,0 +1,108 @@
+From ddcd2acd85d891a53e281c773b3093f9db953964 Mon Sep 17 00:00:00 2001
+From: "Miss Islington (bot)"
+ <[email protected]>
+Date: Fri, 5 Dec 2025 16:24:38 +0100
+Subject: [PATCH] [3.13] gh-142145: Remove quadratic behavior in node ID cache
+ clearing (GH-142146) (#142210)
+
+
+From 86747f1a1a7b4f0ea2d47fe94b841c224bae5073 Mon Sep 17 00:00:00 2001
+From: "Miss Islington (bot)"
+ <[email protected]>
+Date: Sun, 21 Dec 2025 00:56:59 +0100
+Subject: [PATCH] [3.13] gh-142754: Ensure that Element & Attr instances have
+ the ownerDocument attribute (GH-142794) (#142819)
+
+
+
+--- python3.13-3.13.5.orig/Lib/test/test_minidom.py
++++ python3.13-3.13.5/Lib/test/test_minidom.py
+@@ -2,13 +2,14 @@
+ 
+ import copy
+ import pickle
++import time
+ import io
+ from test import support
+ import unittest
+ 
+ import xml.dom.minidom
+ 
+-from xml.dom.minidom import parse, Attr, Node, Document, parseString
++from xml.dom.minidom import parse, Attr, Node, Document, Element, parseString
+ from xml.dom.minidom import getDOMImplementation
+ from xml.parsers.expat import ExpatError
+ 
+@@ -173,6 +174,31 @@ class MinidomTest(unittest.TestCase):
+         self.assertEqual(dom.documentElement.childNodes[-1].data, "Hello")
+         dom.unlink()
+ 
++    def testAppendChildNoQuadraticComplexity(self):
++        impl = getDOMImplementation()
++
++        newdoc = impl.createDocument(None, "some_tag", None)
++        top_element = newdoc.documentElement
++        children = [newdoc.createElement(f"child-{i}") for i in range(1, 2 ** 
15 + 1)]
++        element = top_element
++
++        start = time.time()
++        for child in children:
++            element.appendChild(child)
++            element = child
++        end = time.time()
++
++        # This example used to take at least 30 seconds.
++        self.assertLess(end - start, 10)
++
++    def testSetAttributeNodeWithoutOwnerDocument(self):
++        # regression test for gh-142754
++        elem = Element("test")
++        attr = Attr("id")
++        attr.value = "test-id"
++        elem.setAttributeNode(attr)
++        self.assertEqual(elem.getAttribute("id"), "test-id")
++
+     def testAppendChildFragment(self):
+         dom, orig, c1, c2, c3, frag = self._create_fragment_test_nodes()
+         dom.documentElement.appendChild(frag)
+--- python3.13-3.13.5.orig/Lib/xml/dom/minidom.py
++++ python3.13-3.13.5/Lib/xml/dom/minidom.py
+@@ -292,13 +292,6 @@ def _append_child(self, node):
+     childNodes.append(node)
+     node.parentNode = self
+ 
+-def _in_document(node):
+-    # return True iff node is part of a document tree
+-    while node is not None:
+-        if node.nodeType == Node.DOCUMENT_NODE:
+-            return True
+-        node = node.parentNode
+-    return False
+ 
+ def _write_data(writer, text, attr):
+     "Writes datachars to writer."
+@@ -371,6 +364,7 @@ class Attr(Node):
+     def __init__(self, qName, namespaceURI=EMPTY_NAMESPACE, localName=None,
+                  prefix=None):
+         self.ownerElement = None
++        self.ownerDocument = None
+         self._name = qName
+         self.namespaceURI = namespaceURI
+         self._prefix = prefix
+@@ -696,6 +690,7 @@ class Element(Node):
+ 
+     def __init__(self, tagName, namespaceURI=EMPTY_NAMESPACE, prefix=None,
+                  localName=None):
++        self.ownerDocument = None
+         self.parentNode = None
+         self.tagName = self.nodeName = tagName
+         self.prefix = prefix
+@@ -1555,7 +1550,7 @@ def _clear_id_cache(node):
+     if node.nodeType == Node.DOCUMENT_NODE:
+         node._id_cache.clear()
+         node._id_search_stack = None
+-    elif _in_document(node):
++    elif node.ownerDocument:
+         node.ownerDocument._id_cache.clear()
+         node.ownerDocument._id_search_stack= None
+ 
diff -Nru python3.13-3.13.5/debian/patches/CVE-2025-13462.patch 
python3.13-3.13.5/debian/patches/CVE-2025-13462.patch
--- python3.13-3.13.5/debian/patches/CVE-2025-13462.patch       1970-01-01 
01:00:00.000000000 +0100
+++ python3.13-3.13.5/debian/patches/CVE-2025-13462.patch       2026-04-06 
13:49:37.000000000 +0200
@@ -0,0 +1,103 @@
+From ae99fe3a33b43e303a05f012815cef60b611a9c7 Mon Sep 17 00:00:00 2001
+From: "Miss Islington (bot)"
+ <[email protected]>
+Date: Tue, 17 Mar 2026 10:51:43 +0100
+Subject: [PATCH] [3.13] gh-141707: Skip TarInfo DIRTYPE normalization during
+ GNU long name handling (GH-145818)
+
+
+--- python3.13-3.13.5.orig/Lib/tarfile.py
++++ python3.13-3.13.5/Lib/tarfile.py
+@@ -1267,6 +1267,20 @@ class TarInfo(object):
+     @classmethod
+     def frombuf(cls, buf, encoding, errors):
+         """Construct a TarInfo object from a 512 byte bytes object.
++
++        To support the old v7 tar format AREGTYPE headers are
++        transformed to DIRTYPE headers if their name ends in '/'.
++        """
++        return cls._frombuf(buf, encoding, errors)
++
++    @classmethod
++    def _frombuf(cls, buf, encoding, errors, *, dircheck=True):
++        """Construct a TarInfo object from a 512 byte bytes object.
++
++        If ``dircheck`` is set to ``True`` then ``AREGTYPE`` headers will
++        be normalized to ``DIRTYPE`` if the name ends in a trailing slash.
++        ``dircheck`` must be set to ``False`` if this function is called
++        on a follow-up header such as ``GNUTYPE_LONGNAME``.
+         """
+         if len(buf) == 0:
+             raise EmptyHeaderError("empty header")
+@@ -1297,7 +1311,7 @@ class TarInfo(object):
+ 
+         # Old V7 tar format represents a directory as a regular
+         # file with a trailing slash.
+-        if obj.type == AREGTYPE and obj.name.endswith("/"):
++        if dircheck and obj.type == AREGTYPE and obj.name.endswith("/"):
+             obj.type = DIRTYPE
+ 
+         # The old GNU sparse format occupies some of the unused
+@@ -1332,8 +1346,15 @@ class TarInfo(object):
+         """Return the next TarInfo object from TarFile object
+            tarfile.
+         """
++        return cls._fromtarfile(tarfile)
++
++    @classmethod
++    def _fromtarfile(cls, tarfile, *, dircheck=True):
++        """
++        See dircheck documentation in _frombuf().
++        """
+         buf = tarfile.fileobj.read(BLOCKSIZE)
+-        obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
++        obj = cls._frombuf(buf, tarfile.encoding, tarfile.errors, 
dircheck=dircheck)
+         obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
+         return obj._proc_member(tarfile)
+ 
+@@ -1391,7 +1412,7 @@ class TarInfo(object):
+ 
+         # Fetch the next header and process it.
+         try:
+-            next = self.fromtarfile(tarfile)
++            next = self._fromtarfile(tarfile, dircheck=False)
+         except HeaderError as e:
+             raise SubsequentHeaderError(str(e)) from None
+ 
+@@ -1526,7 +1547,7 @@ class TarInfo(object):
+ 
+         # Fetch the next header.
+         try:
+-            next = self.fromtarfile(tarfile)
++            next = self._fromtarfile(tarfile, dircheck=False)
+         except HeaderError as e:
+             raise SubsequentHeaderError(str(e)) from None
+ 
+--- python3.13-3.13.5.orig/Lib/test/test_tarfile.py
++++ python3.13-3.13.5/Lib/test/test_tarfile.py
+@@ -1164,6 +1164,25 @@ class LongnameTest:
+                 self.assertIsNotNone(tar.getmember(longdir))
+                 self.assertIsNotNone(tar.getmember(longdir.removesuffix('/')))
+ 
++    def test_longname_file_not_directory(self):
++        # Test reading a longname file and ensure it is not handled as a 
directory
++        # Issue #141707
++        buf = io.BytesIO()
++        with tarfile.open(mode='w', fileobj=buf, format=self.format) as tar:
++            ti = tarfile.TarInfo()
++            ti.type = tarfile.AREGTYPE
++            ti.name = ('a' * 99) + '/' + ('b' * 3)
++            tar.addfile(ti)
++
++            expected = {t.name: t.type for t in tar.getmembers()}
++
++        buf.seek(0)
++        with tarfile.open(mode='r', fileobj=buf) as tar:
++            actual = {t.name: t.type for t in tar.getmembers()}
++
++        self.assertEqual(expected, actual)
++
++
+ class GNUReadTest(LongnameTest, ReadTest, unittest.TestCase):
+ 
+     subdir = "gnu"
diff -Nru python3.13-3.13.5/debian/patches/CVE-2025-13836.patch 
python3.13-3.13.5/debian/patches/CVE-2025-13836.patch
--- python3.13-3.13.5/debian/patches/CVE-2025-13836.patch       1970-01-01 
01:00:00.000000000 +0100
+++ python3.13-3.13.5/debian/patches/CVE-2025-13836.patch       2026-04-06 
13:50:58.000000000 +0200
@@ -0,0 +1,127 @@
+From 289f29b0fe38baf2d7cb5854f4bb573cc34a6a15 Mon Sep 17 00:00:00 2001
+From: "Miss Islington (bot)"
+ <[email protected]>
+Date: Fri, 5 Dec 2025 16:21:57 +0100
+Subject: [PATCH] [3.13] gh-119451: Fix a potential denial of service in
+ http.client (GH-119454) (#142139)
+
+
+--- python3.13-3.13.5.orig/Lib/http/client.py
++++ python3.13-3.13.5/Lib/http/client.py
+@@ -111,6 +111,11 @@ responses = {v: v.phrase for v in http.H
+ _MAXLINE = 65536
+ _MAXHEADERS = 100
+ 
++# Data larger than this will be read in chunks, to prevent extreme
++# overallocation.
++_MIN_READ_BUF_SIZE = 1 << 20
++
++
+ # Header name/value ABNF (http://tools.ietf.org/html/rfc7230#section-3.2)
+ #
+ # VCHAR          = %x21-7E
+@@ -639,10 +644,25 @@ class HTTPResponse(io.BufferedIOBase):
+         reading. If the bytes are truly not available (due to EOF), then the
+         IncompleteRead exception can be used to detect the problem.
+         """
+-        data = self.fp.read(amt)
+-        if len(data) < amt:
+-            raise IncompleteRead(data, amt-len(data))
+-        return data
++        cursize = min(amt, _MIN_READ_BUF_SIZE)
++        data = self.fp.read(cursize)
++        if len(data) >= amt:
++            return data
++        if len(data) < cursize:
++            raise IncompleteRead(data, amt - len(data))
++
++        data = io.BytesIO(data)
++        data.seek(0, 2)
++        while True:
++            # This is a geometric increase in read size (never more than
++            # doubling out the current length of data per loop iteration).
++            delta = min(cursize, amt - cursize)
++            data.write(self.fp.read(delta))
++            if data.tell() >= amt:
++                return data.getvalue()
++            cursize += delta
++            if data.tell() < cursize:
++                raise IncompleteRead(data.getvalue(), amt - data.tell())
+ 
+     def _safe_readinto(self, b):
+         """Same as _safe_read, but for reading into a buffer."""
+--- python3.13-3.13.5.orig/Lib/test/test_httplib.py
++++ python3.13-3.13.5/Lib/test/test_httplib.py
+@@ -1455,6 +1455,72 @@ class BasicTest(TestCase, ExtraAssertion
+         thread.join()
+         self.assertEqual(result, b"proxied data\n")
+ 
++    def test_large_content_length(self):
++        serv = socket.create_server((HOST, 0))
++        self.addCleanup(serv.close)
++
++        def run_server():
++            [conn, address] = serv.accept()
++            with conn:
++                while conn.recv(1024):
++                    conn.sendall(
++                        b"HTTP/1.1 200 Ok\r\n"
++                        b"Content-Length: %d\r\n"
++                        b"\r\n" % size)
++                    conn.sendall(b'A' * (size//3))
++                    conn.sendall(b'B' * (size - size//3))
++
++        thread = threading.Thread(target=run_server)
++        thread.start()
++        self.addCleanup(thread.join, 1.0)
++
++        conn = client.HTTPConnection(*serv.getsockname())
++        try:
++            for w in range(15, 27):
++                size = 1 << w
++                conn.request("GET", "/")
++                with conn.getresponse() as response:
++                    self.assertEqual(len(response.read()), size)
++        finally:
++            conn.close()
++            thread.join(1.0)
++
++    def test_large_content_length_truncated(self):
++        serv = socket.create_server((HOST, 0))
++        self.addCleanup(serv.close)
++
++        def run_server():
++            while True:
++                [conn, address] = serv.accept()
++                with conn:
++                    conn.recv(1024)
++                    if not size:
++                        break
++                    conn.sendall(
++                        b"HTTP/1.1 200 Ok\r\n"
++                        b"Content-Length: %d\r\n"
++                        b"\r\n"
++                        b"Text" % size)
++
++        thread = threading.Thread(target=run_server)
++        thread.start()
++        self.addCleanup(thread.join, 1.0)
++
++        conn = client.HTTPConnection(*serv.getsockname())
++        try:
++            for w in range(18, 65):
++                size = 1 << w
++                conn.request("GET", "/")
++                with conn.getresponse() as response:
++                    self.assertRaises(client.IncompleteRead, response.read)
++                conn.close()
++        finally:
++            conn.close()
++            size = 0
++            conn.request("GET", "/")
++            conn.close()
++            thread.join(1.0)
++
+     def test_putrequest_override_domain_validation(self):
+         """
+         It should be possible to override the default validation
diff -Nru python3.13-3.13.5/debian/patches/CVE-2025-13837.patch 
python3.13-3.13.5/debian/patches/CVE-2025-13837.patch
--- python3.13-3.13.5/debian/patches/CVE-2025-13837.patch       1970-01-01 
01:00:00.000000000 +0100
+++ python3.13-3.13.5/debian/patches/CVE-2025-13837.patch       2026-04-06 
13:51:52.000000000 +0200
@@ -0,0 +1,132 @@
+From 71fa8eb8233b37f16c88b6e3e583b461b205d1ba Mon Sep 17 00:00:00 2001
+From: "Miss Islington (bot)"
+ <[email protected]>
+Date: Mon, 1 Dec 2025 16:50:28 +0100
+Subject: [PATCH] [3.13] gh-119342: Fix a potential denial of service in
+ plistlib (GH-119343) (GH-142144)
+
+--- python3.13-3.13.5.orig/Lib/plistlib.py
++++ python3.13-3.13.5/Lib/plistlib.py
+@@ -73,6 +73,9 @@ from xml.parsers.expat import ParserCrea
+ PlistFormat = enum.Enum('PlistFormat', 'FMT_XML FMT_BINARY', module=__name__)
+ globals().update(PlistFormat.__members__)
+ 
++# Data larger than this will be read in chunks, to prevent extreme
++# overallocation.
++_MIN_READ_BUF_SIZE = 1 << 20
+ 
+ class UID:
+     def __init__(self, data):
+@@ -508,12 +511,24 @@ class _BinaryPlistParser:
+ 
+         return tokenL
+ 
++    def _read(self, size):
++        cursize = min(size, _MIN_READ_BUF_SIZE)
++        data = self._fp.read(cursize)
++        while True:
++            if len(data) != cursize:
++                raise InvalidFileException
++            if cursize == size:
++                return data
++            delta = min(cursize, size - cursize)
++            data += self._fp.read(delta)
++            cursize += delta
++
+     def _read_ints(self, n, size):
+-        data = self._fp.read(size * n)
++        data = self._read(size * n)
+         if size in _BINARY_FORMAT:
+             return struct.unpack(f'>{n}{_BINARY_FORMAT[size]}', data)
+         else:
+-            if not size or len(data) != size * n:
++            if not size:
+                 raise InvalidFileException()
+             return tuple(int.from_bytes(data[i: i + size], 'big')
+                          for i in range(0, size * n, size))
+@@ -573,22 +588,16 @@ class _BinaryPlistParser:
+ 
+         elif tokenH == 0x40:  # data
+             s = self._get_size(tokenL)
+-            result = self._fp.read(s)
+-            if len(result) != s:
+-                raise InvalidFileException()
++            result = self._read(s)
+ 
+         elif tokenH == 0x50:  # ascii string
+             s = self._get_size(tokenL)
+-            data = self._fp.read(s)
+-            if len(data) != s:
+-                raise InvalidFileException()
++            data = self._read(s)
+             result = data.decode('ascii')
+ 
+         elif tokenH == 0x60:  # unicode string
+             s = self._get_size(tokenL) * 2
+-            data = self._fp.read(s)
+-            if len(data) != s:
+-                raise InvalidFileException()
++            data = self._read(s)
+             result = data.decode('utf-16be')
+ 
+         elif tokenH == 0x80:  # UID
+--- python3.13-3.13.5.orig/Lib/test/test_plistlib.py
++++ python3.13-3.13.5/Lib/test/test_plistlib.py
+@@ -904,8 +904,7 @@ class TestPlistlib(unittest.TestCase):
+ 
+ class TestBinaryPlistlib(unittest.TestCase):
+ 
+-    @staticmethod
+-    def decode(*objects, offset_size=1, ref_size=1):
++    def build(self, *objects, offset_size=1, ref_size=1):
+         data = [b'bplist00']
+         offset = 8
+         offsets = []
+@@ -917,7 +916,11 @@ class TestBinaryPlistlib(unittest.TestCa
+                            len(objects), 0, offset)
+         data.extend(offsets)
+         data.append(tail)
+-        return plistlib.loads(b''.join(data), fmt=plistlib.FMT_BINARY)
++        return b''.join(data)
++
++    def decode(self, *objects, offset_size=1, ref_size=1):
++        data = self.build(*objects, offset_size=offset_size, 
ref_size=ref_size)
++        return plistlib.loads(data, fmt=plistlib.FMT_BINARY)
+ 
+     def test_nonstandard_refs_size(self):
+         # Issue #21538: Refs and offsets are 24-bit integers
+@@ -1025,6 +1028,34 @@ class TestBinaryPlistlib(unittest.TestCa
+                 with self.assertRaises(plistlib.InvalidFileException):
+                     plistlib.loads(b'bplist00' + data, 
fmt=plistlib.FMT_BINARY)
+ 
++    def test_truncated_large_data(self):
++        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
++        def check(data):
++            with open(os_helper.TESTFN, 'wb') as f:
++                f.write(data)
++            # buffered file
++            with open(os_helper.TESTFN, 'rb') as f:
++                with self.assertRaises(plistlib.InvalidFileException):
++                    plistlib.load(f, fmt=plistlib.FMT_BINARY)
++            # unbuffered file
++            with open(os_helper.TESTFN, 'rb', buffering=0) as f:
++                with self.assertRaises(plistlib.InvalidFileException):
++                    plistlib.load(f, fmt=plistlib.FMT_BINARY)
++        for w in range(20, 64):
++            s = 1 << w
++            # data
++            check(self.build(b'\x4f\x13' + s.to_bytes(8, 'big')))
++            # ascii string
++            check(self.build(b'\x5f\x13' + s.to_bytes(8, 'big')))
++            # unicode string
++            check(self.build(b'\x6f\x13' + s.to_bytes(8, 'big')))
++            # array
++            check(self.build(b'\xaf\x13' + s.to_bytes(8, 'big')))
++            # dict
++            check(self.build(b'\xdf\x13' + s.to_bytes(8, 'big')))
++            # number of objects
++            check(b'bplist00' + struct.pack('>6xBBQQQ', 1, 1, s, 0, 8))
++
+     def test_load_aware_datetime(self):
+         data = 
(b'bplist003B\x04>\xd0d\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00'
+                 
b'\x01\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00'
diff -Nru python3.13-3.13.5/debian/patches/CVE-2025-15282.patch 
python3.13-3.13.5/debian/patches/CVE-2025-15282.patch
--- python3.13-3.13.5/debian/patches/CVE-2025-15282.patch       1970-01-01 
01:00:00.000000000 +0100
+++ python3.13-3.13.5/debian/patches/CVE-2025-15282.patch       2026-04-06 
14:17:20.000000000 +0200
@@ -0,0 +1,44 @@
+From a35ca3be5842505dab74dc0b90b89cde0405017a Mon Sep 17 00:00:00 2001
+From: Seth Michael Larson <[email protected]>
+Date: Sun, 25 Jan 2026 11:06:01 -0600
+Subject: [PATCH] [3.13] gh-143925: Reject control characters in data: URL
+ mediatypes (#144111)
+
+--- python3.13-3.13.5.orig/Lib/test/test_urllib.py
++++ python3.13-3.13.5/Lib/test/test_urllib.py
+@@ -12,6 +12,7 @@ from test import support
+ from test.support import os_helper
+ from test.support import socket_helper
+ from test.support import warnings_helper
++from test.support import control_characters_c0
+ from test.support.testcase import ExtraAssertions
+ import os
+ try:
+@@ -677,6 +678,13 @@ class urlopen_DataTests(unittest.TestCas
+         # missing padding character
+         
self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=')
+ 
++    def test_invalid_mediatype(self):
++        for c0 in control_characters_c0():
++            self.assertRaises(ValueError,urllib.request.urlopen,
++                              f'data:text/html;{c0},data')
++        for c0 in control_characters_c0():
++            self.assertRaises(ValueError,urllib.request.urlopen,
++                              f'data:text/html{c0};base64,ZGF0YQ==')
+ 
+ class urlretrieve_FileTests(unittest.TestCase):
+     """Test urllib.urlretrieve() on local files"""
+--- python3.13-3.13.5.orig/Lib/urllib/request.py
++++ python3.13-3.13.5/Lib/urllib/request.py
+@@ -1630,6 +1630,11 @@ class DataHandler(BaseHandler):
+         scheme, data = url.split(":",1)
+         mediatype, data = data.split(",",1)
+ 
++        # Disallow control characters within mediatype.
++        if re.search(r"[\x00-\x1F\x7F]", mediatype):
++            raise ValueError(
++                "Control characters not allowed in data: mediatype")
++
+         # even base64 encoded data URLs might be quoted so unquote in any 
case:
+         data = unquote_to_bytes(data)
+         if mediatype.endswith(";base64"):
diff -Nru python3.13-3.13.5/debian/patches/CVE-2025-6069.patch 
python3.13-3.13.5/debian/patches/CVE-2025-6069.patch
--- python3.13-3.13.5/debian/patches/CVE-2025-6069.patch        1970-01-01 
01:00:00.000000000 +0100
+++ python3.13-3.13.5/debian/patches/CVE-2025-6069.patch        2026-04-06 
14:13:29.000000000 +0200
@@ -0,0 +1,223 @@
+From 4455cbabf991e202185a25a631af206f60bbc949 Mon Sep 17 00:00:00 2001
+From: "Miss Islington (bot)"
+ <[email protected]>
+Date: Fri, 13 Jun 2025 19:20:30 +0200
+Subject: [PATCH] [3.13] gh-135462: Fix quadratic complexity in processing
+ special input in HTMLParser (GH-135464) (GH-135482)
+
+--- python3.13-3.13.5.orig/Lib/html/parser.py
++++ python3.13-3.13.5/Lib/html/parser.py
+@@ -27,6 +27,7 @@ charref = re.compile('&#(?:[0-9]+|[xX][0
+ attr_charref = re.compile(r'&(#[0-9]+|#[xX][0-9a-fA-F]+|[a-zA-Z][a-zA-Z0-9]*)[;=]?')
+ 
+ starttagopen = re.compile('<[a-zA-Z]')
++endtagopen = re.compile('</[a-zA-Z]')
+ piclose = re.compile('>')
+ commentclose = re.compile(r'--\s*>')
+ # Note:
+@@ -195,7 +196,7 @@ class HTMLParser(_markupbase.ParserBase)
+                     k = self.parse_pi(i)
+                 elif startswith("<!", i):
+                     k = self.parse_html_declaration(i)
+-                elif (i + 1) < n:
++                elif (i + 1) < n or end:
+                     self.handle_data("<")
+                     k = i + 1
+                 else:
+@@ -203,17 +204,35 @@ class HTMLParser(_markupbase.ParserBase)
+                 if k < 0:
+                     if not end:
+                         break
+-                    k = rawdata.find('>', i + 1)
+-                    if k < 0:
+-                        k = rawdata.find('<', i + 1)
+-                        if k < 0:
+-                            k = i + 1
++                    if starttagopen.match(rawdata, i):  # < + letter
++                        pass
++                    elif startswith("</", i):
++                        if i + 2 == n:
++                            self.handle_data("</")
++                        elif endtagopen.match(rawdata, i):  # </ + letter
++                            pass
++                        else:
++                            # bogus comment
++                            self.handle_comment(rawdata[i+2:])
++                    elif startswith("<!--", i):
++                        j = n
++                        for suffix in ("--!", "--", "-"):
++                            if rawdata.endswith(suffix, i+4):
++                                j -= len(suffix)
++                                break
++                        self.handle_comment(rawdata[i+4:j])
++                    elif startswith("<![CDATA[", i):
++                        self.unknown_decl(rawdata[i+3:])
++                    elif rawdata[i:i+9].lower() == '<!doctype':
++                        self.handle_decl(rawdata[i+2:])
++                    elif startswith("<!", i):
++                        # bogus comment
++                        self.handle_comment(rawdata[i+2:])
++                    elif startswith("<?", i):
++                        self.handle_pi(rawdata[i+2:])
+                     else:
+-                        k += 1
+-                    if self.convert_charrefs and not self.cdata_elem:
+-                        self.handle_data(unescape(rawdata[i:k]))
+-                    else:
+-                        self.handle_data(rawdata[i:k])
++                        raise AssertionError("we should not get here!")
++                    k = n
+                 i = self.updatepos(i, k)
+             elif startswith("&#", i):
+                 match = charref.match(rawdata, i)
+--- python3.13-3.13.5.orig/Lib/test/test_htmlparser.py
++++ python3.13-3.13.5/Lib/test/test_htmlparser.py
+@@ -5,6 +5,7 @@ import pprint
+ import unittest
+ 
+ from unittest.mock import patch
++from test import support
+ 
+ 
+ class EventCollector(html.parser.HTMLParser):
+@@ -430,28 +431,34 @@ text
+                             ('data', '<'),
+                             ('starttag', 'bc<', [('a', None)]),
+                             ('endtag', 'html'),
+-                            ('data', '\n<img src="URL>'),
+-                            ('comment', '/img'),
+-                            ('endtag', 'html<')])
++                            ('data', '\n')])
+ 
+     def test_starttag_junk_chars(self):
++        self._run_check("<", [('data', '<')])
++        self._run_check("<>", [('data', '<>')])
++        self._run_check("< >", [('data', '< >')])
++        self._run_check("< ", [('data', '< ')])
+         self._run_check("</>", [])
++        self._run_check("<$>", [('data', '<$>')])
+         self._run_check("</$>", [('comment', '$')])
+         self._run_check("</", [('data', '</')])
+-        self._run_check("</a", [('data', '</a')])
++        self._run_check("</a", [])
++        self._run_check("</ a>", [('endtag', 'a')])
++        self._run_check("</ a", [('comment', ' a')])
+         self._run_check("<a<a>", [('starttag', 'a<a', [])])
+         self._run_check("</a<a>", [('endtag', 'a<a')])
+-        self._run_check("<!", [('data', '<!')])
+-        self._run_check("<a", [('data', '<a')])
+-        self._run_check("<a foo='bar'", [('data', "<a foo='bar'")])
+-        self._run_check("<a foo='bar", [('data', "<a foo='bar")])
+-        self._run_check("<a foo='>'", [('data', "<a foo='>'")])
+-        self._run_check("<a foo='>", [('data', "<a foo='>")])
++        self._run_check("<!", [('comment', '')])
++        self._run_check("<a", [])
++        self._run_check("<a foo='bar'", [])
++        self._run_check("<a foo='bar", [])
++        self._run_check("<a foo='>'", [])
++        self._run_check("<a foo='>", [])
+         self._run_check("<a$>", [('starttag', 'a$', [])])
+         self._run_check("<a$b>", [('starttag', 'a$b', [])])
+         self._run_check("<a$b/>", [('startendtag', 'a$b', [])])
+         self._run_check("<a$b  >", [('starttag', 'a$b', [])])
+         self._run_check("<a$b  />", [('startendtag', 'a$b', [])])
++        self._run_check("</a$b>", [('endtag', 'a$b')])
+ 
+     def test_slashes_in_starttag(self):
+         self._run_check('<a foo="var"/>', [('startendtag', 'a', [('foo', 'var')])])
+@@ -576,21 +583,50 @@ text
+         for html, expected in data:
+             self._run_check(html, expected)
+ 
+-    def test_EOF_in_comments_or_decls(self):
++    def test_eof_in_comments(self):
+         data = [
+-            ('<!', [('data', '<!')]),
+-            ('<!-', [('data', '<!-')]),
+-            ('<!--', [('data', '<!--')]),
+-            ('<![', [('data', '<![')]),
+-            ('<![CDATA[', [('data', '<![CDATA[')]),
+-            ('<![CDATA[x', [('data', '<![CDATA[x')]),
+-            ('<!DOCTYPE', [('data', '<!DOCTYPE')]),
+-            ('<!DOCTYPE HTML', [('data', '<!DOCTYPE HTML')]),
++            ('<!--', [('comment', '')]),
++            ('<!---', [('comment', '')]),
++            ('<!----', [('comment', '')]),
++            ('<!-----', [('comment', '-')]),
++            ('<!------', [('comment', '--')]),
++            ('<!----!', [('comment', '')]),
++            ('<!---!', [('comment', '-!')]),
++            ('<!---!>', [('comment', '-!>')]),
++            ('<!--foo', [('comment', 'foo')]),
++            ('<!--foo-', [('comment', 'foo')]),
++            ('<!--foo--', [('comment', 'foo')]),
++            ('<!--foo--!', [('comment', 'foo')]),
++            ('<!--<!--', [('comment', '<!')]),
++            ('<!--<!--!', [('comment', '<!')]),
+         ]
+         for html, expected in data:
+             self._run_check(html, expected)
++
++    def test_eof_in_declarations(self):
++        data = [
++            ('<!', [('comment', '')]),
++            ('<!-', [('comment', '-')]),
++            ('<![', [('comment', '[')]),
++            ('<![CDATA[', [('unknown decl', 'CDATA[')]),
++            ('<![CDATA[x', [('unknown decl', 'CDATA[x')]),
++            ('<![CDATA[x]', [('unknown decl', 'CDATA[x]')]),
++            ('<![CDATA[x]]', [('unknown decl', 'CDATA[x]]')]),
++            ('<!DOCTYPE', [('decl', 'DOCTYPE')]),
++            ('<!DOCTYPE ', [('decl', 'DOCTYPE ')]),
++            ('<!DOCTYPE html', [('decl', 'DOCTYPE html')]),
++            ('<!DOCTYPE html ', [('decl', 'DOCTYPE html ')]),
++            ('<!DOCTYPE html PUBLIC', [('decl', 'DOCTYPE html PUBLIC')]),
++            ('<!DOCTYPE html PUBLIC "foo', [('decl', 'DOCTYPE html PUBLIC "foo')]),
++            ('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "foo',
++             [('decl', 'DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "foo')]),
++        ]
++        for html, expected in data:
++            self._run_check(html, expected)
++
+     def test_bogus_comments(self):
+-        html = ('<! not really a comment >'
++        html = ('<!ELEMENT br EMPTY>'
++                '<! not really a comment >'
+                 '<! not a comment either -->'
+                 '<! -- close enough -->'
+                 '<!><!<-- this was an empty comment>'
+@@ -604,6 +640,7 @@ text
+                 '<![CDATA]]>'  # required '[' after CDATA
+         )
+         expected = [
++            ('comment', 'ELEMENT br EMPTY'),
+             ('comment', ' not really a comment '),
+             ('comment', ' not a comment either --'),
+             ('comment', ' -- close enough --'),
+@@ -684,6 +721,26 @@ text
+              ('endtag', 'a'), ('data', ' bar & baz')]
+         )
+ 
++    @support.requires_resource('cpu')
++    def test_eof_no_quadratic_complexity(self):
++        # Each of these examples used to take about an hour.
++        # Now they take a fraction of a second.
++        def check(source):
++            parser = html.parser.HTMLParser()
++            parser.feed(source)
++            parser.close()
++        n = 120_000
++        check("<a " * n)
++        check("<a a=" * n)
++        check("</a " * 14 * n)
++        check("</a a=" * 11 * n)
++        check("<!--" * 4 * n)
++        check("<!" * 60 * n)
++        check("<?" * 19 * n)
++        check("</$" * 15 * n)
++        check("<![CDATA[" * 9 * n)
++        check("<!doctype" * 35 * n)
++
+ 
+ class AttributesTestCase(TestCaseBase):
+ 
diff -Nru python3.13-3.13.5/debian/patches/CVE-2025-6075.patch python3.13-3.13.5/debian/patches/CVE-2025-6075.patch
--- python3.13-3.13.5/debian/patches/CVE-2025-6075.patch        1970-01-01 01:00:00.000000000 +0100
+++ python3.13-3.13.5/debian/patches/CVE-2025-6075.patch        2026-04-06 14:14:21.000000000 +0200
@@ -0,0 +1,347 @@
+From 9ab89c026aa9611c4b0b67c288b8303a480fe742 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=C5=81ukasz=20Langa?= <[email protected]>
+Date: Fri, 31 Oct 2025 17:58:09 +0100
+Subject: [PATCH] [3.13] gh-136065: Fix quadratic complexity in
+ os.path.expandvars() (GH-134952) (GH-140845)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+--- python3.13-3.13.5.orig/Lib/ntpath.py
++++ python3.13-3.13.5/Lib/ntpath.py
+@@ -400,17 +400,23 @@ def expanduser(path):
+ # XXX With COMMAND.COM you can use any characters in a variable name,
+ # XXX except '^|<>='.
+ 
++_varpattern = r"'[^']*'?|%(%|[^%]*%?)|\$(\$|[-\w]+|\{[^}]*\}?)"
++_varsub = None
++_varsubb = None
++
+ def expandvars(path):
+     """Expand shell variables of the forms $var, ${var} and %var%.
+ 
+     Unknown variables are left unchanged."""
+     path = os.fspath(path)
++    global _varsub, _varsubb
+     if isinstance(path, bytes):
+         if b'$' not in path and b'%' not in path:
+             return path
+-        import string
+-        varchars = bytes(string.ascii_letters + string.digits + '_-', 'ascii')
+-        quote = b'\''
++        if not _varsubb:
++            import re
++            _varsubb = re.compile(_varpattern.encode(), re.ASCII).sub
++        sub = _varsubb
+         percent = b'%'
+         brace = b'{'
+         rbrace = b'}'
+@@ -419,94 +425,44 @@ def expandvars(path):
+     else:
+         if '$' not in path and '%' not in path:
+             return path
+-        import string
+-        varchars = string.ascii_letters + string.digits + '_-'
+-        quote = '\''
++        if not _varsub:
++            import re
++            _varsub = re.compile(_varpattern, re.ASCII).sub
++        sub = _varsub
+         percent = '%'
+         brace = '{'
+         rbrace = '}'
+         dollar = '$'
+         environ = os.environ
+-    res = path[:0]
+-    index = 0
+-    pathlen = len(path)
+-    while index < pathlen:
+-        c = path[index:index+1]
+-        if c == quote:   # no expansion within single quotes
+-            path = path[index + 1:]
+-            pathlen = len(path)
+-            try:
+-                index = path.index(c)
+-                res += c + path[:index + 1]
+-            except ValueError:
+-                res += c + path
+-                index = pathlen - 1
+-        elif c == percent:  # variable or '%'
+-            if path[index + 1:index + 2] == percent:
+-                res += c
+-                index += 1
+-            else:
+-                path = path[index+1:]
+-                pathlen = len(path)
+-                try:
+-                    index = path.index(percent)
+-                except ValueError:
+-                    res += percent + path
+-                    index = pathlen - 1
+-                else:
+-                    var = path[:index]
+-                    try:
+-                        if environ is None:
+-                            value = os.fsencode(os.environ[os.fsdecode(var)])
+-                        else:
+-                            value = environ[var]
+-                    except KeyError:
+-                        value = percent + var + percent
+-                    res += value
+-        elif c == dollar:  # variable or '$$'
+-            if path[index + 1:index + 2] == dollar:
+-                res += c
+-                index += 1
+-            elif path[index + 1:index + 2] == brace:
+-                path = path[index+2:]
+-                pathlen = len(path)
+-                try:
+-                    index = path.index(rbrace)
+-                except ValueError:
+-                    res += dollar + brace + path
+-                    index = pathlen - 1
+-                else:
+-                    var = path[:index]
+-                    try:
+-                        if environ is None:
+-                            value = os.fsencode(os.environ[os.fsdecode(var)])
+-                        else:
+-                            value = environ[var]
+-                    except KeyError:
+-                        value = dollar + brace + var + rbrace
+-                    res += value
+-            else:
+-                var = path[:0]
+-                index += 1
+-                c = path[index:index + 1]
+-                while c and c in varchars:
+-                    var += c
+-                    index += 1
+-                    c = path[index:index + 1]
+-                try:
+-                    if environ is None:
+-                        value = os.fsencode(os.environ[os.fsdecode(var)])
+-                    else:
+-                        value = environ[var]
+-                except KeyError:
+-                    value = dollar + var
+-                res += value
+-                if c:
+-                    index -= 1
++
++    def repl(m):
++        lastindex = m.lastindex
++        if lastindex is None:
++            return m[0]
++        name = m[lastindex]
++        if lastindex == 1:
++            if name == percent:
++                return name
++            if not name.endswith(percent):
++                return m[0]
++            name = name[:-1]
+         else:
+-            res += c
+-        index += 1
+-    return res
++            if name == dollar:
++                return name
++            if name.startswith(brace):
++                if not name.endswith(rbrace):
++                    return m[0]
++                name = name[1:-1]
++
++        try:
++            if environ is None:
++                return os.fsencode(os.environ[os.fsdecode(name)])
++            else:
++                return environ[name]
++        except KeyError:
++            return m[0]
++
++    return sub(repl, path)
+ 
+ 
+ # Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
+--- python3.13-3.13.5.orig/Lib/posixpath.py
++++ python3.13-3.13.5/Lib/posixpath.py
+@@ -284,42 +284,41 @@ def expanduser(path):
+ # This expands the forms $variable and ${variable} only.
+ # Non-existent variables are left unchanged.
+ 
+-_varprog = None
+-_varprogb = None
++_varpattern = r'\$(\w+|\{[^}]*\}?)'
++_varsub = None
++_varsubb = None
+ 
+ def expandvars(path):
+     """Expand shell variables of form $var and ${var}.  Unknown variables
+     are left unchanged."""
+     path = os.fspath(path)
+-    global _varprog, _varprogb
++    global _varsub, _varsubb
+     if isinstance(path, bytes):
+         if b'$' not in path:
+             return path
+-        if not _varprogb:
++        if not _varsubb:
+             import re
+-            _varprogb = re.compile(br'\$(\w+|\{[^}]*\})', re.ASCII)
+-        search = _varprogb.search
++            _varsubb = re.compile(_varpattern.encode(), re.ASCII).sub
++        sub = _varsubb
+         start = b'{'
+         end = b'}'
+         environ = getattr(os, 'environb', None)
+     else:
+         if '$' not in path:
+             return path
+-        if not _varprog:
++        if not _varsub:
+             import re
+-            _varprog = re.compile(r'\$(\w+|\{[^}]*\})', re.ASCII)
+-        search = _varprog.search
++            _varsub = re.compile(_varpattern, re.ASCII).sub
++        sub = _varsub
+         start = '{'
+         end = '}'
+         environ = os.environ
+-    i = 0
+-    while True:
+-        m = search(path, i)
+-        if not m:
+-            break
+-        i, j = m.span(0)
+-        name = m.group(1)
+-        if name.startswith(start) and name.endswith(end):
++
++    def repl(m):
++        name = m[1]
++        if name.startswith(start):
++            if not name.endswith(end):
++                return m[0]
+             name = name[1:-1]
+         try:
+             if environ is None:
+@@ -327,13 +326,11 @@ def expandvars(path):
+             else:
+                 value = environ[name]
+         except KeyError:
+-            i = j
++            return m[0]
+         else:
+-            tail = path[j:]
+-            path = path[:i] + value
+-            i = len(path)
+-            path += tail
+-    return path
++            return value
++
++    return sub(repl, path)
+ 
+ 
+ # Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
+--- python3.13-3.13.5.orig/Lib/test/test_genericpath.py
++++ python3.13-3.13.5/Lib/test/test_genericpath.py
+@@ -7,9 +7,9 @@ import os
+ import sys
+ import unittest
+ import warnings
+-from test.support import (
+-    is_apple, is_emscripten, os_helper, warnings_helper
+-)
++from test import support
++from test.support import os_helper, is_emscripten
++from test.support import warnings_helper
+ from test.support.script_helper import assert_python_ok
+ from test.support.os_helper import FakePath
+ 
+@@ -446,6 +446,19 @@ class CommonTest(GenericTest):
+                   os.fsencode('$bar%s bar' % nonascii))
+             check(b'$spam}bar', os.fsencode('%s}bar' % nonascii))
+ 
++    @support.requires_resource('cpu')
++    def test_expandvars_large(self):
++        expandvars = self.pathmodule.expandvars
++        with os_helper.EnvironmentVarGuard() as env:
++            env.clear()
++            env["A"] = "B"
++            n = 100_000
++            self.assertEqual(expandvars('$A'*n), 'B'*n)
++            self.assertEqual(expandvars('${A}'*n), 'B'*n)
++            self.assertEqual(expandvars('$A!'*n), 'B!'*n)
++            self.assertEqual(expandvars('${A}A'*n), 'BA'*n)
++            self.assertEqual(expandvars('${'*10*n), '${'*10*n)
++
+     def test_abspath(self):
+         self.assertIn("foo", self.pathmodule.abspath("foo"))
+         with warnings.catch_warnings():
+@@ -503,7 +516,7 @@ class CommonTest(GenericTest):
+             # directory (when the bytes name is used).
+             and sys.platform not in {
+                 "win32", "emscripten", "wasi"
+-            } and not is_apple
++            } and not support.is_apple
+         ):
+             name = os_helper.TESTFN_UNDECODABLE
+         elif os_helper.TESTFN_NONASCII:
+--- python3.13-3.13.5.orig/Lib/test/test_ntpath.py
++++ python3.13-3.13.5/Lib/test/test_ntpath.py
+@@ -8,8 +8,7 @@ import unittest
+ import warnings
+ from ntpath import ALLOW_MISSING
+ from test import support
+-from test.support import cpython_only, os_helper
+-from test.support import TestFailed, is_emscripten
++from test.support import os_helper, is_emscripten
+ from test.support.os_helper import FakePath
+ from test import test_genericpath
+ from tempfile import TemporaryFile
+@@ -59,7 +58,7 @@ def tester(fn, wantResult):
+     fn = fn.replace("\\", "\\\\")
+     gotResult = eval(fn)
+     if wantResult != gotResult and _norm(wantResult) != _norm(gotResult):
+-        raise TestFailed("%s should return: %s but returned: %s" \
++        raise support.TestFailed("%s should return: %s but returned: %s" \
+               %(str(fn), str(wantResult), str(gotResult)))
+ 
+     # then with bytes
+@@ -75,7 +74,7 @@ def tester(fn, wantResult):
+         warnings.simplefilter("ignore", DeprecationWarning)
+         gotResult = eval(fn)
+     if _norm(wantResult) != _norm(gotResult):
+-        raise TestFailed("%s should return: %s but returned: %s" \
++        raise support.TestFailed("%s should return: %s but returned: %s" \
+               %(str(fn), str(wantResult), repr(gotResult)))
+ 
+ 
+@@ -1022,6 +1021,19 @@ class TestNtpath(NtpathTestCase):
+             check('%spam%bar', '%sbar' % nonascii)
+             check('%{}%bar'.format(nonascii), 'ham%sbar' % nonascii)
+ 
++    @support.requires_resource('cpu')
++    def test_expandvars_large(self):
++        expandvars = ntpath.expandvars
++        with os_helper.EnvironmentVarGuard() as env:
++            env.clear()
++            env["A"] = "B"
++            n = 100_000
++            self.assertEqual(expandvars('%A%'*n), 'B'*n)
++            self.assertEqual(expandvars('%A%A'*n), 'BA'*n)
++            self.assertEqual(expandvars("''"*n + '%%'), "''"*n + '%')
++            self.assertEqual(expandvars("%%"*n), "%"*n)
++            self.assertEqual(expandvars("$$"*n), "$"*n)
++
+     def test_expanduser(self):
+         tester('ntpath.expanduser("test")', 'test')
+ 
+@@ -1440,7 +1452,7 @@ class TestNtpath(NtpathTestCase):
+         self.assertTrue(os.path.exists(r"\\.\CON"))
+ 
+     @unittest.skipIf(sys.platform != 'win32', "Fast paths are only for win32")
+-    @cpython_only
++    @support.cpython_only
+     def test_fast_paths_in_use(self):
+         # There are fast paths of these functions implemented in posixmodule.c.
+         # Confirm that they are being used, and not the Python fallbacks in
diff -Nru python3.13-3.13.5/debian/patches/CVE-2025-8194.patch python3.13-3.13.5/debian/patches/CVE-2025-8194.patch
--- python3.13-3.13.5/debian/patches/CVE-2025-8194.patch        1970-01-01 01:00:00.000000000 +0100
+++ python3.13-3.13.5/debian/patches/CVE-2025-8194.patch        2026-04-06 14:15:29.000000000 +0200
@@ -0,0 +1,191 @@
+From cdae923ffe187d6ef916c0f665a31249619193fe Mon Sep 17 00:00:00 2001
+From: "Miss Islington (bot)"
+ <[email protected]>
+Date: Mon, 28 Jul 2025 17:59:33 +0200
+Subject: [PATCH] [3.13] gh-130577: tarfile now validates archives to ensure
+ member offsets are non-negative (GH-137027) (#137170)
+
+--- python3.13-3.13.5.orig/Lib/tarfile.py
++++ python3.13-3.13.5/Lib/tarfile.py
+@@ -1657,6 +1657,9 @@ class TarInfo(object):
+         """Round up a byte count by BLOCKSIZE and return it,
+            e.g. _block(834) => 1024.
+         """
++        # Only non-negative offsets are allowed
++        if count < 0:
++            raise InvalidHeaderError("invalid offset")
+         blocks, remainder = divmod(count, BLOCKSIZE)
+         if remainder:
+             blocks += 1
+--- python3.13-3.13.5.orig/Lib/test/test_tarfile.py
++++ python3.13-3.13.5/Lib/test/test_tarfile.py
+@@ -50,6 +50,7 @@ bz2name = os.path.join(TEMPDIR, "testtar
+ xzname = os.path.join(TEMPDIR, "testtar.tar.xz")
+ tmpname = os.path.join(TEMPDIR, "tmp.tar")
+ dotlessname = os.path.join(TEMPDIR, "testtar")
++SPACE = b" "
+ 
+ sha256_regtype = (
+     "e09e4bc8b3c9d9177e77256353b36c159f5f040531bbd4b024a8f9b9196c71ce"
+@@ -4597,6 +4598,161 @@ class OverwriteTests(archiver_tests.Over
+         ar.extractall(self.testdir, filter='fully_trusted')
+ 
+ 
++class OffsetValidationTests(unittest.TestCase):
++    tarname = tmpname
++    invalid_posix_header = (
++        # name: 100 bytes
++        tarfile.NUL * tarfile.LENGTH_NAME
++        # mode, space, null terminator: 8 bytes
++        + b"000755" + SPACE + tarfile.NUL
++        # uid, space, null terminator: 8 bytes
++        + b"000001" + SPACE + tarfile.NUL
++        # gid, space, null terminator: 8 bytes
++        + b"000001" + SPACE + tarfile.NUL
++        # size, space: 12 bytes
++        + b"\xff" * 11 + SPACE
++        # mtime, space: 12 bytes
++        + tarfile.NUL * 11 + SPACE
++        # chksum: 8 bytes
++        + b"0011407" + tarfile.NUL
++        # type: 1 byte
++        + tarfile.REGTYPE
++        # linkname: 100 bytes
++        + tarfile.NUL * tarfile.LENGTH_LINK
++        # magic: 6 bytes, version: 2 bytes
++        + tarfile.POSIX_MAGIC
++        # uname: 32 bytes
++        + tarfile.NUL * 32
++        # gname: 32 bytes
++        + tarfile.NUL * 32
++        # devmajor, space, null terminator: 8 bytes
++        + tarfile.NUL * 6 + SPACE + tarfile.NUL
++        # devminor, space, null terminator: 8 bytes
++        + tarfile.NUL * 6 + SPACE + tarfile.NUL
++        # prefix: 155 bytes
++        + tarfile.NUL * tarfile.LENGTH_PREFIX
++        # padding: 12 bytes
++        + tarfile.NUL * 12
++    )
++    invalid_gnu_header = (
++        # name: 100 bytes
++        tarfile.NUL * tarfile.LENGTH_NAME
++        # mode, null terminator: 8 bytes
++        + b"0000755" + tarfile.NUL
++        # uid, null terminator: 8 bytes
++        + b"0000001" + tarfile.NUL
++        # gid, space, null terminator: 8 bytes
++        + b"0000001" + tarfile.NUL
++        # size, space: 12 bytes
++        + b"\xff" * 11 + SPACE
++        # mtime, space: 12 bytes
++        + tarfile.NUL * 11 + SPACE
++        # chksum: 8 bytes
++        + b"0011327" + tarfile.NUL
++        # type: 1 byte
++        + tarfile.REGTYPE
++        # linkname: 100 bytes
++        + tarfile.NUL * tarfile.LENGTH_LINK
++        # magic: 8 bytes
++        + tarfile.GNU_MAGIC
++        # uname: 32 bytes
++        + tarfile.NUL * 32
++        # gname: 32 bytes
++        + tarfile.NUL * 32
++        # devmajor, null terminator: 8 bytes
++        + tarfile.NUL * 8
++        # devminor, null terminator: 8 bytes
++        + tarfile.NUL * 8
++        # padding: 167 bytes
++        + tarfile.NUL * 167
++    )
++    invalid_v7_header = (
++        # name: 100 bytes
++        tarfile.NUL * tarfile.LENGTH_NAME
++        # mode, space, null terminator: 8 bytes
++        + b"000755" + SPACE + tarfile.NUL
++        # uid, space, null terminator: 8 bytes
++        + b"000001" + SPACE + tarfile.NUL
++        # gid, space, null terminator: 8 bytes
++        + b"000001" + SPACE + tarfile.NUL
++        # size, space: 12 bytes
++        + b"\xff" * 11 + SPACE
++        # mtime, space: 12 bytes
++        + tarfile.NUL * 11 + SPACE
++        # chksum: 8 bytes
++        + b"0010070" + tarfile.NUL
++        # type: 1 byte
++        + tarfile.REGTYPE
++        # linkname: 100 bytes
++        + tarfile.NUL * tarfile.LENGTH_LINK
++        # padding: 255 bytes
++        + tarfile.NUL * 255
++    )
++    valid_gnu_header = tarfile.TarInfo("filename").tobuf(tarfile.GNU_FORMAT)
++    data_block = b"\xff" * tarfile.BLOCKSIZE
++
++    def _write_buffer(self, buffer):
++        with open(self.tarname, "wb") as f:
++            f.write(buffer)
++
++    def _get_members(self, ignore_zeros=None):
++        with open(self.tarname, "rb") as f:
++            with tarfile.open(
++                mode="r", fileobj=f, ignore_zeros=ignore_zeros
++            ) as tar:
++                return tar.getmembers()
++
++    def _assert_raises_read_error_exception(self):
++        with self.assertRaisesRegex(
++            tarfile.ReadError, "file could not be opened successfully"
++        ):
++            self._get_members()
++
++    def test_invalid_offset_header_validations(self):
++        for tar_format, invalid_header in (
++            ("posix", self.invalid_posix_header),
++            ("gnu", self.invalid_gnu_header),
++            ("v7", self.invalid_v7_header),
++        ):
++            with self.subTest(format=tar_format):
++                self._write_buffer(invalid_header)
++                self._assert_raises_read_error_exception()
++
++    def test_early_stop_at_invalid_offset_header(self):
++        buffer = self.valid_gnu_header + self.invalid_gnu_header + self.valid_gnu_header
++        self._write_buffer(buffer)
++        members = self._get_members()
++        self.assertEqual(len(members), 1)
++        self.assertEqual(members[0].name, "filename")
++        self.assertEqual(members[0].offset, 0)
++
++    def test_ignore_invalid_archive(self):
++        # 3 invalid headers with their respective data
++        buffer = (self.invalid_gnu_header + self.data_block) * 3
++        self._write_buffer(buffer)
++        members = self._get_members(ignore_zeros=True)
++        self.assertEqual(len(members), 0)
++
++    def test_ignore_invalid_offset_headers(self):
++        for first_block, second_block, expected_offset in (
++            (
++                (self.valid_gnu_header),
++                (self.invalid_gnu_header + self.data_block),
++                0,
++            ),
++            (
++                (self.invalid_gnu_header + self.data_block),
++                (self.valid_gnu_header),
++                1024,
++            ),
++        ):
++            self._write_buffer(first_block + second_block)
++            members = self._get_members(ignore_zeros=True)
++            self.assertEqual(len(members), 1)
++            self.assertEqual(members[0].name, "filename")
++            self.assertEqual(members[0].offset, expected_offset)
++
++
+ def setUpModule():
+     os_helper.unlink(TEMPDIR)
+     os.makedirs(TEMPDIR)
diff -Nru python3.13-3.13.5/debian/patches/CVE-2025-8291.patch python3.13-3.13.5/debian/patches/CVE-2025-8291.patch
--- python3.13-3.13.5/debian/patches/CVE-2025-8291.patch        1970-01-01 01:00:00.000000000 +0100
+++ python3.13-3.13.5/debian/patches/CVE-2025-8291.patch        2026-04-06 14:16:17.000000000 +0200
@@ -0,0 +1,282 @@
+From 333d4a6f4967d3ace91492a39ededbcf3faa76a6 Mon Sep 17 00:00:00 2001
+From: Serhiy Storchaka <[email protected]>
+Date: Tue, 7 Oct 2025 20:55:44 +0300
+Subject: [PATCH] [3.13] gh-139700: Check consistency of the zip64 end of
+ central directory record (GH-139702) (GH-139708)
+
+--- python3.13-3.13.5.orig/Lib/test/test_zipfile/test_core.py
++++ python3.13-3.13.5/Lib/test/test_zipfile/test_core.py
+@@ -884,6 +884,8 @@ class StoredTestZip64InSmallFiles(Abstra
+         self, file_size_64_set=False, file_size_extra=False,
+         compress_size_64_set=False, compress_size_extra=False,
+         header_offset_64_set=False, header_offset_extra=False,
++        extensible_data=b'',
++        end_of_central_dir_size=None, offset_to_end_of_central_dir=None,
+     ):
+         """Generate bytes sequence for a zip with (incomplete) zip64 data.
+ 
+@@ -937,6 +939,12 @@ class StoredTestZip64InSmallFiles(Abstra
+ 
+         central_dir_size = struct.pack('<Q', 58 + 8 * 
len(central_zip64_fields))
+         offset_to_central_dir = struct.pack('<Q', 50 + 8 * 
len(local_zip64_fields))
++        if end_of_central_dir_size is None:
++            end_of_central_dir_size = 44 + len(extensible_data)
++        if offset_to_end_of_central_dir is None:
++            offset_to_end_of_central_dir = (108
++                                            + 8 * len(local_zip64_fields)
++                                            + 8 * len(central_zip64_fields))
+ 
+         local_extra_length = struct.pack("<H", 4 + 8 * 
len(local_zip64_fields))
+         central_extra_length = struct.pack("<H", 4 + 8 * 
len(central_zip64_fields))
+@@ -965,14 +973,17 @@ class StoredTestZip64InSmallFiles(Abstra
+             + filename
+             + central_extra
+             # Zip64 end of central directory
+-            + b"PK\x06\x06,\x00\x00\x00\x00\x00\x00\x00-\x00-"
+-            + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00"
++            + b"PK\x06\x06"
++            + struct.pack('<Q', end_of_central_dir_size)
++            + 
b"-\x00-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00"
+             + b"\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00"
+             + central_dir_size
+             + offset_to_central_dir
++            + extensible_data
+             # Zip64 end of central directory locator
+-            + b"PK\x06\x07\x00\x00\x00\x00l\x00\x00\x00\x00\x00\x00\x00\x01"
+-            + b"\x00\x00\x00"
++            + b"PK\x06\x07\x00\x00\x00\x00"
++            + struct.pack('<Q', offset_to_end_of_central_dir)
++            + b"\x01\x00\x00\x00"
+             # end of central directory
+             + b"PK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x00:\x00\x00\x002\x00"
+             + b"\x00\x00\x00\x00"
+@@ -1003,6 +1014,7 @@ class StoredTestZip64InSmallFiles(Abstra
+         with self.assertRaises(zipfile.BadZipFile) as e:
+             zipfile.ZipFile(io.BytesIO(missing_file_size_extra))
+         self.assertIn('file size', str(e.exception).lower())
++        
self.assertTrue(zipfile.is_zipfile(io.BytesIO(missing_file_size_extra)))
+ 
+         # zip64 file size present, zip64 compress size present, one field in
+         # extra, expecting two, equals missing compress size.
+@@ -1014,6 +1026,7 @@ class StoredTestZip64InSmallFiles(Abstra
+         with self.assertRaises(zipfile.BadZipFile) as e:
+             zipfile.ZipFile(io.BytesIO(missing_compress_size_extra))
+         self.assertIn('compress size', str(e.exception).lower())
++        
self.assertTrue(zipfile.is_zipfile(io.BytesIO(missing_compress_size_extra)))
+ 
+         # zip64 compress size present, no fields in extra, expecting one,
+         # equals missing compress size.
+@@ -1023,6 +1036,7 @@ class StoredTestZip64InSmallFiles(Abstra
+         with self.assertRaises(zipfile.BadZipFile) as e:
+             zipfile.ZipFile(io.BytesIO(missing_compress_size_extra))
+         self.assertIn('compress size', str(e.exception).lower())
++        
self.assertTrue(zipfile.is_zipfile(io.BytesIO(missing_compress_size_extra)))
+ 
+         # zip64 file size present, zip64 compress size present, zip64 header
+         # offset present, two fields in extra, expecting three, equals missing
+@@ -1037,6 +1051,7 @@ class StoredTestZip64InSmallFiles(Abstra
+         with self.assertRaises(zipfile.BadZipFile) as e:
+             zipfile.ZipFile(io.BytesIO(missing_header_offset_extra))
+         self.assertIn('header offset', str(e.exception).lower())
++        
self.assertTrue(zipfile.is_zipfile(io.BytesIO(missing_header_offset_extra)))
+ 
+         # zip64 compress size present, zip64 header offset present, one field
+         # in extra, expecting two, equals missing header offset
+@@ -1049,6 +1064,7 @@ class StoredTestZip64InSmallFiles(Abstra
+         with self.assertRaises(zipfile.BadZipFile) as e:
+             zipfile.ZipFile(io.BytesIO(missing_header_offset_extra))
+         self.assertIn('header offset', str(e.exception).lower())
++        
self.assertTrue(zipfile.is_zipfile(io.BytesIO(missing_header_offset_extra)))
+ 
+         # zip64 file size present, zip64 header offset present, one field in
+         # extra, expecting two, equals missing header offset
+@@ -1061,6 +1077,7 @@ class StoredTestZip64InSmallFiles(Abstra
+         with self.assertRaises(zipfile.BadZipFile) as e:
+             zipfile.ZipFile(io.BytesIO(missing_header_offset_extra))
+         self.assertIn('header offset', str(e.exception).lower())
++        
self.assertTrue(zipfile.is_zipfile(io.BytesIO(missing_header_offset_extra)))
+ 
+         # zip64 header offset present, no fields in extra, expecting one,
+         # equals missing header offset
+@@ -1072,6 +1089,63 @@ class StoredTestZip64InSmallFiles(Abstra
+         with self.assertRaises(zipfile.BadZipFile) as e:
+             zipfile.ZipFile(io.BytesIO(missing_header_offset_extra))
+         self.assertIn('header offset', str(e.exception).lower())
++        
self.assertTrue(zipfile.is_zipfile(io.BytesIO(missing_header_offset_extra)))
++
++    def test_bad_zip64_end_of_central_dir(self):
++        zipdata = self.make_zip64_file(end_of_central_dir_size=0)
++        with self.assertRaisesRegex(zipfile.BadZipFile, 'Corrupt.*record'):
++            zipfile.ZipFile(io.BytesIO(zipdata))
++        self.assertFalse(zipfile.is_zipfile(io.BytesIO(zipdata)))
++
++        zipdata = self.make_zip64_file(end_of_central_dir_size=100)
++        with self.assertRaisesRegex(zipfile.BadZipFile, 'Corrupt.*record'):
++            zipfile.ZipFile(io.BytesIO(zipdata))
++        self.assertFalse(zipfile.is_zipfile(io.BytesIO(zipdata)))
++
++        zipdata = self.make_zip64_file(offset_to_end_of_central_dir=0)
++        with self.assertRaisesRegex(zipfile.BadZipFile, 'Corrupt.*record'):
++            zipfile.ZipFile(io.BytesIO(zipdata))
++        self.assertFalse(zipfile.is_zipfile(io.BytesIO(zipdata)))
++
++        zipdata = self.make_zip64_file(offset_to_end_of_central_dir=1000)
++        with self.assertRaisesRegex(zipfile.BadZipFile, 'Corrupt.*locator'):
++            zipfile.ZipFile(io.BytesIO(zipdata))
++        self.assertFalse(zipfile.is_zipfile(io.BytesIO(zipdata)))
++
++    def test_zip64_end_of_central_dir_record_not_found(self):
++        zipdata = self.make_zip64_file()
++        zipdata = zipdata.replace(b"PK\x06\x06", b'\x00'*4)
++        with self.assertRaisesRegex(zipfile.BadZipFile, 'record not found'):
++            zipfile.ZipFile(io.BytesIO(zipdata))
++        self.assertFalse(zipfile.is_zipfile(io.BytesIO(zipdata)))
++
++        zipdata = self.make_zip64_file(
++            extensible_data=b'\xca\xfe\x04\x00\x00\x00data')
++        zipdata = zipdata.replace(b"PK\x06\x06", b'\x00'*4)
++        with self.assertRaisesRegex(zipfile.BadZipFile, 'record not found'):
++            zipfile.ZipFile(io.BytesIO(zipdata))
++        self.assertFalse(zipfile.is_zipfile(io.BytesIO(zipdata)))
++
++    def test_zip64_extensible_data(self):
++        # These values are what is set in the make_zip64_file method.
++        expected_file_size = 8
++        expected_compress_size = 8
++        expected_header_offset = 0
++        expected_content = b"test1234"
++
++        zipdata = self.make_zip64_file(
++            extensible_data=b'\xca\xfe\x04\x00\x00\x00data')
++        with zipfile.ZipFile(io.BytesIO(zipdata)) as zf:
++            zinfo = zf.infolist()[0]
++            self.assertEqual(zinfo.file_size, expected_file_size)
++            self.assertEqual(zinfo.compress_size, expected_compress_size)
++            self.assertEqual(zinfo.header_offset, expected_header_offset)
++            self.assertEqual(zf.read(zinfo), expected_content)
++        self.assertTrue(zipfile.is_zipfile(io.BytesIO(zipdata)))
++
++        with self.assertRaisesRegex(zipfile.BadZipFile, 'record not found'):
++            zipfile.ZipFile(io.BytesIO(b'prepended' + zipdata))
++        self.assertFalse(zipfile.is_zipfile(io.BytesIO(b'prepended' + 
zipdata)))
+ 
+     def test_generated_valid_zip64_extra(self):
+         # These values are what is set in the make_zip64_file method.
+--- python3.13-3.13.5.orig/Lib/zipfile/__init__.py
++++ python3.13-3.13.5/Lib/zipfile/__init__.py
+@@ -245,7 +245,7 @@ def is_zipfile(filename):
+         else:
+             with open(filename, "rb") as fp:
+                 result = _check_zipfile(fp)
+-    except OSError:
++    except (OSError, BadZipFile):
+         pass
+     return result
+ 
+@@ -253,16 +253,15 @@ def _EndRecData64(fpin, offset, endrec):
+     """
+     Read the ZIP64 end-of-archive records and use that to update endrec
+     """
+-    try:
+-        fpin.seek(offset - sizeEndCentDir64Locator, 2)
+-    except OSError:
+-        # If the seek fails, the file is not large enough to contain a ZIP64
++    offset -= sizeEndCentDir64Locator
++    if offset < 0:
++        # The file is not large enough to contain a ZIP64
+         # end-of-archive record, so just return the end record we were given.
+         return endrec
+-
++    fpin.seek(offset)
+     data = fpin.read(sizeEndCentDir64Locator)
+     if len(data) != sizeEndCentDir64Locator:
+-        return endrec
++        raise OSError("Unknown I/O error")
+     sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, 
data)
+     if sig != stringEndArchive64Locator:
+         return endrec
+@@ -270,16 +269,33 @@ def _EndRecData64(fpin, offset, endrec):
+     if diskno != 0 or disks > 1:
+         raise BadZipFile("zipfiles that span multiple disks are not 
supported")
+ 
+-    # Assume no 'zip64 extensible data'
+-    fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
++    offset -= sizeEndCentDir64
++    if reloff > offset:
++        raise BadZipFile("Corrupt zip64 end of central directory locator")
++    # First, check the assumption that there is no prepended data.
++    fpin.seek(reloff)
++    extrasz = offset - reloff
+     data = fpin.read(sizeEndCentDir64)
+     if len(data) != sizeEndCentDir64:
+-        return endrec
++        raise OSError("Unknown I/O error")
++    if not data.startswith(stringEndArchive64) and reloff != offset:
++        # Since we already have seen the Zip64 EOCD Locator, it's
++        # possible we got here because there is prepended data.
++        # Assume no 'zip64 extensible data'
++        fpin.seek(offset)
++        extrasz = 0
++        data = fpin.read(sizeEndCentDir64)
++        if len(data) != sizeEndCentDir64:
++            raise OSError("Unknown I/O error")
++    if not data.startswith(stringEndArchive64):
++        raise BadZipFile("Zip64 end of central directory record not found")
++
+     sig, sz, create_version, read_version, disk_num, disk_dir, \
+         dircount, dircount2, dirsize, diroffset = \
+         struct.unpack(structEndArchive64, data)
+-    if sig != stringEndArchive64:
+-        return endrec
++    if (diroffset + dirsize != reloff or
++        sz + 12 != sizeEndCentDir64 + extrasz):
++        raise BadZipFile("Corrupt zip64 end of central directory record")
+ 
+     # Update the original endrec using data from the ZIP64 record
+     endrec[_ECD_SIGNATURE] = sig
+@@ -289,6 +305,7 @@ def _EndRecData64(fpin, offset, endrec):
+     endrec[_ECD_ENTRIES_TOTAL] = dircount2
+     endrec[_ECD_SIZE] = dirsize
+     endrec[_ECD_OFFSET] = diroffset
++    endrec[_ECD_LOCATION] = offset - extrasz
+     return endrec
+ 
+ 
+@@ -322,7 +339,7 @@ def _EndRecData(fpin):
+         endrec.append(filesize - sizeEndCentDir)
+ 
+         # Try to read the "Zip64 end of central directory" structure
+-        return _EndRecData64(fpin, -sizeEndCentDir, endrec)
++        return _EndRecData64(fpin, filesize - sizeEndCentDir, endrec)
+ 
+     # Either this is not a ZIP file, or it is a ZIP file with an archive
+     # comment.  Search the end of the file for the "end of central directory"
+@@ -346,8 +363,7 @@ def _EndRecData(fpin):
+         endrec.append(maxCommentStart + start)
+ 
+         # Try to read the "Zip64 end of central directory" structure
+-        return _EndRecData64(fpin, maxCommentStart + start - filesize,
+-                             endrec)
++        return _EndRecData64(fpin, maxCommentStart + start, endrec)
+ 
+     # Unable to find a valid end of central directory structure
+     return None
+@@ -1458,9 +1474,6 @@ class ZipFile:
+ 
+         # "concat" is zero, unless zip was concatenated to another file
+         concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
+-        if endrec[_ECD_SIGNATURE] == stringEndArchive64:
+-            # If Zip64 extension structures are present, account for them
+-            concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
+ 
+         if self.debug > 2:
+             inferred = concat + offset_cd
+@@ -2082,7 +2095,7 @@ class ZipFile:
+                                    " would require ZIP64 extensions")
+             zip64endrec = struct.pack(
+                 structEndArchive64, stringEndArchive64,
+-                44, 45, 45, 0, 0, centDirCount, centDirCount,
++                sizeEndCentDir64 - 12, 45, 45, 0, 0, centDirCount, 
centDirCount,
+                 centDirSize, centDirOffset)
+             self.fp.write(zip64endrec)
+ 
diff -Nru python3.13-3.13.5/debian/patches/CVE-2026-0672.patch 
python3.13-3.13.5/debian/patches/CVE-2026-0672.patch
--- python3.13-3.13.5/debian/patches/CVE-2026-0672.patch        1970-01-01 
01:00:00.000000000 +0100
+++ python3.13-3.13.5/debian/patches/CVE-2026-0672.patch        2026-04-06 
14:18:35.000000000 +0200
@@ -0,0 +1,161 @@
+From 918387e4912d12ffc166c8f2a38df92b6ec756ca Mon Sep 17 00:00:00 2001
+From: "Miss Islington (bot)"
+ <[email protected]>
+Date: Sun, 25 Jan 2026 18:09:22 +0100
+Subject: [PATCH] [3.13] gh-143919: Reject control characters in http cookies
+ (#144090)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+--- python3.13-3.13.5.orig/Doc/library/http.cookies.rst
++++ python3.13-3.13.5/Doc/library/http.cookies.rst
+@@ -272,9 +272,9 @@ The following example demonstrates how t
+    Set-Cookie: chips=ahoy
+    Set-Cookie: vienna=finger
+    >>> C = cookies.SimpleCookie()
+-   >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
++   >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=;";')
+    >>> print(C)
+-   Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
++   Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=;"
+    >>> C = cookies.SimpleCookie()
+    >>> C["oreo"] = "doublestuff"
+    >>> C["oreo"]["path"] = "/"
+--- python3.13-3.13.5.orig/Lib/http/cookies.py
++++ python3.13-3.13.5/Lib/http/cookies.py
+@@ -87,9 +87,9 @@ within a string.  Escaped quotation mark
+ such trickeries do not confuse it.
+ 
+    >>> C = cookies.SimpleCookie()
+-   >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
++   >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=;";')
+    >>> print(C)
+-   Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
++   Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=;"
+ 
+ Each element of the Cookie also supports all of the RFC 2109
+ Cookie attributes.  Here's an example which sets the Path
+@@ -170,6 +170,15 @@ _Translator.update({
+ })
+ 
+ _is_legal_key = re.compile('[%s]+' % re.escape(_LegalChars)).fullmatch
++_control_character_re = re.compile(r'[\x00-\x1F\x7F]')
++
++
++def _has_control_character(*val):
++    """Detects control characters within a value.
++    Supports any type, as header values can be any type.
++    """
++    return any(_control_character_re.search(str(v)) for v in val)
++
+ 
+ def _quote(str):
+     r"""Quote a string for use in a cookie header.
+@@ -292,12 +301,16 @@ class Morsel(dict):
+         K = K.lower()
+         if not K in self._reserved:
+             raise CookieError("Invalid attribute %r" % (K,))
++        if _has_control_character(K, V):
++            raise CookieError(f"Control characters are not allowed in cookies 
{K!r} {V!r}")
+         dict.__setitem__(self, K, V)
+ 
+     def setdefault(self, key, val=None):
+         key = key.lower()
+         if key not in self._reserved:
+             raise CookieError("Invalid attribute %r" % (key,))
++        if _has_control_character(key, val):
++            raise CookieError("Control characters are not allowed in cookies 
%r %r" % (key, val,))
+         return dict.setdefault(self, key, val)
+ 
+     def __eq__(self, morsel):
+@@ -333,6 +346,9 @@ class Morsel(dict):
+             raise CookieError('Attempt to set a reserved key %r' % (key,))
+         if not _is_legal_key(key):
+             raise CookieError('Illegal key %r' % (key,))
++        if _has_control_character(key, val, coded_val):
++            raise CookieError(
++                "Control characters are not allowed in cookies %r %r %r" % 
(key, val, coded_val,))
+ 
+         # It's a good key, so save it.
+         self._key = key
+@@ -486,7 +502,10 @@ class BaseCookie(dict):
+         result = []
+         items = sorted(self.items())
+         for key, value in items:
+-            result.append(value.output(attrs, header))
++            value_output = value.output(attrs, header)
++            if _has_control_character(value_output):
++                raise CookieError("Control characters are not allowed in 
cookies")
++            result.append(value_output)
+         return sep.join(result)
+ 
+     __str__ = output
+--- python3.13-3.13.5.orig/Lib/test/test_http_cookies.py
++++ python3.13-3.13.5/Lib/test/test_http_cookies.py
+@@ -18,10 +18,10 @@ class CookieTests(unittest.TestCase, Ext
+              'repr': "<SimpleCookie: chips='ahoy' vienna='finger'>",
+              'output': 'Set-Cookie: chips=ahoy\nSet-Cookie: vienna=finger'},
+ 
+-            {'data': 'keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"',
+-             'dict': {'keebler' : 'E=mc2; L="Loves"; fudge=\012;'},
+-             'repr': '''<SimpleCookie: keebler='E=mc2; L="Loves"; 
fudge=\\n;'>''',
+-             'output': 'Set-Cookie: keebler="E=mc2; L=\\"Loves\\"; 
fudge=\\012;"'},
++            {'data': 'keebler="E=mc2; L=\\"Loves\\"; fudge=;"',
++             'dict': {'keebler' : 'E=mc2; L="Loves"; fudge=;'},
++             'repr': '''<SimpleCookie: keebler='E=mc2; L="Loves"; 
fudge=;'>''',
++             'output': 'Set-Cookie: keebler="E=mc2; L=\\"Loves\\"; fudge=;"'},
+ 
+             # Check illegal cookies that have an '=' char in an unquoted value
+             {'data': 'keebler=E=mc2',
+@@ -564,6 +564,50 @@ class MorselTests(unittest.TestCase):
+                 r'Set-Cookie: key=coded_val; '
+                 r'expires=\w+, \d+ \w+ \d+ \d+:\d+:\d+ \w+')
+ 
++    def test_control_characters(self):
++        for c0 in support.control_characters_c0():
++            morsel = cookies.Morsel()
++
++            # .__setitem__()
++            with self.assertRaises(cookies.CookieError):
++                morsel[c0] = "val"
++            with self.assertRaises(cookies.CookieError):
++                morsel["path"] = c0
++
++            # .setdefault()
++            with self.assertRaises(cookies.CookieError):
++                morsel.setdefault("path", c0)
++            with self.assertRaises(cookies.CookieError):
++                morsel.setdefault(c0, "val")
++
++            # .set()
++            with self.assertRaises(cookies.CookieError):
++                morsel.set(c0, "val", "coded-value")
++            with self.assertRaises(cookies.CookieError):
++                morsel.set("path", c0, "coded-value")
++            with self.assertRaises(cookies.CookieError):
++                morsel.set("path", "val", c0)
++
++    def test_control_characters_output(self):
++        # Tests that even if the internals of Morsel are modified
++        # that a call to .output() has control character safeguards.
++        for c0 in support.control_characters_c0():
++            morsel = cookies.Morsel()
++            morsel.set("key", "value", "coded-value")
++            morsel._key = c0  # Override private variable.
++            cookie = cookies.SimpleCookie()
++            cookie["cookie"] = morsel
++            with self.assertRaises(cookies.CookieError):
++                cookie.output()
++
++            morsel = cookies.Morsel()
++            morsel.set("key", "value", "coded-value")
++            morsel._coded_value = c0  # Override private variable.
++            cookie = cookies.SimpleCookie()
++            cookie["cookie"] = morsel
++            with self.assertRaises(cookies.CookieError):
++                cookie.output()
++
+ 
+ def load_tests(loader, tests, pattern):
+     tests.addTest(doctest.DocTestSuite(cookies))
diff -Nru python3.13-3.13.5/debian/patches/CVE-2026-0865.patch 
python3.13-3.13.5/debian/patches/CVE-2026-0865.patch
--- python3.13-3.13.5/debian/patches/CVE-2026-0865.patch        1970-01-01 
01:00:00.000000000 +0100
+++ python3.13-3.13.5/debian/patches/CVE-2026-0865.patch        2026-04-06 
14:22:01.000000000 +0200
@@ -0,0 +1,156 @@
+From 22e4d55285cee52bc4dbe061324e5f30bd4dee58 Mon Sep 17 00:00:00 2001
+From: "Gregory P. Smith" <[email protected]>
+Date: Sat, 17 Jan 2026 10:23:57 -0800
+Subject: [PATCH] [3.13] gh-143916: Reject control characters in
+ wsgiref.headers.Headers (GH-143917) (#143973)
+
+
+From 83ecd18779f286d872f68bfce175651e407d9fff Mon Sep 17 00:00:00 2001
+From: "Miss Islington (bot)"
+ <[email protected]>
+Date: Mon, 23 Feb 2026 14:38:04 +0100
+Subject: [PATCH] [3.13] gh-143916: Allow HTAB in wsgiref header values
+ (GH-144762)
+
+
+--- python3.13-3.13.5.orig/Lib/test/support/__init__.py
++++ python3.13-3.13.5/Lib/test/support/__init__.py
+@@ -2811,3 +2811,10 @@ def linked_to_musl():
+     except (OSError, subprocess.CalledProcessError):
+         return False
+     return ('musl' in stdout)
++
++
++def control_characters_c0() -> list[str]:
++    """Returns a list of C0 control characters as strings.
++    C0 control characters defined as the byte range 0x00-0x1F, and 0x7F.
++    """
++    return [chr(c) for c in range(0x00, 0x20)] + ["\x7F"]
+--- python3.13-3.13.5.orig/Lib/test/test_wsgiref.py
++++ python3.13-3.13.5/Lib/test/test_wsgiref.py
+@@ -1,6 +1,6 @@
+ from unittest import mock
+ from test import support
+-from test.support import socket_helper
++from test.support import socket_helper, control_characters_c0
+ from test.test_httpservers import NoLogRequestHandler
+ from unittest import TestCase
+ from wsgiref.util import setup_testing_defaults
+@@ -503,6 +503,22 @@ class HeaderTests(TestCase):
+             '\r\n'
+         )
+ 
++    def testRaisesControlCharacters(self):
++        for c0 in control_characters_c0():
++            with self.subTest(c0):
++                headers = Headers()
++                self.assertRaises(ValueError, headers.__setitem__, 
f"key{c0}", "val")
++                self.assertRaises(ValueError, headers.add_header, f"key{c0}", 
"val", param="param")
++                # HTAB (\x09) is allowed in values, not names.
++                if c0 == "\t":
++                    headers["key"] = f"val{c0}"
++                    headers.add_header("key", f"val{c0}")
++                    headers.setdefault(f"key", f"val{c0}")
++                else:
++                    self.assertRaises(ValueError, headers.__setitem__, "key", 
f"val{c0}")
++                    self.assertRaises(ValueError, headers.add_header, "key", 
f"val{c0}", param="param")
++                    self.assertRaises(ValueError, headers.add_header, "key", 
"val", param=f"param{c0}")
++
+ class ErrorHandler(BaseCGIHandler):
+     """Simple handler subclass for testing BaseHandler"""
+ 
+--- python3.13-3.13.5.orig/Lib/wsgiref/headers.py
++++ python3.13-3.13.5/Lib/wsgiref/headers.py
+@@ -9,6 +9,11 @@ written by Barry Warsaw.
+ # existence of which force quoting of the parameter value.
+ import re
+ tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
++# Disallowed characters for headers and values.
++# HTAB (\x09) is allowed in header values, but
++# not in header names. (RFC 9110 Section 5.5)
++_name_disallowed_re = re.compile(r'[\x00-\x1F\x7F]')
++_value_disallowed_re = re.compile(r'[\x00-\x08\x0A-\x1F\x7F]')
+ 
+ def _formatparam(param, value=None, quote=1):
+     """Convenience function to format and return a key=value pair.
+@@ -35,12 +40,15 @@ class Headers:
+         self._headers = headers
+         if __debug__:
+             for k, v in headers:
+-                self._convert_string_type(k)
+-                self._convert_string_type(v)
++                self._convert_string_type(k, name=True)
++                self._convert_string_type(v, name=False)
+ 
+-    def _convert_string_type(self, value):
++    def _convert_string_type(self, value, *, name):
+         """Convert/check value type."""
+         if type(value) is str:
++            regex = (_name_disallowed_re if name else _value_disallowed_re)
++            if regex.search(value):
++                raise ValueError("Control characters not allowed in headers")
+             return value
+         raise AssertionError("Header names/values must be"
+             " of type str (got {0})".format(repr(value)))
+@@ -53,14 +61,14 @@ class Headers:
+         """Set the value of a header."""
+         del self[name]
+         self._headers.append(
+-            (self._convert_string_type(name), self._convert_string_type(val)))
++            (self._convert_string_type(name, name=True), 
self._convert_string_type(val, name=False)))
+ 
+     def __delitem__(self,name):
+         """Delete all occurrences of a header, if present.
+ 
+         Does *not* raise an exception if the header is missing.
+         """
+-        name = self._convert_string_type(name.lower())
++        name = self._convert_string_type(name.lower(), name=True)
+         self._headers[:] = [kv for kv in self._headers if kv[0].lower() != 
name]
+ 
+     def __getitem__(self,name):
+@@ -87,13 +95,13 @@ class Headers:
+         fields deleted and re-inserted are always appended to the header list.
+         If no fields exist with the given name, returns an empty list.
+         """
+-        name = self._convert_string_type(name.lower())
++        name = self._convert_string_type(name.lower(), name=True)
+         return [kv[1] for kv in self._headers if kv[0].lower()==name]
+ 
+ 
+     def get(self,name,default=None):
+         """Get the first header value for 'name', or return 'default'"""
+-        name = self._convert_string_type(name.lower())
++        name = self._convert_string_type(name.lower(), name=True)
+         for k,v in self._headers:
+             if k.lower()==name:
+                 return v
+@@ -148,8 +156,8 @@ class Headers:
+         and value 'value'."""
+         result = self.get(name)
+         if result is None:
+-            self._headers.append((self._convert_string_type(name),
+-                self._convert_string_type(value)))
++            self._headers.append((self._convert_string_type(name, name=True),
++                self._convert_string_type(value, name=False)))
+             return value
+         else:
+             return result
+@@ -172,13 +180,13 @@ class Headers:
+         """
+         parts = []
+         if _value is not None:
+-            _value = self._convert_string_type(_value)
++            _value = self._convert_string_type(_value, name=False)
+             parts.append(_value)
+         for k, v in _params.items():
+-            k = self._convert_string_type(k)
++            k = self._convert_string_type(k, name=True)
+             if v is None:
+                 parts.append(k.replace('_', '-'))
+             else:
+-                v = self._convert_string_type(v)
++                v = self._convert_string_type(v, name=False)
+                 parts.append(_formatparam(k.replace('_', '-'), v))
+-        self._headers.append((self._convert_string_type(_name), "; 
".join(parts)))
++        self._headers.append((self._convert_string_type(_name, name=True), "; 
".join(parts)))
diff -Nru python3.13-3.13.5/debian/patches/CVE-2026-1299.patch 
python3.13-3.13.5/debian/patches/CVE-2026-1299.patch
--- python3.13-3.13.5/debian/patches/CVE-2026-1299.patch        1970-01-01 
01:00:00.000000000 +0100
+++ python3.13-3.13.5/debian/patches/CVE-2026-1299.patch        2026-04-06 
14:23:05.000000000 +0200
@@ -0,0 +1,77 @@
+From 0a925ab591c45d6638f37b5e57796f36fa0e56d8 Mon Sep 17 00:00:00 2001
+From: "Miss Islington (bot)"
+ <[email protected]>
+Date: Sun, 25 Jan 2026 18:09:26 +0100
+Subject: [PATCH] [3.13] gh-144125: email: verify headers are sound in
+ BytesGenerator (#144181)
+
+--- python3.13-3.13.5.orig/Lib/email/generator.py
++++ python3.13-3.13.5/Lib/email/generator.py
+@@ -22,6 +22,7 @@ NL = '\n'  # XXX: no longer used by the
+ NLCRE = re.compile(r'\r\n|\r|\n')
+ fcre = re.compile(r'^From ', re.MULTILINE)
+ NEWLINE_WITHOUT_FWSP = re.compile(r'\r\n[^ \t]|\r[^ \n\t]|\n[^ \t]')
++NEWLINE_WITHOUT_FWSP_BYTES = re.compile(br'\r\n[^ \t]|\r[^ \n\t]|\n[^ \t]')
+ 
+ 
+ class Generator:
+@@ -429,7 +430,16 @@ class BytesGenerator(Generator):
+         # This is almost the same as the string version, except for handling
+         # strings with 8bit bytes.
+         for h, v in msg.raw_items():
+-            self._fp.write(self.policy.fold_binary(h, v))
++            folded = self.policy.fold_binary(h, v)
++            if self.policy.verify_generated_headers:
++                linesep = self.policy.linesep.encode()
++                if not folded.endswith(linesep):
++                    raise HeaderWriteError(
++                        f'folded header does not end with {linesep!r}: 
{folded!r}')
++                if 
NEWLINE_WITHOUT_FWSP_BYTES.search(folded.removesuffix(linesep)):
++                    raise HeaderWriteError(
++                        f'folded header contains newline: {folded!r}')
++            self._fp.write(folded)
+         # A blank line always separates headers from body
+         self.write(self._NL)
+ 
+--- python3.13-3.13.5.orig/Lib/test/test_email/test_generator.py
++++ python3.13-3.13.5/Lib/test/test_email/test_generator.py
+@@ -313,7 +313,7 @@ class TestGenerator(TestGeneratorBase, T
+         self.assertEqual(s.getvalue(), self.typ(expected))
+ 
+     def test_verify_generated_headers(self):
+-        """gh-121650: by default the generator prevents header injection"""
++        # gh-121650: by default the generator prevents header injection
+         class LiteralHeader(str):
+             name = 'Header'
+             def fold(self, **kwargs):
+@@ -334,6 +334,8 @@ class TestGenerator(TestGeneratorBase, T
+ 
+                 with self.assertRaises(email.errors.HeaderWriteError):
+                     message.as_string()
++                with self.assertRaises(email.errors.HeaderWriteError):
++                    message.as_bytes()
+ 
+ 
+ class TestBytesGenerator(TestGeneratorBase, TestEmailBase):
+--- python3.13-3.13.5.orig/Lib/test/test_email/test_policy.py
++++ python3.13-3.13.5/Lib/test/test_email/test_policy.py
+@@ -296,7 +296,7 @@ class PolicyAPITests(unittest.TestCase):
+                     policy.fold("Subject", subject)
+ 
+     def test_verify_generated_headers(self):
+-        """Turning protection off allows header injection"""
++        # Turning protection off allows header injection
+         policy = email.policy.default.clone(verify_generated_headers=False)
+         for text in (
+             'Header: Value\r\nBad: Injection\r\n',
+@@ -319,6 +319,10 @@ class PolicyAPITests(unittest.TestCase):
+                     message.as_string(),
+                     f"{text}\nBody",
+                 )
++                self.assertEqual(
++                    message.as_bytes(),
++                    f"{text}\nBody".encode(),
++                )
+ 
+     # XXX: Need subclassing tests.
+     # For adding subclassed objects, make sure the usual rules apply (subclass
diff -Nru python3.13-3.13.5/debian/patches/CVE-2026-2297.patch 
python3.13-3.13.5/debian/patches/CVE-2026-2297.patch
--- python3.13-3.13.5/debian/patches/CVE-2026-2297.patch        1970-01-01 
01:00:00.000000000 +0100
+++ python3.13-3.13.5/debian/patches/CVE-2026-2297.patch        2026-04-06 
14:24:01.000000000 +0200
@@ -0,0 +1,18 @@
+From 482d6f8bdba9da3725d272e8bb4a2d25fb6a603e Mon Sep 17 00:00:00 2001
+From: "Miss Islington (bot)"
+ <[email protected]>
+Date: Wed, 4 Mar 2026 21:17:04 +0100
+Subject: [PATCH] [3.13] gh-145506: Fixes CVE-2026-2297 by ensuring
+ SourcelessFileLoader uses io.open_code (GH-145507)
+
+--- python3.13-3.13.5.orig/Lib/importlib/_bootstrap_external.py
++++ python3.13-3.13.5/Lib/importlib/_bootstrap_external.py
+@@ -1213,7 +1213,7 @@ class FileLoader:
+ 
+     def get_data(self, path):
+         """Return the data from path as raw bytes."""
+-        if isinstance(self, (SourceLoader, ExtensionFileLoader)):
++        if isinstance(self, (SourceLoader, SourcelessFileLoader, 
ExtensionFileLoader)):
+             with _io.open_code(str(path)) as file:
+                 return file.read()
+         else:
diff -Nru python3.13-3.13.5/debian/patches/series 
python3.13-3.13.5/debian/patches/series
--- python3.13-3.13.5/debian/patches/series     2025-06-25 20:55:22.000000000 
+0200
+++ python3.13-3.13.5/debian/patches/series     2026-04-06 14:23:46.000000000 
+0200
@@ -28,3 +28,17 @@
 no-sphinx-8.2.diff
 Add-Debian-specific-documentation-path-to-IDLE-menu.diff
 revert-logger-disable.diff
+CVE-2025-11468.patch
+CVE-2025-12084.patch
+CVE-2025-13462.patch
+CVE-2025-13836.patch
+CVE-2025-13837.patch
+CVE-2025-6069.patch
+CVE-2025-6075.patch
+CVE-2025-8194.patch
+CVE-2025-8291.patch
+CVE-2025-15282.patch
+CVE-2026-0672.patch
+CVE-2026-0865.patch
+CVE-2026-1299.patch
+CVE-2026-2297.patch

Reply via email to