--- Begin Message ---
Package: release.debian.org
Severity: normal
Tags: buster
User: release.debian....@packages.debian.org
Usertags: pu
This is similar to the python2.7 update which landed in Buster 10.2;
the debdiff is below. All of these issues are already fixed in
bullseye/sid, but none of them had a dedicated bug report.
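As a quick sanity check of the backport, something along the lines of
the following can be run with the patched interpreter. This is only an
informal sketch mirroring the upstream tests quoted in the debdiff; the
script, its print labels and the DummyURLopener helper are illustrative
and not part of the upload. On an interpreter without these patches the
checks should report "NOT fixed" or fail outright.

# Informal smoke test for the backported fixes; expected behaviour is
# taken from the upstream commits and tests quoted in the debdiff below.
import http.client
import urllib.parse
import urllib.request
import warnings
from email.utils import parseaddr
from xmlrpc.server import DocXMLRPCServer

# CVE-2019-10160: a genuine port separator is still accepted, while a
# character that only turns into ':' after NFKC normalization is rejected.
urllib.parse.urlsplit('http://\u30d5\u309a:80')   # must not raise
try:
    urllib.parse.urlsplit('http://\u30d5\u309a\ufe1380')
    print('urlsplit: NOT fixed')
except ValueError:
    print('urlsplit: NFKC check active')

# CVE-2019-16056: an address with a second '@' no longer parses into a
# plausible-looking (realname, address) pair.
print('parseaddr:', parseaddr('a@b@c'))   # expected: ('', '')

# CVE-2019-9740 / CVE-2019-9947: control characters in the request
# target raise InvalidURL before anything is put on the wire.
conn = http.client.HTTPConnection('localhost')
try:
    conn.putrequest('GET', '/\r\nX-injected: header')
    print('http.client: NOT fixed')
except http.client.InvalidURL:
    print('http.client: control characters rejected')

# CVE-2019-9948: URLopener refuses the local_file:// scheme even when a
# subclass provides an open_local_file() handler (mirrors the new test).
class DummyURLopener(urllib.request.URLopener):
    def open_local_file(self, url):
        return url

with warnings.catch_warnings():
    warnings.simplefilter('ignore', DeprecationWarning)
    try:
        DummyURLopener().open('local_file://example')
        print('URLopener: NOT fixed')
    except OSError:
        print('URLopener: local_file:// rejected')

# CVE-2019-16935: the server title is HTML-escaped in the generated
# DocXMLRPCServer documentation page (no socket is bound here).
serv = DocXMLRPCServer(('localhost', 0), logRequests=False,
                       bind_and_activate=False)
serv.set_server_title('title<script>')
page = serv.generate_html_documentation()
serv.server_close()
print('DocXMLRPCServer: title escaped' if '&lt;script&gt;' in page
      else 'DocXMLRPCServer: NOT fixed')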
Cheers,
Moritz
diff -Nru python3.7-3.7.3/debian/changelog python3.7-3.7.3/debian/changelog
--- python3.7-3.7.3/debian/changelog 2019-04-03 07:39:12.000000000 +0200
+++ python3.7-3.7.3/debian/changelog 2019-12-20 18:01:46.000000000 +0100
@@ -1,3 +1,14 @@
+python3.7 (3.7.3-2+deb10u1) buster; urgency=medium
+
+ * CVE-2019-9740
+ * CVE-2019-9947
+ * CVE-2019-9948
+ * CVE-2019-10160
+ * CVE-2019-16056
+ * CVE-2019-16935
+
+ -- Moritz Mühlenhoff <j...@debian.org> Fri, 20 Dec 2019 19:57:59 +0100
+
python3.7 (3.7.3-2) unstable; urgency=medium
* d/p/arm-alignment.diff: Don't allow unaligned memory accesses in the
diff -Nru python3.7-3.7.3/debian/patches/CVE-2019-10160-1.diff python3.7-3.7.3/debian/patches/CVE-2019-10160-1.diff
--- python3.7-3.7.3/debian/patches/CVE-2019-10160-1.diff 1970-01-01 01:00:00.000000000 +0100
+++ python3.7-3.7.3/debian/patches/CVE-2019-10160-1.diff 2019-12-20 17:57:53.000000000 +0100
@@ -0,0 +1,59 @@
+From 4d723e76e1ad17e9e7d5e828e59bb47e76f2174b Mon Sep 17 00:00:00 2001
+From: "Miss Islington (bot)"
+ <31488909+miss-isling...@users.noreply.github.com>
+Date: Tue, 30 Apr 2019 05:21:02 -0700
+Subject: [PATCH] bpo-36742: Fixes handling of pre-normalization characters in
+ urlsplit() (GH-13017)
+
+(cherry picked from commit d537ab0ff9767ef024f26246899728f0116b1ec3)
+
+Co-authored-by: Steve Dower <steve.do...@python.org>
+---
+ Lib/test/test_urlparse.py | 6 ++++++
+ Lib/urllib/parse.py | 11 +++++++----
+ .../Security/2019-04-29-15-34-59.bpo-36742.QCUY0i.rst | 1 +
+ 3 files changed, 14 insertions(+), 4 deletions(-)
+ create mode 100644 Misc/NEWS.d/next/Security/2019-04-29-15-34-59.bpo-36742.QCUY0i.rst
+
+diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
+index e6638aee2244..c26235449461 100644
+--- a/Lib/test/test_urlparse.py
++++ b/Lib/test/test_urlparse.py
+@@ -1001,6 +1001,12 @@ def test_urlsplit_normalization(self):
+ self.assertIn('\u2100', denorm_chars)
+ self.assertIn('\uFF03', denorm_chars)
+
++ # bpo-36742: Verify port separators are ignored when they
++ # existed prior to decomposition
++ urllib.parse.urlsplit('http://\u30d5\u309a:80')
++ with self.assertRaises(ValueError):
++ urllib.parse.urlsplit('http://\u30d5\u309a\ufe1380')
++
+ for scheme in ["http", "https", "ftp"]:
+ for c in denorm_chars:
+ url = "{}://netloc{}false.netloc/path".format(scheme, c)
+diff --git a/Lib/urllib/parse.py b/Lib/urllib/parse.py
+index 1eec26e0f1f3..f5b3487ea9d6 100644
+--- a/Lib/urllib/parse.py
++++ b/Lib/urllib/parse.py
+@@ -397,13 +397,16 @@ def _checknetloc(netloc):
+ # looking for characters like \u2100 that expand to 'a/c'
+ # IDNA uses NFKC equivalence, so normalize for this check
+ import unicodedata
+- netloc2 = unicodedata.normalize('NFKC', netloc)
+- if netloc == netloc2:
++ n = netloc.rpartition('@')[2] # ignore anything to the left of '@'
++ n = n.replace(':', '') # ignore characters already included
++ n = n.replace('#', '') # but not the surrounding text
++ n = n.replace('?', '')
++ netloc2 = unicodedata.normalize('NFKC', n)
++ if n == netloc2:
+ return
+- _, _, netloc = netloc.rpartition('@') # anything to the left of '@' is okay
+ for c in '/?#@:':
+ if c in netloc2:
+- raise ValueError("netloc '" + netloc2 + "' contains invalid " +
++ raise ValueError("netloc '" + netloc + "' contains invalid " +
+ "characters under NFKC normalization")
+
+ def urlsplit(url, scheme='', allow_fragments=True):
diff -Nru python3.7-3.7.3/debian/patches/CVE-2019-10160-2.diff python3.7-3.7.3/debian/patches/CVE-2019-10160-2.diff
--- python3.7-3.7.3/debian/patches/CVE-2019-10160-2.diff 1970-01-01 01:00:00.000000000 +0100
+++ python3.7-3.7.3/debian/patches/CVE-2019-10160-2.diff 2019-12-20 17:57:53.000000000 +0100
@@ -0,0 +1,54 @@
+From 250b62acc59921d399f0db47db3b462cd6037e09 Mon Sep 17 00:00:00 2001
+From: "Miss Islington (bot)"
+ <31488909+miss-isling...@users.noreply.github.com>
+Date: Tue, 4 Jun 2019 09:15:13 -0700
+Subject: [PATCH] bpo-36742: Corrects fix to handle decomposition in usernames
+ (GH-13812)
+
+(cherry picked from commit 8d0ef0b5edeae52960c7ed05ae8a12388324f87e)
+
+Co-authored-by: Steve Dower <steve.do...@python.org>
+---
+ Lib/test/test_urlparse.py | 11 ++++++-----
+ Lib/urllib/parse.py | 6 +++---
+ 2 files changed, 9 insertions(+), 8 deletions(-)
+
+diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
+index c26235449461..68f633ca3a7d 100644
+--- a/Lib/test/test_urlparse.py
++++ b/Lib/test/test_urlparse.py
+@@ -1008,11 +1008,12 @@ def test_urlsplit_normalization(self):
+ urllib.parse.urlsplit('http://\u30d5\u309a\ufe1380')
+
+ for scheme in ["http", "https", "ftp"]:
+- for c in denorm_chars:
+- url = "{}://netloc{}false.netloc/path".format(scheme, c)
+- with self.subTest(url=url, char='{:04X}'.format(ord(c))):
+- with self.assertRaises(ValueError):
+- urllib.parse.urlsplit(url)
++ for netloc in ["netloc{}false.netloc", "n{}user@netloc"]:
++ for c in denorm_chars:
++ url = "{}://{}/path".format(scheme, netloc.format(c))
++ with self.subTest(url=url, char='{:04X}'.format(ord(c))):
++ with self.assertRaises(ValueError):
++ urllib.parse.urlsplit(url)
+
+ class Utility_Tests(unittest.TestCase):
+ """Testcase to test the various utility functions in the urllib."""
+diff --git a/Lib/urllib/parse.py b/Lib/urllib/parse.py
+index f5b3487ea9d6..4c8e77fe3912 100644
+--- a/Lib/urllib/parse.py
++++ b/Lib/urllib/parse.py
+@@ -397,9 +397,9 @@ def _checknetloc(netloc):
+ # looking for characters like \u2100 that expand to 'a/c'
+ # IDNA uses NFKC equivalence, so normalize for this check
+ import unicodedata
+- n = netloc.rpartition('@')[2] # ignore anything to the left of '@'
+- n = n.replace(':', '') # ignore characters already included
+- n = n.replace('#', '') # but not the surrounding text
++ n = netloc.replace('@', '') # ignore characters already included
++ n = n.replace(':', '') # but not the surrounding text
++ n = n.replace('#', '')
+ n = n.replace('?', '')
+ netloc2 = unicodedata.normalize('NFKC', n)
+ if n == netloc2:
diff -Nru python3.7-3.7.3/debian/patches/CVE-2019-16056.diff python3.7-3.7.3/debian/patches/CVE-2019-16056.diff
--- python3.7-3.7.3/debian/patches/CVE-2019-16056.diff 1970-01-01 01:00:00.000000000 +0100
+++ python3.7-3.7.3/debian/patches/CVE-2019-16056.diff 2019-12-20 17:57:53.000000000 +0100
@@ -0,0 +1,123 @@
+From c48d606adcef395e59fd555496c42203b01dd3e8 Mon Sep 17 00:00:00 2001
+From: "Miss Islington (bot)"
+ <31488909+miss-isling...@users.noreply.github.com>
+Date: Fri, 9 Aug 2019 01:30:33 -0700
+Subject: [PATCH] bpo-34155: Dont parse domains containing @ (GH-13079)
+
+Before:
+
+ >>> email.message_from_string('From: a...@malicious.org@important.com', policy=email.policy.default)['from'].addresses
+ (Address(display_name='', username='a', domain='malicious.org'),)
+
+ >>> parseaddr('a...@malicious.org@important.com')
+ ('', 'a...@malicious.org')
+
+ After:
+
+ >>> email.message_from_string('From: a...@malicious.org@important.com', policy=email.policy.default)['from'].addresses
+ (Address(display_name='', username='', domain=''),)
+
+ >>> parseaddr('a...@malicious.org@important.com')
+ ('', 'a@')
+
+https://bugs.python.org/issue34155
+(cherry picked from commit 8cb65d1381b027f0b09ee36bfed7f35bb4dec9a9)
+
+Co-authored-by: jpic <j...@users.noreply.github.com>
+---
+ Lib/email/_header_value_parser.py | 2 ++
+ Lib/email/_parseaddr.py | 11 ++++++++++-
+ Lib/test/test_email/test__header_value_parser.py | 10 ++++++++++
+ Lib/test/test_email/test_email.py | 14 ++++++++++++++
+ .../2019-05-04-13-33-37.bpo-34155.MJll68.rst | 1 +
+ 5 files changed, 37 insertions(+), 1 deletion(-)
+ create mode 100644 Misc/NEWS.d/next/Security/2019-05-04-13-33-37.bpo-34155.MJll68.rst
+
+diff --git a/Lib/email/_header_value_parser.py b/Lib/email/_header_value_parser.py
+index 801ae728dd136..c09f4f121ffb6 100644
+--- a/Lib/email/_header_value_parser.py
++++ b/Lib/email/_header_value_parser.py
+@@ -1585,6 +1585,8 @@ def get_domain(value):
+ token, value = get_dot_atom(value)
+ except errors.HeaderParseError:
+ token, value = get_atom(value)
++ if value and value[0] == '@':
++ raise errors.HeaderParseError('Invalid Domain')
+ if leader is not None:
+ token[:0] = [leader]
+ domain.append(token)
+diff --git a/Lib/email/_parseaddr.py b/Lib/email/_parseaddr.py
+index cdfa3729adc79..41ff6f8c000d5 100644
+--- a/Lib/email/_parseaddr.py
++++ b/Lib/email/_parseaddr.py
+@@ -379,7 +379,12 @@ def getaddrspec(self):
+ aslist.append('@')
+ self.pos += 1
+ self.gotonext()
+- return EMPTYSTRING.join(aslist) + self.getdomain()
++ domain = self.getdomain()
++ if not domain:
++ # Invalid domain, return an empty address instead of returning a
++ # local part to denote failed parsing.
++ return EMPTYSTRING
++ return EMPTYSTRING.join(aslist) + domain
+
+ def getdomain(self):
+ """Get the complete domain name from an address."""
+@@ -394,6 +399,10 @@ def getdomain(self):
+ elif self.field[self.pos] == '.':
+ self.pos += 1
+ sdlist.append('.')
++ elif self.field[self.pos] == '@':
++ # bpo-34155: Don't parse domains with two `@` like
++ # `a...@malicious.org@important.com`.
++ return EMPTYSTRING
+ elif self.field[self.pos] in self.atomends:
+ break
+ else:
+diff --git a/Lib/test/test_email/test__header_value_parser.py b/Lib/test/test_email/test__header_value_parser.py
+index 9e862feab10c9..0f19f8bcc2e0f 100644
+--- a/Lib/test/test_email/test__header_value_parser.py
++++ b/Lib/test/test_email/test__header_value_parser.py
+@@ -1448,6 +1448,16 @@ def test_get_addr_spec_dot_atom(self):
+ self.assertEqual(addr_spec.domain, 'example.com')
+ self.assertEqual(addr_spec.addr_spec, 'star.a.s...@example.com')
+
++ def test_get_addr_spec_multiple_domains(self):
++ with self.assertRaises(errors.HeaderParseError):
++ parser.get_addr_spec('s...@a.star@example.com')
++
++ with self.assertRaises(errors.HeaderParseError):
++ parser.get_addr_spec('star@a...@example.com')
++
++ with self.assertRaises(errors.HeaderParseError):
++ parser.get_addr_spec('star@172.17....@example.com')
++
+ # get_obs_route
+
+ def test_get_obs_route_simple(self):
+diff --git a/Lib/test/test_email/test_email.py b/Lib/test/test_email/test_email.py
+index c29cc56203b1f..aa775881c5521 100644
+--- a/Lib/test/test_email/test_email.py
++++ b/Lib/test/test_email/test_email.py
+@@ -3041,6 +3041,20 @@ def test_parseaddr_empty(self):
+ self.assertEqual(utils.parseaddr('<>'), ('', ''))
+ self.assertEqual(utils.formataddr(utils.parseaddr('<>')), '')
+
++ def test_parseaddr_multiple_domains(self):
++ self.assertEqual(
++ utils.parseaddr('a@b@c'),
++ ('', '')
++ )
++ self.assertEqual(
++ utils.parseaddr('a@b.c@c'),
++ ('', '')
++ )
++ self.assertEqual(
++ utils.parseaddr('a@172.17.0.1@c'),
++ ('', '')
++ )
++
+ def test_noquote_dump(self):
+ self.assertEqual(
+ utils.formataddr(('A Silly Person', 'per...@dom.ain')),
diff -Nru python3.7-3.7.3/debian/patches/CVE-2019-16935.diff python3.7-3.7.3/debian/patches/CVE-2019-16935.diff
--- python3.7-3.7.3/debian/patches/CVE-2019-16935.diff 1970-01-01 01:00:00.000000000 +0100
+++ python3.7-3.7.3/debian/patches/CVE-2019-16935.diff 2019-12-20 17:57:53.000000000 +0100
@@ -0,0 +1,72 @@
+From 39a0c7555530e31c6941a78da19b6a5b61170687 Mon Sep 17 00:00:00 2001
+From: "Miss Islington (bot)"
+ <31488909+miss-isling...@users.noreply.github.com>
+Date: Fri, 27 Sep 2019 13:18:14 -0700
+Subject: [PATCH] bpo-38243, xmlrpc.server: Escape the server_title (GH-16373)
+
+Escape the server title of xmlrpc.server.DocXMLRPCServer
+when rendering the document page as HTML.
+(cherry picked from commit e8650a4f8c7fb76f570d4ca9c1fbe44e91c8dfaa)
+
+Co-authored-by: Dong-hee Na <donghee.n...@gmail.com>
+---
+ Lib/test/test_docxmlrpc.py | 16 ++++++++++++++++
+ Lib/xmlrpc/server.py | 3 ++-
+ .../2019-09-25-13-21-09.bpo-38243.1pfz24.rst | 3 +++
+ 3 files changed, 21 insertions(+), 1 deletion(-)
+ create mode 100644 Misc/NEWS.d/next/Security/2019-09-25-13-21-09.bpo-38243.1pfz24.rst
+
+diff --git a/Lib/test/test_docxmlrpc.py b/Lib/test/test_docxmlrpc.py
+index f077f05f5b4f7..38215659b67d9 100644
+--- a/Lib/test/test_docxmlrpc.py
++++ b/Lib/test/test_docxmlrpc.py
+@@ -1,5 +1,6 @@
+ from xmlrpc.server import DocXMLRPCServer
+ import http.client
++import re
+ import sys
+ import threading
+ from test import support
+@@ -193,6 +194,21 @@ def test_annotations(self):
+ b'method_annotation</strong></a>(x: bytes)</dt></dl>'),
+ response.read())
+
++ def test_server_title_escape(self):
++ # bpo-38243: Ensure that the server title and documentation
++ # are escaped for HTML.
++ self.serv.set_server_title('test_title<script>')
++ self.serv.set_server_documentation('test_documentation<script>')
++ self.assertEqual('test_title<script>', self.serv.server_title)
++ self.assertEqual('test_documentation<script>',
++ self.serv.server_documentation)
++
++ generated = self.serv.generate_html_documentation()
++ title = re.search(r'<title>(.+?)</title>', generated).group()
++ documentation = re.search(r'<p><tt>(.+?)</tt></p>', generated).group()
++ self.assertEqual('<title>Python: test_title&lt;script&gt;</title>', title)
++ self.assertEqual('<p><tt>test_documentation&lt;script&gt;</tt></p>', documentation)
++
+
+ if __name__ == '__main__':
+ unittest.main()
+diff --git a/Lib/xmlrpc/server.py b/Lib/xmlrpc/server.py
+index f1c467eb1b2b8..32aba4df4c7eb 100644
+--- a/Lib/xmlrpc/server.py
++++ b/Lib/xmlrpc/server.py
+@@ -108,6 +108,7 @@ def export_add(self, x, y):
+ from http.server import BaseHTTPRequestHandler
+ from functools import partial
+ from inspect import signature
++import html
+ import http.server
+ import socketserver
+ import sys
+@@ -894,7 +895,7 @@ def generate_html_documentation(self):
+ methods
+ )
+
+- return documenter.page(self.server_title, documentation)
++ return documenter.page(html.escape(self.server_title), documentation)
+
+ class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
+ """XML-RPC and documentation request handler class.
diff -Nru python3.7-3.7.3/debian/patches/CVE-2019-9740_CVE-2019-9947.diff python3.7-3.7.3/debian/patches/CVE-2019-9740_CVE-2019-9947.diff
--- python3.7-3.7.3/debian/patches/CVE-2019-9740_CVE-2019-9947.diff 1970-01-01 01:00:00.000000000 +0100
+++ python3.7-3.7.3/debian/patches/CVE-2019-9740_CVE-2019-9947.diff 2019-12-20 17:57:53.000000000 +0100
@@ -0,0 +1,140 @@
+From 7e200e0763f5b71c199aaf98bd5588f291585619 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Miro=20Hron=C4=8Dok?= <m...@hroncok.cz>
+Date: Tue, 7 May 2019 17:28:47 +0200
+Subject: [PATCH] bpo-30458: Disallow control chars in http URLs. (GH-12755)
+ (GH-13154)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Disallow control chars in http URLs in urllib.urlopen. This addresses a potential security problem for applications that do not sanity check their URLs where http request headers could be injected.
+
+Disable https related urllib tests on a build without ssl (GH-13032)
+These tests require an SSL enabled build. Skip these tests when python is built without SSL to fix test failures.
+
+Use http.client.InvalidURL instead of ValueError as the new error case's exception. (GH-13044)
+
+Backport Co-Authored-By: Miro Hrončok <m...@hroncok.cz>
+---
+ Lib/http/client.py | 15 ++++++
+ Lib/test/test_urllib.py | 53 +++++++++++++++++++
+ Lib/test/test_xmlrpc.py | 7 ++-
+ .../2019-04-10-08-53-30.bpo-30458.51E-DA.rst | 1 +
+ 4 files changed, 75 insertions(+), 1 deletion(-)
+ create mode 100644 Misc/NEWS.d/next/Security/2019-04-10-08-53-30.bpo-30458.51E-DA.rst
+
+diff --git a/Lib/http/client.py b/Lib/http/client.py
+index 1de151c38e92f..2afd452fe30fa 100644
+--- a/Lib/http/client.py
++++ b/Lib/http/client.py
+@@ -140,6 +140,16 @@
+ _is_legal_header_name = re.compile(rb'[^:\s][^:\r\n]*').fullmatch
+ _is_illegal_header_value = re.compile(rb'\n(?![ \t])|\r(?![ \t\n])').search
+
++# These characters are not allowed within HTTP URL paths.
++# See https://tools.ietf.org/html/rfc3986#section-3.3 and the
++# https://tools.ietf.org/html/rfc3986#appendix-A pchar definition.
++# Prevents CVE-2019-9740. Includes control characters such as \r\n.
++# We don't restrict chars above \x7f as putrequest() limits us to ASCII.
++_contains_disallowed_url_pchar_re = re.compile('[\x00-\x20\x7f]')
++# Arguably only these _should_ allowed:
+# _is_allowed_url_pchars_re = re.compile(r"^[/!$&'()*+,;=:@%a-zA-Z0-9._~-]+$")
++# We are more lenient for assumed real world compatibility purposes.
++
+ # We always set the Content-Length header for these methods because some
+ # servers will otherwise respond with a 411
+ _METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'}
+@@ -1101,6 +1111,11 @@ def putrequest(self, method, url, skip_host=False,
+ self._method = method
+ if not url:
+ url = '/'
++ # Prevent CVE-2019-9740.
++ match = _contains_disallowed_url_pchar_re.search(url)
++ if match:
++ raise InvalidURL(f"URL can't contain control characters. {url!r} "
++ f"(found at least {match.group()!r})")
+ request = '%s %s %s' % (method, url, self._http_vsn_str)
+
+ # Non-ASCII characters should have been eliminated earlier
+diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py
+index 2ac73b58d8320..7214492eca9d8 100644
+--- a/Lib/test/test_urllib.py
++++ b/Lib/test/test_urllib.py
+@@ -329,6 +329,59 @@ def test_willclose(self):
+ finally:
+ self.unfakehttp()
+
++ @unittest.skipUnless(ssl, "ssl module required")
++ def test_url_with_control_char_rejected(self):
++ for char_no in list(range(0, 0x21)) + [0x7f]:
++ char = chr(char_no)
++ schemeless_url = f"//localhost:7777/test{char}/"
++ self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
++ try:
++ # We explicitly test urllib.request.urlopen() instead of the top
++ # level 'def urlopen()' function defined in this... (quite ugly)
++ # test suite. They use different url opening codepaths. Plain
++ # urlopen uses FancyURLOpener which goes via a codepath that
++ # calls urllib.parse.quote() on the URL which makes all of the
++ # above attempts at injection within the url _path_ safe.
++ escaped_char_repr = repr(char).replace('\\', r'\\')
++ InvalidURL = http.client.InvalidURL
++ with self.assertRaisesRegex(
++ InvalidURL, f"contain control.*{escaped_char_repr}"):
++ urllib.request.urlopen(f"http:{schemeless_url}")
++ with self.assertRaisesRegex(
++ InvalidURL, f"contain control.*{escaped_char_repr}"):
++ urllib.request.urlopen(f"https:{schemeless_url}")
++ # This code path quotes the URL so there is no injection.
++ resp = urlopen(f"http:{schemeless_url}")
++ self.assertNotIn(char, resp.geturl())
++ finally:
++ self.unfakehttp()
++
++ @unittest.skipUnless(ssl, "ssl module required")
++ def test_url_with_newline_header_injection_rejected(self):
++ self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
++ host = "localhost:7777?a=1 HTTP/1.1\r\nX-injected: header\r\nTEST:
123"
++ schemeless_url = "//" + host + ":8080/test/?test=a"
++ try:
++ # We explicitly test urllib.request.urlopen() instead of the top
++ # level 'def urlopen()' function defined in this... (quite ugly)
++ # test suite. They use different url opening codepaths. Plain
++ # urlopen uses FancyURLOpener which goes via a codepath that
++ # calls urllib.parse.quote() on the URL which makes all of the
++ # above attempts at injection within the url _path_ safe.
++ InvalidURL = http.client.InvalidURL
++ with self.assertRaisesRegex(
++ InvalidURL, r"contain control.*\\r.*(found at least . .)"):
++ urllib.request.urlopen(f"http:{schemeless_url}")
++ with self.assertRaisesRegex(InvalidURL, r"contain control.*\\n"):
++ urllib.request.urlopen(f"https:{schemeless_url}")
++ # This code path quotes the URL so there is no injection.
++ resp = urlopen(f"http:{schemeless_url}")
++ self.assertNotIn(' ', resp.geturl())
++ self.assertNotIn('\r', resp.geturl())
++ self.assertNotIn('\n', resp.geturl())
++ finally:
++ self.unfakehttp()
++
+ def test_read_0_9(self):
+ # "0.9" response accepted (but not "simple responses" without
+ # a status line)
+diff --git a/Lib/test/test_xmlrpc.py b/Lib/test/test_xmlrpc.py
+--- a/Lib/test/test_xmlrpc.py
++++ b/Lib/test/test_xmlrpc.py
+@@ -945,7 +945,12 @@ def test_unicode_host(self):
+ def test_partial_post(self):
+ # Check that a partial POST doesn't make the server loop: issue #14001.
+ conn = http.client.HTTPConnection(ADDR, PORT)
+- conn.request('POST', '/RPC2 HTTP/1.0\r\nContent-Length: 100\r\n\r\nbye')
++ conn.send('POST /RPC2 HTTP/1.0\r\n'
++ 'Content-Length: 100\r\n\r\n'
++ 'bye HTTP/1.1\r\n'
++ f'Host: {ADDR}:{PORT}\r\n'
++ 'Accept-Encoding: identity\r\n'
++ 'Content-Length: 0\r\n\r\n'.encode('ascii'))
+ conn.close()
+
+ def test_context_manager(self):
diff -Nru python3.7-3.7.3/debian/patches/CVE-2019-9948.diff python3.7-3.7.3/debian/patches/CVE-2019-9948.diff
--- python3.7-3.7.3/debian/patches/CVE-2019-9948.diff 1970-01-01 01:00:00.000000000 +0100
+++ python3.7-3.7.3/debian/patches/CVE-2019-9948.diff 2019-12-20 17:57:53.000000000 +0100
@@ -0,0 +1,67 @@
+From 34bab215596671d0dec2066ae7d7450cd73f638b Mon Sep 17 00:00:00 2001
+From: Victor Stinner <vstin...@redhat.com>
+Date: Wed, 22 May 2019 23:28:28 +0200
+Subject: [PATCH] bpo-35907, CVE-2019-9948: urllib rejects local_file:// scheme
+ (GH-13474) (GH-13505)
+
+CVE-2019-9948: Avoid file reading as disallowing the unnecessary URL
+scheme in URLopener().open() and URLopener().retrieve()
+of urllib.request.
+
+Co-Authored-By: SH <push0...@gmail.com>
+(cherry picked from commit 0c2b6a3943aa7b022e8eb4bfd9bffcddebf9a587)
+---
+ Lib/test/test_urllib.py | 18 ++++++++++++++++++
+ Lib/urllib/request.py | 2 +-
+ 3 files changed, 21 insertions(+), 1 deletion(-)
+ create mode 100644 Misc/NEWS.d/next/Security/2019-05-21-23-20-18.bpo-35907.NC_zNK.rst
+
+diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py
+index 7214492eca9d..7ec365b928a5 100644
+--- a/Lib/test/test_urllib.py
++++ b/Lib/test/test_urllib.py
+@@ -16,6 +16,7 @@
+ ssl = None
+ import sys
+ import tempfile
++import warnings
+ from nturl2path import url2pathname, pathname2url
+
+ from base64 import b64encode
+@@ -1463,6 +1464,23 @@ def open_spam(self, url):
+ "spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
+ "//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
+
++ def test_local_file_open(self):
++ # bpo-35907, CVE-2019-9948: urllib must reject local_file:// scheme
++ class DummyURLopener(urllib.request.URLopener):
++ def open_local_file(self, url):
++ return url
++
++ with warnings.catch_warnings(record=True):
++ warnings.simplefilter("ignore", DeprecationWarning)
++
++ for url in ('local_file://example', 'local-file://example'):
++ self.assertRaises(OSError, urllib.request.urlopen, url)
++ self.assertRaises(OSError, urllib.request.URLopener().open, url)
++ self.assertRaises(OSError, urllib.request.URLopener().retrieve, url)
++ self.assertRaises(OSError, DummyURLopener().open, url)
++ self.assertRaises(OSError, DummyURLopener().retrieve, url)
++
++
+ # Just commented them out.
+ # Can't really tell why keep failing in windows and sparc.
+ # Everywhere else they work ok, but on those machines, sometimes
+diff --git a/Lib/urllib/request.py b/Lib/urllib/request.py
+index d38f725d8e9f..37b254862887 100644
+--- a/Lib/urllib/request.py
++++ b/Lib/urllib/request.py
+@@ -1746,7 +1746,7 @@ def open(self, fullurl, data=None):
+ name = 'open_' + urltype
+ self.type = urltype
+ name = name.replace('-', '_')
+- if not hasattr(self, name):
++ if not hasattr(self, name) or name == 'open_local_file':
+ if proxy:
+ return self.open_unknown_proxy(proxy, fullurl, data)
+ else:
diff -Nru python3.7-3.7.3/debian/patches/series python3.7-3.7.3/debian/patches/series
--- python3.7-3.7.3/debian/patches/series 2019-04-03 07:36:11.000000000 +0200
+++ python3.7-3.7.3/debian/patches/series 2019-12-20 17:58:50.000000000 +0100
@@ -37,3 +37,9 @@
build-math-object.diff
issue35998.diff
arm-alignment.diff
+CVE-2019-9740_CVE-2019-9947.diff
+CVE-2019-9948.diff
+CVE-2019-10160-1.diff
+CVE-2019-10160-2.diff
+CVE-2019-16056.diff
+CVE-2019-16935.diff
--- End Message ---