2 years ago
commit
82ea9ac2ce
100 files changed, with 14468 additions and 0 deletions
  1. BIN=BIN
      env/Lib/site-packages/__pycache__/easy_install.cpython-39.pyc
  2. BIN=BIN
      env/Lib/site-packages/__pycache__/six.cpython-39.pyc
  3. 46 0
      env/Lib/site-packages/asyncio-3.4.3.dist-info/DESCRIPTION.rst
  4. 1 0
      env/Lib/site-packages/asyncio-3.4.3.dist-info/INSTALLER
  5. 59 0
      env/Lib/site-packages/asyncio-3.4.3.dist-info/METADATA
  6. 53 0
      env/Lib/site-packages/asyncio-3.4.3.dist-info/RECORD
  7. 0 0
      env/Lib/site-packages/asyncio-3.4.3.dist-info/REQUESTED
  8. 5 0
      env/Lib/site-packages/asyncio-3.4.3.dist-info/WHEEL
  9. 1 0
      env/Lib/site-packages/asyncio-3.4.3.dist-info/metadata.json
  10. 1 0
      env/Lib/site-packages/asyncio-3.4.3.dist-info/top_level.txt
  11. 50 0
      env/Lib/site-packages/asyncio/__init__.py
  12. BIN=BIN
      env/Lib/site-packages/asyncio/__pycache__/__init__.cpython-39.pyc
  13. BIN=BIN
      env/Lib/site-packages/asyncio/__pycache__/base_subprocess.cpython-39.pyc
  14. BIN=BIN
      env/Lib/site-packages/asyncio/__pycache__/constants.cpython-39.pyc
  15. BIN=BIN
      env/Lib/site-packages/asyncio/__pycache__/coroutines.cpython-39.pyc
  16. BIN=BIN
      env/Lib/site-packages/asyncio/__pycache__/events.cpython-39.pyc
  17. BIN=BIN
      env/Lib/site-packages/asyncio/__pycache__/futures.cpython-39.pyc
  18. BIN=BIN
      env/Lib/site-packages/asyncio/__pycache__/locks.cpython-39.pyc
  19. BIN=BIN
      env/Lib/site-packages/asyncio/__pycache__/log.cpython-39.pyc
  20. BIN=BIN
      env/Lib/site-packages/asyncio/__pycache__/proactor_events.cpython-39.pyc
  21. BIN=BIN
      env/Lib/site-packages/asyncio/__pycache__/protocols.cpython-39.pyc
  22. BIN=BIN
      env/Lib/site-packages/asyncio/__pycache__/queues.cpython-39.pyc
  23. BIN=BIN
      env/Lib/site-packages/asyncio/__pycache__/selector_events.cpython-39.pyc
  24. BIN=BIN
      env/Lib/site-packages/asyncio/__pycache__/selectors.cpython-39.pyc
  25. BIN=BIN
      env/Lib/site-packages/asyncio/__pycache__/sslproto.cpython-39.pyc
  26. BIN=BIN
      env/Lib/site-packages/asyncio/__pycache__/streams.cpython-39.pyc
  27. BIN=BIN
      env/Lib/site-packages/asyncio/__pycache__/subprocess.cpython-39.pyc
  28. BIN=BIN
      env/Lib/site-packages/asyncio/__pycache__/test_support.cpython-39.pyc
  29. BIN=BIN
      env/Lib/site-packages/asyncio/__pycache__/test_utils.cpython-39.pyc
  30. BIN=BIN
      env/Lib/site-packages/asyncio/__pycache__/transports.cpython-39.pyc
  31. BIN=BIN
      env/Lib/site-packages/asyncio/__pycache__/unix_events.cpython-39.pyc
  32. BIN=BIN
      env/Lib/site-packages/asyncio/__pycache__/windows_utils.cpython-39.pyc
  33. 1179 0
      env/Lib/site-packages/asyncio/base_events.py
  34. 270 0
      env/Lib/site-packages/asyncio/base_subprocess.py
  35. 7 0
      env/Lib/site-packages/asyncio/constants.py
  36. 199 0
      env/Lib/site-packages/asyncio/coroutines.py
  37. 597 0
      env/Lib/site-packages/asyncio/events.py
  38. 409 0
      env/Lib/site-packages/asyncio/futures.py
  39. 469 0
      env/Lib/site-packages/asyncio/locks.py
  40. 7 0
      env/Lib/site-packages/asyncio/log.py
  41. 547 0
      env/Lib/site-packages/asyncio/proactor_events.py
  42. 134 0
      env/Lib/site-packages/asyncio/protocols.py
  43. 302 0
      env/Lib/site-packages/asyncio/queues.py
  44. 1070 0
      env/Lib/site-packages/asyncio/selector_events.py
  45. 594 0
      env/Lib/site-packages/asyncio/selectors.py
  46. 668 0
      env/Lib/site-packages/asyncio/sslproto.py
  47. 486 0
      env/Lib/site-packages/asyncio/streams.py
  48. 215 0
      env/Lib/site-packages/asyncio/subprocess.py
  49. 667 0
      env/Lib/site-packages/asyncio/tasks.py
  50. 305 0
      env/Lib/site-packages/asyncio/test_support.py
  51. 446 0
      env/Lib/site-packages/asyncio/test_utils.py
  52. 300 0
      env/Lib/site-packages/asyncio/transports.py
  53. 998 0
      env/Lib/site-packages/asyncio/unix_events.py
  54. 774 0
      env/Lib/site-packages/asyncio/windows_events.py
  55. 223 0
      env/Lib/site-packages/asyncio/windows_utils.py
  56. 5 0
      env/Lib/site-packages/easy_install.py
  57. 1 0
      env/Lib/site-packages/pip-23.0.1.dist-info/INSTALLER
  58. 20 0
      env/Lib/site-packages/pip-23.0.1.dist-info/LICENSE.txt
  59. 88 0
      env/Lib/site-packages/pip-23.0.1.dist-info/METADATA
  60. 1002 0
      env/Lib/site-packages/pip-23.0.1.dist-info/RECORD
  61. 0 0
      env/Lib/site-packages/pip-23.0.1.dist-info/REQUESTED
  62. 5 0
      env/Lib/site-packages/pip-23.0.1.dist-info/WHEEL
  63. 4 0
      env/Lib/site-packages/pip-23.0.1.dist-info/entry_points.txt
  64. 1 0
      env/Lib/site-packages/pip-23.0.1.dist-info/top_level.txt
  65. 13 0
      env/Lib/site-packages/pip/__init__.py
  66. 31 0
      env/Lib/site-packages/pip/__main__.py
  67. 50 0
      env/Lib/site-packages/pip/__pip-runner__.py
  68. BIN=BIN
      env/Lib/site-packages/pip/__pycache__/__init__.cpython-39.pyc
  69. BIN=BIN
      env/Lib/site-packages/pip/__pycache__/__main__.cpython-39.pyc
  70. BIN=BIN
      env/Lib/site-packages/pip/__pycache__/__pip-runner__.cpython-39.pyc
  71. 19 0
      env/Lib/site-packages/pip/_internal/__init__.py
  72. BIN=BIN
      env/Lib/site-packages/pip/_internal/__pycache__/__init__.cpython-39.pyc
  73. BIN=BIN
      env/Lib/site-packages/pip/_internal/__pycache__/build_env.cpython-39.pyc
  74. BIN=BIN
      env/Lib/site-packages/pip/_internal/__pycache__/cache.cpython-39.pyc
  75. BIN=BIN
      env/Lib/site-packages/pip/_internal/__pycache__/configuration.cpython-39.pyc
  76. BIN=BIN
      env/Lib/site-packages/pip/_internal/__pycache__/exceptions.cpython-39.pyc
  77. BIN=BIN
      env/Lib/site-packages/pip/_internal/__pycache__/main.cpython-39.pyc
  78. BIN=BIN
      env/Lib/site-packages/pip/_internal/__pycache__/pyproject.cpython-39.pyc
  79. BIN=BIN
      env/Lib/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-39.pyc
  80. BIN=BIN
      env/Lib/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-39.pyc
  81. 311 0
      env/Lib/site-packages/pip/_internal/build_env.py
  82. 293 0
      env/Lib/site-packages/pip/_internal/cache.py
  83. 4 0
      env/Lib/site-packages/pip/_internal/cli/__init__.py
  84. BIN=BIN
      env/Lib/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-39.pyc
  85. BIN=BIN
      env/Lib/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-39.pyc
  86. BIN=BIN
      env/Lib/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-39.pyc
  87. BIN=BIN
      env/Lib/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-39.pyc
  88. BIN=BIN
      env/Lib/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-39.pyc
  89. BIN=BIN
      env/Lib/site-packages/pip/_internal/cli/__pycache__/main.cpython-39.pyc
  90. BIN=BIN
      env/Lib/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-39.pyc
  91. BIN=BIN
      env/Lib/site-packages/pip/_internal/cli/__pycache__/parser.cpython-39.pyc
  92. BIN=BIN
      env/Lib/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-39.pyc
  93. BIN=BIN
      env/Lib/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-39.pyc
  94. BIN=BIN
      env/Lib/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-39.pyc
  95. BIN=BIN
      env/Lib/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-39.pyc
  96. 171 0
      env/Lib/site-packages/pip/_internal/cli/autocompletion.py
  97. 216 0
      env/Lib/site-packages/pip/_internal/cli/base_command.py
  98. 1055 0
      env/Lib/site-packages/pip/_internal/cli/cmdoptions.py
  99. 27 0
      env/Lib/site-packages/pip/_internal/cli/command_context.py
  100. 70 0
      env/Lib/site-packages/pip/_internal/cli/main.py

BIN=BIN
env/Lib/site-packages/__pycache__/easy_install.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/__pycache__/six.cpython-39.pyc


+ 46 - 0
env/Lib/site-packages/asyncio-3.4.3.dist-info/DESCRIPTION.rst

@@ -0,0 +1,46 @@
+Tulip is the codename for my reference implementation of PEP 3156.
+
+PEP 3156: http://www.python.org/dev/peps/pep-3156/
+
+*** This requires Python 3.3 or later! ***
+
+Copyright/license: Open source, Apache 2.0. Enjoy.
+
+Master Mercurial repo: http://code.google.com/p/tulip/
+
+The actual code lives in the 'asyncio' subdirectory.
+Tests are in the 'tests' subdirectory.
+
+To run tests:
+  - make test
+
+To run coverage (coverage package is required):
+  - make coverage
+
+On Windows, things are a little more complicated.  Assume 'P' is your
+Python binary (for example C:\Python33\python.exe).
+
+You must first build the _overlapped.pyd extension and have it placed
+in the asyncio directory, as follows:
+
+    C> P setup.py build_ext --inplace
+
+If this complains about vcvars.bat, you probably don't have the
+required version of Visual Studio installed.  Compiling extensions for
+Python 3.3 requires Microsoft Visual C++ 2010 (MSVC 10.0) of any
+edition; you can download Visual Studio Express 2010 for free from
+http://www.visualstudio.com/downloads (scroll down to Visual C++ 2010
+Express).
+
+Once you have built the _overlapped.pyd extension successfully you can
+run the tests as follows:
+
+    C> P runtests.py
+
+And coverage as follows:
+
+    C> P runtests.py --coverage
+
+--Guido van Rossum <guido@python.org>
+
+
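
For orientation, a minimal usage sketch (not part of the commit) in the
generator-based coroutine style this 3.4-era package targets; the coroutine
name is hypothetical:

    import asyncio

    @asyncio.coroutine
    def greet():
        # Pre-async/await style: a generator decorated as a coroutine.
        yield from asyncio.sleep(1)
        return 'hello'

    loop = asyncio.get_event_loop()
    print(loop.run_until_complete(greet()))  # blocks until the coroutine finishes
    loop.close()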

+ 1 - 0
env/Lib/site-packages/asyncio-3.4.3.dist-info/INSTALLER

@@ -0,0 +1 @@
+pip

+ 59 - 0
env/Lib/site-packages/asyncio-3.4.3.dist-info/METADATA

@@ -0,0 +1,59 @@
+Metadata-Version: 2.0
+Name: asyncio
+Version: 3.4.3
+Summary: reference implementation of PEP 3156
+Home-page: http://www.python.org/dev/peps/pep-3156/
+Author: UNKNOWN
+Author-email: UNKNOWN
+License: UNKNOWN
+Platform: UNKNOWN
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+
+Tulip is the codename for my reference implementation of PEP 3156.
+
+PEP 3156: http://www.python.org/dev/peps/pep-3156/
+
+*** This requires Python 3.3 or later! ***
+
+Copyright/license: Open source, Apache 2.0. Enjoy.
+
+Master Mercurial repo: http://code.google.com/p/tulip/
+
+The actual code lives in the 'asyncio' subdirectory.
+Tests are in the 'tests' subdirectory.
+
+To run tests:
+  - make test
+
+To run coverage (coverage package is required):
+  - make coverage
+
+On Windows, things are a little more complicated.  Assume 'P' is your
+Python binary (for example C:\Python33\python.exe).
+
+You must first build the _overlapped.pyd extension and have it placed
+in the asyncio directory, as follows:
+
+    C> P setup.py build_ext --inplace
+
+If this complains about vcvars.bat, you probably don't have the
+required version of Visual Studio installed.  Compiling extensions for
+Python 3.3 requires Microsoft Visual C++ 2010 (MSVC 10.0) of any
+edition; you can download Visual Studio Express 2010 for free from
+http://www.visualstudio.com/downloads (scroll down to Visual C++ 2010
+Express).
+
+Once you have built the _overlapped.pyd extension successfully you can
+run the tests as follows:
+
+    C> P runtests.py
+
+And coverage as follows:
+
+    C> P runtests.py --coverage
+
+--Guido van Rossum <guido@python.org>
+
+
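
METADATA uses RFC 822-style headers, so the standard library email parser can
read it; a sketch (not part of the commit), with the path taken from this
commit:

    from email.parser import HeaderParser

    with open('env/Lib/site-packages/asyncio-3.4.3.dist-info/METADATA') as f:
        meta = HeaderParser().parse(f)

    print(meta['Name'], meta['Version'])   # asyncio 3.4.3
    print(meta.get_all('Classifier'))      # all Classifier headers as a list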

+ 53 - 0
env/Lib/site-packages/asyncio-3.4.3.dist-info/RECORD

@@ -0,0 +1,53 @@
+asyncio-3.4.3.dist-info/DESCRIPTION.rst,sha256=bAZjof00nusb_zrWWkv9SvQx3MTvI2hct4h5kztkF0E,1299
+asyncio-3.4.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+asyncio-3.4.3.dist-info/METADATA,sha256=ugM5pmv1pGpcH0_Ank3SlGH2KWhm5hBjfbo9sku6QT4,1663
+asyncio-3.4.3.dist-info/RECORD,,
+asyncio-3.4.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+asyncio-3.4.3.dist-info/WHEEL,sha256=-aSo8rHuuPDEFzkcqqQ55pDyCjy25bYMLxSiHWKAOTc,92
+asyncio-3.4.3.dist-info/metadata.json,sha256=ZEfpR-htrGXt614v324AATAhUocWXJ20fLm37COMCII,445
+asyncio-3.4.3.dist-info/top_level.txt,sha256=WprmKZDC2yB-f6r9Mj2UujoXsgkJEZ8TjyElgaL16T8,8
+asyncio/__init__.py,sha256=KJCsXiIYG2d4fOB8PT6xHAu-0XrixyIx51KFn0gY9WY,1436
+asyncio/__pycache__/__init__.cpython-39.pyc,,
+asyncio/__pycache__/base_subprocess.cpython-39.pyc,,
+asyncio/__pycache__/constants.cpython-39.pyc,,
+asyncio/__pycache__/coroutines.cpython-39.pyc,,
+asyncio/__pycache__/events.cpython-39.pyc,,
+asyncio/__pycache__/futures.cpython-39.pyc,,
+asyncio/__pycache__/locks.cpython-39.pyc,,
+asyncio/__pycache__/log.cpython-39.pyc,,
+asyncio/__pycache__/proactor_events.cpython-39.pyc,,
+asyncio/__pycache__/protocols.cpython-39.pyc,,
+asyncio/__pycache__/queues.cpython-39.pyc,,
+asyncio/__pycache__/selector_events.cpython-39.pyc,,
+asyncio/__pycache__/selectors.cpython-39.pyc,,
+asyncio/__pycache__/sslproto.cpython-39.pyc,,
+asyncio/__pycache__/streams.cpython-39.pyc,,
+asyncio/__pycache__/subprocess.cpython-39.pyc,,
+asyncio/__pycache__/test_support.cpython-39.pyc,,
+asyncio/__pycache__/test_utils.cpython-39.pyc,,
+asyncio/__pycache__/transports.cpython-39.pyc,,
+asyncio/__pycache__/unix_events.cpython-39.pyc,,
+asyncio/__pycache__/windows_utils.cpython-39.pyc,,
+asyncio/base_events.py,sha256=MLq0JllHep1jnrK22Z0cDyZ1jUoI79Pclp5vdxIpYN8,44946
+asyncio/base_subprocess.py,sha256=X2P3bKLmODZRVc5PO5xe3LDbiOE8rcbS07J5N_O92cg,8399
+asyncio/constants.py,sha256=I8qh6SMz71N8m8gnzhSAsFQAnnZcDFMi9ZGEjapFAPQ,195
+asyncio/coroutines.py,sha256=ueF6INxKWXIzLclt8d3s2NqVzvIAtPTtP5HM5fXxcCA,6239
+asyncio/events.py,sha256=Gu43DTec0XH_0MNJufn-P3eDZqF0RReuhOg565z6_j8,19232
+asyncio/futures.py,sha256=idhT72s5Hd7-EntrEaisqCFXrvGJp3alcm_eFazSWh8,14625
+asyncio/locks.py,sha256=p4WipPNrbH9_sbO2jW-0kXNhVzxVJI-9y_vVKiwZcbA,14408
+asyncio/log.py,sha256=gOTMPe1LE4urpIZRnnREgBoj1qw18inTNqQHqWr36NI,124
+asyncio/proactor_events.py,sha256=vxaoliRdAiiZcvoFSdAGfjHgXS_Cqpvcg0xJFVFZROA,20124
+asyncio/protocols.py,sha256=_5E_og4fDaGDxDHhfv8YbTi3j58oKzOCHbPauM9xyxA,4512
+asyncio/queues.py,sha256=phBUIoLwYntH1Q-FbkF8WkpL5deXW0AyV5EvgzTHxPQ,9429
+asyncio/selector_events.py,sha256=E6K6_zHmyDsxcTDnYU4x9fFvl2md9pKxt1zqTDS02bQ,39120
+asyncio/selectors.py,sha256=DX7fB18eAkOULdk64sI5tDuQxJMMbzrX3Fp-luquwAw,18697
+asyncio/sslproto.py,sha256=uf4oVigVSARAzvuYnH0isAA2pkL9-sGCUQu36XneBTc,24997
+asyncio/streams.py,sha256=N5UKnRFXfrlDMghprdpUIkrz-EGy_N-5QOm5uxZnYfM,16167
+asyncio/subprocess.py,sha256=4duwmg4q5nXGyXuQoW3fMXTg6J-WGzRgUr58pS5t6cQ,7223
+asyncio/tasks.py,sha256=XMd-WRyzbzfAVitZhCmoll0enCuzG8ox6kgTmKgvsKc,23027
+asyncio/test_support.py,sha256=RXgLvJKTIUZaEX27CsV0OJYpj1F5wwijc1SHHcZfYfQ,12675
+asyncio/test_utils.py,sha256=8YuV10aAc8Y6lc-nJ6-1qfybxAwatNCW0Rpfzp1IR9M,12554
+asyncio/transports.py,sha256=LKBC2n4LDmIwx-6_Mpnfcv7hJAWHUIxeblSb3-IUOrs,9941
+asyncio/unix_events.py,sha256=XIfdOtdVtqLnnLM-yFmjXSuebdCmThY2BrCZ2IAocnI,34333
+asyncio/windows_events.py,sha256=exfmTU03t2_cgcmplHmZQVDIYkKHvUGGWdsaKFZQ-00,27697
+asyncio/windows_utils.py,sha256=ytNJq6mfUiVvBZqryHHmb4UrtZg7jbPNTkO0Myyua7c,6844
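
Each RECORD row is "path,sha256=<urlsafe-base64 digest, padding stripped>,size";
rows with an empty hash (RECORD itself, the .pyc files) are not verifiable. A
sketch of checking the hashes (not part of the commit; verify_record is a
hypothetical helper):

    import base64
    import csv
    import hashlib
    import os

    def verify_record(record_path, site_packages):
        with open(record_path, newline='') as f:
            for path, hash_spec, size in csv.reader(f):
                if not hash_spec:          # unhashed entries cannot be checked
                    continue
                algo, _, expected = hash_spec.partition('=')
                with open(os.path.join(site_packages, path), 'rb') as g:
                    digest = hashlib.new(algo, g.read()).digest()
                actual = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
                assert actual == expected, path

    verify_record('env/Lib/site-packages/asyncio-3.4.3.dist-info/RECORD',
                  'env/Lib/site-packages')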

+ 0 - 0
env/Lib/site-packages/asyncio-3.4.3.dist-info/REQUESTED


+ 5 - 0
env/Lib/site-packages/asyncio-3.4.3.dist-info/WHEEL

@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.24.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
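
The Tag line (py3-none-any: any Python 3, no ABI requirement, any platform) is
what installers match against the running interpreter. A sketch using the
third-party packaging library, which is an assumption here and not part of
this commit:

    from packaging.tags import parse_tag, sys_tags

    wheel_tags = parse_tag('py3-none-any')   # expands to a set of Tag objects
    supported = set(sys_tags())              # tags this interpreter accepts
    print(any(tag in supported for tag in wheel_tags))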

+ 1 - 0
env/Lib/site-packages/asyncio-3.4.3.dist-info/metadata.json

@@ -0,0 +1 @@
+{"version": "3.4.3", "name": "asyncio", "metadata_version": "2.0", "summary": "reference implementation of PEP 3156", "generator": "bdist_wheel (0.24.0)", "classifiers": ["Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3"], "extensions": {"python.details": {"document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://www.python.org/dev/peps/pep-3156/"}}}}

+ 1 - 0
env/Lib/site-packages/asyncio-3.4.3.dist-info/top_level.txt

@@ -0,0 +1 @@
+asyncio

+ 50 - 0
env/Lib/site-packages/asyncio/__init__.py

@@ -0,0 +1,50 @@
+"""The asyncio package, tracking PEP 3156."""
+
+import sys
+
+# The selectors module is in the stdlib in Python 3.4 but not in 3.3.
+# Do this first, so the other submodules can use "from . import selectors".
+# Prefer asyncio/selectors.py over the stdlib one, as ours may be newer.
+try:
+    from . import selectors
+except ImportError:
+    import selectors  # Will also be exported.
+
+if sys.platform == 'win32':
+    # Similar thing for _overlapped.
+    try:
+        from . import _overlapped
+    except ImportError:
+        import _overlapped  # Will also be exported.
+
+# This relies on each of the submodules having an __all__ variable.
+from .base_events import *
+from .coroutines import *
+from .events import *
+from .futures import *
+from .locks import *
+from .protocols import *
+from .queues import *
+from .streams import *
+from .subprocess import *
+from .tasks import *
+from .transports import *
+
+__all__ = (base_events.__all__ +
+           coroutines.__all__ +
+           events.__all__ +
+           futures.__all__ +
+           locks.__all__ +
+           protocols.__all__ +
+           queues.__all__ +
+           streams.__all__ +
+           subprocess.__all__ +
+           tasks.__all__ +
+           transports.__all__)
+
+if sys.platform == 'win32':  # pragma: no cover
+    from .windows_events import *
+    __all__ += windows_events.__all__
+else:
+    from .unix_events import *  # pragma: no cover
+    __all__ += unix_events.__all__

BIN=BIN
env/Lib/site-packages/asyncio/__pycache__/__init__.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/asyncio/__pycache__/base_subprocess.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/asyncio/__pycache__/constants.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/asyncio/__pycache__/coroutines.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/asyncio/__pycache__/events.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/asyncio/__pycache__/futures.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/asyncio/__pycache__/locks.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/asyncio/__pycache__/log.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/asyncio/__pycache__/proactor_events.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/asyncio/__pycache__/protocols.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/asyncio/__pycache__/queues.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/asyncio/__pycache__/selector_events.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/asyncio/__pycache__/selectors.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/asyncio/__pycache__/sslproto.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/asyncio/__pycache__/streams.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/asyncio/__pycache__/subprocess.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/asyncio/__pycache__/test_support.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/asyncio/__pycache__/test_utils.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/asyncio/__pycache__/transports.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/asyncio/__pycache__/unix_events.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/asyncio/__pycache__/windows_utils.cpython-39.pyc


+ 1179 - 0
env/Lib/site-packages/asyncio/base_events.py

@@ -0,0 +1,1179 @@
+"""Base implementation of event loop.
+
+The event loop can be broken up into a multiplexer (the part
+responsible for notifying us of I/O events) and the event loop proper,
+which wraps a multiplexer with functionality for scheduling callbacks,
+immediately or at a given time in the future.
+
+Whenever a public API takes a callback, subsequent positional
+arguments will be passed to the callback if/when it is called.  This
+avoids the proliferation of trivial lambdas implementing closures.
+Keyword arguments for the callback are not supported; this is a
+conscious design decision, leaving the door open for keyword arguments
+to modify the meaning of the API call itself.
+"""
+
+
+import collections
+import concurrent.futures
+import heapq
+import inspect
+import logging
+import os
+import socket
+import subprocess
+import threading
+import time
+import traceback
+import sys
+import warnings
+
+from . import coroutines
+from . import events
+from . import futures
+from . import tasks
+from .coroutines import coroutine
+from .log import logger
+
+
+__all__ = ['BaseEventLoop']
+
+
+# Argument for default thread pool executor creation.
+_MAX_WORKERS = 5
+
+# Minimum number of _scheduled timer handles before cleanup of
+# cancelled handles is performed.
+_MIN_SCHEDULED_TIMER_HANDLES = 100
+
+# Minimum fraction of _scheduled timer handles that are cancelled
+# before cleanup of cancelled handles is performed.
+_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
+
+def _format_handle(handle):
+    cb = handle._callback
+    if inspect.ismethod(cb) and isinstance(cb.__self__, tasks.Task):
+        # format the task
+        return repr(cb.__self__)
+    else:
+        return str(handle)
+
+
+def _format_pipe(fd):
+    if fd == subprocess.PIPE:
+        return '<pipe>'
+    elif fd == subprocess.STDOUT:
+        return '<stdout>'
+    else:
+        return repr(fd)
+
+
+class _StopError(BaseException):
+    """Raised to stop the event loop."""
+
+
+def _check_resolved_address(sock, address):
+    # Ensure that the address is already resolved to avoid the trap of hanging
+    # the entire event loop when the address requires doing a DNS lookup.
+    #
+    # getaddrinfo() is slow (around 10 us per call): this function should only
+    # be called in debug mode
+    family = sock.family
+
+    if family == socket.AF_INET:
+        host, port = address
+    elif family == socket.AF_INET6:
+        host, port = address[:2]
+    else:
+        return
+
+    # On Windows, socket.inet_pton() is only available since Python 3.4
+    if hasattr(socket, 'inet_pton'):
+        # getaddrinfo() is slow and has known issues: prefer inet_pton()
+        # if available
+        try:
+            socket.inet_pton(family, host)
+        except OSError as exc:
+            raise ValueError("address must be resolved (IP address), "
+                             "got host %r: %s"
+                             % (host, exc))
+    else:
+        # Use getaddrinfo(flags=AI_NUMERICHOST) to ensure that the address is
+        # already resolved.
+        type_mask = 0
+        if hasattr(socket, 'SOCK_NONBLOCK'):
+            type_mask |= socket.SOCK_NONBLOCK
+        if hasattr(socket, 'SOCK_CLOEXEC'):
+            type_mask |= socket.SOCK_CLOEXEC
+        try:
+            socket.getaddrinfo(host, port,
+                               family=family,
+                               type=(sock.type & ~type_mask),
+                               proto=sock.proto,
+                               flags=socket.AI_NUMERICHOST)
+        except socket.gaierror as err:
+            raise ValueError("address must be resolved (IP address), "
+                             "got host %r: %s"
+                             % (host, err))
+
+def _raise_stop_error(*args):
+    raise _StopError
+
+
+def _run_until_complete_cb(fut):
+    exc = fut._exception
+    if (isinstance(exc, BaseException)
+    and not isinstance(exc, Exception)):
+        # Issue #22429: run_forever() already finished, no need to
+        # stop it.
+        return
+    _raise_stop_error()
+
+
+class Server(events.AbstractServer):
+
+    def __init__(self, loop, sockets):
+        self._loop = loop
+        self.sockets = sockets
+        self._active_count = 0
+        self._waiters = []
+
+    def __repr__(self):
+        return '<%s sockets=%r>' % (self.__class__.__name__, self.sockets)
+
+    def _attach(self):
+        assert self.sockets is not None
+        self._active_count += 1
+
+    def _detach(self):
+        assert self._active_count > 0
+        self._active_count -= 1
+        if self._active_count == 0 and self.sockets is None:
+            self._wakeup()
+
+    def close(self):
+        sockets = self.sockets
+        if sockets is None:
+            return
+        self.sockets = None
+        for sock in sockets:
+            self._loop._stop_serving(sock)
+        if self._active_count == 0:
+            self._wakeup()
+
+    def _wakeup(self):
+        waiters = self._waiters
+        self._waiters = None
+        for waiter in waiters:
+            if not waiter.done():
+                waiter.set_result(waiter)
+
+    @coroutine
+    def wait_closed(self):
+        if self.sockets is None or self._waiters is None:
+            return
+        waiter = futures.Future(loop=self._loop)
+        self._waiters.append(waiter)
+        yield from waiter
+
+
+class BaseEventLoop(events.AbstractEventLoop):
+
+    def __init__(self):
+        self._timer_cancelled_count = 0
+        self._closed = False
+        self._ready = collections.deque()
+        self._scheduled = []
+        self._default_executor = None
+        self._internal_fds = 0
+        # Identifier of the thread running the event loop, or None if the
+        # event loop is not running
+        self._thread_id = None
+        self._clock_resolution = time.get_clock_info('monotonic').resolution
+        self._exception_handler = None
+        self._debug = (not sys.flags.ignore_environment
+                       and bool(os.environ.get('PYTHONASYNCIODEBUG')))
+        # In debug mode, if the execution of a callback or a step of a task
+        # exceed this duration in seconds, the slow callback/task is logged.
+        self.slow_callback_duration = 0.1
+        self._current_handle = None
+
+    def __repr__(self):
+        return ('<%s running=%s closed=%s debug=%s>'
+                % (self.__class__.__name__, self.is_running(),
+                   self.is_closed(), self.get_debug()))
+
+    def create_task(self, coro):
+        """Schedule a coroutine object.
+
+        Return a task object.
+        """
+        self._check_closed()
+        task = tasks.Task(coro, loop=self)
+        if task._source_traceback:
+            del task._source_traceback[-1]
+        return task
+
+    def _make_socket_transport(self, sock, protocol, waiter=None, *,
+                               extra=None, server=None):
+        """Create socket transport."""
+        raise NotImplementedError
+
+    def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None,
+                            *, server_side=False, server_hostname=None,
+                            extra=None, server=None):
+        """Create SSL transport."""
+        raise NotImplementedError
+
+    def _make_datagram_transport(self, sock, protocol,
+                                 address=None, waiter=None, extra=None):
+        """Create datagram transport."""
+        raise NotImplementedError
+
+    def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
+                                  extra=None):
+        """Create read pipe transport."""
+        raise NotImplementedError
+
+    def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
+                                   extra=None):
+        """Create write pipe transport."""
+        raise NotImplementedError
+
+    @coroutine
+    def _make_subprocess_transport(self, protocol, args, shell,
+                                   stdin, stdout, stderr, bufsize,
+                                   extra=None, **kwargs):
+        """Create subprocess transport."""
+        raise NotImplementedError
+
+    def _write_to_self(self):
+        """Write a byte to self-pipe, to wake up the event loop.
+
+        This may be called from a different thread.
+
+        The subclass is responsible for implementing the self-pipe.
+        """
+        raise NotImplementedError
+
+    def _process_events(self, event_list):
+        """Process selector events."""
+        raise NotImplementedError
+
+    def _check_closed(self):
+        if self._closed:
+            raise RuntimeError('Event loop is closed')
+
+    def run_forever(self):
+        """Run until stop() is called."""
+        self._check_closed()
+        if self.is_running():
+            raise RuntimeError('Event loop is running.')
+        self._thread_id = threading.get_ident()
+        try:
+            while True:
+                try:
+                    self._run_once()
+                except _StopError:
+                    break
+        finally:
+            self._thread_id = None
+
+    def run_until_complete(self, future):
+        """Run until the Future is done.
+
+        If the argument is a coroutine, it is wrapped in a Task.
+
+        WARNING: It would be disastrous to call run_until_complete()
+        with the same coroutine twice -- it would wrap it in two
+        different Tasks and that can't be good.
+
+        Return the Future's result, or raise its exception.
+        """
+        self._check_closed()
+
+        new_task = not isinstance(future, futures.Future)
+        future = tasks.async(future, loop=self)
+        if new_task:
+            # An exception is raised if the future didn't complete, so there
+            # is no need to log the "destroy pending task" message
+            future._log_destroy_pending = False
+
+        future.add_done_callback(_run_until_complete_cb)
+        try:
+            self.run_forever()
+        except:
+            if new_task and future.done() and not future.cancelled():
+                # The coroutine raised a BaseException. Consume the exception
+                # to not log a warning, the caller doesn't have access to the
+                # local task.
+                future.exception()
+            raise
+        future.remove_done_callback(_run_until_complete_cb)
+        if not future.done():
+            raise RuntimeError('Event loop stopped before Future completed.')
+
+        return future.result()
+
+    def stop(self):
+        """Stop running the event loop.
+
+        Every callback scheduled before stop() is called will run. Callbacks
+        scheduled after stop() is called will not run. However, those callbacks
+        will run if run_forever is called again later.
+        """
+        self.call_soon(_raise_stop_error)
+
+    def close(self):
+        """Close the event loop.
+
+        This clears the queues and shuts down the executor,
+        but does not wait for the executor to finish.
+
+        The event loop must not be running.
+        """
+        if self.is_running():
+            raise RuntimeError("Cannot close a running event loop")
+        if self._closed:
+            return
+        if self._debug:
+            logger.debug("Close %r", self)
+        self._closed = True
+        self._ready.clear()
+        self._scheduled.clear()
+        executor = self._default_executor
+        if executor is not None:
+            self._default_executor = None
+            executor.shutdown(wait=False)
+
+    def is_closed(self):
+        """Returns True if the event loop was closed."""
+        return self._closed
+
+    # On Python 3.3 and older, objects with a destructor that are part of a
+    # reference cycle are never destroyed. This is no longer the case on
+    # Python 3.4, thanks to PEP 442.
+    if sys.version_info >= (3, 4):
+        def __del__(self):
+            if not self.is_closed():
+                warnings.warn("unclosed event loop %r" % self, ResourceWarning)
+                if not self.is_running():
+                    self.close()
+
+    def is_running(self):
+        """Returns True if the event loop is running."""
+        return (self._thread_id is not None)
+
+    def time(self):
+        """Return the time according to the event loop's clock.
+
+        This is a float expressed in seconds since an epoch, but the
+        epoch, precision, accuracy and drift are unspecified and may
+        differ per event loop.
+        """
+        return time.monotonic()
+
+    def call_later(self, delay, callback, *args):
+        """Arrange for a callback to be called at a given time.
+
+        Return a Handle: an opaque object with a cancel() method that
+        can be used to cancel the call.
+
+        The delay can be an int or float, expressed in seconds.  It is
+        always relative to the current time.
+
+        Each callback will be called exactly once.  If two callbacks
+        are scheduled for exactly the same time, it is undefined which
+        will be called first.
+
+        Any positional arguments after the callback will be passed to
+        the callback when it is called.
+        """
+        timer = self.call_at(self.time() + delay, callback, *args)
+        if timer._source_traceback:
+            del timer._source_traceback[-1]
+        return timer
+
+    def call_at(self, when, callback, *args):
+        """Like call_later(), but uses an absolute time.
+
+        Absolute time corresponds to the event loop's time() method.
+        """
+        if (coroutines.iscoroutine(callback)
+        or coroutines.iscoroutinefunction(callback)):
+            raise TypeError("coroutines cannot be used with call_at()")
+        self._check_closed()
+        if self._debug:
+            self._check_thread()
+        timer = events.TimerHandle(when, callback, args, self)
+        if timer._source_traceback:
+            del timer._source_traceback[-1]
+        heapq.heappush(self._scheduled, timer)
+        timer._scheduled = True
+        return timer
+
+    def call_soon(self, callback, *args):
+        """Arrange for a callback to be called as soon as possible.
+
+        This operates as a FIFO queue: callbacks are called in the
+        order in which they are registered.  Each callback will be
+        called exactly once.
+
+        Any positional arguments after the callback will be passed to
+        the callback when it is called.
+        """
+        if self._debug:
+            self._check_thread()
+        handle = self._call_soon(callback, args)
+        if handle._source_traceback:
+            del handle._source_traceback[-1]
+        return handle
+
+    def _call_soon(self, callback, args):
+        if (coroutines.iscoroutine(callback)
+        or coroutines.iscoroutinefunction(callback)):
+            raise TypeError("coroutines cannot be used with call_soon()")
+        self._check_closed()
+        handle = events.Handle(callback, args, self)
+        if handle._source_traceback:
+            del handle._source_traceback[-1]
+        self._ready.append(handle)
+        return handle
+
+    def _check_thread(self):
+        """Check that the current thread is the thread running the event loop.
+
+        Non-thread-safe methods of this class make this assumption and will
+        likely behave incorrectly when the assumption is violated.
+
+        Should only be called when (self._debug == True).  The caller is
+        responsible for checking this condition for performance reasons.
+        """
+        if self._thread_id is None:
+            return
+        thread_id = threading.get_ident()
+        if thread_id != self._thread_id:
+            raise RuntimeError(
+                "Non-thread-safe operation invoked on an event loop other "
+                "than the current one")
+
+    def call_soon_threadsafe(self, callback, *args):
+        """Like call_soon(), but thread-safe."""
+        handle = self._call_soon(callback, args)
+        if handle._source_traceback:
+            del handle._source_traceback[-1]
+        self._write_to_self()
+        return handle
+
+    def run_in_executor(self, executor, callback, *args):
+        if (coroutines.iscoroutine(callback)
+        or coroutines.iscoroutinefunction(callback)):
+            raise TypeError("coroutines cannot be used with run_in_executor()")
+        self._check_closed()
+        if isinstance(callback, events.Handle):
+            assert not args
+            assert not isinstance(callback, events.TimerHandle)
+            if callback._cancelled:
+                f = futures.Future(loop=self)
+                f.set_result(None)
+                return f
+            callback, args = callback._callback, callback._args
+        if executor is None:
+            executor = self._default_executor
+            if executor is None:
+                executor = concurrent.futures.ThreadPoolExecutor(_MAX_WORKERS)
+                self._default_executor = executor
+        return futures.wrap_future(executor.submit(callback, *args), loop=self)
+
+    def set_default_executor(self, executor):
+        self._default_executor = executor
+
+    def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
+        msg = ["%s:%r" % (host, port)]
+        if family:
+            msg.append('family=%r' % family)
+        if type:
+            msg.append('type=%r' % type)
+        if proto:
+            msg.append('proto=%r' % proto)
+        if flags:
+            msg.append('flags=%r' % flags)
+        msg = ', '.join(msg)
+        logger.debug('Get address info %s', msg)
+
+        t0 = self.time()
+        addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
+        dt = self.time() - t0
+
+        msg = ('Getting address info %s took %.3f ms: %r'
+               % (msg, dt * 1e3, addrinfo))
+        if dt >= self.slow_callback_duration:
+            logger.info(msg)
+        else:
+            logger.debug(msg)
+        return addrinfo
+
+    def getaddrinfo(self, host, port, *,
+                    family=0, type=0, proto=0, flags=0):
+        if self._debug:
+            return self.run_in_executor(None, self._getaddrinfo_debug,
+                                        host, port, family, type, proto, flags)
+        else:
+            return self.run_in_executor(None, socket.getaddrinfo,
+                                        host, port, family, type, proto, flags)
+
+    def getnameinfo(self, sockaddr, flags=0):
+        return self.run_in_executor(None, socket.getnameinfo, sockaddr, flags)
+
+    @coroutine
+    def create_connection(self, protocol_factory, host=None, port=None, *,
+                          ssl=None, family=0, proto=0, flags=0, sock=None,
+                          local_addr=None, server_hostname=None):
+        """Connect to a TCP server.
+
+        Create a streaming transport connection to a given Internet host and
+        port: socket family AF_INET or socket.AF_INET6 depending on host (or
+        family if specified), socket type SOCK_STREAM. protocol_factory must be
+        a callable returning a protocol instance.
+
+        This method is a coroutine which will try to establish the connection
+        in the background.  When successful, the coroutine returns a
+        (transport, protocol) pair.
+        """
+        if server_hostname is not None and not ssl:
+            raise ValueError('server_hostname is only meaningful with ssl')
+
+        if server_hostname is None and ssl:
+            # Use host as default for server_hostname.  It is an error
+            # if host is empty or not set, e.g. when an
+            # already-connected socket was passed or when only a port
+            # is given.  To avoid this error, you can pass
+            # server_hostname='' -- this will bypass the hostname
+            # check.  (This also means that if host is a numeric
+            # IP/IPv6 address, we will attempt to verify that exact
+            # address; this will probably fail, but it is possible to
+            # create a certificate for a specific IP address, so we
+            # don't judge it here.)
+            if not host:
+                raise ValueError('You must set server_hostname '
+                                 'when using ssl without a host')
+            server_hostname = host
+
+        if host is not None or port is not None:
+            if sock is not None:
+                raise ValueError(
+                    'host/port and sock can not be specified at the same time')
+
+            f1 = self.getaddrinfo(
+                host, port, family=family,
+                type=socket.SOCK_STREAM, proto=proto, flags=flags)
+            fs = [f1]
+            if local_addr is not None:
+                f2 = self.getaddrinfo(
+                    *local_addr, family=family,
+                    type=socket.SOCK_STREAM, proto=proto, flags=flags)
+                fs.append(f2)
+            else:
+                f2 = None
+
+            yield from tasks.wait(fs, loop=self)
+
+            infos = f1.result()
+            if not infos:
+                raise OSError('getaddrinfo() returned empty list')
+            if f2 is not None:
+                laddr_infos = f2.result()
+                if not laddr_infos:
+                    raise OSError('getaddrinfo() returned empty list')
+
+            exceptions = []
+            for family, type, proto, cname, address in infos:
+                try:
+                    sock = socket.socket(family=family, type=type, proto=proto)
+                    sock.setblocking(False)
+                    if f2 is not None:
+                        for _, _, _, _, laddr in laddr_infos:
+                            try:
+                                sock.bind(laddr)
+                                break
+                            except OSError as exc:
+                                exc = OSError(
+                                    exc.errno, 'error while '
+                                    'attempting to bind on address '
+                                    '{!r}: {}'.format(
+                                        laddr, exc.strerror.lower()))
+                                exceptions.append(exc)
+                        else:
+                            sock.close()
+                            sock = None
+                            continue
+                    if self._debug:
+                        logger.debug("connect %r to %r", sock, address)
+                    yield from self.sock_connect(sock, address)
+                except OSError as exc:
+                    if sock is not None:
+                        sock.close()
+                    exceptions.append(exc)
+                except:
+                    if sock is not None:
+                        sock.close()
+                    raise
+                else:
+                    break
+            else:
+                if len(exceptions) == 1:
+                    raise exceptions[0]
+                else:
+                    # If they all have the same str(), raise one.
+                    model = str(exceptions[0])
+                    if all(str(exc) == model for exc in exceptions):
+                        raise exceptions[0]
+                    # Raise a combined exception so the user can see all
+                    # the various error messages.
+                    raise OSError('Multiple exceptions: {}'.format(
+                        ', '.join(str(exc) for exc in exceptions)))
+
+        elif sock is None:
+            raise ValueError(
+                'host and port were not specified and no sock was specified')
+
+        sock.setblocking(False)
+
+        transport, protocol = yield from self._create_connection_transport(
+            sock, protocol_factory, ssl, server_hostname)
+        if self._debug:
+            # Get the socket from the transport because SSL transport closes
+            # the old socket and creates a new SSL socket
+            sock = transport.get_extra_info('socket')
+            logger.debug("%r connected to %s:%r: (%r, %r)",
+                         sock, host, port, transport, protocol)
+        return transport, protocol
+
+    @coroutine
+    def _create_connection_transport(self, sock, protocol_factory, ssl,
+                                     server_hostname):
+        protocol = protocol_factory()
+        waiter = futures.Future(loop=self)
+        if ssl:
+            sslcontext = None if isinstance(ssl, bool) else ssl
+            transport = self._make_ssl_transport(
+                sock, protocol, sslcontext, waiter,
+                server_side=False, server_hostname=server_hostname)
+        else:
+            transport = self._make_socket_transport(sock, protocol, waiter)
+
+        try:
+            yield from waiter
+        except:
+            transport.close()
+            raise
+
+        return transport, protocol
+
+    @coroutine
+    def create_datagram_endpoint(self, protocol_factory,
+                                 local_addr=None, remote_addr=None, *,
+                                 family=0, proto=0, flags=0):
+        """Create datagram connection."""
+        if not (local_addr or remote_addr):
+            if family == 0:
+                raise ValueError('unexpected address family')
+            addr_pairs_info = (((family, proto), (None, None)),)
+        else:
+            # join address by (family, protocol)
+            addr_infos = collections.OrderedDict()
+            for idx, addr in ((0, local_addr), (1, remote_addr)):
+                if addr is not None:
+                    assert isinstance(addr, tuple) and len(addr) == 2, (
+                        '2-tuple is expected')
+
+                    infos = yield from self.getaddrinfo(
+                        *addr, family=family, type=socket.SOCK_DGRAM,
+                        proto=proto, flags=flags)
+                    if not infos:
+                        raise OSError('getaddrinfo() returned empty list')
+
+                    for fam, _, pro, _, address in infos:
+                        key = (fam, pro)
+                        if key not in addr_infos:
+                            addr_infos[key] = [None, None]
+                        addr_infos[key][idx] = address
+
+            # each addr has to have info for each (family, proto) pair
+            addr_pairs_info = [
+                (key, addr_pair) for key, addr_pair in addr_infos.items()
+                if not ((local_addr and addr_pair[0] is None) or
+                        (remote_addr and addr_pair[1] is None))]
+
+            if not addr_pairs_info:
+                raise ValueError('can not get address information')
+
+        exceptions = []
+
+        for ((family, proto),
+             (local_address, remote_address)) in addr_pairs_info:
+            sock = None
+            r_addr = None
+            try:
+                sock = socket.socket(
+                    family=family, type=socket.SOCK_DGRAM, proto=proto)
+                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+                sock.setblocking(False)
+
+                if local_addr:
+                    sock.bind(local_address)
+                if remote_addr:
+                    yield from self.sock_connect(sock, remote_address)
+                    r_addr = remote_address
+            except OSError as exc:
+                if sock is not None:
+                    sock.close()
+                exceptions.append(exc)
+            except:
+                if sock is not None:
+                    sock.close()
+                raise
+            else:
+                break
+        else:
+            raise exceptions[0]
+
+        protocol = protocol_factory()
+        waiter = futures.Future(loop=self)
+        transport = self._make_datagram_transport(sock, protocol, r_addr,
+                                                  waiter)
+        if self._debug:
+            if local_addr:
+                logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
+                            "created: (%r, %r)",
+                            local_addr, remote_addr, transport, protocol)
+            else:
+                logger.debug("Datagram endpoint remote_addr=%r created: "
+                             "(%r, %r)",
+                             remote_addr, transport, protocol)
+
+        try:
+            yield from waiter
+        except:
+            transport.close()
+            raise
+
+        return transport, protocol
+
+    @coroutine
+    def create_server(self, protocol_factory, host=None, port=None,
+                      *,
+                      family=socket.AF_UNSPEC,
+                      flags=socket.AI_PASSIVE,
+                      sock=None,
+                      backlog=100,
+                      ssl=None,
+                      reuse_address=None):
+        """Create a TCP server bound to host and port.
+
+        Return a Server object which can be used to stop the service.
+
+        This method is a coroutine.
+        """
+        if isinstance(ssl, bool):
+            raise TypeError('ssl argument must be an SSLContext or None')
+        if host is not None or port is not None:
+            if sock is not None:
+                raise ValueError(
+                    'host/port and sock can not be specified at the same time')
+
+            AF_INET6 = getattr(socket, 'AF_INET6', 0)
+            if reuse_address is None:
+                reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
+            sockets = []
+            if host == '':
+                host = None
+
+            infos = yield from self.getaddrinfo(
+                host, port, family=family,
+                type=socket.SOCK_STREAM, proto=0, flags=flags)
+            if not infos:
+                raise OSError('getaddrinfo() returned empty list')
+
+            completed = False
+            try:
+                for res in infos:
+                    af, socktype, proto, canonname, sa = res
+                    try:
+                        sock = socket.socket(af, socktype, proto)
+                    except socket.error:
+                        # Assume it's a bad family/type/protocol combination.
+                        if self._debug:
+                            logger.warning('create_server() failed to create '
+                                           'socket.socket(%r, %r, %r)',
+                                           af, socktype, proto, exc_info=True)
+                        continue
+                    sockets.append(sock)
+                    if reuse_address:
+                        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,
+                                        True)
+                    # Disable IPv4/IPv6 dual stack support (enabled by
+                    # default on Linux) which makes a single socket
+                    # listen on both address families.
+                    if af == AF_INET6 and hasattr(socket, 'IPPROTO_IPV6'):
+                        sock.setsockopt(socket.IPPROTO_IPV6,
+                                        socket.IPV6_V6ONLY,
+                                        True)
+                    try:
+                        sock.bind(sa)
+                    except OSError as err:
+                        raise OSError(err.errno, 'error while attempting '
+                                      'to bind on address %r: %s'
+                                      % (sa, err.strerror.lower()))
+                completed = True
+            finally:
+                if not completed:
+                    for sock in sockets:
+                        sock.close()
+        else:
+            if sock is None:
+                raise ValueError('Neither host/port nor sock were specified')
+            sockets = [sock]
+
+        server = Server(self, sockets)
+        for sock in sockets:
+            sock.listen(backlog)
+            sock.setblocking(False)
+            self._start_serving(protocol_factory, sock, ssl, server)
+        if self._debug:
+            logger.info("%r is serving", server)
+        return server
+
+    @coroutine
+    def connect_read_pipe(self, protocol_factory, pipe):
+        protocol = protocol_factory()
+        waiter = futures.Future(loop=self)
+        transport = self._make_read_pipe_transport(pipe, protocol, waiter)
+
+        try:
+            yield from waiter
+        except:
+            transport.close()
+            raise
+
+        if self._debug:
+            logger.debug('Read pipe %r connected: (%r, %r)',
+                         pipe.fileno(), transport, protocol)
+        return transport, protocol
+
+    @coroutine
+    def connect_write_pipe(self, protocol_factory, pipe):
+        protocol = protocol_factory()
+        waiter = futures.Future(loop=self)
+        transport = self._make_write_pipe_transport(pipe, protocol, waiter)
+
+        try:
+            yield from waiter
+        except:
+            transport.close()
+            raise
+
+        if self._debug:
+            logger.debug('Write pipe %r connected: (%r, %r)',
+                         pipe.fileno(), transport, protocol)
+        return transport, protocol
+
+    def _log_subprocess(self, msg, stdin, stdout, stderr):
+        info = [msg]
+        if stdin is not None:
+            info.append('stdin=%s' % _format_pipe(stdin))
+        if stdout is not None and stderr == subprocess.STDOUT:
+            info.append('stdout=stderr=%s' % _format_pipe(stdout))
+        else:
+            if stdout is not None:
+                info.append('stdout=%s' % _format_pipe(stdout))
+            if stderr is not None:
+                info.append('stderr=%s' % _format_pipe(stderr))
+        logger.debug(' '.join(info))
+
+    @coroutine
+    def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
+                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                         universal_newlines=False, shell=True, bufsize=0,
+                         **kwargs):
+        if not isinstance(cmd, (bytes, str)):
+            raise ValueError("cmd must be a string")
+        if universal_newlines:
+            raise ValueError("universal_newlines must be False")
+        if not shell:
+            raise ValueError("shell must be True")
+        if bufsize != 0:
+            raise ValueError("bufsize must be 0")
+        protocol = protocol_factory()
+        if self._debug:
+            # don't log parameters: they may contain sensitive information
+            # (password) and may be too long
+            debug_log = 'run shell command %r' % cmd
+            self._log_subprocess(debug_log, stdin, stdout, stderr)
+        transport = yield from self._make_subprocess_transport(
+            protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
+        if self._debug:
+            logger.info('%s: %r' % (debug_log, transport))
+        return transport, protocol
+
+    @coroutine
+    def subprocess_exec(self, protocol_factory, program, *args,
+                        stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+                        stderr=subprocess.PIPE, universal_newlines=False,
+                        shell=False, bufsize=0, **kwargs):
+        if universal_newlines:
+            raise ValueError("universal_newlines must be False")
+        if shell:
+            raise ValueError("shell must be False")
+        if bufsize != 0:
+            raise ValueError("bufsize must be 0")
+        popen_args = (program,) + args
+        for arg in popen_args:
+            if not isinstance(arg, (str, bytes)):
+                raise TypeError("program arguments must be "
+                                "a bytes or text string, not %s"
+                                % type(arg).__name__)
+        protocol = protocol_factory()
+        if self._debug:
+            # don't log parameters: they may contain sensitive information
+            # (password) and may be too long
+            debug_log = 'execute program %r' % program
+            self._log_subprocess(debug_log, stdin, stdout, stderr)
+        transport = yield from self._make_subprocess_transport(
+            protocol, popen_args, False, stdin, stdout, stderr,
+            bufsize, **kwargs)
+        if self._debug:
+            logger.info('%s: %r' % (debug_log, transport))
+        return transport, protocol
+
+    def set_exception_handler(self, handler):
+        """Set handler as the new event loop exception handler.
+
+        If handler is None, the default exception handler will
+        be set.
+
+        If handler is a callable object, it should have a
+        signature matching '(loop, context)', where 'loop'
+        will be a reference to the active event loop, 'context'
+        will be a dict object (see `call_exception_handler()`
+        documentation for details about context).
+        """
+        if handler is not None and not callable(handler):
+            raise TypeError('A callable object or None is expected, '
+                            'got {!r}'.format(handler))
+        self._exception_handler = handler
+
+    def default_exception_handler(self, context):
+        """Default exception handler.
+
+        This is called when an exception occurs and no exception
+        handler is set, and can be called by a custom exception
+        handler that wants to defer to the default behavior.
+
+        The context parameter has the same meaning as in
+        `call_exception_handler()`.
+        """
+        message = context.get('message')
+        if not message:
+            message = 'Unhandled exception in event loop'
+
+        exception = context.get('exception')
+        if exception is not None:
+            exc_info = (type(exception), exception, exception.__traceback__)
+        else:
+            exc_info = False
+
+        if ('source_traceback' not in context
+        and self._current_handle is not None
+        and self._current_handle._source_traceback):
+            context['handle_traceback'] = self._current_handle._source_traceback
+
+        log_lines = [message]
+        for key in sorted(context):
+            if key in {'message', 'exception'}:
+                continue
+            value = context[key]
+            if key == 'source_traceback':
+                tb = ''.join(traceback.format_list(value))
+                value = 'Object created at (most recent call last):\n'
+                value += tb.rstrip()
+            elif key == 'handle_traceback':
+                tb = ''.join(traceback.format_list(value))
+                value = 'Handle created at (most recent call last):\n'
+                value += tb.rstrip()
+            else:
+                value = repr(value)
+            log_lines.append('{}: {}'.format(key, value))
+
+        logger.error('\n'.join(log_lines), exc_info=exc_info)
+
+    def call_exception_handler(self, context):
+        """Call the current event loop's exception handler.
+
+        The context argument is a dict containing the following keys:
+
+        - 'message': Error message;
+        - 'exception' (optional): Exception object;
+        - 'future' (optional): Future instance;
+        - 'handle' (optional): Handle instance;
+        - 'protocol' (optional): Protocol instance;
+        - 'transport' (optional): Transport instance;
+        - 'socket' (optional): Socket instance.
+
+        New keys may be introduced in the future.
+
+        Note: do not overload this method in an event loop subclass.
+        For custom exception handling, use the
+        `set_exception_handler()` method.
+        """
+        if self._exception_handler is None:
+            try:
+                self.default_exception_handler(context)
+            except Exception:
+                # Second protection layer for unexpected errors
+                # in the default implementation, as well as for subclassed
+                # event loops with overloaded "default_exception_handler".
+                logger.error('Exception in default exception handler',
+                             exc_info=True)
+        else:
+            try:
+                self._exception_handler(self, context)
+            except Exception as exc:
+                # Exception in the user set custom exception handler.
+                try:
+                    # Let's try default handler.
+                    self.default_exception_handler({
+                        'message': 'Unhandled error in exception handler',
+                        'exception': exc,
+                        'context': context,
+                    })
+                except Exception:
+                    # Guard 'default_exception_handler' in case it is
+                    # overloaded.
+                    logger.error('Exception in default exception handler '
+                                 'while handling an unexpected error '
+                                 'in custom exception handler',
+                                 exc_info=True)
+
+    def _add_callback(self, handle):
+        """Add a Handle to _scheduled (TimerHandle) or _ready."""
+        assert isinstance(handle, events.Handle), 'A Handle is required here'
+        if handle._cancelled:
+            return
+        assert not isinstance(handle, events.TimerHandle)
+        self._ready.append(handle)
+
+    def _add_callback_signalsafe(self, handle):
+        """Like _add_callback() but called from a signal handler."""
+        self._add_callback(handle)
+        self._write_to_self()
+
+    def _timer_handle_cancelled(self, handle):
+        """Notification that a TimerHandle has been cancelled."""
+        if handle._scheduled:
+            self._timer_cancelled_count += 1
+
+    def _run_once(self):
+        """Run one full iteration of the event loop.
+
+        This calls all currently ready callbacks, polls for I/O,
+        schedules the resulting callbacks, and finally schedules
+        'call_later' callbacks.
+        """
+
+        sched_count = len(self._scheduled)
+        if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
+            self._timer_cancelled_count / sched_count >
+                _MIN_CANCELLED_TIMER_HANDLES_FRACTION):
+            # Remove delayed calls that were cancelled if their number
+            # is too high
+            new_scheduled = []
+            for handle in self._scheduled:
+                if handle._cancelled:
+                    handle._scheduled = False
+                else:
+                    new_scheduled.append(handle)
+
+            heapq.heapify(new_scheduled)
+            self._scheduled = new_scheduled
+            self._timer_cancelled_count = 0
+        else:
+            # Remove delayed calls that were cancelled from head of queue.
+            while self._scheduled and self._scheduled[0]._cancelled:
+                self._timer_cancelled_count -= 1
+                handle = heapq.heappop(self._scheduled)
+                handle._scheduled = False
+
+        timeout = None
+        if self._ready:
+            timeout = 0
+        elif self._scheduled:
+            # Compute the desired timeout.
+            when = self._scheduled[0]._when
+            timeout = max(0, when - self.time())
+
+        if self._debug and timeout != 0:
+            t0 = self.time()
+            event_list = self._selector.select(timeout)
+            dt = self.time() - t0
+            if dt >= 1.0:
+                level = logging.INFO
+            else:
+                level = logging.DEBUG
+            nevent = len(event_list)
+            if timeout is None:
+                logger.log(level, 'poll took %.3f ms: %s events',
+                           dt * 1e3, nevent)
+            elif nevent:
+                logger.log(level,
+                           'poll %.3f ms took %.3f ms: %s events',
+                           timeout * 1e3, dt * 1e3, nevent)
+            elif dt >= 1.0:
+                logger.log(level,
+                           'poll %.3f ms took %.3f ms: timeout',
+                           timeout * 1e3, dt * 1e3)
+        else:
+            event_list = self._selector.select(timeout)
+        self._process_events(event_list)
+
+        # Handle 'later' callbacks that are ready.
+        end_time = self.time() + self._clock_resolution
+        while self._scheduled:
+            handle = self._scheduled[0]
+            if handle._when >= end_time:
+                break
+            handle = heapq.heappop(self._scheduled)
+            handle._scheduled = False
+            self._ready.append(handle)
+
+        # This is the only place where callbacks are actually *called*.
+        # All other places just add them to ready.
+        # Note: We run all currently scheduled callbacks, but not any
+        # callbacks scheduled by callbacks run this time around --
+        # they will be run the next time (after another I/O poll).
+        # Use an idiom that is thread-safe without using locks.
+        ntodo = len(self._ready)
+        for i in range(ntodo):
+            handle = self._ready.popleft()
+            if handle._cancelled:
+                continue
+            if self._debug:
+                try:
+                    self._current_handle = handle
+                    t0 = self.time()
+                    handle._run()
+                    dt = self.time() - t0
+                    if dt >= self.slow_callback_duration:
+                        logger.warning('Executing %s took %.3f seconds',
+                                       _format_handle(handle), dt)
+                finally:
+                    self._current_handle = None
+            else:
+                handle._run()
+        handle = None  # Needed to break cycles when an exception occurs.
+
+    def get_debug(self):
+        return self._debug
+
+    def set_debug(self, enabled):
+        self._debug = enabled
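
A minimal sketch of the handler contract documented above ('(loop, context)',
where context is a dict with at least a 'message' key); the handler name and
the failing callback below are illustrative, not part of this module:

    import asyncio

    def handler(loop, context):
        # 'message' is always present; 'exception' is only present when an
        # exception object is attached to the context.
        print('caught:', context['message'])
        loop.default_exception_handler(context)  # defer to the default

    loop = asyncio.get_event_loop()
    loop.set_exception_handler(handler)
    loop.call_soon(lambda: 1 / 0)    # ZeroDivisionError reaches the handler
    loop.call_later(0.01, loop.stop)
    loop.run_forever()
    loop.close()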

+ 270 - 0
env/Lib/site-packages/asyncio/base_subprocess.py

@@ -0,0 +1,270 @@
+import collections
+import subprocess
+import sys
+import warnings
+
+from . import futures
+from . import protocols
+from . import transports
+from .coroutines import coroutine
+from .log import logger
+
+
+class BaseSubprocessTransport(transports.SubprocessTransport):
+
+    def __init__(self, loop, protocol, args, shell,
+                 stdin, stdout, stderr, bufsize,
+                 waiter=None, extra=None, **kwargs):
+        super().__init__(extra)
+        self._closed = False
+        self._protocol = protocol
+        self._loop = loop
+        self._proc = None
+        self._pid = None
+        self._returncode = None
+        self._exit_waiters = []
+        self._pending_calls = collections.deque()
+        self._pipes = {}
+        self._finished = False
+
+        if stdin == subprocess.PIPE:
+            self._pipes[0] = None
+        if stdout == subprocess.PIPE:
+            self._pipes[1] = None
+        if stderr == subprocess.PIPE:
+            self._pipes[2] = None
+
+        # Create the child process: set the _proc attribute
+        self._start(args=args, shell=shell, stdin=stdin, stdout=stdout,
+                    stderr=stderr, bufsize=bufsize, **kwargs)
+        self._pid = self._proc.pid
+        self._extra['subprocess'] = self._proc
+
+        if self._loop.get_debug():
+            if isinstance(args, (bytes, str)):
+                program = args
+            else:
+                program = args[0]
+            logger.debug('process %r created: pid %s',
+                         program, self._pid)
+
+        self._loop.create_task(self._connect_pipes(waiter))
+
+    def __repr__(self):
+        info = [self.__class__.__name__]
+        if self._closed:
+            info.append('closed')
+        info.append('pid=%s' % self._pid)
+        if self._returncode is not None:
+            info.append('returncode=%s' % self._returncode)
+
+        stdin = self._pipes.get(0)
+        if stdin is not None:
+            info.append('stdin=%s' % stdin.pipe)
+
+        stdout = self._pipes.get(1)
+        stderr = self._pipes.get(2)
+        if stdout is not None and stderr is stdout:
+            info.append('stdout=stderr=%s' % stdout.pipe)
+        else:
+            if stdout is not None:
+                info.append('stdout=%s' % stdout.pipe)
+            if stderr is not None:
+                info.append('stderr=%s' % stderr.pipe)
+
+        return '<%s>' % ' '.join(info)
+
+    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
+        raise NotImplementedError
+
+    def _make_write_subprocess_pipe_proto(self, fd):
+        raise NotImplementedError
+
+    def _make_read_subprocess_pipe_proto(self, fd):
+        raise NotImplementedError
+
+    def close(self):
+        if self._closed:
+            return
+        self._closed = True
+
+        for proto in self._pipes.values():
+            if proto is None:
+                continue
+            proto.pipe.close()
+
+        if self._proc is not None and self._returncode is None:
+            if self._loop.get_debug():
+                logger.warning('Close running child process: kill %r', self)
+
+            try:
+                self._proc.kill()
+            except ProcessLookupError:
+                pass
+
+            # Don't clear the _proc reference yet: _post_init() may still run
+
+    # On Python 3.3 and older, objects with a destructor that are part of a
+    # reference cycle are never destroyed. This is no longer the case on
+    # Python 3.4, thanks to PEP 442.
+    if sys.version_info >= (3, 4):
+        def __del__(self):
+            if not self._closed:
+                warnings.warn("unclosed transport %r" % self, ResourceWarning)
+                self.close()
+
+    def get_pid(self):
+        return self._pid
+
+    def get_returncode(self):
+        return self._returncode
+
+    def get_pipe_transport(self, fd):
+        if fd in self._pipes:
+            return self._pipes[fd].pipe
+        else:
+            return None
+
+    def _check_proc(self):
+        if self._proc is None:
+            raise ProcessLookupError()
+
+    def send_signal(self, signal):
+        self._check_proc()
+        self._proc.send_signal(signal)
+
+    def terminate(self):
+        self._check_proc()
+        self._proc.terminate()
+
+    def kill(self):
+        self._check_proc()
+        self._proc.kill()
+
+    @coroutine
+    def _connect_pipes(self, waiter):
+        try:
+            proc = self._proc
+            loop = self._loop
+
+            if proc.stdin is not None:
+                _, pipe = yield from loop.connect_write_pipe(
+                    lambda: WriteSubprocessPipeProto(self, 0),
+                    proc.stdin)
+                self._pipes[0] = pipe
+
+            if proc.stdout is not None:
+                _, pipe = yield from loop.connect_read_pipe(
+                    lambda: ReadSubprocessPipeProto(self, 1),
+                    proc.stdout)
+                self._pipes[1] = pipe
+
+            if proc.stderr is not None:
+                _, pipe = yield from loop.connect_read_pipe(
+                    lambda: ReadSubprocessPipeProto(self, 2),
+                    proc.stderr)
+                self._pipes[2] = pipe
+
+            assert self._pending_calls is not None
+
+            loop.call_soon(self._protocol.connection_made, self)
+            for callback, data in self._pending_calls:
+                loop.call_soon(callback, *data)
+            self._pending_calls = None
+        except Exception as exc:
+            if waiter is not None and not waiter.cancelled():
+                waiter.set_exception(exc)
+        else:
+            if waiter is not None and not waiter.cancelled():
+                waiter.set_result(None)
+
+    def _call(self, cb, *data):
+        if self._pending_calls is not None:
+            self._pending_calls.append((cb, data))
+        else:
+            self._loop.call_soon(cb, *data)
+
+    def _pipe_connection_lost(self, fd, exc):
+        self._call(self._protocol.pipe_connection_lost, fd, exc)
+        self._try_finish()
+
+    def _pipe_data_received(self, fd, data):
+        self._call(self._protocol.pipe_data_received, fd, data)
+
+    def _process_exited(self, returncode):
+        assert returncode is not None, returncode
+        assert self._returncode is None, self._returncode
+        if self._loop.get_debug():
+            logger.info('%r exited with return code %r',
+                        self, returncode)
+        self._returncode = returncode
+        self._call(self._protocol.process_exited)
+        self._try_finish()
+
+        # wake up futures waiting for wait()
+        for waiter in self._exit_waiters:
+            if not waiter.cancelled():
+                waiter.set_result(returncode)
+        self._exit_waiters = None
+
+    def _wait(self):
+        """Wait until the process exit and return the process return code.
+
+        This method is a coroutine."""
+        if self._returncode is not None:
+            return self._returncode
+
+        waiter = futures.Future(loop=self._loop)
+        self._exit_waiters.append(waiter)
+        return (yield from waiter)
+
+    def _try_finish(self):
+        assert not self._finished
+        if self._returncode is None:
+            return
+        if all(p is not None and p.disconnected
+               for p in self._pipes.values()):
+            self._finished = True
+            self._call(self._call_connection_lost, None)
+
+    def _call_connection_lost(self, exc):
+        try:
+            self._protocol.connection_lost(exc)
+        finally:
+            self._loop = None
+            self._proc = None
+            self._protocol = None
+
+
+class WriteSubprocessPipeProto(protocols.BaseProtocol):
+
+    def __init__(self, proc, fd):
+        self.proc = proc
+        self.fd = fd
+        self.pipe = None
+        self.disconnected = False
+
+    def connection_made(self, transport):
+        self.pipe = transport
+
+    def __repr__(self):
+        return ('<%s fd=%s pipe=%r>'
+                % (self.__class__.__name__, self.fd, self.pipe))
+
+    def connection_lost(self, exc):
+        self.disconnected = True
+        self.proc._pipe_connection_lost(self.fd, exc)
+        self.proc = None
+
+    def pause_writing(self):
+        self.proc._protocol.pause_writing()
+
+    def resume_writing(self):
+        self.proc._protocol.resume_writing()
+
+
+class ReadSubprocessPipeProto(WriteSubprocessPipeProto,
+                              protocols.Protocol):
+
+    def data_received(self, data):
+        self.proc._pipe_data_received(self.fd, data)
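
A minimal usage sketch for this transport through the event loop's
subprocess_exec() shown earlier in base_events.py; the protocol class name is
illustrative, and the 'echo' program assumes a Unix-like system:

    import asyncio

    class CollectProtocol(asyncio.SubprocessProtocol):
        # fd numbers match the _pipes keys above: 0=stdin, 1=stdout, 2=stderr.
        def __init__(self, exit_future):
            self.exit_future = exit_future
            self.output = bytearray()

        def pipe_data_received(self, fd, data):
            self.output.extend(data)

        def process_exited(self):
            self.exit_future.set_result(True)

    @asyncio.coroutine
    def run(loop):
        exit_future = asyncio.Future(loop=loop)
        transport, protocol = yield from loop.subprocess_exec(
            lambda: CollectProtocol(exit_future), 'echo', 'hello')
        yield from exit_future
        transport.close()
        return bytes(protocol.output)

    loop = asyncio.get_event_loop()
    print(loop.run_until_complete(run(loop)))  # b'hello\n'
    loop.close()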

+ 7 - 0
env/Lib/site-packages/asyncio/constants.py

@@ -0,0 +1,7 @@
+"""Constants."""
+
+# After the connection is lost, log warnings after this many write()s.
+LOG_THRESHOLD_FOR_CONNLOST_WRITES = 5
+
+# Seconds to wait before retrying accept().
+ACCEPT_RETRY_DELAY = 1

+ 199 - 0
env/Lib/site-packages/asyncio/coroutines.py

@@ -0,0 +1,199 @@
+__all__ = ['coroutine',
+           'iscoroutinefunction', 'iscoroutine']
+
+import functools
+import inspect
+import opcode
+import os
+import sys
+import traceback
+import types
+
+from . import events
+from . import futures
+from .log import logger
+
+
+# Opcode of "yield from" instruction
+_YIELD_FROM = opcode.opmap['YIELD_FROM']
+
+# If you set _DEBUG to true, @coroutine will wrap the resulting
+# generator objects in a CoroWrapper instance (defined below).  That
+# instance will log a message when the generator is never iterated
+# over, which may happen when you forget to use "yield from" with a
+# coroutine call.  Note that the value of the _DEBUG flag is taken
+# when the decorator is used, so to be of any use it must be set
+# before you define your coroutines.  A downside of using this feature
+# is that tracebacks show entries for the CoroWrapper.__next__ method
+# when _DEBUG is true.
+_DEBUG = (not sys.flags.ignore_environment
+          and bool(os.environ.get('PYTHONASYNCIODEBUG')))
+
+
+# Check for CPython issue #21209
+def has_yield_from_bug():
+    class MyGen:
+        def __init__(self):
+            self.send_args = None
+        def __iter__(self):
+            return self
+        def __next__(self):
+            return 42
+        def send(self, *what):
+            self.send_args = what
+            return None
+    def yield_from_gen(gen):
+        yield from gen
+    value = (1, 2, 3)
+    gen = MyGen()
+    coro = yield_from_gen(gen)
+    next(coro)
+    coro.send(value)
+    return gen.send_args != (value,)
+_YIELD_FROM_BUG = has_yield_from_bug()
+del has_yield_from_bug
+
+
+class CoroWrapper:
+    # Wrapper for coroutine object in _DEBUG mode.
+
+    def __init__(self, gen, func):
+        assert inspect.isgenerator(gen), gen
+        self.gen = gen
+        self.func = func
+        self._source_traceback = traceback.extract_stack(sys._getframe(1))
+        # __name__, __qualname__, __doc__ attributes are set by the coroutine()
+        # decorator
+
+    def __repr__(self):
+        coro_repr = _format_coroutine(self)
+        if self._source_traceback:
+            frame = self._source_traceback[-1]
+            coro_repr += ', created at %s:%s' % (frame[0], frame[1])
+        return '<%s %s>' % (self.__class__.__name__, coro_repr)
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        return next(self.gen)
+
+    if _YIELD_FROM_BUG:
+        # Workaround for CPython issue #21209: when using "yield from" with a
+        # custom generator, generator.send(tuple) unpacks the tuple instead of
+        # passing the tuple unchanged. Check whether the caller is a generator
+        # using "yield from" to decide if the parameter should be unpacked.
+        def send(self, *value):
+            frame = sys._getframe()
+            caller = frame.f_back
+            assert caller.f_lasti >= 0
+            if caller.f_code.co_code[caller.f_lasti] != _YIELD_FROM:
+                value = value[0]
+            return self.gen.send(value)
+    else:
+        def send(self, value):
+            return self.gen.send(value)
+
+    def throw(self, exc):
+        return self.gen.throw(exc)
+
+    def close(self):
+        return self.gen.close()
+
+    @property
+    def gi_frame(self):
+        return self.gen.gi_frame
+
+    @property
+    def gi_running(self):
+        return self.gen.gi_running
+
+    @property
+    def gi_code(self):
+        return self.gen.gi_code
+
+    def __del__(self):
+        # Be careful accessing self.gen.gi_frame -- self.gen might not exist.
+        gen = getattr(self, 'gen', None)
+        frame = getattr(gen, 'gi_frame', None)
+        if frame is not None and frame.f_lasti == -1:
+            msg = '%r was never yielded from' % self
+            tb = getattr(self, '_source_traceback', ())
+            if tb:
+                tb = ''.join(traceback.format_list(tb))
+                msg += ('\nCoroutine object created at '
+                        '(most recent call last):\n')
+                msg += tb.rstrip()
+            logger.error(msg)
+
+
+def coroutine(func):
+    """Decorator to mark coroutines.
+
+    If the coroutine is not yielded from before it is destroyed,
+    an error message is logged.
+    """
+    if inspect.isgeneratorfunction(func):
+        coro = func
+    else:
+        @functools.wraps(func)
+        def coro(*args, **kw):
+            res = func(*args, **kw)
+            if isinstance(res, futures.Future) or inspect.isgenerator(res):
+                res = yield from res
+            return res
+
+    if not _DEBUG:
+        wrapper = coro
+    else:
+        @functools.wraps(func)
+        def wrapper(*args, **kwds):
+            w = CoroWrapper(coro(*args, **kwds), func)
+            if w._source_traceback:
+                del w._source_traceback[-1]
+            w.__name__ = func.__name__
+            if hasattr(func, '__qualname__'):
+                w.__qualname__ = func.__qualname__
+            w.__doc__ = func.__doc__
+            return w
+
+    wrapper._is_coroutine = True  # For iscoroutinefunction().
+    return wrapper
+
+
+def iscoroutinefunction(func):
+    """Return True if func is a decorated coroutine function."""
+    return getattr(func, '_is_coroutine', False)
+
+
+_COROUTINE_TYPES = (types.GeneratorType, CoroWrapper)
+
+def iscoroutine(obj):
+    """Return True if obj is a coroutine object."""
+    return isinstance(obj, _COROUTINE_TYPES)
+
+
+def _format_coroutine(coro):
+    assert iscoroutine(coro)
+    coro_name = getattr(coro, '__qualname__', coro.__name__)
+
+    filename = coro.gi_code.co_filename
+    if (isinstance(coro, CoroWrapper)
+    and not inspect.isgeneratorfunction(coro.func)):
+        filename, lineno = events._get_function_source(coro.func)
+        if coro.gi_frame is None:
+            coro_repr = ('%s() done, defined at %s:%s'
+                         % (coro_name, filename, lineno))
+        else:
+            coro_repr = ('%s() running, defined at %s:%s'
+                         % (coro_name, filename, lineno))
+    elif coro.gi_frame is not None:
+        lineno = coro.gi_frame.f_lineno
+        coro_repr = ('%s() running at %s:%s'
+                     % (coro_name, filename, lineno))
+    else:
+        lineno = coro.gi_code.co_firstlineno
+        coro_repr = ('%s() done, defined at %s:%s'
+                     % (coro_name, filename, lineno))
+
+    return coro_repr
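
A short sketch of the decorator and the introspection helpers above, in the
old yield-from style this module targets:

    import asyncio
    from asyncio import coroutines

    @asyncio.coroutine
    def add(a, b):
        yield from asyncio.sleep(0)  # old-style suspension point
        return a + b

    print(coroutines.iscoroutinefunction(add))  # True
    print(coroutines.iscoroutine(add(1, 2)))    # True (a generator object)

    loop = asyncio.get_event_loop()
    print(loop.run_until_complete(add(1, 2)))   # 3
    loop.close()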

+ 597 - 0
env/Lib/site-packages/asyncio/events.py

@@ -0,0 +1,597 @@
+"""Event loop and event loop policy."""
+
+__all__ = ['AbstractEventLoopPolicy',
+           'AbstractEventLoop', 'AbstractServer',
+           'Handle', 'TimerHandle',
+           'get_event_loop_policy', 'set_event_loop_policy',
+           'get_event_loop', 'set_event_loop', 'new_event_loop',
+           'get_child_watcher', 'set_child_watcher',
+           ]
+
+import functools
+import inspect
+import reprlib
+import socket
+import subprocess
+import sys
+import threading
+import traceback
+
+
+_PY34 = sys.version_info >= (3, 4)
+
+
+def _get_function_source(func):
+    if _PY34:
+        func = inspect.unwrap(func)
+    elif hasattr(func, '__wrapped__'):
+        func = func.__wrapped__
+    if inspect.isfunction(func):
+        code = func.__code__
+        return (code.co_filename, code.co_firstlineno)
+    if isinstance(func, functools.partial):
+        return _get_function_source(func.func)
+    if _PY34 and isinstance(func, functools.partialmethod):
+        return _get_function_source(func.func)
+    return None
+
+
+def _format_args(args):
+    """Format function arguments.
+
+    Special case for a single parameter: ('hello',) is formatted as ('hello').
+    """
+    # use reprlib to limit the length of the output
+    args_repr = reprlib.repr(args)
+    if len(args) == 1 and args_repr.endswith(',)'):
+        args_repr = args_repr[:-2] + ')'
+    return args_repr
+
+
+def _format_callback(func, args, suffix=''):
+    if isinstance(func, functools.partial):
+        if args is not None:
+            suffix = _format_args(args) + suffix
+        return _format_callback(func.func, func.args, suffix)
+
+    func_repr = getattr(func, '__qualname__', None)
+    if not func_repr:
+        func_repr = repr(func)
+
+    if args is not None:
+        func_repr += _format_args(args)
+    if suffix:
+        func_repr += suffix
+
+    source = _get_function_source(func)
+    if source:
+        func_repr += ' at %s:%s' % source
+    return func_repr
+
+
+class Handle:
+    """Object returned by callback registration methods."""
+
+    __slots__ = ('_callback', '_args', '_cancelled', '_loop',
+                 '_source_traceback', '_repr', '__weakref__')
+
+    def __init__(self, callback, args, loop):
+        assert not isinstance(callback, Handle), 'A Handle is not a callback'
+        self._loop = loop
+        self._callback = callback
+        self._args = args
+        self._cancelled = False
+        self._repr = None
+        if self._loop.get_debug():
+            self._source_traceback = traceback.extract_stack(sys._getframe(1))
+        else:
+            self._source_traceback = None
+
+    def _repr_info(self):
+        info = [self.__class__.__name__]
+        if self._cancelled:
+            info.append('cancelled')
+        if self._callback is not None:
+            info.append(_format_callback(self._callback, self._args))
+        if self._source_traceback:
+            frame = self._source_traceback[-1]
+            info.append('created at %s:%s' % (frame[0], frame[1]))
+        return info
+
+    def __repr__(self):
+        if self._repr is not None:
+            return self._repr
+        info = self._repr_info()
+        return '<%s>' % ' '.join(info)
+
+    def cancel(self):
+        if not self._cancelled:
+            self._cancelled = True
+            if self._loop.get_debug():
+                # Keep a representation in debug mode to preserve the
+                # callback and its parameters. For example, to log the
+                # warning "Executing <Handle...> took 2.5 seconds"
+                self._repr = repr(self)
+            self._callback = None
+            self._args = None
+
+    def _run(self):
+        try:
+            self._callback(*self._args)
+        except Exception as exc:
+            cb = _format_callback(self._callback, self._args)
+            msg = 'Exception in callback {}'.format(cb)
+            context = {
+                'message': msg,
+                'exception': exc,
+                'handle': self,
+            }
+            if self._source_traceback:
+                context['source_traceback'] = self._source_traceback
+            self._loop.call_exception_handler(context)
+        self = None  # Needed to break cycles when an exception occurs.
+
+
+class TimerHandle(Handle):
+    """Object returned by timed callback registration methods."""
+
+    __slots__ = ['_scheduled', '_when']
+
+    def __init__(self, when, callback, args, loop):
+        assert when is not None
+        super().__init__(callback, args, loop)
+        if self._source_traceback:
+            del self._source_traceback[-1]
+        self._when = when
+        self._scheduled = False
+
+    def _repr_info(self):
+        info = super()._repr_info()
+        pos = 2 if self._cancelled else 1
+        info.insert(pos, 'when=%s' % self._when)
+        return info
+
+    def __hash__(self):
+        return hash(self._when)
+
+    def __lt__(self, other):
+        return self._when < other._when
+
+    def __le__(self, other):
+        if self._when < other._when:
+            return True
+        return self.__eq__(other)
+
+    def __gt__(self, other):
+        return self._when > other._when
+
+    def __ge__(self, other):
+        if self._when > other._when:
+            return True
+        return self.__eq__(other)
+
+    def __eq__(self, other):
+        if isinstance(other, TimerHandle):
+            return (self._when == other._when and
+                    self._callback == other._callback and
+                    self._args == other._args and
+                    self._cancelled == other._cancelled)
+        return NotImplemented
+
+    def __ne__(self, other):
+        equal = self.__eq__(other)
+        return NotImplemented if equal is NotImplemented else not equal
+
+    def cancel(self):
+        if not self._cancelled:
+            self._loop._timer_handle_cancelled(self)
+        super().cancel()
+
+
+class AbstractServer:
+    """Abstract server returned by create_server()."""
+
+    def close(self):
+        """Stop serving.  This leaves existing connections open."""
+        return NotImplemented
+
+    def wait_closed(self):
+        """Coroutine to wait until service is closed."""
+        return NotImplemented
+
+
+class AbstractEventLoop:
+    """Abstract event loop."""
+
+    # Running and stopping the event loop.
+
+    def run_forever(self):
+        """Run the event loop until stop() is called."""
+        raise NotImplementedError
+
+    def run_until_complete(self, future):
+        """Run the event loop until a Future is done.
+
+        Return the Future's result, or raise its exception.
+        """
+        raise NotImplementedError
+
+    def stop(self):
+        """Stop the event loop as soon as reasonable.
+
+        Exactly how soon that is may depend on the implementation, but
+        no more I/O callbacks should be scheduled.
+        """
+        raise NotImplementedError
+
+    def is_running(self):
+        """Return whether the event loop is currently running."""
+        raise NotImplementedError
+
+    def is_closed(self):
+        """Returns True if the event loop was closed."""
+        raise NotImplementedError
+
+    def close(self):
+        """Close the loop.
+
+        The loop should not be running.
+
+        This is idempotent and irreversible.
+
+        No other methods should be called after this one.
+        """
+        raise NotImplementedError
+
+    # Methods scheduling callbacks.  All these return Handles.
+
+    def _timer_handle_cancelled(self, handle):
+        """Notification that a TimerHandle has been cancelled."""
+        raise NotImplementedError
+
+    def call_soon(self, callback, *args):
+        return self.call_later(0, callback, *args)
+
+    def call_later(self, delay, callback, *args):
+        raise NotImplementedError
+
+    def call_at(self, when, callback, *args):
+        raise NotImplementedError
+
+    def time(self):
+        raise NotImplementedError
+
+    # Method scheduling a coroutine object: create a task.
+
+    def create_task(self, coro):
+        raise NotImplementedError
+
+    # Methods for interacting with threads.
+
+    def call_soon_threadsafe(self, callback, *args):
+        raise NotImplementedError
+
+    def run_in_executor(self, executor, callback, *args):
+        raise NotImplementedError
+
+    def set_default_executor(self, executor):
+        raise NotImplementedError
+
+    # Network I/O methods returning Futures.
+
+    def getaddrinfo(self, host, port, *, family=0, type=0, proto=0, flags=0):
+        raise NotImplementedError
+
+    def getnameinfo(self, sockaddr, flags=0):
+        raise NotImplementedError
+
+    def create_connection(self, protocol_factory, host=None, port=None, *,
+                          ssl=None, family=0, proto=0, flags=0, sock=None,
+                          local_addr=None, server_hostname=None):
+        raise NotImplementedError
+
+    def create_server(self, protocol_factory, host=None, port=None, *,
+                      family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE,
+                      sock=None, backlog=100, ssl=None, reuse_address=None):
+        """A coroutine which creates a TCP server bound to host and port.
+
+        The return value is a Server object which can be used to stop
+        the service.
+
+        If host is an empty string or None, all interfaces are assumed
+        and the server will listen on multiple sockets (most likely
+        one for IPv4 and another one for IPv6).
+
+        family can be set to either AF_INET or AF_INET6 to force the
+        socket to use IPv4 or IPv6. If not set it will be determined
+        from host (defaults to AF_UNSPEC).
+
+        flags is a bitmask for getaddrinfo().
+
+        sock can optionally be specified in order to use a preexisting
+        socket object.
+
+        backlog is the maximum number of queued connections passed to
+        listen() (defaults to 100).
+
+        ssl can be set to an SSLContext to enable SSL over the
+        accepted connections.
+
+        reuse_address tells the kernel to reuse a local socket in
+        TIME_WAIT state, without waiting for its natural timeout to
+        expire. If not specified, it will automatically be set to True
+        on UNIX.
+        """
+        raise NotImplementedError
+
+    def create_unix_connection(self, protocol_factory, path, *,
+                               ssl=None, sock=None,
+                               server_hostname=None):
+        raise NotImplementedError
+
+    def create_unix_server(self, protocol_factory, path, *,
+                           sock=None, backlog=100, ssl=None):
+        """A coroutine which creates a UNIX Domain Socket server.
+
+        The return value is a Server object, which can be used to stop
+        the service.
+
+        path is a str, representing a file system path to bind the
+        server socket to.
+
+        sock can optionally be specified in order to use a preexisting
+        socket object.
+
+        backlog is the maximum number of queued connections passed to
+        listen() (defaults to 100).
+
+        ssl can be set to an SSLContext to enable SSL over the
+        accepted connections.
+        """
+        raise NotImplementedError
+
+    def create_datagram_endpoint(self, protocol_factory,
+                                 local_addr=None, remote_addr=None, *,
+                                 family=0, proto=0, flags=0):
+        raise NotImplementedError
+
+    # Pipes and subprocesses.
+
+    def connect_read_pipe(self, protocol_factory, pipe):
+        """Register read pipe in event loop. Set the pipe to non-blocking mode.
+
+        protocol_factory should instantiate object with Protocol interface.
+        pipe is a file-like object.
+        Return pair (transport, protocol), where transport supports the
+        ReadTransport interface."""
+        # The reason to accept file-like object instead of just file descriptor
+        # is: we need to own pipe and close it at transport finishing
+        # Can got complicated errors if pass f.fileno(),
+        # close fd in pipe transport then close f and vise versa.
+        raise NotImplementedError
+
+    def connect_write_pipe(self, protocol_factory, pipe):
+        """Register write pipe in event loop.
+
+        protocol_factory should instantiate object with BaseProtocol interface.
+        Pipe is file-like object already switched to nonblocking.
+        Return pair (transport, protocol), where transport support
+        WriteTransport interface."""
+        # The reason to accept file-like object instead of just file descriptor
+        # is: we need to own pipe and close it at transport finishing
+        # Can got complicated errors if pass f.fileno(),
+        # close fd in pipe transport then close f and vise versa.
+        raise NotImplementedError
+
+    def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
+                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                         **kwargs):
+        raise NotImplementedError
+
+    def subprocess_exec(self, protocol_factory, *args, stdin=subprocess.PIPE,
+                        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                        **kwargs):
+        raise NotImplementedError
+
+    # Ready-based callback registration methods.
+    # The add_*() methods return None.
+    # The remove_*() methods return True if something was removed,
+    # False if there was nothing to delete.
+
+    def add_reader(self, fd, callback, *args):
+        raise NotImplementedError
+
+    def remove_reader(self, fd):
+        raise NotImplementedError
+
+    def add_writer(self, fd, callback, *args):
+        raise NotImplementedError
+
+    def remove_writer(self, fd):
+        raise NotImplementedError
+
+    # Completion based I/O methods returning Futures.
+
+    def sock_recv(self, sock, nbytes):
+        raise NotImplementedError
+
+    def sock_sendall(self, sock, data):
+        raise NotImplementedError
+
+    def sock_connect(self, sock, address):
+        raise NotImplementedError
+
+    def sock_accept(self, sock):
+        raise NotImplementedError
+
+    # Signal handling.
+
+    def add_signal_handler(self, sig, callback, *args):
+        raise NotImplementedError
+
+    def remove_signal_handler(self, sig):
+        raise NotImplementedError
+
+    # Error handlers.
+
+    def set_exception_handler(self, handler):
+        raise NotImplementedError
+
+    def default_exception_handler(self, context):
+        raise NotImplementedError
+
+    def call_exception_handler(self, context):
+        raise NotImplementedError
+
+    # Debug flag management.
+
+    def get_debug(self):
+        raise NotImplementedError
+
+    def set_debug(self, enabled):
+        raise NotImplementedError
+
+
+class AbstractEventLoopPolicy:
+    """Abstract policy for accessing the event loop."""
+
+    def get_event_loop(self):
+        """Get the event loop for the current context.
+
+        Returns an event loop object implementing the BaseEventLoop interface,
+        or raises an exception in case no event loop has been set for the
+        current context and the current policy does not specify to create one.
+
+        It should never return None."""
+        raise NotImplementedError
+
+    def set_event_loop(self, loop):
+        """Set the event loop for the current context to loop."""
+        raise NotImplementedError
+
+    def new_event_loop(self):
+        """Create and return a new event loop object according to this
+        policy's rules. If this loop needs to be set as the event loop for
+        the current context, set_event_loop must be called explicitly."""
+        raise NotImplementedError
+
+    # Child processes handling (Unix only).
+
+    def get_child_watcher(self):
+        "Get the watcher for child processes."
+        raise NotImplementedError
+
+    def set_child_watcher(self, watcher):
+        """Set the watcher for child processes."""
+        raise NotImplementedError
+
+
+class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy):
+    """Default policy implementation for accessing the event loop.
+
+    In this policy, each thread has its own event loop.  However, we
+    only automatically create an event loop by default for the main
+    thread; other threads by default have no event loop.
+
+    Other policies may have different rules (e.g. a single global
+    event loop, or automatically creating an event loop per thread, or
+    using some other notion of context to which an event loop is
+    associated).
+    """
+
+    _loop_factory = None
+
+    class _Local(threading.local):
+        _loop = None
+        _set_called = False
+
+    def __init__(self):
+        self._local = self._Local()
+
+    def get_event_loop(self):
+        """Get the event loop.
+
+        A loop is created automatically for the main thread if one has not
+        been set; for any other thread, a RuntimeError is raised instead.
+        """
+        if (self._local._loop is None and
+            not self._local._set_called and
+            isinstance(threading.current_thread(), threading._MainThread)):
+            self.set_event_loop(self.new_event_loop())
+        if self._local._loop is None:
+            raise RuntimeError('There is no current event loop in thread %r.'
+                               % threading.current_thread().name)
+        return self._local._loop
+
+    def set_event_loop(self, loop):
+        """Set the event loop."""
+        self._local._set_called = True
+        assert loop is None or isinstance(loop, AbstractEventLoop)
+        self._local._loop = loop
+
+    def new_event_loop(self):
+        """Create a new event loop.
+
+        You must call set_event_loop() to make this the current event
+        loop.
+        """
+        return self._loop_factory()
+
+
+# Event loop policy.  The policy itself is always global, even if the
+# policy's rules say that there is an event loop per thread (or other
+# notion of context).  The default policy is installed by the first
+# call to get_event_loop_policy().
+_event_loop_policy = None
+
+# Lock for protecting the on-the-fly creation of the event loop policy.
+_lock = threading.Lock()
+
+
+def _init_event_loop_policy():
+    global _event_loop_policy
+    with _lock:
+        if _event_loop_policy is None:  # pragma: no branch
+            from . import DefaultEventLoopPolicy
+            _event_loop_policy = DefaultEventLoopPolicy()
+
+
+def get_event_loop_policy():
+    """Get the current event loop policy."""
+    if _event_loop_policy is None:
+        _init_event_loop_policy()
+    return _event_loop_policy
+
+
+def set_event_loop_policy(policy):
+    """Set the current event loop policy.
+
+    If policy is None, the default policy is restored."""
+    global _event_loop_policy
+    assert policy is None or isinstance(policy, AbstractEventLoopPolicy)
+    _event_loop_policy = policy
+
+
+def get_event_loop():
+    """Equivalent to calling get_event_loop_policy().get_event_loop()."""
+    return get_event_loop_policy().get_event_loop()
+
+
+def set_event_loop(loop):
+    """Equivalent to calling get_event_loop_policy().set_event_loop(loop)."""
+    get_event_loop_policy().set_event_loop(loop)
+
+
+def new_event_loop():
+    """Equivalent to calling get_event_loop_policy().new_event_loop()."""
+    return get_event_loop_policy().new_event_loop()
+
+
+def get_child_watcher():
+    """Equivalent to calling get_event_loop_policy().get_child_watcher()."""
+    return get_event_loop_policy().get_child_watcher()
+
+
+def set_child_watcher(watcher):
+    """Equivalent to calling
+    get_event_loop_policy().set_child_watcher(watcher)."""
+    return get_event_loop_policy().set_child_watcher(watcher)
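
A sketch of the per-thread behavior of the default policy described above:
only the main thread gets a loop created automatically, so a worker thread
must install its own:

    import asyncio
    import threading

    def worker():
        # get_event_loop() would raise RuntimeError here; create and
        # install a loop for this thread explicitly.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.run_until_complete(asyncio.sleep(0, loop=loop))
        loop.close()

    t = threading.Thread(target=worker)
    t.start()
    t.join()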

+ 409 - 0
env/Lib/site-packages/asyncio/futures.py

@@ -0,0 +1,409 @@
+"""A Future class similar to the one in PEP 3148."""
+
+__all__ = ['CancelledError', 'TimeoutError',
+           'InvalidStateError',
+           'Future', 'wrap_future',
+           ]
+
+import concurrent.futures._base
+import logging
+import reprlib
+import sys
+import traceback
+
+from . import events
+
+# States for Future.
+_PENDING = 'PENDING'
+_CANCELLED = 'CANCELLED'
+_FINISHED = 'FINISHED'
+
+_PY34 = sys.version_info >= (3, 4)
+
+Error = concurrent.futures._base.Error
+CancelledError = concurrent.futures.CancelledError
+TimeoutError = concurrent.futures.TimeoutError
+
+STACK_DEBUG = logging.DEBUG - 1  # heavy-duty debugging
+
+
+class InvalidStateError(Error):
+    """The operation is not allowed in this state."""
+
+
+class _TracebackLogger:
+    """Helper to log a traceback upon destruction if not cleared.
+
+    This solves a nasty problem with Futures and Tasks that have an
+    exception set: if nobody asks for the exception, the exception is
+    never logged.  This violates the Zen of Python: 'Errors should
+    never pass silently.  Unless explicitly silenced.'
+
+    However, we don't want to log the exception as soon as
+    set_exception() is called: if the calling code is written
+    properly, it will get the exception and handle it properly.  But
+    we *do* want to log it if result() or exception() was never called
+    -- otherwise developers waste a lot of time wondering why their
+    buggy code fails silently.
+
+    An earlier attempt added a __del__() method to the Future class
+    itself, but this backfired because the presence of __del__()
+    prevents garbage collection from breaking cycles.  A way out of
+    this catch-22 is to avoid having a __del__() method on the Future
+    class itself, but instead to have a reference to a helper object
+    with a __del__() method that logs the traceback, where we ensure
+    that the helper object doesn't participate in cycles, and only the
+    Future has a reference to it.
+
+    The helper object is added when set_exception() is called.  When
+    the Future is collected, and the helper is present, the helper
+    object is also collected, and its __del__() method will log the
+    traceback.  When the Future's result() or exception() method is
+    called (and a helper object is present), it removes the helper
+    object, after calling its clear() method to prevent it from
+    logging.
+
+    One downside is that we do a fair amount of work to extract the
+    traceback from the exception, even when it is never logged.  It
+    would seem cheaper to just store the exception object, but that
+    references the traceback, which references stack frames, which may
+    reference the Future, which references the _TracebackLogger, and
+    then the _TracebackLogger would be included in a cycle, which is
+    what we're trying to avoid!  As an optimization, we don't
+    immediately format the exception; we only do the work when
+    activate() is called, a call that is delayed until after all the
+    Future's callbacks have run.  Usually a Future has at least one
+    callback (typically set by 'yield from'), and usually that callback
+    extracts the exception, thereby removing the need to format it.
+
+    PS. I don't claim credit for this solution.  I first heard of it
+    in a discussion about closing files when they are collected.
+    """
+
+    __slots__ = ('loop', 'source_traceback', 'exc', 'tb')
+
+    def __init__(self, future, exc):
+        self.loop = future._loop
+        self.source_traceback = future._source_traceback
+        self.exc = exc
+        self.tb = None
+
+    def activate(self):
+        exc = self.exc
+        if exc is not None:
+            self.exc = None
+            self.tb = traceback.format_exception(exc.__class__, exc,
+                                                 exc.__traceback__)
+
+    def clear(self):
+        self.exc = None
+        self.tb = None
+
+    def __del__(self):
+        if self.tb:
+            msg = 'Future/Task exception was never retrieved\n'
+            if self.source_traceback:
+                src = ''.join(traceback.format_list(self.source_traceback))
+                msg += 'Future/Task created at (most recent call last):\n'
+                msg += '%s\n' % src.rstrip()
+            msg += ''.join(self.tb).rstrip()
+            self.loop.call_exception_handler({'message': msg})
+
+
+class Future:
+    """This class is *almost* compatible with concurrent.futures.Future.
+
+    Differences:
+
+    - result() and exception() do not take a timeout argument and
+      raise an exception when the future isn't done yet.
+
+    - Callbacks registered with add_done_callback() are always called
+      via the event loop's call_soon_threadsafe().
+
+    - This class is not compatible with the wait() and as_completed()
+      methods in the concurrent.futures package.
+
+    (In Python 3.4 or later we may be able to unify the implementations.)
+    """
+
+    # Class variables serving as defaults for instance variables.
+    _state = _PENDING
+    _result = None
+    _exception = None
+    _loop = None
+    _source_traceback = None
+
+    _blocking = False  # proper use of future (yield vs yield from)
+
+    _log_traceback = False   # Used for Python 3.4 and later
+    _tb_logger = None        # Used for Python 3.3 only
+
+    def __init__(self, *, loop=None):
+        """Initialize the future.
+
+        The optional loop argument allows one to explicitly set the event
+        loop object used by the future. If it's not provided, the future
+        uses the default event loop.
+        """
+        if loop is None:
+            self._loop = events.get_event_loop()
+        else:
+            self._loop = loop
+        self._callbacks = []
+        if self._loop.get_debug():
+            self._source_traceback = traceback.extract_stack(sys._getframe(1))
+
+    def _format_callbacks(self):
+        cb = self._callbacks
+        size = len(cb)
+        if not size:
+            cb = ''
+
+        def format_cb(callback):
+            return events._format_callback(callback, ())
+
+        if size == 1:
+            cb = format_cb(cb[0])
+        elif size == 2:
+            cb = '{}, {}'.format(format_cb(cb[0]), format_cb(cb[1]))
+        elif size > 2:
+            cb = '{}, <{} more>, {}'.format(format_cb(cb[0]),
+                                            size-2,
+                                            format_cb(cb[-1]))
+        return 'cb=[%s]' % cb
+
+    def _repr_info(self):
+        info = [self._state.lower()]
+        if self._state == _FINISHED:
+            if self._exception is not None:
+                info.append('exception={!r}'.format(self._exception))
+            else:
+                # use reprlib to limit the length of the output, especially
+                # for very long strings
+                result = reprlib.repr(self._result)
+                info.append('result={}'.format(result))
+        if self._callbacks:
+            info.append(self._format_callbacks())
+        if self._source_traceback:
+            frame = self._source_traceback[-1]
+            info.append('created at %s:%s' % (frame[0], frame[1]))
+        return info
+
+    def __repr__(self):
+        info = self._repr_info()
+        return '<%s %s>' % (self.__class__.__name__, ' '.join(info))
+
+    # On Python 3.3 and older, objects with a destructor that are part of a
+    # reference cycle are never destroyed. This is no longer the case on
+    # Python 3.4, thanks to PEP 442.
+    if _PY34:
+        def __del__(self):
+            if not self._log_traceback:
+                # set_exception() was not called, or result() or exception()
+                # has consumed the exception
+                return
+            exc = self._exception
+            context = {
+                'message': ('%s exception was never retrieved'
+                            % self.__class__.__name__),
+                'exception': exc,
+                'future': self,
+            }
+            if self._source_traceback:
+                context['source_traceback'] = self._source_traceback
+            self._loop.call_exception_handler(context)
+
+    def cancel(self):
+        """Cancel the future and schedule callbacks.
+
+        If the future is already done or cancelled, return False.  Otherwise,
+        change the future's state to cancelled, schedule the callbacks and
+        return True.
+        """
+        if self._state != _PENDING:
+            return False
+        self._state = _CANCELLED
+        self._schedule_callbacks()
+        return True
+
+    def _schedule_callbacks(self):
+        """Internal: Ask the event loop to call all callbacks.
+
+        The callbacks are scheduled to be called as soon as possible. Also
+        clears the callback list.
+        """
+        callbacks = self._callbacks[:]
+        if not callbacks:
+            return
+
+        self._callbacks[:] = []
+        for callback in callbacks:
+            self._loop.call_soon(callback, self)
+
+    def cancelled(self):
+        """Return True if the future was cancelled."""
+        return self._state == _CANCELLED
+
+    # Don't implement running(); see http://bugs.python.org/issue18699
+
+    def done(self):
+        """Return True if the future is done.
+
+        Done means either that a result or exception is available, or
+        that the future was cancelled.
+        """
+        return self._state != _PENDING
+
+    def result(self):
+        """Return the result this future represents.
+
+        If the future has been cancelled, raises CancelledError.  If the
+        future's result isn't yet available, raises InvalidStateError.  If
+        the future is done and has an exception set, this exception is raised.
+        """
+        if self._state == _CANCELLED:
+            raise CancelledError
+        if self._state != _FINISHED:
+            raise InvalidStateError('Result is not ready.')
+        self._log_traceback = False
+        if self._tb_logger is not None:
+            self._tb_logger.clear()
+            self._tb_logger = None
+        if self._exception is not None:
+            raise self._exception
+        return self._result
+
+    def exception(self):
+        """Return the exception that was set on this future.
+
+        The exception (or None if no exception was set) is returned only if
+        the future is done.  If the future has been cancelled, raises
+        CancelledError.  If the future isn't done yet, raises
+        InvalidStateError.
+        """
+        if self._state == _CANCELLED:
+            raise CancelledError
+        if self._state != _FINISHED:
+            raise InvalidStateError('Exception is not set.')
+        self._log_traceback = False
+        if self._tb_logger is not None:
+            self._tb_logger.clear()
+            self._tb_logger = None
+        return self._exception
+
+    def add_done_callback(self, fn):
+        """Add a callback to be run when the future becomes done.
+
+        The callback is called with a single argument - the future object. If
+        the future is already done when this is called, the callback is
+        scheduled with call_soon.
+        """
+        if self._state != _PENDING:
+            self._loop.call_soon(fn, self)
+        else:
+            self._callbacks.append(fn)
+
+    # New method not in PEP 3148.
+
+    def remove_done_callback(self, fn):
+        """Remove all instances of a callback from the "call when done" list.
+
+        Returns the number of callbacks removed.
+        """
+        filtered_callbacks = [f for f in self._callbacks if f != fn]
+        removed_count = len(self._callbacks) - len(filtered_callbacks)
+        if removed_count:
+            self._callbacks[:] = filtered_callbacks
+        return removed_count
+
+    # So-called internal methods (note: no set_running_or_notify_cancel()).
+
+    def _set_result_unless_cancelled(self, result):
+        """Helper setting the result only if the future was not cancelled."""
+        if self.cancelled():
+            return
+        self.set_result(result)
+
+    def set_result(self, result):
+        """Mark the future done and set its result.
+
+        If the future is already done when this method is called, raises
+        InvalidStateError.
+        """
+        if self._state != _PENDING:
+            raise InvalidStateError('{}: {!r}'.format(self._state, self))
+        self._result = result
+        self._state = _FINISHED
+        self._schedule_callbacks()
+
+    def set_exception(self, exception):
+        """Mark the future done and set an exception.
+
+        If the future is already done when this method is called, raises
+        InvalidStateError.
+        """
+        if self._state != _PENDING:
+            raise InvalidStateError('{}: {!r}'.format(self._state, self))
+        if isinstance(exception, type):
+            exception = exception()
+        self._exception = exception
+        self._state = _FINISHED
+        self._schedule_callbacks()
+        if _PY34:
+            self._log_traceback = True
+        else:
+            self._tb_logger = _TracebackLogger(self, exception)
+            # Arrange for the logger to be activated after all callbacks
+            # have had a chance to call result() or exception().
+            self._loop.call_soon(self._tb_logger.activate)
+
+    # Truly internal methods.
+
+    def _copy_state(self, other):
+        """Internal helper to copy state from another Future.
+
+        The other Future may be a concurrent.futures.Future.
+        """
+        assert other.done()
+        if self.cancelled():
+            return
+        assert not self.done()
+        if other.cancelled():
+            self.cancel()
+        else:
+            exception = other.exception()
+            if exception is not None:
+                self.set_exception(exception)
+            else:
+                result = other.result()
+                self.set_result(result)
+
+    def __iter__(self):
+        if not self.done():
+            self._blocking = True
+            yield self  # This tells Task to wait for completion.
+        assert self.done(), "yield from wasn't used with future"
+        return self.result()  # May raise too.
+
+
+def wrap_future(fut, *, loop=None):
+    """Wrap concurrent.futures.Future object."""
+    if isinstance(fut, Future):
+        return fut
+    assert isinstance(fut, concurrent.futures.Future), \
+        'concurrent.futures.Future is expected, got {!r}'.format(fut)
+    if loop is None:
+        loop = events.get_event_loop()
+    new_future = Future(loop=loop)
+
+    def _check_cancel_other(f):
+        if f.cancelled():
+            fut.cancel()
+
+    new_future.add_done_callback(_check_cancel_other)
+    fut.add_done_callback(
+        lambda future: loop.call_soon_threadsafe(
+            new_future._copy_state, future))
+    return new_future
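
A minimal usage sketch for wrap_future() (illustrative, not from this
changeset; blocking_io and the pool size are assumptions):

    import asyncio
    import concurrent.futures

    def blocking_io():
        # Hypothetical blocking function, run on a thread pool.
        return 42

    @asyncio.coroutine
    def main(loop, pool):
        cf_future = pool.submit(blocking_io)   # concurrent.futures.Future
        # wrap_future() returns an asyncio Future tied to this loop.
        result = yield from asyncio.wrap_future(cf_future, loop=loop)
        print(result)   # -> 42

    loop = asyncio.get_event_loop()
    pool = concurrent.futures.ThreadPoolExecutor(max_workers=1)
    loop.run_until_complete(main(loop, pool))
    pool.shutdown()
    loop.close()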

+ 469 - 0
env/Lib/site-packages/asyncio/locks.py

@@ -0,0 +1,469 @@
+"""Synchronization primitives."""
+
+__all__ = ['Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore']
+
+import collections
+
+from . import events
+from . import futures
+from .coroutines import coroutine
+
+
+class _ContextManager:
+    """Context manager.
+
+    This enables the following idiom for acquiring and releasing a
+    lock around a block:
+
+        with (yield from lock):
+            <block>
+
+    while failing loudly when accidentally using:
+
+        with lock:
+            <block>
+    """
+
+    def __init__(self, lock):
+        self._lock = lock
+
+    def __enter__(self):
+        # We have no use for the "as ..."  clause in the with
+        # statement for locks.
+        return None
+
+    def __exit__(self, *args):
+        try:
+            self._lock.release()
+        finally:
+            self._lock = None  # Crudely prevent reuse.
+
+
+class Lock:
+    """Primitive lock objects.
+
+    A primitive lock is a synchronization primitive that is not owned
+    by a particular coroutine when locked.  A primitive lock is in one
+    of two states, 'locked' or 'unlocked'.
+
+    It is created in the unlocked state.  It has two basic methods,
+    acquire() and release().  When the state is unlocked, acquire()
+    changes the state to locked and returns immediately.  When the
+    state is locked, acquire() blocks until a call to release() in
+    another coroutine changes it to unlocked, then the acquire() call
+    resets it to locked and returns.  The release() method should only
+    be called in the locked state; it changes the state to unlocked
+    and returns immediately.  If an attempt is made to release an
+    unlocked lock, a RuntimeError will be raised.
+
+    When more than one coroutine is blocked in acquire() waiting for
+    the state to turn to unlocked, only one coroutine proceeds when a
+    release() call resets the state to unlocked: the first coroutine
+    that blocked in acquire() is woken up.
+
+    acquire() is a coroutine and should be called with 'yield from'.
+
+    Locks also support the context management protocol.  '(yield from lock)'
+    should be used as context manager expression.
+
+    Usage:
+
+        lock = Lock()
+        ...
+        yield from lock
+        try:
+            ...
+        finally:
+            lock.release()
+
+    Context manager usage:
+
+        lock = Lock()
+        ...
+        with (yield from lock):
+             ...
+
+    Lock objects can be tested for locking state:
+
+        if not lock.locked():
+           yield from lock
+        else:
+           # lock is acquired
+           ...
+
+    """
+
+    def __init__(self, *, loop=None):
+        self._waiters = collections.deque()
+        self._locked = False
+        if loop is not None:
+            self._loop = loop
+        else:
+            self._loop = events.get_event_loop()
+
+    def __repr__(self):
+        res = super().__repr__()
+        extra = 'locked' if self._locked else 'unlocked'
+        if self._waiters:
+            extra = '{},waiters:{}'.format(extra, len(self._waiters))
+        return '<{} [{}]>'.format(res[1:-1], extra)
+
+    def locked(self):
+        """Return True if lock is acquired."""
+        return self._locked
+
+    @coroutine
+    def acquire(self):
+        """Acquire a lock.
+
+        This method blocks until the lock is unlocked, then sets it to
+        locked and returns True.
+        """
+        if not self._waiters and not self._locked:
+            self._locked = True
+            return True
+
+        fut = futures.Future(loop=self._loop)
+        self._waiters.append(fut)
+        try:
+            yield from fut
+            self._locked = True
+            return True
+        finally:
+            self._waiters.remove(fut)
+
+    def release(self):
+        """Release a lock.
+
+        When the lock is locked, reset it to unlocked, and return.
+        If any other coroutines are blocked waiting for the lock to become
+        unlocked, allow exactly one of them to proceed.
+
+        When invoked on an unlocked lock, a RuntimeError is raised.
+
+        There is no return value.
+        """
+        if self._locked:
+            self._locked = False
+            # Wake up the first waiter who isn't cancelled.
+            for fut in self._waiters:
+                if not fut.done():
+                    fut.set_result(True)
+                    break
+        else:
+            raise RuntimeError('Lock is not acquired.')
+
+    def __enter__(self):
+        raise RuntimeError(
+            '"yield from" should be used as context manager expression')
+
+    def __exit__(self, *args):
+        # This must exist because __enter__ exists, even though that
+        # always raises; that's how the with-statement works.
+        pass
+
+    def __iter__(self):
+        # This is not a coroutine.  It is meant to enable the idiom:
+        #
+        #     with (yield from lock):
+        #         <block>
+        #
+        # as an alternative to:
+        #
+        #     yield from lock.acquire()
+        #     try:
+        #         <block>
+        #     finally:
+        #         lock.release()
+        yield from self.acquire()
+        return _ContextManager(self)
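
A runnable sketch of the context-manager idiom described in the docstring
(illustrative, not from this changeset):

    import asyncio

    @asyncio.coroutine
    def worker(name, lock):
        with (yield from lock):            # acquire; release on exit
            print(name, 'holds the lock')
            yield from asyncio.sleep(0.1)

    loop = asyncio.get_event_loop()
    lock = asyncio.Lock(loop=loop)
    loop.run_until_complete(
        asyncio.wait([worker('a', lock), worker('b', lock)], loop=loop))
    loop.close()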
+
+
+class Event:
+    """Asynchronous equivalent to threading.Event.
+
+    Class implementing event objects. An event manages a flag that can be set
+    to true with the set() method and reset to false with the clear() method.
+    The wait() method blocks until the flag is true. The flag is initially
+    false.
+    """
+
+    def __init__(self, *, loop=None):
+        self._waiters = collections.deque()
+        self._value = False
+        if loop is not None:
+            self._loop = loop
+        else:
+            self._loop = events.get_event_loop()
+
+    def __repr__(self):
+        res = super().__repr__()
+        extra = 'set' if self._value else 'unset'
+        if self._waiters:
+            extra = '{},waiters:{}'.format(extra, len(self._waiters))
+        return '<{} [{}]>'.format(res[1:-1], extra)
+
+    def is_set(self):
+        """Return True if and only if the internal flag is true."""
+        return self._value
+
+    def set(self):
+        """Set the internal flag to true. All coroutines waiting for it to
+        become true are awakened. Coroutine that call wait() once the flag is
+        true will not block at all.
+        """
+        if not self._value:
+            self._value = True
+
+            for fut in self._waiters:
+                if not fut.done():
+                    fut.set_result(True)
+
+    def clear(self):
+        """Reset the internal flag to false. Subsequently, coroutines calling
+        wait() will block until set() is called to set the internal flag
+        to true again."""
+        self._value = False
+
+    @coroutine
+    def wait(self):
+        """Block until the internal flag is true.
+
+        If the internal flag is true on entry, return True
+        immediately.  Otherwise, block until another coroutine calls
+        set() to set the flag to true, then return True.
+        """
+        if self._value:
+            return True
+
+        fut = futures.Future(loop=self._loop)
+        self._waiters.append(fut)
+        try:
+            yield from fut
+            return True
+        finally:
+            self._waiters.remove(fut)
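
A short Event sketch (illustrative, not from this changeset): one coroutine
blocks in wait() until another calls set().

    import asyncio

    @asyncio.coroutine
    def waiter(event):
        yield from event.wait()        # blocks until set() is called
        print('event was set')

    @asyncio.coroutine
    def setter(event):
        yield from asyncio.sleep(0.1)
        event.set()                    # wakes every waiter at once

    loop = asyncio.get_event_loop()
    event = asyncio.Event(loop=loop)
    loop.run_until_complete(
        asyncio.wait([waiter(event), setter(event)], loop=loop))
    loop.close()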
+
+
+class Condition:
+    """Asynchronous equivalent to threading.Condition.
+
+    This class implements condition variable objects. A condition variable
+    allows one or more coroutines to wait until they are notified by another
+    coroutine.
+
+    A new Lock object is created and used as the underlying lock.
+    """
+
+    def __init__(self, lock=None, *, loop=None):
+        if loop is not None:
+            self._loop = loop
+        else:
+            self._loop = events.get_event_loop()
+
+        if lock is None:
+            lock = Lock(loop=self._loop)
+        elif lock._loop is not self._loop:
+            raise ValueError("loop argument must agree with lock")
+
+        self._lock = lock
+        # Export the lock's locked(), acquire() and release() methods.
+        self.locked = lock.locked
+        self.acquire = lock.acquire
+        self.release = lock.release
+
+        self._waiters = collections.deque()
+
+    def __repr__(self):
+        res = super().__repr__()
+        extra = 'locked' if self.locked() else 'unlocked'
+        if self._waiters:
+            extra = '{},waiters:{}'.format(extra, len(self._waiters))
+        return '<{} [{}]>'.format(res[1:-1], extra)
+
+    @coroutine
+    def wait(self):
+        """Wait until notified.
+
+        If the calling coroutine has not acquired the lock when this
+        method is called, a RuntimeError is raised.
+
+        This method releases the underlying lock, and then blocks
+        until it is awakened by a notify() or notify_all() call for
+        the same condition variable in another coroutine.  Once
+        awakened, it re-acquires the lock and returns True.
+        """
+        if not self.locked():
+            raise RuntimeError('cannot wait on un-acquired lock')
+
+        self.release()
+        try:
+            fut = futures.Future(loop=self._loop)
+            self._waiters.append(fut)
+            try:
+                yield from fut
+                return True
+            finally:
+                self._waiters.remove(fut)
+
+        finally:
+            yield from self.acquire()
+
+    @coroutine
+    def wait_for(self, predicate):
+        """Wait until a predicate becomes true.
+
+        The predicate should be a callable whose result will be
+        interpreted as a boolean value.  The final predicate value is
+        the return value.
+        """
+        result = predicate()
+        while not result:
+            yield from self.wait()
+            result = predicate()
+        return result
+
+    def notify(self, n=1):
+        """By default, wake up one coroutine waiting on this condition, if any.
+        If the calling coroutine has not acquired the lock when this method
+        is called, a RuntimeError is raised.
+
+        This method wakes up at most n of the coroutines waiting for the
+        condition variable; it is a no-op if no coroutines are waiting.
+
+        Note: an awakened coroutine does not actually return from its
+        wait() call until it can reacquire the lock. Since notify() does
+        not release the lock, its caller should.
+        """
+        if not self.locked():
+            raise RuntimeError('cannot notify on un-acquired lock')
+
+        idx = 0
+        for fut in self._waiters:
+            if idx >= n:
+                break
+
+            if not fut.done():
+                idx += 1
+                fut.set_result(False)
+
+    def notify_all(self):
+        """Wake up all threads waiting on this condition. This method acts
+        like notify(), but wakes up all waiting threads instead of one. If the
+        calling thread has not acquired the lock when this method is called,
+        a RuntimeError is raised.
+        """
+        self.notify(len(self._waiters))
+
+    def __enter__(self):
+        raise RuntimeError(
+            '"yield from" should be used as context manager expression')
+
+    def __exit__(self, *args):
+        pass
+
+    def __iter__(self):
+        # See comment in Lock.__iter__().
+        yield from self.acquire()
+        return _ContextManager(self)
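
A producer/consumer sketch for Condition (illustrative, not from this
changeset; the shared list 'box' is an assumption):

    import asyncio

    @asyncio.coroutine
    def consumer(cond, box):
        with (yield from cond):                    # take the underlying lock
            yield from cond.wait_for(lambda: box)  # releases, re-acquires
            print('got', box.pop())

    @asyncio.coroutine
    def producer(cond, box):
        yield from asyncio.sleep(0.1)
        with (yield from cond):
            box.append('item')
            cond.notify()              # must hold the lock when notifying

    loop = asyncio.get_event_loop()
    cond = asyncio.Condition(loop=loop)
    box = []
    loop.run_until_complete(
        asyncio.wait([consumer(cond, box), producer(cond, box)], loop=loop))
    loop.close()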
+
+
+class Semaphore:
+    """A Semaphore implementation.
+
+    A semaphore manages an internal counter which is decremented by each
+    acquire() call and incremented by each release() call. The counter
+    can never go below zero; when acquire() finds that it is zero, it blocks,
+    waiting until some other coroutine calls release().
+
+    Semaphores also support the context management protocol.
+
+    The optional argument gives the initial value for the internal
+    counter; it defaults to 1. If the value given is less than 0,
+    ValueError is raised.
+    """
+
+    def __init__(self, value=1, *, loop=None):
+        if value < 0:
+            raise ValueError("Semaphore initial value must be >= 0")
+        self._value = value
+        self._waiters = collections.deque()
+        if loop is not None:
+            self._loop = loop
+        else:
+            self._loop = events.get_event_loop()
+
+    def __repr__(self):
+        res = super().__repr__()
+        extra = 'locked' if self.locked() else 'unlocked,value:{}'.format(
+            self._value)
+        if self._waiters:
+            extra = '{},waiters:{}'.format(extra, len(self._waiters))
+        return '<{} [{}]>'.format(res[1:-1], extra)
+
+    def locked(self):
+        """Returns True if semaphore can not be acquired immediately."""
+        return self._value == 0
+
+    @coroutine
+    def acquire(self):
+        """Acquire a semaphore.
+
+        If the internal counter is larger than zero on entry,
+        decrement it by one and return True immediately.  If it is
+        zero on entry, block, waiting until some other coroutine has
+        called release() to make it larger than 0, and then return
+        True.
+        """
+        if not self._waiters and self._value > 0:
+            self._value -= 1
+            return True
+
+        fut = futures.Future(loop=self._loop)
+        self._waiters.append(fut)
+        try:
+            yield from fut
+            self._value -= 1
+            return True
+        finally:
+            self._waiters.remove(fut)
+
+    def release(self):
+        """Release a semaphore, incrementing the internal counter by one.
+        When it was zero on entry and another coroutine is waiting for it to
+        become larger than zero again, wake up that coroutine.
+        """
+        self._value += 1
+        for waiter in self._waiters:
+            if not waiter.done():
+                waiter.set_result(True)
+                break
+
+    def __enter__(self):
+        raise RuntimeError(
+            '"yield from" should be used as context manager expression')
+
+    def __exit__(self, *args):
+        pass
+
+    def __iter__(self):
+        # See comment in Lock.__iter__().
+        yield from self.acquire()
+        return _ContextManager(self)
+
+
+class BoundedSemaphore(Semaphore):
+    """A bounded semaphore implementation.
+
+    This raises ValueError in release() if it would increase the value
+    above the initial value.
+    """
+
+    def __init__(self, value=1, *, loop=None):
+        self._bound_value = value
+        super().__init__(value, loop=loop)
+
+    def release(self):
+        if self._value >= self._bound_value:
+            raise ValueError('BoundedSemaphore released too many times')
+        super().release()
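
A Semaphore sketch bounding concurrency (illustrative, not from this
changeset):

    import asyncio

    @asyncio.coroutine
    def worker(n, sem):
        with (yield from sem):         # at most two workers inside at once
            print('worker', n, 'running')
            yield from asyncio.sleep(0.1)

    loop = asyncio.get_event_loop()
    sem = asyncio.Semaphore(2, loop=loop)
    loop.run_until_complete(
        asyncio.wait([worker(i, sem) for i in range(5)], loop=loop))
    loop.close()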

+ 7 - 0
env/Lib/site-packages/asyncio/log.py

@@ -0,0 +1,7 @@
+"""Logging configuration."""
+
+import logging
+
+
+# Name the logger after the package.
+logger = logging.getLogger(__package__)
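
Because the logger is named after the package, applications can surface
asyncio's diagnostics through the standard logging module, e.g.:

    import logging

    logging.basicConfig(level=logging.WARNING)
    logging.getLogger('asyncio').setLevel(logging.DEBUG)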

+ 547 - 0
env/Lib/site-packages/asyncio/proactor_events.py

@@ -0,0 +1,547 @@
+"""Event loop using a proactor and related classes.
+
+A proactor is a "notify-on-completion" multiplexer.  Currently a
+proactor is only implemented on Windows with IOCP.
+"""
+
+__all__ = ['BaseProactorEventLoop']
+
+import socket
+import sys
+import warnings
+
+from . import base_events
+from . import constants
+from . import futures
+from . import sslproto
+from . import transports
+from .log import logger
+
+
+class _ProactorBasePipeTransport(transports._FlowControlMixin,
+                                 transports.BaseTransport):
+    """Base class for pipe and socket transports."""
+
+    def __init__(self, loop, sock, protocol, waiter=None,
+                 extra=None, server=None):
+        super().__init__(extra, loop)
+        self._set_extra(sock)
+        self._sock = sock
+        self._protocol = protocol
+        self._server = server
+        self._buffer = None  # None or bytearray.
+        self._read_fut = None
+        self._write_fut = None
+        self._pending_write = 0
+        self._conn_lost = 0
+        self._closing = False  # Set when close() called.
+        self._eof_written = False
+        if self._server is not None:
+            self._server._attach()
+        self._loop.call_soon(self._protocol.connection_made, self)
+        if waiter is not None:
+            # only wake up the waiter when connection_made() has been called
+            self._loop.call_soon(waiter._set_result_unless_cancelled, None)
+
+    def __repr__(self):
+        info = [self.__class__.__name__]
+        if self._sock is None:
+            info.append('closed')
+        elif self._closing:
+            info.append('closing')
+        if self._sock is not None:
+            info.append('fd=%s' % self._sock.fileno())
+        if self._read_fut is not None:
+            info.append('read=%s' % self._read_fut)
+        if self._write_fut is not None:
+            info.append("write=%r" % self._write_fut)
+        if self._buffer:
+            bufsize = len(self._buffer)
+            info.append('write_bufsize=%s' % bufsize)
+        if self._eof_written:
+            info.append('EOF written')
+        return '<%s>' % ' '.join(info)
+
+    def _set_extra(self, sock):
+        self._extra['pipe'] = sock
+
+    def close(self):
+        if self._closing:
+            return
+        self._closing = True
+        self._conn_lost += 1
+        if not self._buffer and self._write_fut is None:
+            self._loop.call_soon(self._call_connection_lost, None)
+        if self._read_fut is not None:
+            self._read_fut.cancel()
+            self._read_fut = None
+
+    # On Python 3.3 and older, objects with a destructor that are part of a
+    # reference cycle are never destroyed. That is no longer the case on
+    # Python 3.4, thanks to PEP 442.
+    if sys.version_info >= (3, 4):
+        def __del__(self):
+            if self._sock is not None:
+                warnings.warn("unclosed transport %r" % self, ResourceWarning)
+                self.close()
+
+    def _fatal_error(self, exc, message='Fatal error on pipe transport'):
+        if isinstance(exc, (BrokenPipeError, ConnectionResetError)):
+            if self._loop.get_debug():
+                logger.debug("%r: %s", self, message, exc_info=True)
+        else:
+            self._loop.call_exception_handler({
+                'message': message,
+                'exception': exc,
+                'transport': self,
+                'protocol': self._protocol,
+            })
+        self._force_close(exc)
+
+    def _force_close(self, exc):
+        if self._closing:
+            return
+        self._closing = True
+        self._conn_lost += 1
+        if self._write_fut:
+            self._write_fut.cancel()
+            self._write_fut = None
+        if self._read_fut:
+            self._read_fut.cancel()
+            self._read_fut = None
+        self._pending_write = 0
+        self._buffer = None
+        self._loop.call_soon(self._call_connection_lost, exc)
+
+    def _call_connection_lost(self, exc):
+        try:
+            self._protocol.connection_lost(exc)
+        finally:
+            # XXX If there is a pending overlapped read on the other
+            # end then it may fail with ERROR_NETNAME_DELETED if we
+            # just close our end.  First calling shutdown() seems to
+            # cure it, but maybe using DisconnectEx() would be better.
+            if hasattr(self._sock, 'shutdown'):
+                self._sock.shutdown(socket.SHUT_RDWR)
+            self._sock.close()
+            self._sock = None
+            server = self._server
+            if server is not None:
+                server._detach()
+                self._server = None
+
+    def get_write_buffer_size(self):
+        size = self._pending_write
+        if self._buffer is not None:
+            size += len(self._buffer)
+        return size
+
+
+class _ProactorReadPipeTransport(_ProactorBasePipeTransport,
+                                 transports.ReadTransport):
+    """Transport for read pipes."""
+
+    def __init__(self, loop, sock, protocol, waiter=None,
+                 extra=None, server=None):
+        super().__init__(loop, sock, protocol, waiter, extra, server)
+        self._paused = False
+        self._loop.call_soon(self._loop_reading)
+
+    def pause_reading(self):
+        if self._closing:
+            raise RuntimeError('Cannot pause_reading() when closing')
+        if self._paused:
+            raise RuntimeError('Already paused')
+        self._paused = True
+        if self._loop.get_debug():
+            logger.debug("%r pauses reading", self)
+
+    def resume_reading(self):
+        if not self._paused:
+            raise RuntimeError('Not paused')
+        self._paused = False
+        if self._closing:
+            return
+        self._loop.call_soon(self._loop_reading, self._read_fut)
+        if self._loop.get_debug():
+            logger.debug("%r resumes reading", self)
+
+    def _loop_reading(self, fut=None):
+        if self._paused:
+            return
+        data = None
+
+        try:
+            if fut is not None:
+                assert self._read_fut is fut or (self._read_fut is None and
+                                                 self._closing)
+                self._read_fut = None
+                data = fut.result()  # deliver data later in "finally" clause
+
+            if self._closing:
+                # since close() has been called we ignore any read data
+                data = None
+                return
+
+            if data == b'':
+                # we got end-of-file so no need to reschedule a new read
+                return
+
+            # reschedule a new read
+            self._read_fut = self._loop._proactor.recv(self._sock, 4096)
+        except ConnectionAbortedError as exc:
+            if not self._closing:
+                self._fatal_error(exc, 'Fatal read error on pipe transport')
+            elif self._loop.get_debug():
+                logger.debug("Read error on pipe transport while closing",
+                             exc_info=True)
+        except ConnectionResetError as exc:
+            self._force_close(exc)
+        except OSError as exc:
+            self._fatal_error(exc, 'Fatal read error on pipe transport')
+        except futures.CancelledError:
+            if not self._closing:
+                raise
+        else:
+            self._read_fut.add_done_callback(self._loop_reading)
+        finally:
+            if data:
+                self._protocol.data_received(data)
+            elif data is not None:
+                if self._loop.get_debug():
+                    logger.debug("%r received EOF", self)
+                keep_open = self._protocol.eof_received()
+                if not keep_open:
+                    self.close()
+
+
+class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport,
+                                      transports.WriteTransport):
+    """Transport for write pipes."""
+
+    def write(self, data):
+        if not isinstance(data, (bytes, bytearray, memoryview)):
+            raise TypeError('data argument must be byte-ish (%r)' %
+                            type(data))
+        if self._eof_written:
+            raise RuntimeError('write_eof() already called')
+
+        if not data:
+            return
+
+        if self._conn_lost:
+            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
+                logger.warning('socket.send() raised exception.')
+            self._conn_lost += 1
+            return
+
+        # Observable states:
+        # 1. IDLE: _write_fut and _buffer both None
+        # 2. WRITING: _write_fut set; _buffer None
+        # 3. BACKED UP: _write_fut set; _buffer a bytearray
+        # We always copy the data, so the caller can't modify it
+        # while we're still waiting for the I/O to happen.
+        if self._write_fut is None:  # IDLE -> WRITING
+            assert self._buffer is None
+            # Pass a copy, except if it's already immutable.
+            self._loop_writing(data=bytes(data))
+        elif not self._buffer:  # WRITING -> BACKED UP
+            # Make a mutable copy which we can extend.
+            self._buffer = bytearray(data)
+            self._maybe_pause_protocol()
+        else:  # BACKED UP
+            # Append to buffer (also copies).
+            self._buffer.extend(data)
+            self._maybe_pause_protocol()
+
+    def _loop_writing(self, f=None, data=None):
+        try:
+            assert f is self._write_fut
+            self._write_fut = None
+            self._pending_write = 0
+            if f:
+                f.result()
+            if data is None:
+                data = self._buffer
+                self._buffer = None
+            if not data:
+                if self._closing:
+                    self._loop.call_soon(self._call_connection_lost, None)
+                if self._eof_written:
+                    self._sock.shutdown(socket.SHUT_WR)
+                # Now that we've reduced the buffer size, tell the
+                # protocol to resume writing if it was paused.  Note that
+                # we do this last since the callback is called immediately
+                # and it may add more data to the buffer (even causing the
+                # protocol to be paused again).
+                self._maybe_resume_protocol()
+            else:
+                self._write_fut = self._loop._proactor.send(self._sock, data)
+                if not self._write_fut.done():
+                    assert self._pending_write == 0
+                    self._pending_write = len(data)
+                    self._write_fut.add_done_callback(self._loop_writing)
+                    self._maybe_pause_protocol()
+                else:
+                    self._write_fut.add_done_callback(self._loop_writing)
+        except ConnectionResetError as exc:
+            self._force_close(exc)
+        except OSError as exc:
+            self._fatal_error(exc, 'Fatal write error on pipe transport')
+
+    def can_write_eof(self):
+        return True
+
+    def write_eof(self):
+        self.close()
+
+    def abort(self):
+        self._force_close(None)
+
+
+class _ProactorWritePipeTransport(_ProactorBaseWritePipeTransport):
+    def __init__(self, *args, **kw):
+        super().__init__(*args, **kw)
+        self._read_fut = self._loop._proactor.recv(self._sock, 16)
+        self._read_fut.add_done_callback(self._pipe_closed)
+
+    def _pipe_closed(self, fut):
+        if fut.cancelled():
+            # the transport has been closed
+            return
+        assert fut.result() == b''
+        if self._closing:
+            assert self._read_fut is None
+            return
+        assert fut is self._read_fut, (fut, self._read_fut)
+        self._read_fut = None
+        if self._write_fut is not None:
+            self._force_close(BrokenPipeError())
+        else:
+            self.close()
+
+
+class _ProactorDuplexPipeTransport(_ProactorReadPipeTransport,
+                                   _ProactorBaseWritePipeTransport,
+                                   transports.Transport):
+    """Transport for duplex pipes."""
+
+    def can_write_eof(self):
+        return False
+
+    def write_eof(self):
+        raise NotImplementedError
+
+
+class _ProactorSocketTransport(_ProactorReadPipeTransport,
+                               _ProactorBaseWritePipeTransport,
+                               transports.Transport):
+    """Transport for connected sockets."""
+
+    def _set_extra(self, sock):
+        self._extra['socket'] = sock
+        try:
+            self._extra['sockname'] = sock.getsockname()
+        except (socket.error, AttributeError):
+            if self._loop.get_debug():
+                logger.warning("getsockname() failed on %r",
+                             sock, exc_info=True)
+        if 'peername' not in self._extra:
+            try:
+                self._extra['peername'] = sock.getpeername()
+            except (socket.error, AttributeError):
+                if self._loop.get_debug():
+                    logger.warning("getpeername() failed on %r",
+                                   sock, exc_info=True)
+
+    def can_write_eof(self):
+        return True
+
+    def write_eof(self):
+        if self._closing or self._eof_written:
+            return
+        self._eof_written = True
+        if self._write_fut is None:
+            self._sock.shutdown(socket.SHUT_WR)
+
+
+class BaseProactorEventLoop(base_events.BaseEventLoop):
+
+    def __init__(self, proactor):
+        super().__init__()
+        logger.debug('Using proactor: %s', proactor.__class__.__name__)
+        self._proactor = proactor
+        self._selector = proactor   # convenient alias
+        self._self_reading_future = None
+        self._accept_futures = {}   # socket file descriptor => Future
+        proactor.set_loop(self)
+        self._make_self_pipe()
+
+    def _make_socket_transport(self, sock, protocol, waiter=None,
+                               extra=None, server=None):
+        return _ProactorSocketTransport(self, sock, protocol, waiter,
+                                        extra, server)
+
+    def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None,
+                            *, server_side=False, server_hostname=None,
+                            extra=None, server=None):
+        if not sslproto._is_sslproto_available():
+            raise NotImplementedError("Proactor event loop requires Python 3.5"
+                                      " or newer (ssl.MemoryBIO) to support "
+                                      "SSL")
+
+        ssl_protocol = sslproto.SSLProtocol(self, protocol, sslcontext, waiter,
+                                            server_side, server_hostname)
+        _ProactorSocketTransport(self, rawsock, ssl_protocol,
+                                 extra=extra, server=server)
+        return ssl_protocol._app_transport
+
+    def _make_duplex_pipe_transport(self, sock, protocol, waiter=None,
+                                    extra=None):
+        return _ProactorDuplexPipeTransport(self,
+                                            sock, protocol, waiter, extra)
+
+    def _make_read_pipe_transport(self, sock, protocol, waiter=None,
+                                  extra=None):
+        return _ProactorReadPipeTransport(self, sock, protocol, waiter, extra)
+
+    def _make_write_pipe_transport(self, sock, protocol, waiter=None,
+                                   extra=None):
+        # We want connection_lost() to be called when the other end closes.
+        return _ProactorWritePipeTransport(self,
+                                           sock, protocol, waiter, extra)
+
+    def close(self):
+        if self.is_running():
+            raise RuntimeError("Cannot close a running event loop")
+        if self.is_closed():
+            return
+
+        # Call these methods before closing the event loop (before calling
+        # BaseEventLoop.close), because they can schedule callbacks with
+        # call_soon(), which is forbidden when the event loop is closed.
+        self._stop_accept_futures()
+        self._close_self_pipe()
+        self._proactor.close()
+        self._proactor = None
+        self._selector = None
+
+        # Close the event loop
+        super().close()
+
+    def sock_recv(self, sock, n):
+        return self._proactor.recv(sock, n)
+
+    def sock_sendall(self, sock, data):
+        return self._proactor.send(sock, data)
+
+    def sock_connect(self, sock, address):
+        try:
+            if self._debug:
+                base_events._check_resolved_address(sock, address)
+        except ValueError as err:
+            fut = futures.Future(loop=self)
+            fut.set_exception(err)
+            return fut
+        else:
+            return self._proactor.connect(sock, address)
+
+    def sock_accept(self, sock):
+        return self._proactor.accept(sock)
+
+    def _socketpair(self):
+        raise NotImplementedError
+
+    def _close_self_pipe(self):
+        if self._self_reading_future is not None:
+            self._self_reading_future.cancel()
+            self._self_reading_future = None
+        self._ssock.close()
+        self._ssock = None
+        self._csock.close()
+        self._csock = None
+        self._internal_fds -= 1
+
+    def _make_self_pipe(self):
+        # A self-socket, really. :-)
+        self._ssock, self._csock = self._socketpair()
+        self._ssock.setblocking(False)
+        self._csock.setblocking(False)
+        self._internal_fds += 1
+        self.call_soon(self._loop_self_reading)
+
+    def _loop_self_reading(self, f=None):
+        try:
+            if f is not None:
+                f.result()  # may raise
+            f = self._proactor.recv(self._ssock, 4096)
+        except futures.CancelledError:
+            # _close_self_pipe() has been called, stop waiting for data
+            return
+        except Exception as exc:
+            self.call_exception_handler({
+                'message': 'Error on reading from the event loop self pipe',
+                'exception': exc,
+                'loop': self,
+            })
+        else:
+            self._self_reading_future = f
+            f.add_done_callback(self._loop_self_reading)
+
+    def _write_to_self(self):
+        self._csock.send(b'\0')
+
+    def _start_serving(self, protocol_factory, sock,
+                       sslcontext=None, server=None):
+
+        def loop(f=None):
+            try:
+                if f is not None:
+                    conn, addr = f.result()
+                    if self._debug:
+                        logger.debug("%r got a new connection from %r: %r",
+                                     server, addr, conn)
+                    protocol = protocol_factory()
+                    if sslcontext is not None:
+                        self._make_ssl_transport(
+                            conn, protocol, sslcontext, server_side=True,
+                            extra={'peername': addr}, server=server)
+                    else:
+                        self._make_socket_transport(
+                            conn, protocol,
+                            extra={'peername': addr}, server=server)
+                if self.is_closed():
+                    return
+                f = self._proactor.accept(sock)
+            except OSError as exc:
+                if sock.fileno() != -1:
+                    self.call_exception_handler({
+                        'message': 'Accept failed on a socket',
+                        'exception': exc,
+                        'socket': sock,
+                    })
+                    sock.close()
+                elif self._debug:
+                    logger.debug("Accept failed on socket %r",
+                                 sock, exc_info=True)
+            except futures.CancelledError:
+                sock.close()
+            else:
+                self._accept_futures[sock.fileno()] = f
+                f.add_done_callback(loop)
+
+        self.call_soon(loop)
+
+    def _process_events(self, event_list):
+        # Events are processed in the IocpProactor._poll() method
+        pass
+
+    def _stop_accept_futures(self):
+        for future in self._accept_futures.values():
+            future.cancel()
+        self._accept_futures.clear()
+
+    def _stop_serving(self, sock):
+        self._stop_accept_futures()
+        self._proactor._stop_serving(sock)
+        sock.close()
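
BaseProactorEventLoop is not used directly; on Windows the stock IOCP-backed
subclass (ProactorEventLoop, defined in windows_events.py) is installed as in
this sketch (illustrative, not from this changeset):

    import asyncio
    import sys

    if sys.platform == 'win32':
        loop = asyncio.ProactorEventLoop()   # IOCP proactor under the hood
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()      # default selector-based loop

    @asyncio.coroutine
    def main():
        yield from asyncio.sleep(0.1)
        print('running on', type(loop).__name__)

    loop.run_until_complete(main())
    loop.close()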

+ 134 - 0
env/Lib/site-packages/asyncio/protocols.py

@@ -0,0 +1,134 @@
+"""Abstract Protocol class."""
+
+__all__ = ['BaseProtocol', 'Protocol', 'DatagramProtocol',
+           'SubprocessProtocol']
+
+
+class BaseProtocol:
+    """Common base class for protocol interfaces.
+
+    Usually a user implements protocols derived from BaseProtocol,
+    such as Protocol or SubprocessProtocol.
+
+    The only case where BaseProtocol should be implemented directly is
+    a write-only transport, such as a write pipe.
+    """
+
+    def connection_made(self, transport):
+        """Called when a connection is made.
+
+        The argument is the transport representing the pipe connection.
+        To receive data, wait for data_received() calls.
+        When the connection is closed, connection_lost() is called.
+        """
+
+    def connection_lost(self, exc):
+        """Called when the connection is lost or closed.
+
+        The argument is an exception object or None (the latter
+        meaning a regular EOF is received or the connection was
+        aborted or closed).
+        """
+
+    def pause_writing(self):
+        """Called when the transport's buffer goes over the high-water mark.
+
+        Pause and resume calls are paired -- pause_writing() is called
+        once when the buffer goes strictly over the high-water mark
+        (even if subsequent writes increase the buffer size even
+        more), and eventually resume_writing() is called once when the
+        buffer size reaches the low-water mark.
+
+        Note that if the buffer size equals the high-water mark,
+        pause_writing() is not called -- it must go strictly over.
+        Conversely, resume_writing() is called when the buffer size is
+        equal or lower than the low-water mark.  These end conditions
+        are important to ensure that things go as expected when either
+        mark is zero.
+
+        NOTE: This is the only Protocol callback that is not called
+        through EventLoop.call_soon() -- if it were, it would have no
+        effect when it's most needed (when the app keeps writing
+        without yielding until pause_writing() is called).
+        """
+
+    def resume_writing(self):
+        """Called when the transport's buffer drains below the low-water mark.
+
+        See pause_writing() for details.
+        """
+
+
+class Protocol(BaseProtocol):
+    """Interface for stream protocol.
+
+    The user should implement this interface.  They can inherit from
+    this class but don't need to.  The implementations here do
+    nothing (they don't raise exceptions).
+
+    When the user wants to request a transport, they pass a protocol
+    factory to a utility function (e.g., EventLoop.create_connection()).
+
+    When the connection is made successfully, connection_made() is
+    called with a suitable transport object.  Then data_received()
+    will be called 0 or more times with data (bytes) received from the
+    transport; finally, connection_lost() will be called exactly once
+    with either an exception object or None as an argument.
+
+    State machine of calls:
+
+      start -> CM [-> DR*] [-> ER?] -> CL -> end
+
+    * CM: connection_made()
+    * DR: data_received()
+    * ER: eof_received()
+    * CL: connection_lost()
+    """
+
+    def data_received(self, data):
+        """Called when some data is received.
+
+        The argument is a bytes object.
+        """
+
+    def eof_received(self):
+        """Called when the other end calls write_eof() or equivalent.
+
+        If this returns a false value (including None), the transport
+        will close itself.  If it returns a true value, closing the
+        transport is up to the protocol.
+        """
+
+
+class DatagramProtocol(BaseProtocol):
+    """Interface for datagram protocol."""
+
+    def datagram_received(self, data, addr):
+        """Called when some datagram is received."""
+
+    def error_received(self, exc):
+        """Called when a send or receive operation raises an OSError.
+
+        (Other than BlockingIOError or InterruptedError.)
+        """
+
+
+class SubprocessProtocol(BaseProtocol):
+    """Interface for protocol for subprocess calls."""
+
+    def pipe_data_received(self, fd, data):
+        """Called when the subprocess writes data into stdout/stderr pipe.
+
+        fd is the int file descriptor.
+        data is a bytes object.
+        """
+
+    def pipe_connection_lost(self, fd, exc):
+        """Called when a file descriptor associated with the child process is
+        closed.
+
+        fd is the int file descriptor that was closed.
+        """
+
+    def process_exited(self):
+        """Called when subprocess has exited."""

+ 302 - 0
env/Lib/site-packages/asyncio/queues.py

@@ -0,0 +1,302 @@
+"""Queues"""
+
+__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'JoinableQueue',
+           'QueueFull', 'QueueEmpty']
+
+import collections
+import heapq
+
+from . import events
+from . import futures
+from . import locks
+from .tasks import coroutine
+
+
+class QueueEmpty(Exception):
+    """Exception raised when Queue.get_nowait() is called on a Queue object
+    which is empty.
+    """
+    pass
+
+
+class QueueFull(Exception):
+    """Exception raised when the Queue.put_nowait() method is called on a Queue
+    object which is full.
+    """
+    pass
+
+
+class Queue:
+    """A queue, useful for coordinating producer and consumer coroutines.
+
+    If maxsize is less than or equal to zero, the queue size is infinite. If it
+    is an integer greater than 0, then "yield from put()" will block when the
+    queue reaches maxsize, until an item is removed by get().
+
+    Unlike the standard library Queue, you can reliably know this Queue's size
+    with qsize(), since your single-threaded asyncio application won't be
+    interrupted between calling qsize() and doing an operation on the Queue.
+    """
+
+    def __init__(self, maxsize=0, *, loop=None):
+        if loop is None:
+            self._loop = events.get_event_loop()
+        else:
+            self._loop = loop
+        self._maxsize = maxsize
+
+        # Futures.
+        self._getters = collections.deque()
+        # Pairs of (item, Future).
+        self._putters = collections.deque()
+        self._init(maxsize)
+
+    def _init(self, maxsize):
+        self._queue = collections.deque()
+
+    def _get(self):
+        return self._queue.popleft()
+
+    def _put(self, item):
+        self._queue.append(item)
+
+    def __repr__(self):
+        return '<{} at {:#x} {}>'.format(
+            type(self).__name__, id(self), self._format())
+
+    def __str__(self):
+        return '<{} {}>'.format(type(self).__name__, self._format())
+
+    def _format(self):
+        result = 'maxsize={!r}'.format(self._maxsize)
+        if getattr(self, '_queue', None):
+            result += ' _queue={!r}'.format(list(self._queue))
+        if self._getters:
+            result += ' _getters[{}]'.format(len(self._getters))
+        if self._putters:
+            result += ' _putters[{}]'.format(len(self._putters))
+        return result
+
+    def _consume_done_getters(self):
+        # Delete waiters at the head of the get() queue who've timed out.
+        while self._getters and self._getters[0].done():
+            self._getters.popleft()
+
+    def _consume_done_putters(self):
+        # Delete waiters at the head of the put() queue who've timed out.
+        while self._putters and self._putters[0][1].done():
+            self._putters.popleft()
+
+    def qsize(self):
+        """Number of items in the queue."""
+        return len(self._queue)
+
+    @property
+    def maxsize(self):
+        """Number of items allowed in the queue."""
+        return self._maxsize
+
+    def empty(self):
+        """Return True if the queue is empty, False otherwise."""
+        return not self._queue
+
+    def full(self):
+        """Return True if there are maxsize items in the queue.
+
+        Note: if the Queue was initialized with maxsize=0 (the default),
+        then full() is never True.
+        """
+        if self._maxsize <= 0:
+            return False
+        else:
+            return self.qsize() >= self._maxsize
+
+    @coroutine
+    def put(self, item):
+        """Put an item into the queue.
+
+        Put an item into the queue. If the queue is full, wait until a free
+        slot is available before adding item.
+
+        This method is a coroutine.
+        """
+        self._consume_done_getters()
+        if self._getters:
+            assert not self._queue, (
+                'queue non-empty, why are getters waiting?')
+
+            getter = self._getters.popleft()
+
+            # Use _put and _get instead of passing item straight to getter, in
+            # case a subclass has logic that must run (e.g. JoinableQueue).
+            self._put(item)
+
+            # getter cannot be cancelled, we just removed done getters
+            getter.set_result(self._get())
+
+        elif self._maxsize > 0 and self._maxsize <= self.qsize():
+            waiter = futures.Future(loop=self._loop)
+
+            self._putters.append((item, waiter))
+            yield from waiter
+
+        else:
+            self._put(item)
+
+    def put_nowait(self, item):
+        """Put an item into the queue without blocking.
+
+        If no free slot is immediately available, raise QueueFull.
+        """
+        self._consume_done_getters()
+        if self._getters:
+            assert not self._queue, (
+                'queue non-empty, why are getters waiting?')
+
+            getter = self._getters.popleft()
+
+            # Use _put and _get instead of passing item straight to getter, in
+            # case a subclass has logic that must run (e.g. JoinableQueue).
+            self._put(item)
+
+            # getter cannot be cancelled, we just removed done getters
+            getter.set_result(self._get())
+
+        elif self._maxsize > 0 and self._maxsize <= self.qsize():
+            raise QueueFull
+        else:
+            self._put(item)
+
+    @coroutine
+    def get(self):
+        """Remove and return an item from the queue.
+
+        If queue is empty, wait until an item is available.
+
+        This method is a coroutine.
+        """
+        self._consume_done_putters()
+        if self._putters:
+            assert self.full(), 'queue not full, why are putters waiting?'
+            item, putter = self._putters.popleft()
+            self._put(item)
+
+            # When a getter runs and frees up a slot so this putter can
+            # run, we need to defer the put for a tick to ensure that
+            # getters and putters alternate perfectly. See
+            # ChannelTest.test_wait.
+            self._loop.call_soon(putter._set_result_unless_cancelled, None)
+
+            return self._get()
+
+        elif self.qsize():
+            return self._get()
+        else:
+            waiter = futures.Future(loop=self._loop)
+
+            self._getters.append(waiter)
+            return (yield from waiter)
+
+    def get_nowait(self):
+        """Remove and return an item from the queue.
+
+        Return an item if one is immediately available, else raise QueueEmpty.
+        """
+        self._consume_done_putters()
+        if self._putters:
+            assert self.full(), 'queue not full, why are putters waiting?'
+            item, putter = self._putters.popleft()
+            self._put(item)
+            # Wake putter on next tick.
+
+            # getter cannot be cancelled, we just removed done putters
+            putter.set_result(None)
+
+            return self._get()
+
+        elif self.qsize():
+            return self._get()
+        else:
+            raise QueueEmpty
+
+
+class PriorityQueue(Queue):
+    """A subclass of Queue; retrieves entries in priority order (lowest first).
+
+    Entries are typically tuples of the form: (priority number, data).
+    """
+
+    def _init(self, maxsize):
+        self._queue = []
+
+    def _put(self, item, heappush=heapq.heappush):
+        heappush(self._queue, item)
+
+    def _get(self, heappop=heapq.heappop):
+        return heappop(self._queue)
+
+
+class LifoQueue(Queue):
+    """A subclass of Queue that retrieves most recently added entries first."""
+
+    def _init(self, maxsize):
+        self._queue = []
+
+    def _put(self, item):
+        self._queue.append(item)
+
+    def _get(self):
+        return self._queue.pop()
+
+
+class JoinableQueue(Queue):
+    """A subclass of Queue with task_done() and join() methods."""
+
+    def __init__(self, maxsize=0, *, loop=None):
+        super().__init__(maxsize=maxsize, loop=loop)
+        self._unfinished_tasks = 0
+        self._finished = locks.Event(loop=self._loop)
+        self._finished.set()
+
+    def _format(self):
+        result = Queue._format(self)
+        if self._unfinished_tasks:
+            result += ' tasks={}'.format(self._unfinished_tasks)
+        return result
+
+    def _put(self, item):
+        super()._put(item)
+        self._unfinished_tasks += 1
+        self._finished.clear()
+
+    def task_done(self):
+        """Indicate that a formerly enqueued task is complete.
+
+        Used by queue consumers. For each get() used to fetch a task,
+        a subsequent call to task_done() tells the queue that the processing
+        on the task is complete.
+
+        If a join() is currently blocking, it will resume when all items have
+        been processed (meaning that a task_done() call was received for every
+        item that had been put() into the queue).
+
+        Raises ValueError if called more times than there were items placed in
+        the queue.
+        """
+        if self._unfinished_tasks <= 0:
+            raise ValueError('task_done() called too many times')
+        self._unfinished_tasks -= 1
+        if self._unfinished_tasks == 0:
+            self._finished.set()
+
+    @coroutine
+    def join(self):
+        """Block until all items in the queue have been gotten and processed.
+
+        The count of unfinished tasks goes up whenever an item is added to the
+        queue. The count goes down whenever a consumer thread calls task_done()
+        to indicate that the item was retrieved and all work on it is complete.
+        When the count of unfinished tasks drops to zero, join() unblocks.
+        """
+        if self._unfinished_tasks > 0:
+            yield from self._finished.wait()
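
A producer/consumer sketch using JoinableQueue's task_done()/join() contract
(illustrative, not from this changeset):

    import asyncio

    @asyncio.coroutine
    def producer(q):
        for i in range(3):
            yield from q.put(i)

    @asyncio.coroutine
    def consumer(q):
        while True:
            item = yield from q.get()
            print('consumed', item)
            q.task_done()              # exactly one task_done() per get()

    loop = asyncio.get_event_loop()
    q = asyncio.JoinableQueue(loop=loop)
    loop.run_until_complete(producer(q))
    worker = loop.create_task(consumer(q))
    loop.run_until_complete(q.join())  # returns once every item is processed
    worker.cancel()
    loop.run_until_complete(asyncio.sleep(0))  # let the cancellation settle
    loop.close()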

+ 1070 - 0
env/Lib/site-packages/asyncio/selector_events.py

@@ -0,0 +1,1070 @@
+"""Event loop using a selector and related classes.
+
+A selector is a "notify-when-ready" multiplexer.  For a subclass which
+also includes support for signal handling, see the unix_events sub-module.
+"""
+
+__all__ = ['BaseSelectorEventLoop']
+
+import collections
+import errno
+import functools
+import socket
+import sys
+import warnings
+try:
+    import ssl
+except ImportError:  # pragma: no cover
+    ssl = None
+
+from . import base_events
+from . import constants
+from . import events
+from . import futures
+from . import selectors
+from . import transports
+from . import sslproto
+from .coroutines import coroutine
+from .log import logger
+
+
+def _test_selector_event(selector, fd, event):
+    # Test if the selector is monitoring 'event' events
+    # for the file descriptor 'fd'.
+    try:
+        key = selector.get_key(fd)
+    except KeyError:
+        return False
+    else:
+        return bool(key.events & event)
+
+
+class BaseSelectorEventLoop(base_events.BaseEventLoop):
+    """Selector event loop.
+
+    See events.EventLoop for API specification.
+    """
+
+    def __init__(self, selector=None):
+        super().__init__()
+
+        if selector is None:
+            selector = selectors.DefaultSelector()
+        logger.debug('Using selector: %s', selector.__class__.__name__)
+        self._selector = selector
+        self._make_self_pipe()
+
+    def _make_socket_transport(self, sock, protocol, waiter=None, *,
+                               extra=None, server=None):
+        return _SelectorSocketTransport(self, sock, protocol, waiter,
+                                        extra, server)
+
+    def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None,
+                            *, server_side=False, server_hostname=None,
+                            extra=None, server=None):
+        if not sslproto._is_sslproto_available():
+            return self._make_legacy_ssl_transport(
+                rawsock, protocol, sslcontext, waiter,
+                server_side=server_side, server_hostname=server_hostname,
+                extra=extra, server=server)
+
+        ssl_protocol = sslproto.SSLProtocol(self, protocol, sslcontext, waiter,
+                                            server_side, server_hostname)
+        _SelectorSocketTransport(self, rawsock, ssl_protocol,
+                                 extra=extra, server=server)
+        return ssl_protocol._app_transport
+
+    def _make_legacy_ssl_transport(self, rawsock, protocol, sslcontext,
+                                   waiter, *,
+                                   server_side=False, server_hostname=None,
+                                   extra=None, server=None):
+        # Use the legacy API: SSL_write, SSL_read, etc. The legacy API is used
+        # on Python 3.4 and older, when ssl.MemoryBIO is not available.
+        return _SelectorSslTransport(
+            self, rawsock, protocol, sslcontext, waiter,
+            server_side, server_hostname, extra, server)
+
+    def _make_datagram_transport(self, sock, protocol,
+                                 address=None, waiter=None, extra=None):
+        return _SelectorDatagramTransport(self, sock, protocol,
+                                          address, waiter, extra)
+
+    def close(self):
+        if self.is_running():
+            raise RuntimeError("Cannot close a running event loop")
+        if self.is_closed():
+            return
+        self._close_self_pipe()
+        super().close()
+        if self._selector is not None:
+            self._selector.close()
+            self._selector = None
+
+    def _socketpair(self):
+        raise NotImplementedError
+
+    def _close_self_pipe(self):
+        self.remove_reader(self._ssock.fileno())
+        self._ssock.close()
+        self._ssock = None
+        self._csock.close()
+        self._csock = None
+        self._internal_fds -= 1
+
+    def _make_self_pipe(self):
+        # A self-socket, really. :-)
+        self._ssock, self._csock = self._socketpair()
+        self._ssock.setblocking(False)
+        self._csock.setblocking(False)
+        self._internal_fds += 1
+        self.add_reader(self._ssock.fileno(), self._read_from_self)
+
+    def _process_self_data(self, data):
+        pass
+
+    def _read_from_self(self):
+        while True:
+            try:
+                data = self._ssock.recv(4096)
+                if not data:
+                    break
+                self._process_self_data(data)
+            except InterruptedError:
+                continue
+            except BlockingIOError:
+                break
+
+    def _write_to_self(self):
+        # This may be called from a different thread, possibly after
+        # _close_self_pipe() has been called or even while it is
+        # running.  Guard for self._csock being None or closed.  When
+        # a socket is closed, send() raises OSError (with errno set to
+        # EBADF, but let's not rely on the exact error code).
+        csock = self._csock
+        if csock is not None:
+            try:
+                csock.send(b'\0')
+            except OSError:
+                if self._debug:
+                    logger.debug("Fail to write a null byte into the "
+                                 "self-pipe socket",
+                                 exc_info=True)
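+
+    # Illustrative sketch (not part of this module): the same wake-up trick
+    # can be reproduced on POSIX with a plain socket pair. Writing one byte
+    # from any thread makes a select() blocked on `r` return immediately:
+    #
+    #     import select, socket
+    #     r, w = socket.socketpair()
+    #     r.setblocking(False)
+    #     w.send(b'\0')                  # from another thread
+    #     select.select([r], [], [])     # wakes up at once
+    #     r.recv(4096)                   # drain, as _read_from_self() does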
+
+    def _start_serving(self, protocol_factory, sock,
+                       sslcontext=None, server=None):
+        self.add_reader(sock.fileno(), self._accept_connection,
+                        protocol_factory, sock, sslcontext, server)
+
+    def _accept_connection(self, protocol_factory, sock,
+                           sslcontext=None, server=None):
+        try:
+            conn, addr = sock.accept()
+            if self._debug:
+                logger.debug("%r got a new connection from %r: %r",
+                             server, addr, conn)
+            conn.setblocking(False)
+        except (BlockingIOError, InterruptedError, ConnectionAbortedError):
+            pass  # False alarm.
+        except OSError as exc:
+            # There's nowhere to send the error, so just log it.
+            if exc.errno in (errno.EMFILE, errno.ENFILE,
+                             errno.ENOBUFS, errno.ENOMEM):
+                # Some platforms (e.g. Linux) keep reporting the FD as
+                # ready, so we remove the read handler temporarily.
+                # We'll try again in a while.
+                self.call_exception_handler({
+                    'message': 'socket.accept() out of system resources',
+                    'exception': exc,
+                    'socket': sock,
+                })
+                self.remove_reader(sock.fileno())
+                self.call_later(constants.ACCEPT_RETRY_DELAY,
+                                self._start_serving,
+                                protocol_factory, sock, sslcontext, server)
+            else:
+                raise  # The event loop will catch, log and ignore it.
+        else:
+            extra = {'peername': addr}
+            accept = self._accept_connection2(protocol_factory, conn, extra,
+                                              sslcontext, server)
+            self.create_task(accept)
+
+    @coroutine
+    def _accept_connection2(self, protocol_factory, conn, extra,
+                            sslcontext=None, server=None):
+        protocol = None
+        transport = None
+        try:
+            protocol = protocol_factory()
+            waiter = futures.Future(loop=self)
+            if sslcontext:
+                transport = self._make_ssl_transport(
+                    conn, protocol, sslcontext, waiter=waiter,
+                    server_side=True, extra=extra, server=server)
+            else:
+                transport = self._make_socket_transport(
+                    conn, protocol, waiter=waiter, extra=extra,
+                    server=server)
+
+            try:
+                yield from waiter
+            except:
+                transport.close()
+                raise
+
+            # It's now up to the protocol to handle the connection.
+        except Exception as exc:
+            if self._debug:
+                context = {
+                    'message': ('Error on transport creation '
+                                'for incoming connection'),
+                    'exception': exc,
+                }
+                if protocol is not None:
+                    context['protocol'] = protocol
+                if transport is not None:
+                    context['transport'] = transport
+                self.call_exception_handler(context)
+
+    def add_reader(self, fd, callback, *args):
+        """Add a reader callback."""
+        self._check_closed()
+        handle = events.Handle(callback, args, self)
+        try:
+            key = self._selector.get_key(fd)
+        except KeyError:
+            self._selector.register(fd, selectors.EVENT_READ,
+                                    (handle, None))
+        else:
+            mask, (reader, writer) = key.events, key.data
+            self._selector.modify(fd, mask | selectors.EVENT_READ,
+                                  (handle, writer))
+            if reader is not None:
+                reader.cancel()
+
+    def remove_reader(self, fd):
+        """Remove a reader callback."""
+        if self.is_closed():
+            return False
+        try:
+            key = self._selector.get_key(fd)
+        except KeyError:
+            return False
+        else:
+            mask, (reader, writer) = key.events, key.data
+            mask &= ~selectors.EVENT_READ
+            if not mask:
+                self._selector.unregister(fd)
+            else:
+                self._selector.modify(fd, mask, (None, writer))
+
+            if reader is not None:
+                reader.cancel()
+                return True
+            else:
+                return False
+
+    def add_writer(self, fd, callback, *args):
+        """Add a writer callback.."""
+        self._check_closed()
+        handle = events.Handle(callback, args, self)
+        try:
+            key = self._selector.get_key(fd)
+        except KeyError:
+            self._selector.register(fd, selectors.EVENT_WRITE,
+                                    (None, handle))
+        else:
+            mask, (reader, writer) = key.events, key.data
+            self._selector.modify(fd, mask | selectors.EVENT_WRITE,
+                                  (reader, handle))
+            if writer is not None:
+                writer.cancel()
+
+    def remove_writer(self, fd):
+        """Remove a writer callback."""
+        if self.is_closed():
+            return False
+        try:
+            key = self._selector.get_key(fd)
+        except KeyError:
+            return False
+        else:
+            mask, (reader, writer) = key.events, key.data
+            # Remove both writer and connector.
+            mask &= ~selectors.EVENT_WRITE
+            if not mask:
+                self._selector.unregister(fd)
+            else:
+                self._selector.modify(fd, mask, (reader, None))
+
+            if writer is not None:
+                writer.cancel()
+                return True
+            else:
+                return False
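+
+    # Note on the bookkeeping above: each selector key stores a
+    # (reader_handle, writer_handle) pair as its data, so one fd carries
+    # both interests in a single registration. A rough equivalent with the
+    # selectors module directly (hypothetical fd, on_read, on_write):
+    #
+    #     sel.register(fd, selectors.EVENT_READ | selectors.EVENT_WRITE,
+    #                  (on_read, on_write))
+    #     reader, writer = sel.get_key(fd).data   # unpack as above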
+
+    def sock_recv(self, sock, n):
+        """Receive data from the socket.
+
+        The return value is a bytes object representing the data received.
+        The maximum amount of data to be received at once is specified
+        by the n argument.
+
+        This method is a coroutine.
+        """
+        if self._debug and sock.gettimeout() != 0:
+            raise ValueError("the socket must be non-blocking")
+        fut = futures.Future(loop=self)
+        self._sock_recv(fut, False, sock, n)
+        return fut
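+
+    # Illustrative usage (a sketch, not part of this module): inside a
+    # coroutine running on this loop, with `sock` a connected non-blocking
+    # socket:
+    #
+    #     data = yield from loop.sock_recv(sock, 4096)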
+
+    def _sock_recv(self, fut, registered, sock, n):
+        # _sock_recv() can add itself as an I/O callback if the operation can't
+        # be done immediately. Don't use it directly; call sock_recv().
+        fd = sock.fileno()
+        if registered:
+            # Remove the callback early.  It should be rare that the
+            # selector says the fd is ready but the call still returns
+            # EAGAIN, and I am willing to take a hit in that case in
+            # order to simplify the common case.
+            self.remove_reader(fd)
+        if fut.cancelled():
+            return
+        try:
+            data = sock.recv(n)
+        except (BlockingIOError, InterruptedError):
+            self.add_reader(fd, self._sock_recv, fut, True, sock, n)
+        except Exception as exc:
+            fut.set_exception(exc)
+        else:
+            fut.set_result(data)
+
+    def sock_sendall(self, sock, data):
+        """Send data to the socket.
+
+        The socket must be connected to a remote socket. This method continues
+        to send data from data until either all data has been sent or an
+        error occurs. None is returned on success. On error, an exception is
+        raised, and there is no way to determine how much data, if any, was
+        successfully processed by the receiving end of the connection.
+
+        This method is a coroutine.
+        """
+        if self._debug and sock.gettimeout() != 0:
+            raise ValueError("the socket must be non-blocking")
+        fut = futures.Future(loop=self)
+        if data:
+            self._sock_sendall(fut, False, sock, data)
+        else:
+            fut.set_result(None)
+        return fut
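+
+    # Illustrative usage (a sketch): echoing received data back, reusing
+    # `sock` and `data` from the sock_recv() example above:
+    #
+    #     yield from loop.sock_sendall(sock, data)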
+
+    def _sock_sendall(self, fut, registered, sock, data):
+        fd = sock.fileno()
+
+        if registered:
+            self.remove_writer(fd)
+        if fut.cancelled():
+            return
+
+        try:
+            n = sock.send(data)
+        except (BlockingIOError, InterruptedError):
+            n = 0
+        except Exception as exc:
+            fut.set_exception(exc)
+            return
+
+        if n == len(data):
+            fut.set_result(None)
+        else:
+            if n:
+                data = data[n:]
+            self.add_writer(fd, self._sock_sendall, fut, True, sock, data)
+
+    def sock_connect(self, sock, address):
+        """Connect to a remote socket at address.
+
+        The address must be already resolved to avoid the trap of hanging the
+        entire event loop when the address requires doing a DNS lookup. For
+        example, it must be an IP address, not a hostname, for AF_INET and
+        AF_INET6 address families. Use getaddrinfo() to resolve the hostname
+        asynchronously.
+
+        This method is a coroutine.
+        """
+        if self._debug and sock.gettimeout() != 0:
+            raise ValueError("the socket must be non-blocking")
+        fut = futures.Future(loop=self)
+        try:
+            if self._debug:
+                base_events._check_resolved_address(sock, address)
+        except ValueError as err:
+            fut.set_exception(err)
+        else:
+            self._sock_connect(fut, sock, address)
+        return fut
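+
+    # Illustrative usage (a sketch): resolve first, then connect, so the
+    # event loop never blocks on DNS (hypothetical host and port):
+    #
+    #     infos = yield from loop.getaddrinfo('example.com', 80,
+    #                                         type=socket.SOCK_STREAM)
+    #     family, type_, proto, _, address = infos[0]
+    #     sock = socket.socket(family, type_, proto)
+    #     sock.setblocking(False)
+    #     yield from loop.sock_connect(sock, address)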
+
+    def _sock_connect(self, fut, sock, address):
+        fd = sock.fileno()
+        try:
+            while True:
+                try:
+                    sock.connect(address)
+                except InterruptedError:
+                    continue
+                else:
+                    break
+        except BlockingIOError:
+            fut.add_done_callback(functools.partial(self._sock_connect_done,
+                                                    fd))
+            self.add_writer(fd, self._sock_connect_cb, fut, sock, address)
+        except Exception as exc:
+            fut.set_exception(exc)
+        else:
+            fut.set_result(None)
+
+    def _sock_connect_done(self, fd, fut):
+        self.remove_writer(fd)
+
+    def _sock_connect_cb(self, fut, sock, address):
+        if fut.cancelled():
+            return
+
+        try:
+            err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+            if err != 0:
+                # Jump to any except clause below.
+                raise OSError(err, 'Connect call failed %s' % (address,))
+        except (BlockingIOError, InterruptedError):
+            # socket is still registered, the callback will be retried later
+            pass
+        except Exception as exc:
+            fut.set_exception(exc)
+        else:
+            fut.set_result(None)
+
+    def sock_accept(self, sock):
+        """Accept a connection.
+
+        The socket must be bound to an address and listening for connections.
+        The return value is a pair (conn, address) where conn is a new socket
+        object usable to send and receive data on the connection, and address
+        is the address bound to the socket on the other end of the connection.
+
+        This method is a coroutine.
+        """
+        if self._debug and sock.gettimeout() != 0:
+            raise ValueError("the socket must be non-blocking")
+        fut = futures.Future(loop=self)
+        self._sock_accept(fut, False, sock)
+        return fut
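+
+    # Illustrative usage (a sketch): a minimal accept loop over a listening
+    # non-blocking socket `srv`, with a hypothetical handle() coroutine:
+    #
+    #     while True:
+    #         conn, addr = yield from loop.sock_accept(srv)
+    #         loop.create_task(handle(conn, addr))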
+
+    def _sock_accept(self, fut, registered, sock):
+        fd = sock.fileno()
+        if registered:
+            self.remove_reader(fd)
+        if fut.cancelled():
+            return
+        try:
+            conn, address = sock.accept()
+            conn.setblocking(False)
+        except (BlockingIOError, InterruptedError):
+            self.add_reader(fd, self._sock_accept, fut, True, sock)
+        except Exception as exc:
+            fut.set_exception(exc)
+        else:
+            fut.set_result((conn, address))
+
+    def _process_events(self, event_list):
+        for key, mask in event_list:
+            fileobj, (reader, writer) = key.fileobj, key.data
+            if mask & selectors.EVENT_READ and reader is not None:
+                if reader._cancelled:
+                    self.remove_reader(fileobj)
+                else:
+                    self._add_callback(reader)
+            if mask & selectors.EVENT_WRITE and writer is not None:
+                if writer._cancelled:
+                    self.remove_writer(fileobj)
+                else:
+                    self._add_callback(writer)
+
+    def _stop_serving(self, sock):
+        self.remove_reader(sock.fileno())
+        sock.close()
+
+
+class _SelectorTransport(transports._FlowControlMixin,
+                         transports.Transport):
+
+    max_size = 256 * 1024  # Buffer size passed to recv().
+
+    _buffer_factory = bytearray  # Constructs initial value for self._buffer.
+
+    # Attribute used in the destructor: it must be set even if the constructor
+    # is not called (see _SelectorSslTransport which may start by raising an
+    # exception)
+    _sock = None
+
+    def __init__(self, loop, sock, protocol, extra=None, server=None):
+        super().__init__(extra, loop)
+        self._extra['socket'] = sock
+        self._extra['sockname'] = sock.getsockname()
+        if 'peername' not in self._extra:
+            try:
+                self._extra['peername'] = sock.getpeername()
+            except socket.error:
+                self._extra['peername'] = None
+        self._sock = sock
+        self._sock_fd = sock.fileno()
+        self._protocol = protocol
+        self._protocol_connected = True
+        self._server = server
+        self._buffer = self._buffer_factory()
+        self._conn_lost = 0  # Set when call to connection_lost scheduled.
+        self._closing = False  # Set when close() called.
+        if self._server is not None:
+            self._server._attach()
+
+    def __repr__(self):
+        info = [self.__class__.__name__]
+        if self._sock is None:
+            info.append('closed')
+        elif self._closing:
+            info.append('closing')
+        info.append('fd=%s' % self._sock_fd)
+        # test if the transport was closed
+        if self._loop is not None:
+            polling = _test_selector_event(self._loop._selector,
+                                           self._sock_fd, selectors.EVENT_READ)
+            if polling:
+                info.append('read=polling')
+            else:
+                info.append('read=idle')
+
+            polling = _test_selector_event(self._loop._selector,
+                                           self._sock_fd,
+                                           selectors.EVENT_WRITE)
+            if polling:
+                state = 'polling'
+            else:
+                state = 'idle'
+
+            bufsize = self.get_write_buffer_size()
+            info.append('write=<%s, bufsize=%s>' % (state, bufsize))
+        return '<%s>' % ' '.join(info)
+
+    def abort(self):
+        self._force_close(None)
+
+    def close(self):
+        if self._closing:
+            return
+        self._closing = True
+        self._loop.remove_reader(self._sock_fd)
+        if not self._buffer:
+            self._conn_lost += 1
+            self._loop.call_soon(self._call_connection_lost, None)
+
+    # On Python 3.3 and older, objects with a destructor that are part of a
+    # reference cycle are never destroyed. This is no longer the case on
+    # Python 3.4, thanks to PEP 442.
+    if sys.version_info >= (3, 4):
+        def __del__(self):
+            if self._sock is not None:
+                warnings.warn("unclosed transport %r" % self, ResourceWarning)
+                self._sock.close()
+
+    def _fatal_error(self, exc, message='Fatal error on transport'):
+        # Should be called from exception handler only.
+        if isinstance(exc, (BrokenPipeError,
+                            ConnectionResetError, ConnectionAbortedError)):
+            if self._loop.get_debug():
+                logger.debug("%r: %s", self, message, exc_info=True)
+        else:
+            self._loop.call_exception_handler({
+                'message': message,
+                'exception': exc,
+                'transport': self,
+                'protocol': self._protocol,
+            })
+        self._force_close(exc)
+
+    def _force_close(self, exc):
+        if self._conn_lost:
+            return
+        if self._buffer:
+            self._buffer.clear()
+            self._loop.remove_writer(self._sock_fd)
+        if not self._closing:
+            self._closing = True
+            self._loop.remove_reader(self._sock_fd)
+        self._conn_lost += 1
+        self._loop.call_soon(self._call_connection_lost, exc)
+
+    def _call_connection_lost(self, exc):
+        try:
+            if self._protocol_connected:
+                self._protocol.connection_lost(exc)
+        finally:
+            self._sock.close()
+            self._sock = None
+            self._protocol = None
+            self._loop = None
+            server = self._server
+            if server is not None:
+                server._detach()
+                self._server = None
+
+    def get_write_buffer_size(self):
+        return len(self._buffer)
+
+
+class _SelectorSocketTransport(_SelectorTransport):
+
+    def __init__(self, loop, sock, protocol, waiter=None,
+                 extra=None, server=None):
+        super().__init__(loop, sock, protocol, extra, server)
+        self._eof = False
+        self._paused = False
+
+        self._loop.call_soon(self._protocol.connection_made, self)
+        # only start reading when connection_made() has been called
+        self._loop.call_soon(self._loop.add_reader,
+                             self._sock_fd, self._read_ready)
+        if waiter is not None:
+            # only wake up the waiter when connection_made() has been called
+            self._loop.call_soon(waiter._set_result_unless_cancelled, None)
+
+    def pause_reading(self):
+        if self._closing:
+            raise RuntimeError('Cannot pause_reading() when closing')
+        if self._paused:
+            raise RuntimeError('Already paused')
+        self._paused = True
+        self._loop.remove_reader(self._sock_fd)
+        if self._loop.get_debug():
+            logger.debug("%r pauses reading", self)
+
+    def resume_reading(self):
+        if not self._paused:
+            raise RuntimeError('Not paused')
+        self._paused = False
+        if self._closing:
+            return
+        self._loop.add_reader(self._sock_fd, self._read_ready)
+        if self._loop.get_debug():
+            logger.debug("%r resumes reading", self)
+
+    def _read_ready(self):
+        try:
+            data = self._sock.recv(self.max_size)
+        except (BlockingIOError, InterruptedError):
+            pass
+        except Exception as exc:
+            self._fatal_error(exc, 'Fatal read error on socket transport')
+        else:
+            if data:
+                self._protocol.data_received(data)
+            else:
+                if self._loop.get_debug():
+                    logger.debug("%r received EOF", self)
+                keep_open = self._protocol.eof_received()
+                if keep_open:
+                    # We're keeping the connection open so the
+                    # protocol can write more, but we still can't
+                    # receive more, so remove the reader callback.
+                    self._loop.remove_reader(self._sock_fd)
+                else:
+                    self.close()
+
+    def write(self, data):
+        if not isinstance(data, (bytes, bytearray, memoryview)):
+            raise TypeError('data argument must be byte-ish (%r)' %
+                            type(data))
+        if self._eof:
+            raise RuntimeError('Cannot call write() after write_eof()')
+        if not data:
+            return
+
+        if self._conn_lost:
+            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
+                logger.warning('socket.send() raised exception.')
+            self._conn_lost += 1
+            return
+
+        if not self._buffer:
+            # Optimization: try to send now.
+            try:
+                n = self._sock.send(data)
+            except (BlockingIOError, InterruptedError):
+                pass
+            except Exception as exc:
+                self._fatal_error(exc, 'Fatal write error on socket transport')
+                return
+            else:
+                data = data[n:]
+                if not data:
+                    return
+            # Not all was written; register write handler.
+            self._loop.add_writer(self._sock_fd, self._write_ready)
+
+        # Add it to the buffer.
+        self._buffer.extend(data)
+        self._maybe_pause_protocol()
+
+    def _write_ready(self):
+        assert self._buffer, 'Data should not be empty'
+
+        try:
+            n = self._sock.send(self._buffer)
+        except (BlockingIOError, InterruptedError):
+            pass
+        except Exception as exc:
+            self._loop.remove_writer(self._sock_fd)
+            self._buffer.clear()
+            self._fatal_error(exc, 'Fatal write error on socket transport')
+        else:
+            if n:
+                del self._buffer[:n]
+            self._maybe_resume_protocol()  # May append to buffer.
+            if not self._buffer:
+                self._loop.remove_writer(self._sock_fd)
+                if self._closing:
+                    self._call_connection_lost(None)
+                elif self._eof:
+                    self._sock.shutdown(socket.SHUT_WR)
+
+    def write_eof(self):
+        if self._eof:
+            return
+        self._eof = True
+        if not self._buffer:
+            self._sock.shutdown(socket.SHUT_WR)
+
+    def can_write_eof(self):
+        return True
+
+
+class _SelectorSslTransport(_SelectorTransport):
+
+    _buffer_factory = bytearray
+
+    def __init__(self, loop, rawsock, protocol, sslcontext, waiter=None,
+                 server_side=False, server_hostname=None,
+                 extra=None, server=None):
+        if ssl is None:
+            raise RuntimeError('stdlib ssl module not available')
+
+        if not sslcontext:
+            sslcontext = sslproto._create_transport_context(
+                server_side, server_hostname)
+
+        wrap_kwargs = {
+            'server_side': server_side,
+            'do_handshake_on_connect': False,
+        }
+        if server_hostname and not server_side:
+            wrap_kwargs['server_hostname'] = server_hostname
+        sslsock = sslcontext.wrap_socket(rawsock, **wrap_kwargs)
+
+        super().__init__(loop, sslsock, protocol, extra, server)
+        # the protocol connection is only made after the SSL handshake
+        self._protocol_connected = False
+
+        self._server_hostname = server_hostname
+        self._waiter = waiter
+        self._sslcontext = sslcontext
+        self._paused = False
+
+        # SSL-specific extra info.  (peercert is set later)
+        self._extra.update(sslcontext=sslcontext)
+
+        if self._loop.get_debug():
+            logger.debug("%r starts SSL handshake", self)
+            start_time = self._loop.time()
+        else:
+            start_time = None
+        self._on_handshake(start_time)
+
+    def _wakeup_waiter(self, exc=None):
+        if self._waiter is None:
+            return
+        if not self._waiter.cancelled():
+            if exc is not None:
+                self._waiter.set_exception(exc)
+            else:
+                self._waiter.set_result(None)
+        self._waiter = None
+
+    def _on_handshake(self, start_time):
+        try:
+            self._sock.do_handshake()
+        except ssl.SSLWantReadError:
+            self._loop.add_reader(self._sock_fd,
+                                  self._on_handshake, start_time)
+            return
+        except ssl.SSLWantWriteError:
+            self._loop.add_writer(self._sock_fd,
+                                  self._on_handshake, start_time)
+            return
+        except BaseException as exc:
+            if self._loop.get_debug():
+                logger.warning("%r: SSL handshake failed",
+                               self, exc_info=True)
+            self._loop.remove_reader(self._sock_fd)
+            self._loop.remove_writer(self._sock_fd)
+            self._sock.close()
+            self._wakeup_waiter(exc)
+            if isinstance(exc, Exception):
+                return
+            else:
+                raise
+
+        self._loop.remove_reader(self._sock_fd)
+        self._loop.remove_writer(self._sock_fd)
+
+        peercert = self._sock.getpeercert()
+        if not hasattr(self._sslcontext, 'check_hostname'):
+            # Verify the hostname if requested. Python 3.4+ uses
+            # check_hostname and checks the hostname in do_handshake().
+            if (self._server_hostname and
+                self._sslcontext.verify_mode != ssl.CERT_NONE):
+                try:
+                    ssl.match_hostname(peercert, self._server_hostname)
+                except Exception as exc:
+                    if self._loop.get_debug():
+                        logger.warning("%r: SSL handshake failed "
+                                       "on matching the hostname",
+                                       self, exc_info=True)
+                    self._sock.close()
+                    self._wakeup_waiter(exc)
+                    return
+
+        # Add extra info that becomes available after handshake.
+        self._extra.update(peercert=peercert,
+                           cipher=self._sock.cipher(),
+                           compression=self._sock.compression(),
+                           )
+
+        self._read_wants_write = False
+        self._write_wants_read = False
+        self._loop.add_reader(self._sock_fd, self._read_ready)
+        self._protocol_connected = True
+        self._loop.call_soon(self._protocol.connection_made, self)
+        # only wake up the waiter when connection_made() has been called
+        self._loop.call_soon(self._wakeup_waiter)
+
+        if self._loop.get_debug():
+            dt = self._loop.time() - start_time
+            logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3)
+
+    def pause_reading(self):
+        # XXX This is a bit icky, given the comment at the top of
+        # _read_ready().  Is it possible to evoke a deadlock?  I don't
+        # know, although it doesn't look like it; write() will still
+        # accept more data for the buffer and eventually the app will
+        # call resume_reading() again, and things will flow again.
+
+        if self._closing:
+            raise RuntimeError('Cannot pause_reading() when closing')
+        if self._paused:
+            raise RuntimeError('Already paused')
+        self._paused = True
+        self._loop.remove_reader(self._sock_fd)
+        if self._loop.get_debug():
+            logger.debug("%r pauses reading", self)
+
+    def resume_reading(self):
+        if not self._paused:
+            raise RuntimeError('Not paused')
+        self._paused = False
+        if self._closing:
+            return
+        self._loop.add_reader(self._sock_fd, self._read_ready)
+        if self._loop.get_debug():
+            logger.debug("%r resumes reading", self)
+
+    def _read_ready(self):
+        if self._write_wants_read:
+            self._write_wants_read = False
+            self._write_ready()
+
+            if self._buffer:
+                self._loop.add_writer(self._sock_fd, self._write_ready)
+
+        try:
+            data = self._sock.recv(self.max_size)
+        except (BlockingIOError, InterruptedError, ssl.SSLWantReadError):
+            pass
+        except ssl.SSLWantWriteError:
+            self._read_wants_write = True
+            self._loop.remove_reader(self._sock_fd)
+            self._loop.add_writer(self._sock_fd, self._write_ready)
+        except Exception as exc:
+            self._fatal_error(exc, 'Fatal read error on SSL transport')
+        else:
+            if data:
+                self._protocol.data_received(data)
+            else:
+                try:
+                    if self._loop.get_debug():
+                        logger.debug("%r received EOF", self)
+                    keep_open = self._protocol.eof_received()
+                    if keep_open:
+                        logger.warning('returning true from eof_received() '
+                                       'has no effect when using ssl')
+                finally:
+                    self.close()
+
+    def _write_ready(self):
+        if self._read_wants_write:
+            self._read_wants_write = False
+            self._read_ready()
+
+            if not (self._paused or self._closing):
+                self._loop.add_reader(self._sock_fd, self._read_ready)
+
+        if self._buffer:
+            try:
+                n = self._sock.send(self._buffer)
+            except (BlockingIOError, InterruptedError, ssl.SSLWantWriteError):
+                n = 0
+            except ssl.SSLWantReadError:
+                n = 0
+                self._loop.remove_writer(self._sock_fd)
+                self._write_wants_read = True
+            except Exception as exc:
+                self._loop.remove_writer(self._sock_fd)
+                self._buffer.clear()
+                self._fatal_error(exc, 'Fatal write error on SSL transport')
+                return
+
+            if n:
+                del self._buffer[:n]
+
+        self._maybe_resume_protocol()  # May append to buffer.
+
+        if not self._buffer:
+            self._loop.remove_writer(self._sock_fd)
+            if self._closing:
+                self._call_connection_lost(None)
+
+    def write(self, data):
+        if not isinstance(data, (bytes, bytearray, memoryview)):
+            raise TypeError('data argument must be byte-ish (%r)' %
+                            type(data))
+        if not data:
+            return
+
+        if self._conn_lost:
+            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
+                logger.warning('socket.send() raised exception.')
+            self._conn_lost += 1
+            return
+
+        if not self._buffer:
+            self._loop.add_writer(self._sock_fd, self._write_ready)
+
+        # Add it to the buffer.
+        self._buffer.extend(data)
+        self._maybe_pause_protocol()
+
+    def can_write_eof(self):
+        return False
+
+
+class _SelectorDatagramTransport(_SelectorTransport):
+
+    _buffer_factory = collections.deque
+
+    def __init__(self, loop, sock, protocol, address=None,
+                 waiter=None, extra=None):
+        super().__init__(loop, sock, protocol, extra)
+        self._address = address
+        self._loop.call_soon(self._protocol.connection_made, self)
+        # only start reading when connection_made() has been called
+        self._loop.call_soon(self._loop.add_reader,
+                             self._sock_fd, self._read_ready)
+        if waiter is not None:
+            # only wake up the waiter when connection_made() has been called
+            self._loop.call_soon(waiter._set_result_unless_cancelled, None)
+
+    def get_write_buffer_size(self):
+        return sum(len(data) for data, _ in self._buffer)
+
+    def _read_ready(self):
+        try:
+            data, addr = self._sock.recvfrom(self.max_size)
+        except (BlockingIOError, InterruptedError):
+            pass
+        except OSError as exc:
+            self._protocol.error_received(exc)
+        except Exception as exc:
+            self._fatal_error(exc, 'Fatal read error on datagram transport')
+        else:
+            self._protocol.datagram_received(data, addr)
+
+    def sendto(self, data, addr=None):
+        if not isinstance(data, (bytes, bytearray, memoryview)):
+            raise TypeError('data argument must be byte-ish (%r)' %
+                            type(data))
+        if not data:
+            return
+
+        if self._address and addr not in (None, self._address):
+            raise ValueError('Invalid address: must be None or %s' %
+                             (self._address,))
+
+        if self._conn_lost and self._address:
+            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
+                logger.warning('socket.send() raised exception.')
+            self._conn_lost += 1
+            return
+
+        if not self._buffer:
+            # Attempt to send it right away first.
+            try:
+                if self._address:
+                    self._sock.send(data)
+                else:
+                    self._sock.sendto(data, addr)
+                return
+            except (BlockingIOError, InterruptedError):
+                self._loop.add_writer(self._sock_fd, self._sendto_ready)
+            except OSError as exc:
+                self._protocol.error_received(exc)
+                return
+            except Exception as exc:
+                self._fatal_error(exc,
+                                  'Fatal write error on datagram transport')
+                return
+
+        # Ensure that what we buffer is immutable.
+        self._buffer.append((bytes(data), addr))
+        self._maybe_pause_protocol()
+
+    def _sendto_ready(self):
+        while self._buffer:
+            data, addr = self._buffer.popleft()
+            try:
+                if self._address:
+                    self._sock.send(data)
+                else:
+                    self._sock.sendto(data, addr)
+            except (BlockingIOError, InterruptedError):
+                self._buffer.appendleft((data, addr))  # Try again later.
+                break
+            except OSError as exc:
+                self._protocol.error_received(exc)
+                return
+            except Exception as exc:
+                self._fatal_error(exc,
+                                  'Fatal write error on datagram transport')
+                return
+
+        self._maybe_resume_protocol()  # May append to buffer.
+        if not self._buffer:
+            self._loop.remove_writer(self._sock_fd)
+            if self._closing:
+                self._call_connection_lost(None)
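+
+# Illustrative usage of the datagram transport (a sketch, not part of this
+# module), via the public endpoint API and a hypothetical protocol:
+#
+#     class EchoProtocol(asyncio.DatagramProtocol):
+#         def connection_made(self, transport):
+#             self.transport = transport
+#         def datagram_received(self, data, addr):
+#             self.transport.sendto(data, addr)
+#
+#     coro = loop.create_datagram_endpoint(
+#         EchoProtocol, local_addr=('127.0.0.1', 9999))
+#     transport, protocol = loop.run_until_complete(coro)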

+ 594 - 0
env/Lib/site-packages/asyncio/selectors.py

@@ -0,0 +1,594 @@
+"""Selectors module.
+
+This module allows high-level and efficient I/O multiplexing, built upon the
+`select` module primitives.
+"""
+
+
+from abc import ABCMeta, abstractmethod
+from collections import namedtuple
+from collections.abc import Mapping
+import math
+import select
+import sys
+
+
+# generic events, that must be mapped to implementation-specific ones
+EVENT_READ = (1 << 0)
+EVENT_WRITE = (1 << 1)
+
+
+def _fileobj_to_fd(fileobj):
+    """Return a file descriptor from a file object.
+
+    Parameters:
+    fileobj -- file object or file descriptor
+
+    Returns:
+    corresponding file descriptor
+
+    Raises:
+    ValueError if the object is invalid
+    """
+    if isinstance(fileobj, int):
+        fd = fileobj
+    else:
+        try:
+            fd = int(fileobj.fileno())
+        except (AttributeError, TypeError, ValueError):
+            raise ValueError("Invalid file object: "
+                             "{!r}".format(fileobj)) from None
+    if fd < 0:
+        raise ValueError("Invalid file descriptor: {}".format(fd))
+    return fd
+
+
+SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
+"""Object used to associate a file object to its backing file descriptor,
+selected event mask and attached data."""
+
+
+class _SelectorMapping(Mapping):
+    """Mapping of file objects to selector keys."""
+
+    def __init__(self, selector):
+        self._selector = selector
+
+    def __len__(self):
+        return len(self._selector._fd_to_key)
+
+    def __getitem__(self, fileobj):
+        try:
+            fd = self._selector._fileobj_lookup(fileobj)
+            return self._selector._fd_to_key[fd]
+        except KeyError:
+            raise KeyError("{!r} is not registered".format(fileobj)) from None
+
+    def __iter__(self):
+        return iter(self._selector._fd_to_key)
+
+
+class BaseSelector(metaclass=ABCMeta):
+    """Selector abstract base class.
+
+    A selector supports registering file objects to be monitored for specific
+    I/O events.
+
+    A file object is a file descriptor or any object with a `fileno()` method.
+    An arbitrary object can be attached to the file object, which can be used
+    for example to store context information, a callback, etc.
+
+    A selector can use various implementations (select(), poll(), epoll()...)
+    depending on the platform. The default `Selector` class uses the most
+    efficient implementation on the current platform.
+    """
+
+    @abstractmethod
+    def register(self, fileobj, events, data=None):
+        """Register a file object.
+
+        Parameters:
+        fileobj -- file object or file descriptor
+        events  -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE)
+        data    -- attached data
+
+        Returns:
+        SelectorKey instance
+
+        Raises:
+        ValueError if events is invalid
+        KeyError if fileobj is already registered
+        OSError if fileobj is closed or otherwise is unacceptable to
+                the underlying system call (if a system call is made)
+
+        Note:
+        OSError may or may not be raised
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def unregister(self, fileobj):
+        """Unregister a file object.
+
+        Parameters:
+        fileobj -- file object or file descriptor
+
+        Returns:
+        SelectorKey instance
+
+        Raises:
+        KeyError if fileobj is not registered
+
+        Note:
+        If fileobj is registered but has since been closed this does
+        *not* raise OSError (even if the wrapped syscall does)
+        """
+        raise NotImplementedError
+
+    def modify(self, fileobj, events, data=None):
+        """Change a registered file object monitored events or attached data.
+
+        Parameters:
+        fileobj -- file object or file descriptor
+        events  -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE)
+        data    -- attached data
+
+        Returns:
+        SelectorKey instance
+
+        Raises:
+        Anything that unregister() or register() raises
+        """
+        self.unregister(fileobj)
+        return self.register(fileobj, events, data)
+
+    @abstractmethod
+    def select(self, timeout=None):
+        """Perform the actual selection, until some monitored file objects are
+        ready or a timeout expires.
+
+        Parameters:
+        timeout -- if timeout > 0, this specifies the maximum wait time, in
+                   seconds
+                   if timeout <= 0, the select() call won't block, and will
+                   report the currently ready file objects
+                   if timeout is None, select() will block until a monitored
+                   file object becomes ready
+
+        Returns:
+        list of (key, events) for ready file objects
+        `events` is a bitwise mask of EVENT_READ|EVENT_WRITE
+        """
+        raise NotImplementedError
+
+    def close(self):
+        """Close the selector.
+
+        This must be called to make sure that any underlying resource is freed.
+        """
+        pass
+
+    def get_key(self, fileobj):
+        """Return the key associated to a registered file object.
+
+        Returns:
+        SelectorKey for this file object
+        """
+        mapping = self.get_map()
+        if mapping is None:
+            raise RuntimeError('Selector is closed')
+        try:
+            return mapping[fileobj]
+        except KeyError:
+            raise KeyError("{!r} is not registered".format(fileobj)) from None
+
+    @abstractmethod
+    def get_map(self):
+        """Return a mapping of file objects to selector keys."""
+        raise NotImplementedError
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.close()
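+
+# Illustrative usage of the BaseSelector API (a sketch; assumes a listening
+# socket `server` and a callback `accept` attached as the key's data):
+#
+#     sel = DefaultSelector()
+#     sel.register(server, EVENT_READ, data=accept)
+#     while True:
+#         for key, events in sel.select(timeout=1.0):
+#             key.data(key.fileobj)    # dispatch the attached callback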
+
+
+class _BaseSelectorImpl(BaseSelector):
+    """Base selector implementation."""
+
+    def __init__(self):
+        # this maps file descriptors to keys
+        self._fd_to_key = {}
+        # read-only mapping returned by get_map()
+        self._map = _SelectorMapping(self)
+
+    def _fileobj_lookup(self, fileobj):
+        """Return a file descriptor from a file object.
+
+        This wraps _fileobj_to_fd() to do an exhaustive search in case
+        the object is invalid but we still have it in our map.  This
+        is used by unregister() so we can unregister an object that
+        was previously registered even if it is closed.  It is also
+        used by _SelectorMapping.
+        """
+        try:
+            return _fileobj_to_fd(fileobj)
+        except ValueError:
+            # Do an exhaustive search.
+            for key in self._fd_to_key.values():
+                if key.fileobj is fileobj:
+                    return key.fd
+            # Raise ValueError after all.
+            raise
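+
+    # For example, a socket that was closed after being registered no
+    # longer has a usable fileno(), yet unregister() still succeeds
+    # because the scan above finds the key by object identity:
+    #
+    #     sel.register(sock, EVENT_READ)
+    #     sock.close()
+    #     sel.unregister(sock)    # falls back to the identity scan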
+
+    def register(self, fileobj, events, data=None):
+        if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
+            raise ValueError("Invalid events: {!r}".format(events))
+
+        key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
+
+        if key.fd in self._fd_to_key:
+            raise KeyError("{!r} (FD {}) is already registered"
+                           .format(fileobj, key.fd))
+
+        self._fd_to_key[key.fd] = key
+        return key
+
+    def unregister(self, fileobj):
+        try:
+            key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
+        except KeyError:
+            raise KeyError("{!r} is not registered".format(fileobj)) from None
+        return key
+
+    def modify(self, fileobj, events, data=None):
+        # TODO: Subclasses can probably optimize this even further.
+        try:
+            key = self._fd_to_key[self._fileobj_lookup(fileobj)]
+        except KeyError:
+            raise KeyError("{!r} is not registered".format(fileobj)) from None
+        if events != key.events:
+            self.unregister(fileobj)
+            key = self.register(fileobj, events, data)
+        elif data != key.data:
+            # Use a shortcut to update the data.
+            key = key._replace(data=data)
+            self._fd_to_key[key.fd] = key
+        return key
+
+    def close(self):
+        self._fd_to_key.clear()
+        self._map = None
+
+    def get_map(self):
+        return self._map
+
+    def _key_from_fd(self, fd):
+        """Return the key associated to a given file descriptor.
+
+        Parameters:
+        fd -- file descriptor
+
+        Returns:
+        corresponding key, or None if not found
+        """
+        try:
+            return self._fd_to_key[fd]
+        except KeyError:
+            return None
+
+
+class SelectSelector(_BaseSelectorImpl):
+    """Select-based selector."""
+
+    def __init__(self):
+        super().__init__()
+        self._readers = set()
+        self._writers = set()
+
+    def register(self, fileobj, events, data=None):
+        key = super().register(fileobj, events, data)
+        if events & EVENT_READ:
+            self._readers.add(key.fd)
+        if events & EVENT_WRITE:
+            self._writers.add(key.fd)
+        return key
+
+    def unregister(self, fileobj):
+        key = super().unregister(fileobj)
+        self._readers.discard(key.fd)
+        self._writers.discard(key.fd)
+        return key
+
+    if sys.platform == 'win32':
+        def _select(self, r, w, _, timeout=None):
+            r, w, x = select.select(r, w, w, timeout)
+            return r, w + x, []
+    else:
+        _select = select.select
+
+    def select(self, timeout=None):
+        timeout = None if timeout is None else max(timeout, 0)
+        ready = []
+        try:
+            r, w, _ = self._select(self._readers, self._writers, [], timeout)
+        except InterruptedError:
+            return ready
+        r = set(r)
+        w = set(w)
+        for fd in r | w:
+            events = 0
+            if fd in r:
+                events |= EVENT_READ
+            if fd in w:
+                events |= EVENT_WRITE
+
+            key = self._key_from_fd(fd)
+            if key:
+                ready.append((key, events & key.events))
+        return ready
+
+
+if hasattr(select, 'poll'):
+
+    class PollSelector(_BaseSelectorImpl):
+        """Poll-based selector."""
+
+        def __init__(self):
+            super().__init__()
+            self._poll = select.poll()
+
+        def register(self, fileobj, events, data=None):
+            key = super().register(fileobj, events, data)
+            poll_events = 0
+            if events & EVENT_READ:
+                poll_events |= select.POLLIN
+            if events & EVENT_WRITE:
+                poll_events |= select.POLLOUT
+            self._poll.register(key.fd, poll_events)
+            return key
+
+        def unregister(self, fileobj):
+            key = super().unregister(fileobj)
+            self._poll.unregister(key.fd)
+            return key
+
+        def select(self, timeout=None):
+            if timeout is None:
+                timeout = None
+            elif timeout <= 0:
+                timeout = 0
+            else:
+                # poll() has a resolution of 1 millisecond, round away from
+                # zero to wait *at least* timeout seconds.
+                timeout = math.ceil(timeout * 1e3)
+            ready = []
+            try:
+                fd_event_list = self._poll.poll(timeout)
+            except InterruptedError:
+                return ready
+            for fd, event in fd_event_list:
+                events = 0
+                if event & ~select.POLLIN:
+                    events |= EVENT_WRITE
+                if event & ~select.POLLOUT:
+                    events |= EVENT_READ
+
+                key = self._key_from_fd(fd)
+                if key:
+                    ready.append((key, events & key.events))
+            return ready
+
+
+if hasattr(select, 'epoll'):
+
+    class EpollSelector(_BaseSelectorImpl):
+        """Epoll-based selector."""
+
+        def __init__(self):
+            super().__init__()
+            self._epoll = select.epoll()
+
+        def fileno(self):
+            return self._epoll.fileno()
+
+        def register(self, fileobj, events, data=None):
+            key = super().register(fileobj, events, data)
+            epoll_events = 0
+            if events & EVENT_READ:
+                epoll_events |= select.EPOLLIN
+            if events & EVENT_WRITE:
+                epoll_events |= select.EPOLLOUT
+            self._epoll.register(key.fd, epoll_events)
+            return key
+
+        def unregister(self, fileobj):
+            key = super().unregister(fileobj)
+            try:
+                self._epoll.unregister(key.fd)
+            except OSError:
+                # This can happen if the FD was closed since it
+                # was registered.
+                pass
+            return key
+
+        def select(self, timeout=None):
+            if timeout is None:
+                timeout = -1
+            elif timeout <= 0:
+                timeout = 0
+            else:
+                # epoll_wait() has a resolution of 1 millisecond, round away
+                # from zero to wait *at least* timeout seconds.
+                timeout = math.ceil(timeout * 1e3) * 1e-3
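+                # For example, timeout=0.0005 becomes ceil(0.5) = 1 ms,
+                # converted back to 0.001 s: rounding away from zero
+                # guarantees we never wait less than requested.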
+
+            # epoll_wait() expects `maxevents` to be greater than zero;
+            # we want to make sure that `select()` can be called when no
+            # FD is registered.
+            max_ev = max(len(self._fd_to_key), 1)
+
+            ready = []
+            try:
+                fd_event_list = self._epoll.poll(timeout, max_ev)
+            except InterruptedError:
+                return ready
+            for fd, event in fd_event_list:
+                events = 0
+                if event & ~select.EPOLLIN:
+                    events |= EVENT_WRITE
+                if event & ~select.EPOLLOUT:
+                    events |= EVENT_READ
+
+                key = self._key_from_fd(fd)
+                if key:
+                    ready.append((key, events & key.events))
+            return ready
+
+        def close(self):
+            self._epoll.close()
+            super().close()
+
+
+if hasattr(select, 'devpoll'):
+
+    class DevpollSelector(_BaseSelectorImpl):
+        """Solaris /dev/poll selector."""
+
+        def __init__(self):
+            super().__init__()
+            self._devpoll = select.devpoll()
+
+        def fileno(self):
+            return self._devpoll.fileno()
+
+        def register(self, fileobj, events, data=None):
+            key = super().register(fileobj, events, data)
+            poll_events = 0
+            if events & EVENT_READ:
+                poll_events |= select.POLLIN
+            if events & EVENT_WRITE:
+                poll_events |= select.POLLOUT
+            self._devpoll.register(key.fd, poll_events)
+            return key
+
+        def unregister(self, fileobj):
+            key = super().unregister(fileobj)
+            self._devpoll.unregister(key.fd)
+            return key
+
+        def select(self, timeout=None):
+            if timeout is None:
+                timeout = None
+            elif timeout <= 0:
+                timeout = 0
+            else:
+                # devpoll() has a resolution of 1 millisecond, round away from
+                # zero to wait *at least* timeout seconds.
+                timeout = math.ceil(timeout * 1e3)
+            ready = []
+            try:
+                fd_event_list = self._devpoll.poll(timeout)
+            except InterruptedError:
+                return ready
+            for fd, event in fd_event_list:
+                events = 0
+                if event & ~select.POLLIN:
+                    events |= EVENT_WRITE
+                if event & ~select.POLLOUT:
+                    events |= EVENT_READ
+
+                key = self._key_from_fd(fd)
+                if key:
+                    ready.append((key, events & key.events))
+            return ready
+
+        def close(self):
+            self._devpoll.close()
+            super().close()
+
+
+if hasattr(select, 'kqueue'):
+
+    class KqueueSelector(_BaseSelectorImpl):
+        """Kqueue-based selector."""
+
+        def __init__(self):
+            super().__init__()
+            self._kqueue = select.kqueue()
+
+        def fileno(self):
+            return self._kqueue.fileno()
+
+        def register(self, fileobj, events, data=None):
+            key = super().register(fileobj, events, data)
+            if events & EVENT_READ:
+                kev = select.kevent(key.fd, select.KQ_FILTER_READ,
+                                    select.KQ_EV_ADD)
+                self._kqueue.control([kev], 0, 0)
+            if events & EVENT_WRITE:
+                kev = select.kevent(key.fd, select.KQ_FILTER_WRITE,
+                                    select.KQ_EV_ADD)
+                self._kqueue.control([kev], 0, 0)
+            return key
+
+        def unregister(self, fileobj):
+            key = super().unregister(fileobj)
+            if key.events & EVENT_READ:
+                kev = select.kevent(key.fd, select.KQ_FILTER_READ,
+                                    select.KQ_EV_DELETE)
+                try:
+                    self._kqueue.control([kev], 0, 0)
+                except OSError:
+                    # This can happen if the FD was closed since it
+                    # was registered.
+                    pass
+            if key.events & EVENT_WRITE:
+                kev = select.kevent(key.fd, select.KQ_FILTER_WRITE,
+                                    select.KQ_EV_DELETE)
+                try:
+                    self._kqueue.control([kev], 0, 0)
+                except OSError:
+                    # See comment above.
+                    pass
+            return key
+
+        def select(self, timeout=None):
+            timeout = None if timeout is None else max(timeout, 0)
+            # kqueue.control() returns immediately when max_ev is 0; use at
+            # least 1 so that select() can still block when no FD is
+            # registered (same issue as epoll above).
+            max_ev = max(len(self._fd_to_key), 1)
+            ready = []
+            try:
+                kev_list = self._kqueue.control(None, max_ev, timeout)
+            except InterruptedError:
+                return ready
+            for kev in kev_list:
+                fd = kev.ident
+                flag = kev.filter
+                events = 0
+                if flag == select.KQ_FILTER_READ:
+                    events |= EVENT_READ
+                if flag == select.KQ_FILTER_WRITE:
+                    events |= EVENT_WRITE
+
+                key = self._key_from_fd(fd)
+                if key:
+                    ready.append((key, events & key.events))
+            return ready
+
+        def close(self):
+            self._kqueue.close()
+            super().close()
+
+
+# Choose the best implementation, roughly:
+#    epoll|kqueue|devpoll > poll > select.
+# select() also can't accept an FD > FD_SETSIZE (usually around 1024)
+if 'KqueueSelector' in globals():
+    DefaultSelector = KqueueSelector
+elif 'EpollSelector' in globals():
+    DefaultSelector = EpollSelector
+elif 'DevpollSelector' in globals():
+    DefaultSelector = DevpollSelector
+elif 'PollSelector' in globals():
+    DefaultSelector = PollSelector
+else:
+    DefaultSelector = SelectSelector

+ 668 - 0
env/Lib/site-packages/asyncio/sslproto.py

@@ -0,0 +1,668 @@
+import collections
+import sys
+import warnings
+try:
+    import ssl
+except ImportError:  # pragma: no cover
+    ssl = None
+
+from . import protocols
+from . import transports
+from .log import logger
+
+
+def _create_transport_context(server_side, server_hostname):
+    if server_side:
+        raise ValueError('Server side SSL needs a valid SSLContext')
+
+    # Client side may pass ssl=True to use a default
+    # context; in that case the sslcontext passed is None.
+    # The default is secure for client connections.
+    if hasattr(ssl, 'create_default_context'):
+        # Python 3.4+: use up-to-date strong settings.
+        sslcontext = ssl.create_default_context()
+        if not server_hostname:
+            sslcontext.check_hostname = False
+    else:
+        # Fallback for Python 3.3.
+        sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+        sslcontext.options |= ssl.OP_NO_SSLv2
+        sslcontext.options |= ssl.OP_NO_SSLv3
+        sslcontext.set_default_verify_paths()
+        sslcontext.verify_mode = ssl.CERT_REQUIRED
+    return sslcontext
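+
+# Illustrative note (a sketch): this is the context a client ends up with
+# when it passes ssl=True instead of an SSLContext, e.g. (hypothetical
+# MyProtocol):
+#
+#     yield from loop.create_connection(MyProtocol, 'example.com', 443,
+#                                       ssl=True)
+#
+# On Python 3.4+ that context verifies certificates and checks hostnames.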
+
+
+def _is_sslproto_available():
+    return hasattr(ssl, "MemoryBIO")
+
+
+# States of an _SSLPipe.
+_UNWRAPPED = "UNWRAPPED"
+_DO_HANDSHAKE = "DO_HANDSHAKE"
+_WRAPPED = "WRAPPED"
+_SHUTDOWN = "SHUTDOWN"
+
+
+class _SSLPipe(object):
+    """An SSL "Pipe".
+
+    An SSL pipe allows you to communicate with an SSL/TLS protocol instance
+    through memory buffers. It can be used to implement a security layer for an
+    existing connection where you don't have access to the connection's file
+    descriptor, or for some reason you don't want to use it.
+
+    An SSL pipe can be in "wrapped" and "unwrapped" mode. In unwrapped mode,
+    data is passed through untransformed. In wrapped mode, application level
+    data is encrypted to SSL record level data and vice versa. The SSL record
+    level is the lowest level in the SSL protocol suite and is what travels
+    as-is over the wire.
+
+    An _SSLPipe is initially in "unwrapped" mode. To start SSL, call
+    do_handshake(); to shut SSL down again, call shutdown().
+    """
+
+    max_size = 256 * 1024   # Buffer size passed to read()
+
+    def __init__(self, context, server_side, server_hostname=None):
+        """
+        The *context* argument specifies the ssl.SSLContext to use.
+
+        The *server_side* argument indicates whether this is a server side or
+        client side transport.
+
+        The optional *server_hostname* argument can be used to specify the
+        hostname you are connecting to. You may only specify this parameter if
+        the _ssl module supports Server Name Indication (SNI).
+        """
+        self._context = context
+        self._server_side = server_side
+        self._server_hostname = server_hostname
+        self._state = _UNWRAPPED
+        self._incoming = ssl.MemoryBIO()
+        self._outgoing = ssl.MemoryBIO()
+        self._sslobj = None
+        self._need_ssldata = False
+        self._handshake_cb = None
+        self._shutdown_cb = None
+
+    @property
+    def context(self):
+        """The SSL context passed to the constructor."""
+        return self._context
+
+    @property
+    def ssl_object(self):
+        """The internal ssl.SSLObject instance.
+
+        Return None if the pipe is not wrapped.
+        """
+        return self._sslobj
+
+    @property
+    def need_ssldata(self):
+        """Whether more record level data is needed to complete a handshake
+        that is currently in progress."""
+        return self._need_ssldata
+
+    @property
+    def wrapped(self):
+        """
+        Whether a security layer is currently in effect.
+
+        Return False during handshake.
+        """
+        return self._state == _WRAPPED
+
+    def do_handshake(self, callback=None):
+        """Start the SSL handshake.
+
+        Return a list of ssldata: buffers of record-level data that must
+        be sent to the remote endpoint.
+
+        The optional *callback* argument can be used to install a callback that
+        will be called when the handshake is complete. The callback will be
+        called with None if successful, else an exception instance.
+        """
+        if self._state != _UNWRAPPED:
+            raise RuntimeError('handshake in progress or completed')
+        self._sslobj = self._context.wrap_bio(
+            self._incoming, self._outgoing,
+            server_side=self._server_side,
+            server_hostname=self._server_hostname)
+        self._state = _DO_HANDSHAKE
+        self._handshake_cb = callback
+        ssldata, appdata = self.feed_ssldata(b'', only_handshake=True)
+        assert len(appdata) == 0
+        return ssldata
+
+    def shutdown(self, callback=None):
+        """Start the SSL shutdown sequence.
+
+        Return a list of ssldata: buffers of record-level data that must
+        be sent to the remote endpoint.
+
+        The optional *callback* argument can be used to install a callback that
+        will be called when the shutdown is complete. The callback will be
+        called without arguments.
+        """
+        if self._state == _UNWRAPPED:
+            raise RuntimeError('no security layer present')
+        if self._state == _SHUTDOWN:
+            raise RuntimeError('shutdown in progress')
+        assert self._state in (_WRAPPED, _DO_HANDSHAKE)
+        self._state = _SHUTDOWN
+        self._shutdown_cb = callback
+        ssldata, appdata = self.feed_ssldata(b'')
+        assert appdata == [] or appdata == [b'']
+        return ssldata
+
+    def feed_eof(self):
+        """Send a potentially "ragged" EOF.
+
+        This method will raise an SSL_ERROR_EOF exception if the EOF is
+        unexpected.
+        """
+        self._incoming.write_eof()
+        ssldata, appdata = self.feed_ssldata(b'')
+        assert appdata == [] or appdata == [b'']
+
+    def feed_ssldata(self, data, only_handshake=False):
+        """Feed SSL record level data into the pipe.
+
+        The data must be a bytes instance. It is OK to send an empty bytes
+        instance. This can be used to get ssldata for a handshake initiated by
+        this endpoint.
+
+        Return a (ssldata, appdata) tuple. The ssldata element is a list of
+        buffers containing SSL data that needs to be sent to the remote SSL.
+
+        The appdata element is a list of buffers containing plaintext data that
+        needs to be forwarded to the application. The appdata list may contain
+        an empty buffer indicating an SSL "close_notify" alert. This alert must
+        be acknowledged by calling shutdown().
+        """
+        if self._state == _UNWRAPPED:
+            # If unwrapped, pass plaintext data straight through.
+            if data:
+                appdata = [data]
+            else:
+                appdata = []
+            return ([], appdata)
+
+        self._need_ssldata = False
+        if data:
+            self._incoming.write(data)
+
+        ssldata = []
+        appdata = []
+        try:
+            if self._state == _DO_HANDSHAKE:
+                # Call do_handshake() until it doesn't raise anymore.
+                self._sslobj.do_handshake()
+                self._state = _WRAPPED
+                if self._handshake_cb:
+                    self._handshake_cb(None)
+                if only_handshake:
+                    return (ssldata, appdata)
+                # Handshake done: fall through to the _WRAPPED block below.
+
+            if self._state == _WRAPPED:
+                # Main state: read data from SSL until close_notify
+                while True:
+                    chunk = self._sslobj.read(self.max_size)
+                    appdata.append(chunk)
+                    if not chunk:  # close_notify
+                        break
+
+            elif self._state == _SHUTDOWN:
+                # Call shutdown() until it doesn't raise anymore.
+                self._sslobj.unwrap()
+                self._sslobj = None
+                self._state = _UNWRAPPED
+                if self._shutdown_cb:
+                    self._shutdown_cb()
+
+            elif self._state == _UNWRAPPED:
+                # Drain possible plaintext data after close_notify.
+                appdata.append(self._incoming.read())
+        except (ssl.SSLError, ssl.CertificateError) as exc:
+            if getattr(exc, 'errno', None) not in (
+                    ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE,
+                    ssl.SSL_ERROR_SYSCALL):
+                if self._state == _DO_HANDSHAKE and self._handshake_cb:
+                    self._handshake_cb(exc)
+                raise
+            self._need_ssldata = (exc.errno == ssl.SSL_ERROR_WANT_READ)
+
+        # Check for record level data that needs to be sent back.
+        # Happens for the initial handshake and renegotiations.
+        if self._outgoing.pending:
+            ssldata.append(self._outgoing.read())
+        return (ssldata, appdata)
+
+    def feed_appdata(self, data, offset=0):
+        """Feed plaintext data into the pipe.
+
+        Return an (ssldata, offset) tuple. The ssldata element is a list of
+        buffers containing record level data that needs to be sent to the
+        remote SSL instance. The offset is the number of plaintext bytes that
+        were processed, which may be less than the length of data.
+
+        NOTE: In case of short writes, this call MUST be retried with the SAME
+        buffer passed into the *data* argument (i.e. the id() must be the
+        same). This is an OpenSSL requirement. A further particularity is that
+        a short write will always have offset == 0, because the _ssl module
+        does not enable partial writes. And even though the offset is zero,
+        there will still be encrypted data in ssldata.
+        """
+        assert 0 <= offset <= len(data)
+        if self._state == _UNWRAPPED:
+            # pass through data in unwrapped mode
+            if offset < len(data):
+                ssldata = [data[offset:]]
+            else:
+                ssldata = []
+            return (ssldata, len(data))
+
+        ssldata = []
+        view = memoryview(data)
+        while True:
+            self._need_ssldata = False
+            try:
+                if offset < len(view):
+                    offset += self._sslobj.write(view[offset:])
+            except ssl.SSLError as exc:
+                # It is not allowed to call write() after unwrap() until the
+                # close_notify is acknowledged. We return the condition to the
+                # caller as a short write.
+                if exc.reason == 'PROTOCOL_IS_SHUTDOWN':
+                    exc.errno = ssl.SSL_ERROR_WANT_READ
+                if exc.errno not in (ssl.SSL_ERROR_WANT_READ,
+                                     ssl.SSL_ERROR_WANT_WRITE,
+                                     ssl.SSL_ERROR_SYSCALL):
+                    raise
+                self._need_ssldata = (exc.errno == ssl.SSL_ERROR_WANT_READ)
+
+            # See if there's any record level data back for us.
+            if self._outgoing.pending:
+                ssldata.append(self._outgoing.read())
+            if offset == len(view) or self._need_ssldata:
+                break
+        return (ssldata, offset)
+
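+# Example (illustrative only): how SSLProtocol drives the pipe above.
+#
+#     pipe = _SSLPipe(context, server_side=False)
+#     records = pipe.do_handshake()                     # send these first
+#     ssldata, appdata = pipe.feed_ssldata(wire_bytes)  # decrypt incoming
+#     records, offset = pipe.feed_appdata(b'ping')      # encrypt outgoing
+#
+# Every buffer in `records`/`ssldata` must be written to the transport; a
+# short write from feed_appdata() (offset == 0) means the pipe needs more
+# handshake data from the peer first (need_ssldata is True).
+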
+
+class _SSLProtocolTransport(transports._FlowControlMixin,
+                            transports.Transport):
+
+    def __init__(self, loop, ssl_protocol, app_protocol):
+        self._loop = loop
+        self._ssl_protocol = ssl_protocol
+        self._app_protocol = app_protocol
+        self._closed = False
+
+    def get_extra_info(self, name, default=None):
+        """Get optional transport information."""
+        return self._ssl_protocol._get_extra_info(name, default)
+
+    def close(self):
+        """Close the transport.
+
+        Buffered data will be flushed asynchronously.  No more data
+        will be received.  After all buffered data is flushed, the
+        protocol's connection_lost() method will (eventually) be called
+        with None as its argument.
+        """
+        self._closed = True
+        self._ssl_protocol._start_shutdown()
+
+    # On Python 3.3 and older, objects with a destructor that are part of a
+    # reference cycle are never destroyed. That's no longer the case on
+    # Python 3.4 thanks to PEP 442.
+    if sys.version_info >= (3, 4):
+        def __del__(self):
+            if not self._closed:
+                warnings.warn("unclosed transport %r" % self, ResourceWarning)
+                self.close()
+
+    def pause_reading(self):
+        """Pause the receiving end.
+
+        No data will be passed to the protocol's data_received()
+        method until resume_reading() is called.
+        """
+        self._ssl_protocol._transport.pause_reading()
+
+    def resume_reading(self):
+        """Resume the receiving end.
+
+        Data received will once again be passed to the protocol's
+        data_received() method.
+        """
+        self._ssl_protocol._transport.resume_reading()
+
+    def set_write_buffer_limits(self, high=None, low=None):
+        """Set the high- and low-water limits for write flow control.
+
+        These two values control when to call the protocol's
+        pause_writing() and resume_writing() methods.  If specified,
+        the low-water limit must be less than or equal to the
+        high-water limit.  Neither value can be negative.
+
+        The defaults are implementation-specific.  If only the
+        high-water limit is given, the low-water limit defaults to an
+        implementation-specific value less than or equal to the
+        high-water limit.  Setting high to zero forces low to zero as
+        well, and causes pause_writing() to be called whenever the
+        buffer becomes non-empty.  Setting low to zero causes
+        resume_writing() to be called only once the buffer is empty.
+        Use of zero for either limit is generally sub-optimal as it
+        reduces opportunities for doing I/O and computation
+        concurrently.
+        """
+        self._ssl_protocol._transport.set_write_buffer_limits(high, low)
+
+    def get_write_buffer_size(self):
+        """Return the current size of the write buffer."""
+        return self._ssl_protocol._transport.get_write_buffer_size()
+
+    def write(self, data):
+        """Write some data bytes to the transport.
+
+        This does not block; it buffers the data and arranges for it
+        to be sent out asynchronously.
+        """
+        if not isinstance(data, (bytes, bytearray, memoryview)):
+            raise TypeError("data: expecting a bytes-like instance, got {!r}"
+                                .format(type(data).__name__))
+        if not data:
+            return
+        self._ssl_protocol._write_appdata(data)
+
+    def can_write_eof(self):
+        """Return True if this transport supports write_eof(), False if not."""
+        return False
+
+    def abort(self):
+        """Close the transport immediately.
+
+        Buffered data will be lost.  No more data will be received.
+        The protocol's connection_lost() method will (eventually) be
+        called with None as its argument.
+        """
+        self._ssl_protocol._abort()
+
+
+class SSLProtocol(protocols.Protocol):
+    """SSL protocol.
+
+    Implementation of SSL on top of a socket using incoming and outgoing
+    buffers which are ssl.MemoryBIO objects.
+    """
+
+    def __init__(self, loop, app_protocol, sslcontext, waiter,
+                 server_side=False, server_hostname=None):
+        if ssl is None:
+            raise RuntimeError('stdlib ssl module not available')
+
+        if not sslcontext:
+            sslcontext = _create_transport_context(server_side, server_hostname)
+
+        self._server_side = server_side
+        if server_hostname and not server_side:
+            self._server_hostname = server_hostname
+        else:
+            self._server_hostname = None
+        self._sslcontext = sslcontext
+        # SSL-specific extra info. More info is set when the handshake
+        # completes.
+        self._extra = dict(sslcontext=sslcontext)
+
+        # App data write buffering
+        self._write_backlog = collections.deque()
+        self._write_buffer_size = 0
+
+        self._waiter = waiter
+        self._loop = loop
+        self._app_protocol = app_protocol
+        self._app_transport = _SSLProtocolTransport(self._loop,
+                                                    self, self._app_protocol)
+        self._sslpipe = None
+        self._session_established = False
+        self._in_handshake = False
+        self._in_shutdown = False
+        self._transport = None
+
+    def _wakeup_waiter(self, exc=None):
+        if self._waiter is None:
+            return
+        if not self._waiter.cancelled():
+            if exc is not None:
+                self._waiter.set_exception(exc)
+            else:
+                self._waiter.set_result(None)
+        self._waiter = None
+
+    def connection_made(self, transport):
+        """Called when the low-level connection is made.
+
+        Start the SSL handshake.
+        """
+        self._transport = transport
+        self._sslpipe = _SSLPipe(self._sslcontext,
+                                 self._server_side,
+                                 self._server_hostname)
+        self._start_handshake()
+
+    def connection_lost(self, exc):
+        """Called when the low-level connection is lost or closed.
+
+        The argument is an exception object or None (the latter
+        meaning a regular EOF is received or the connection was
+        aborted or closed).
+        """
+        if self._session_established:
+            self._session_established = False
+            self._loop.call_soon(self._app_protocol.connection_lost, exc)
+        self._transport = None
+        self._app_transport = None
+
+    def pause_writing(self):
+        """Called when the low-level transport's buffer goes over
+        the high-water mark.
+        """
+        self._app_protocol.pause_writing()
+
+    def resume_writing(self):
+        """Called when the low-level transport's buffer drains below
+        the low-water mark.
+        """
+        self._app_protocol.resume_writing()
+
+    def data_received(self, data):
+        """Called when some SSL data is received.
+
+        The argument is a bytes object.
+        """
+        try:
+            ssldata, appdata = self._sslpipe.feed_ssldata(data)
+        except ssl.SSLError as e:
+            if self._loop.get_debug():
+                logger.warning('%r: SSL error %s (reason %s)',
+                               self, e.errno, e.reason)
+            self._abort()
+            return
+
+        for chunk in ssldata:
+            self._transport.write(chunk)
+
+        for chunk in appdata:
+            if chunk:
+                self._app_protocol.data_received(chunk)
+            else:
+                self._start_shutdown()
+                break
+
+    def eof_received(self):
+        """Called when the other end of the low-level stream
+        is half-closed.
+
+        If this returns a false value (including None), the transport
+        will close itself.  If it returns a true value, closing the
+        transport is up to the protocol.
+        """
+        try:
+            if self._loop.get_debug():
+                logger.debug("%r received EOF", self)
+
+            self._wakeup_waiter(ConnectionResetError)
+
+            if not self._in_handshake:
+                keep_open = self._app_protocol.eof_received()
+                if keep_open:
+                    logger.warning('returning true from eof_received() '
+                                   'has no effect when using ssl')
+        finally:
+            self._transport.close()
+
+    def _get_extra_info(self, name, default=None):
+        if name in self._extra:
+            return self._extra[name]
+        else:
+            return self._transport.get_extra_info(name, default)
+
+    def _start_shutdown(self):
+        if self._in_shutdown:
+            return
+        self._in_shutdown = True
+        self._write_appdata(b'')
+
+    def _write_appdata(self, data):
+        self._write_backlog.append((data, 0))
+        self._write_buffer_size += len(data)
+        self._process_write_backlog()
+
+    def _start_handshake(self):
+        if self._loop.get_debug():
+            logger.debug("%r starts SSL handshake", self)
+            self._handshake_start_time = self._loop.time()
+        else:
+            self._handshake_start_time = None
+        self._in_handshake = True
+        # (b'', 1) is a special value in _process_write_backlog() to do
+        # the SSL handshake
+        self._write_backlog.append((b'', 1))
+        self._loop.call_soon(self._process_write_backlog)
+
+    def _on_handshake_complete(self, handshake_exc):
+        self._in_handshake = False
+
+        sslobj = self._sslpipe.ssl_object
+        try:
+            if handshake_exc is not None:
+                raise handshake_exc
+
+            peercert = sslobj.getpeercert()
+            if not hasattr(self._sslcontext, 'check_hostname'):
+                # Verify hostname if requested; Python 3.4+ uses check_hostname
+                # and checks the hostname in do_handshake().
+                if (self._server_hostname and
+                        self._sslcontext.verify_mode != ssl.CERT_NONE):
+                    ssl.match_hostname(peercert, self._server_hostname)
+        except BaseException as exc:
+            if self._loop.get_debug():
+                if isinstance(exc, ssl.CertificateError):
+                    logger.warning("%r: SSL handshake failed "
+                                   "on verifying the certificate",
+                                   self, exc_info=True)
+                else:
+                    logger.warning("%r: SSL handshake failed",
+                                   self, exc_info=True)
+            self._transport.close()
+            if isinstance(exc, Exception):
+                self._wakeup_waiter(exc)
+                return
+            else:
+                raise
+
+        if self._loop.get_debug():
+            dt = self._loop.time() - self._handshake_start_time
+            logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3)
+
+        # Add extra info that becomes available after handshake.
+        self._extra.update(peercert=peercert,
+                           cipher=sslobj.cipher(),
+                           compression=sslobj.compression(),
+                           )
+        self._app_protocol.connection_made(self._app_transport)
+        self._wakeup_waiter()
+        self._session_established = True
+        # In case transport.write() was already called. Don't call
+        # _process_write_backlog() immediately, but schedule it:
+        # _on_handshake_complete() can be called indirectly from
+        # _process_write_backlog(), and _process_write_backlog() is not
+        # reentrant.
+        self._loop.call_soon(self._process_write_backlog)
+
+    def _process_write_backlog(self):
+        # Try to make progress on the write backlog.
+        if self._transport is None:
+            return
+
+        try:
+            for i in range(len(self._write_backlog)):
+                data, offset = self._write_backlog[0]
+                if data:
+                    ssldata, offset = self._sslpipe.feed_appdata(data, offset)
+                elif offset:
+                    ssldata = self._sslpipe.do_handshake(self._on_handshake_complete)
+                    offset = 1
+                else:
+                    ssldata = self._sslpipe.shutdown(self._finalize)
+                    offset = 1
+
+                for chunk in ssldata:
+                    self._transport.write(chunk)
+
+                if offset < len(data):
+                    self._write_backlog[0] = (data, offset)
+                    # A short write means that a write is blocked on a read
+                    # We need to enable reading if it is paused!
+                    assert self._sslpipe.need_ssldata
+                    if self._transport._paused:
+                        self._transport.resume_reading()
+                    break
+
+                # An entire chunk from the backlog was processed. We can
+                # delete it and reduce the outstanding buffer size.
+                del self._write_backlog[0]
+                self._write_buffer_size -= len(data)
+        except BaseException as exc:
+            if self._in_handshake:
+                self._on_handshake_complete(exc)
+            else:
+                self._fatal_error(exc, 'Fatal error on SSL transport')
+
+    def _fatal_error(self, exc, message='Fatal error on transport'):
+        # Should be called from exception handler only.
+        if isinstance(exc, (BrokenPipeError, ConnectionResetError)):
+            if self._loop.get_debug():
+                logger.debug("%r: %s", self, message, exc_info=True)
+        else:
+            self._loop.call_exception_handler({
+                'message': message,
+                'exception': exc,
+                'transport': self._transport,
+                'protocol': self,
+            })
+        if self._transport:
+            self._transport._force_close(exc)
+
+    def _finalize(self):
+        if self._transport is not None:
+            self._transport.close()
+
+    def _abort(self):
+        if self._transport is not None:
+            try:
+                self._transport.abort()
+            finally:
+                self._finalize()

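Everything in sslproto.py rests on the ssl.MemoryBIO machinery that _SSLPipe wraps. A minimal sketch of that underlying handshake loop, driving a client and a server SSLObject entirely in memory; it assumes a matching cert.pem/key.pem pair exists on disk, disables verification purely for the demo, and uses PROTOCOL_SSLv23 to match the constant in the fallback path above:

import ssl

server_ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
server_ctx.load_cert_chain('cert.pem', 'key.pem')   # assumed to exist
client_ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
client_ctx.verify_mode = ssl.CERT_NONE              # demo only, not secure

c_in, c_out = ssl.MemoryBIO(), ssl.MemoryBIO()
s_in, s_out = ssl.MemoryBIO(), ssl.MemoryBIO()
client = client_ctx.wrap_bio(c_in, c_out)
server = server_ctx.wrap_bio(s_in, s_out, server_side=True)

def pump():
    # Shuttle pending records between the BIO pairs, as SSLProtocol does
    # over a real transport in data_received()/_process_write_backlog().
    if c_out.pending:
        s_in.write(c_out.read())
    if s_out.pending:
        c_in.write(s_out.read())

done = set()
while len(done) < 2:
    for side in (client, server):
        if side not in done:
            try:
                side.do_handshake()   # raises SSLWantReadError until fed
                done.add(side)
            except ssl.SSLWantReadError:
                pass
        pump()

The SSLWantReadError here plays exactly the role of SSL_ERROR_WANT_READ in feed_ssldata() above.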
+ 486 - 0
env/Lib/site-packages/asyncio/streams.py

@@ -0,0 +1,486 @@
+"""Stream-related things."""
+
+__all__ = ['StreamReader', 'StreamWriter', 'StreamReaderProtocol',
+           'open_connection', 'start_server',
+           'IncompleteReadError',
+           ]
+
+import socket
+
+if hasattr(socket, 'AF_UNIX'):
+    __all__.extend(['open_unix_connection', 'start_unix_server'])
+
+from . import coroutines
+from . import events
+from . import futures
+from . import protocols
+from .coroutines import coroutine
+from .log import logger
+
+
+_DEFAULT_LIMIT = 2**16
+
+
+class IncompleteReadError(EOFError):
+    """
+    Incomplete read error. Attributes:
+
+    - partial: the bytes read before the end of the stream was reached
+    - expected: total number of expected bytes
+    """
+    def __init__(self, partial, expected):
+        EOFError.__init__(self, "%s bytes read on a total of %s expected bytes"
+                                % (len(partial), expected))
+        self.partial = partial
+        self.expected = expected
+
+
+@coroutine
+def open_connection(host=None, port=None, *,
+                    loop=None, limit=_DEFAULT_LIMIT, **kwds):
+    """A wrapper for create_connection() returning a (reader, writer) pair.
+
+    The reader returned is a StreamReader instance; the writer is a
+    StreamWriter instance.
+
+    The arguments are all the usual arguments to create_connection()
+    except protocol_factory; most common are positional host and port,
+    with various optional keyword arguments following.
+
+    Additional optional keyword arguments are loop (to set the event loop
+    instance to use) and limit (to set the buffer limit passed to the
+    StreamReader).
+
+    (If you want to customize the StreamReader and/or
+    StreamReaderProtocol classes, just copy the code -- there's
+    really nothing special here except some convenience.)
+    """
+    if loop is None:
+        loop = events.get_event_loop()
+    reader = StreamReader(limit=limit, loop=loop)
+    protocol = StreamReaderProtocol(reader, loop=loop)
+    transport, _ = yield from loop.create_connection(
+        lambda: protocol, host, port, **kwds)
+    writer = StreamWriter(transport, protocol, reader, loop)
+    return reader, writer
+
+
+@coroutine
+def start_server(client_connected_cb, host=None, port=None, *,
+                 loop=None, limit=_DEFAULT_LIMIT, **kwds):
+    """Start a socket server, call back for each client connected.
+
+    The first parameter, `client_connected_cb`, takes two parameters:
+    client_reader, client_writer.  client_reader is a StreamReader
+    object, while client_writer is a StreamWriter object.  This
+    parameter can either be a plain callback function or a coroutine;
+    if it is a coroutine, it will be automatically converted into a
+    Task.
+
+    The rest of the arguments are all the usual arguments to
+    loop.create_server() except protocol_factory; most common are
+    positional host and port, with various optional keyword arguments
+    following.
+
+    Additional optional keyword arguments are loop (to set the event loop
+    instance to use) and limit (to set the buffer limit passed to the
+    StreamReader).
+
+    The return value is the same as loop.create_server(), i.e. a
+    Server object which can be used to stop the service.
+    """
+    if loop is None:
+        loop = events.get_event_loop()
+
+    def factory():
+        reader = StreamReader(limit=limit, loop=loop)
+        protocol = StreamReaderProtocol(reader, client_connected_cb,
+                                        loop=loop)
+        return protocol
+
+    return (yield from loop.create_server(factory, host, port, **kwds))
+
+
+if hasattr(socket, 'AF_UNIX'):
+    # UNIX Domain Sockets are supported on this platform
+
+    @coroutine
+    def open_unix_connection(path=None, *,
+                             loop=None, limit=_DEFAULT_LIMIT, **kwds):
+        """Similar to `open_connection` but works with UNIX Domain Sockets."""
+        if loop is None:
+            loop = events.get_event_loop()
+        reader = StreamReader(limit=limit, loop=loop)
+        protocol = StreamReaderProtocol(reader, loop=loop)
+        transport, _ = yield from loop.create_unix_connection(
+            lambda: protocol, path, **kwds)
+        writer = StreamWriter(transport, protocol, reader, loop)
+        return reader, writer
+
+
+    @coroutine
+    def start_unix_server(client_connected_cb, path=None, *,
+                          loop=None, limit=_DEFAULT_LIMIT, **kwds):
+        """Similar to `start_server` but works with UNIX Domain Sockets."""
+        if loop is None:
+            loop = events.get_event_loop()
+
+        def factory():
+            reader = StreamReader(limit=limit, loop=loop)
+            protocol = StreamReaderProtocol(reader, client_connected_cb,
+                                            loop=loop)
+            return protocol
+
+        return (yield from loop.create_unix_server(factory, path, **kwds))
+
+
+class FlowControlMixin(protocols.Protocol):
+    """Reusable flow control logic for StreamWriter.drain().
+
+    This implements the protocol methods pause_writing(),
+    resume_writing() and connection_lost().  If the subclass overrides
+    these it must call the super methods.
+
+    StreamWriter.drain() must wait for the _drain_helper() coroutine.
+    """
+
+    def __init__(self, loop=None):
+        if loop is None:
+            self._loop = events.get_event_loop()
+        else:
+            self._loop = loop
+        self._paused = False
+        self._drain_waiter = None
+        self._connection_lost = False
+
+    def pause_writing(self):
+        assert not self._paused
+        self._paused = True
+        if self._loop.get_debug():
+            logger.debug("%r pauses writing", self)
+
+    def resume_writing(self):
+        assert self._paused
+        self._paused = False
+        if self._loop.get_debug():
+            logger.debug("%r resumes writing", self)
+
+        waiter = self._drain_waiter
+        if waiter is not None:
+            self._drain_waiter = None
+            if not waiter.done():
+                waiter.set_result(None)
+
+    def connection_lost(self, exc):
+        self._connection_lost = True
+        # Wake up the writer if currently paused.
+        if not self._paused:
+            return
+        waiter = self._drain_waiter
+        if waiter is None:
+            return
+        self._drain_waiter = None
+        if waiter.done():
+            return
+        if exc is None:
+            waiter.set_result(None)
+        else:
+            waiter.set_exception(exc)
+
+    @coroutine
+    def _drain_helper(self):
+        if self._connection_lost:
+            raise ConnectionResetError('Connection lost')
+        if not self._paused:
+            return
+        waiter = self._drain_waiter
+        assert waiter is None or waiter.cancelled()
+        waiter = futures.Future(loop=self._loop)
+        self._drain_waiter = waiter
+        yield from waiter
+
+
+class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
+    """Helper class to adapt between Protocol and StreamReader.
+
+    (This is a helper class instead of making StreamReader itself a
+    Protocol subclass, because the StreamReader has other potential
+    uses, and to prevent the user of the StreamReader from accidentally
+    calling inappropriate methods of the protocol.)
+    """
+
+    def __init__(self, stream_reader, client_connected_cb=None, loop=None):
+        super().__init__(loop=loop)
+        self._stream_reader = stream_reader
+        self._stream_writer = None
+        self._client_connected_cb = client_connected_cb
+
+    def connection_made(self, transport):
+        self._stream_reader.set_transport(transport)
+        if self._client_connected_cb is not None:
+            self._stream_writer = StreamWriter(transport, self,
+                                               self._stream_reader,
+                                               self._loop)
+            res = self._client_connected_cb(self._stream_reader,
+                                            self._stream_writer)
+            if coroutines.iscoroutine(res):
+                self._loop.create_task(res)
+
+    def connection_lost(self, exc):
+        if exc is None:
+            self._stream_reader.feed_eof()
+        else:
+            self._stream_reader.set_exception(exc)
+        super().connection_lost(exc)
+
+    def data_received(self, data):
+        self._stream_reader.feed_data(data)
+
+    def eof_received(self):
+        self._stream_reader.feed_eof()
+
+
+class StreamWriter:
+    """Wraps a Transport.
+
+    This exposes write(), writelines(), [can_]write_eof(),
+    get_extra_info() and close().  It adds drain() which returns an
+    optional Future on which you can wait for flow control.  It also
+    adds a transport property which references the Transport
+    directly.
+    """
+
+    def __init__(self, transport, protocol, reader, loop):
+        self._transport = transport
+        self._protocol = protocol
+        # drain() expects that the reader has an exception() method
+        assert reader is None or isinstance(reader, StreamReader)
+        self._reader = reader
+        self._loop = loop
+
+    def __repr__(self):
+        info = [self.__class__.__name__, 'transport=%r' % self._transport]
+        if self._reader is not None:
+            info.append('reader=%r' % self._reader)
+        return '<%s>' % ' '.join(info)
+
+    @property
+    def transport(self):
+        return self._transport
+
+    def write(self, data):
+        self._transport.write(data)
+
+    def writelines(self, data):
+        self._transport.writelines(data)
+
+    def write_eof(self):
+        return self._transport.write_eof()
+
+    def can_write_eof(self):
+        return self._transport.can_write_eof()
+
+    def close(self):
+        return self._transport.close()
+
+    def get_extra_info(self, name, default=None):
+        return self._transport.get_extra_info(name, default)
+
+    @coroutine
+    def drain(self):
+        """Flush the write buffer.
+
+        The intended use is to write
+
+          w.write(data)
+          yield from w.drain()
+        """
+        if self._reader is not None:
+            exc = self._reader.exception()
+            if exc is not None:
+                raise exc
+        yield from self._protocol._drain_helper()
+
+
+class StreamReader:
+
+    def __init__(self, limit=_DEFAULT_LIMIT, loop=None):
+        # The line length limit is a security feature;
+        # it also doubles as half the buffer limit.
+        self._limit = limit
+        if loop is None:
+            self._loop = events.get_event_loop()
+        else:
+            self._loop = loop
+        self._buffer = bytearray()
+        self._eof = False    # Whether we're done.
+        self._waiter = None  # A future used by _wait_for_data()
+        self._exception = None
+        self._transport = None
+        self._paused = False
+
+    def exception(self):
+        return self._exception
+
+    def set_exception(self, exc):
+        self._exception = exc
+
+        waiter = self._waiter
+        if waiter is not None:
+            self._waiter = None
+            if not waiter.cancelled():
+                waiter.set_exception(exc)
+
+    def _wakeup_waiter(self):
+        """Wakeup read() or readline() function waiting for data or EOF."""
+        waiter = self._waiter
+        if waiter is not None:
+            self._waiter = None
+            if not waiter.cancelled():
+                waiter.set_result(None)
+
+    def set_transport(self, transport):
+        assert self._transport is None, 'Transport already set'
+        self._transport = transport
+
+    def _maybe_resume_transport(self):
+        if self._paused and len(self._buffer) <= self._limit:
+            self._paused = False
+            self._transport.resume_reading()
+
+    def feed_eof(self):
+        self._eof = True
+        self._wakeup_waiter()
+
+    def at_eof(self):
+        """Return True if the buffer is empty and 'feed_eof' was called."""
+        return self._eof and not self._buffer
+
+    def feed_data(self, data):
+        assert not self._eof, 'feed_data after feed_eof'
+
+        if not data:
+            return
+
+        self._buffer.extend(data)
+        self._wakeup_waiter()
+
+        if (self._transport is not None and
+            not self._paused and
+            len(self._buffer) > 2*self._limit):
+            try:
+                self._transport.pause_reading()
+            except NotImplementedError:
+                # The transport can't be paused.
+                # We'll just have to buffer all data.
+                # Forget the transport so we don't keep trying.
+                self._transport = None
+            else:
+                self._paused = True
+
+    def _wait_for_data(self, func_name):
+        """Wait until feed_data() or feed_eof() is called."""
+        # StreamReader uses a future to link the protocol feed_data() method
+        # to a read coroutine. Running two read coroutines at the same time
+        # would have unexpected behaviour: it would not be possible to know
+        # which coroutine would get the next data.
+        if self._waiter is not None:
+            raise RuntimeError('%s() called while another coroutine is '
+                               'already waiting for incoming data' % func_name)
+
+        self._waiter = futures.Future(loop=self._loop)
+        try:
+            yield from self._waiter
+        finally:
+            self._waiter = None
+
+    @coroutine
+    def readline(self):
+        if self._exception is not None:
+            raise self._exception
+
+        line = bytearray()
+        not_enough = True
+
+        while not_enough:
+            while self._buffer and not_enough:
+                ichar = self._buffer.find(b'\n')
+                if ichar < 0:
+                    line.extend(self._buffer)
+                    self._buffer.clear()
+                else:
+                    ichar += 1
+                    line.extend(self._buffer[:ichar])
+                    del self._buffer[:ichar]
+                    not_enough = False
+
+                if len(line) > self._limit:
+                    self._maybe_resume_transport()
+                    raise ValueError('Line is too long')
+
+            if self._eof:
+                break
+
+            if not_enough:
+                yield from self._wait_for_data('readline')
+
+        self._maybe_resume_transport()
+        return bytes(line)
+
+    @coroutine
+    def read(self, n=-1):
+        if self._exception is not None:
+            raise self._exception
+
+        if not n:
+            return b''
+
+        if n < 0:
+            # This used to just loop creating a new waiter hoping to
+            # collect everything in self._buffer, but that would
+            # deadlock if the subprocess sends more than self.limit
+            # bytes.  So just call self.read(self._limit) until EOF.
+            blocks = []
+            while True:
+                block = yield from self.read(self._limit)
+                if not block:
+                    break
+                blocks.append(block)
+            return b''.join(blocks)
+        else:
+            if not self._buffer and not self._eof:
+                yield from self._wait_for_data('read')
+
+        if n < 0 or len(self._buffer) <= n:
+            data = bytes(self._buffer)
+            self._buffer.clear()
+        else:
+            # n > 0 and len(self._buffer) > n
+            data = bytes(self._buffer[:n])
+            del self._buffer[:n]
+
+        self._maybe_resume_transport()
+        return data
+
+    @coroutine
+    def readexactly(self, n):
+        if self._exception is not None:
+            raise self._exception
+
+        # There used to be "optimized" code here.  It created its own
+        # Future and waited until self._buffer had at least the n
+        # bytes, then called read(n).  Unfortunately, this could pause
+        # the transport if the argument was larger than the pause
+        # limit (which is twice self._limit).  So now we just read()
+        # into a local buffer.
+
+        blocks = []
+        while n > 0:
+            block = yield from self.read(n)
+            if not block:
+                partial = b''.join(blocks)
+                raise IncompleteReadError(partial, len(partial) + n)
+            blocks.append(block)
+            n -= len(block)
+
+        return b''.join(blocks)

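The pieces above compose as follows: open_connection() builds a StreamReader and StreamReaderProtocol, hands them to loop.create_connection(), and wraps the resulting transport in a StreamWriter. A minimal sketch in the pre-3.5 `yield from` style this file uses; the host and request line are illustrative and the call needs network access:

import asyncio

@asyncio.coroutine
def fetch_status(host):
    reader, writer = yield from asyncio.open_connection(host, 80)
    writer.write(b'HEAD / HTTP/1.0\r\nHost: ' + host.encode() + b'\r\n\r\n')
    yield from writer.drain()    # waits in FlowControlMixin._drain_helper()
    status = yield from reader.readline()
    writer.close()
    return status

loop = asyncio.get_event_loop()
print(loop.run_until_complete(fetch_status('example.com')))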
+ 215 - 0
env/Lib/site-packages/asyncio/subprocess.py

@@ -0,0 +1,215 @@
+__all__ = ['create_subprocess_exec', 'create_subprocess_shell']
+
+import collections
+import subprocess
+
+from . import events
+from . import futures
+from . import protocols
+from . import streams
+from . import tasks
+from .coroutines import coroutine
+from .log import logger
+
+
+PIPE = subprocess.PIPE
+STDOUT = subprocess.STDOUT
+DEVNULL = subprocess.DEVNULL
+
+
+class SubprocessStreamProtocol(streams.FlowControlMixin,
+                               protocols.SubprocessProtocol):
+    """Like StreamReaderProtocol, but for a subprocess."""
+
+    def __init__(self, limit, loop):
+        super().__init__(loop=loop)
+        self._limit = limit
+        self.stdin = self.stdout = self.stderr = None
+        self._transport = None
+
+    def __repr__(self):
+        info = [self.__class__.__name__]
+        if self.stdin is not None:
+            info.append('stdin=%r' % self.stdin)
+        if self.stdout is not None:
+            info.append('stdout=%r' % self.stdout)
+        if self.stderr is not None:
+            info.append('stderr=%r' % self.stderr)
+        return '<%s>' % ' '.join(info)
+
+    def connection_made(self, transport):
+        self._transport = transport
+
+        stdout_transport = transport.get_pipe_transport(1)
+        if stdout_transport is not None:
+            self.stdout = streams.StreamReader(limit=self._limit,
+                                               loop=self._loop)
+            self.stdout.set_transport(stdout_transport)
+
+        stderr_transport = transport.get_pipe_transport(2)
+        if stderr_transport is not None:
+            self.stderr = streams.StreamReader(limit=self._limit,
+                                               loop=self._loop)
+            self.stderr.set_transport(stderr_transport)
+
+        stdin_transport = transport.get_pipe_transport(0)
+        if stdin_transport is not None:
+            self.stdin = streams.StreamWriter(stdin_transport,
+                                              protocol=self,
+                                              reader=None,
+                                              loop=self._loop)
+
+    def pipe_data_received(self, fd, data):
+        if fd == 1:
+            reader = self.stdout
+        elif fd == 2:
+            reader = self.stderr
+        else:
+            reader = None
+        if reader is not None:
+            reader.feed_data(data)
+
+    def pipe_connection_lost(self, fd, exc):
+        if fd == 0:
+            pipe = self.stdin
+            if pipe is not None:
+                pipe.close()
+            self.connection_lost(exc)
+            return
+        if fd == 1:
+            reader = self.stdout
+        elif fd == 2:
+            reader = self.stderr
+        else:
+            reader = None
+        if reader is not None:
+            if exc is None:
+                reader.feed_eof()
+            else:
+                reader.set_exception(exc)
+
+    def process_exited(self):
+        self._transport.close()
+        self._transport = None
+
+
+class Process:
+    def __init__(self, transport, protocol, loop):
+        self._transport = transport
+        self._protocol = protocol
+        self._loop = loop
+        self.stdin = protocol.stdin
+        self.stdout = protocol.stdout
+        self.stderr = protocol.stderr
+        self.pid = transport.get_pid()
+
+    def __repr__(self):
+        return '<%s %s>' % (self.__class__.__name__, self.pid)
+
+    @property
+    def returncode(self):
+        return self._transport.get_returncode()
+
+    @coroutine
+    def wait(self):
+        """Wait until the process exit and return the process return code.
+
+        This method is a coroutine."""
+        return (yield from self._transport._wait())
+
+    def send_signal(self, signal):
+        self._transport.send_signal(signal)
+
+    def terminate(self):
+        self._transport.terminate()
+
+    def kill(self):
+        self._transport.kill()
+
+    @coroutine
+    def _feed_stdin(self, input):
+        debug = self._loop.get_debug()
+        self.stdin.write(input)
+        if debug:
+            logger.debug('%r communicate: feed stdin (%s bytes)',
+                        self, len(input))
+        try:
+            yield from self.stdin.drain()
+        except (BrokenPipeError, ConnectionResetError) as exc:
+            # communicate() ignores BrokenPipeError and ConnectionResetError
+            if debug:
+                logger.debug('%r communicate: stdin got %r', self, exc)
+
+        if debug:
+            logger.debug('%r communicate: close stdin', self)
+        self.stdin.close()
+
+    @coroutine
+    def _noop(self):
+        return None
+
+    @coroutine
+    def _read_stream(self, fd):
+        transport = self._transport.get_pipe_transport(fd)
+        if fd == 2:
+            stream = self.stderr
+        else:
+            assert fd == 1
+            stream = self.stdout
+        if self._loop.get_debug():
+            name = 'stdout' if fd == 1 else 'stderr'
+            logger.debug('%r communicate: read %s', self, name)
+        output = yield from stream.read()
+        if self._loop.get_debug():
+            name = 'stdout' if fd == 1 else 'stderr'
+            logger.debug('%r communicate: close %s', self, name)
+        transport.close()
+        return output
+
+    @coroutine
+    def communicate(self, input=None):
+        if input:
+            stdin = self._feed_stdin(input)
+        else:
+            stdin = self._noop()
+        if self.stdout is not None:
+            stdout = self._read_stream(1)
+        else:
+            stdout = self._noop()
+        if self.stderr is not None:
+            stderr = self._read_stream(2)
+        else:
+            stderr = self._noop()
+        stdin, stdout, stderr = yield from tasks.gather(stdin, stdout, stderr,
+                                                        loop=self._loop)
+        yield from self.wait()
+        return (stdout, stderr)
+
+
+@coroutine
+def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
+                            loop=None, limit=streams._DEFAULT_LIMIT, **kwds):
+    if loop is None:
+        loop = events.get_event_loop()
+    protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
+                                                        loop=loop)
+    transport, protocol = yield from loop.subprocess_shell(
+                                            protocol_factory,
+                                            cmd, stdin=stdin, stdout=stdout,
+                                            stderr=stderr, **kwds)
+    return Process(transport, protocol, loop)
+
+@coroutine
+def create_subprocess_exec(program, *args, stdin=None, stdout=None,
+                           stderr=None, loop=None,
+                           limit=streams._DEFAULT_LIMIT, **kwds):
+    if loop is None:
+        loop = events.get_event_loop()
+    protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
+                                                        loop=loop)
+    transport, protocol = yield from loop.subprocess_exec(
+                                            protocol_factory,
+                                            program, *args,
+                                            stdin=stdin, stdout=stdout,
+                                            stderr=stderr, **kwds)
+    return Process(transport, protocol, loop)

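A minimal sketch of the two factory coroutines defined above, again in the 3.4-era `yield from` style; the `echo` binary is a POSIX assumption, and on Windows a ProactorEventLoop would be needed for subprocess pipes:

import asyncio
from asyncio import subprocess

@asyncio.coroutine
def run_echo():
    proc = yield from asyncio.create_subprocess_exec(
        'echo', 'hello',                  # assumes a POSIX echo binary
        stdout=subprocess.PIPE)
    stdout, stderr = yield from proc.communicate()
    return proc.returncode, stdout

loop = asyncio.get_event_loop()
print(loop.run_until_complete(run_echo()))   # -> (0, b'hello\n')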
+ 667 - 0
env/Lib/site-packages/asyncio/tasks.py

@@ -0,0 +1,667 @@
+"""Support for tasks, coroutines and the scheduler."""
+
+__all__ = ['Task',
+           'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED',
+           'wait', 'wait_for', 'as_completed', 'sleep', 'async',
+           'gather', 'shield',
+           ]
+
+import concurrent.futures
+import functools
+import inspect
+import linecache
+import sys
+import traceback
+import weakref
+
+from . import coroutines
+from . import events
+from . import futures
+from .coroutines import coroutine
+
+_PY34 = (sys.version_info >= (3, 4))
+
+
+class Task(futures.Future):
+    """A coroutine wrapped in a Future."""
+
+    # An important invariant maintained while a Task is not done:
+    #
+    # - Either _fut_waiter is None, and _step() is scheduled;
+    # - or _fut_waiter is some Future, and _step() is *not* scheduled.
+    #
+    # The only transition from the latter to the former is through
+    # _wakeup().  When _fut_waiter is not None, one of its callbacks
+    # must be _wakeup().
+
+    # Weak set containing all tasks alive.
+    _all_tasks = weakref.WeakSet()
+
+    # Dictionary containing tasks that are currently active in
+    # all running event loops.  {EventLoop: Task}
+    _current_tasks = {}
+
+    # If False, don't log a message if the task is destroyed whereas its
+    # status is still pending
+    _log_destroy_pending = True
+
+    @classmethod
+    def current_task(cls, loop=None):
+        """Return the currently running task in an event loop or None.
+
+        By default the current task for the current event loop is returned.
+
+        None is returned when called not in the context of a Task.
+        """
+        if loop is None:
+            loop = events.get_event_loop()
+        return cls._current_tasks.get(loop)
+
+    @classmethod
+    def all_tasks(cls, loop=None):
+        """Return a set of all tasks for an event loop.
+
+        By default all tasks for the current event loop are returned.
+        """
+        if loop is None:
+            loop = events.get_event_loop()
+        return {t for t in cls._all_tasks if t._loop is loop}
+
+    def __init__(self, coro, *, loop=None):
+        assert coroutines.iscoroutine(coro), repr(coro)
+        super().__init__(loop=loop)
+        if self._source_traceback:
+            del self._source_traceback[-1]
+        self._coro = iter(coro)  # Use the iterator just in case.
+        self._fut_waiter = None
+        self._must_cancel = False
+        self._loop.call_soon(self._step)
+        self.__class__._all_tasks.add(self)
+
+    # On Python 3.3 or older, objects with a destructor that are part of a
+    # reference cycle are never destroyed. That's not the case any more on
+    # Python 3.4 thanks to the PEP 442.
+    if _PY34:
+        def __del__(self):
+            if self._state == futures._PENDING and self._log_destroy_pending:
+                context = {
+                    'task': self,
+                    'message': 'Task was destroyed but it is pending!',
+                }
+                if self._source_traceback:
+                    context['source_traceback'] = self._source_traceback
+                self._loop.call_exception_handler(context)
+            futures.Future.__del__(self)
+
+    def _repr_info(self):
+        info = super()._repr_info()
+
+        if self._must_cancel:
+            # replace status
+            info[0] = 'cancelling'
+
+        coro = coroutines._format_coroutine(self._coro)
+        info.insert(1, 'coro=<%s>' % coro)
+
+        if self._fut_waiter is not None:
+            info.insert(2, 'wait_for=%r' % self._fut_waiter)
+        return info
+
+    def get_stack(self, *, limit=None):
+        """Return the list of stack frames for this task's coroutine.
+
+        If the coroutine is not done, this returns the stack where it is
+        suspended.  If the coroutine has completed successfully or was
+        cancelled, this returns an empty list.  If the coroutine was
+        terminated by an exception, this returns the list of traceback
+        frames.
+
+        The frames are always ordered from oldest to newest.
+
+        The optional limit gives the maximum number of frames to
+        return; by default all available frames are returned.  Its
+        meaning differs depending on whether a stack or a traceback is
+        returned: the newest frames of a stack are returned, but the
+        oldest frames of a traceback are returned.  (This matches the
+        behavior of the traceback module.)
+
+        For reasons beyond our control, only one stack frame is
+        returned for a suspended coroutine.
+        """
+        frames = []
+        f = self._coro.gi_frame
+        if f is not None:
+            while f is not None:
+                if limit is not None:
+                    if limit <= 0:
+                        break
+                    limit -= 1
+                frames.append(f)
+                f = f.f_back
+            frames.reverse()
+        elif self._exception is not None:
+            tb = self._exception.__traceback__
+            while tb is not None:
+                if limit is not None:
+                    if limit <= 0:
+                        break
+                    limit -= 1
+                frames.append(tb.tb_frame)
+                tb = tb.tb_next
+        return frames
+
+    def print_stack(self, *, limit=None, file=None):
+        """Print the stack or traceback for this task's coroutine.
+
+        This produces output similar to that of the traceback module,
+        for the frames retrieved by get_stack().  The limit argument
+        is passed to get_stack().  The file argument is an I/O stream
+        to which the output is written; by default output is written
+        to sys.stderr.
+        """
+        extracted_list = []
+        checked = set()
+        for f in self.get_stack(limit=limit):
+            lineno = f.f_lineno
+            co = f.f_code
+            filename = co.co_filename
+            name = co.co_name
+            if filename not in checked:
+                checked.add(filename)
+                linecache.checkcache(filename)
+            line = linecache.getline(filename, lineno, f.f_globals)
+            extracted_list.append((filename, lineno, name, line))
+        exc = self._exception
+        if not extracted_list:
+            print('No stack for %r' % self, file=file)
+        elif exc is not None:
+            print('Traceback for %r (most recent call last):' % self,
+                  file=file)
+        else:
+            print('Stack for %r (most recent call last):' % self,
+                  file=file)
+        traceback.print_list(extracted_list, file=file)
+        if exc is not None:
+            for line in traceback.format_exception_only(exc.__class__, exc):
+                print(line, file=file, end='')
+
+    def cancel(self):
+        """Request that this task cancel itself.
+
+        This arranges for a CancelledError to be thrown into the
+        wrapped coroutine on the next cycle through the event loop.
+        The coroutine then has a chance to clean up or even deny
+        the request using try/except/finally.
+
+        Unlike Future.cancel, this does not guarantee that the
+        task will be cancelled: the exception might be caught and
+        acted upon, delaying cancellation of the task or preventing
+        cancellation completely.  The task may also return a value or
+        raise a different exception.
+
+        Immediately after this method is called, Task.cancelled() will
+        not return True (unless the task was already cancelled).  A
+        task will be marked as cancelled when the wrapped coroutine
+        terminates with a CancelledError exception (even if cancel()
+        was not called).
+        """
+        if self.done():
+            return False
+        if self._fut_waiter is not None:
+            if self._fut_waiter.cancel():
+                # Leave self._fut_waiter; it may be a Task that
+                # catches and ignores the cancellation so we may have
+                # to cancel it again later.
+                return True
+        # It must be the case that self._step is already scheduled.
+        self._must_cancel = True
+        return True
+
+    def _step(self, value=None, exc=None):
+        assert not self.done(), \
+            '_step(): already done: {!r}, {!r}, {!r}'.format(self, value, exc)
+        if self._must_cancel:
+            if not isinstance(exc, futures.CancelledError):
+                exc = futures.CancelledError()
+            self._must_cancel = False
+        coro = self._coro
+        self._fut_waiter = None
+
+        self.__class__._current_tasks[self._loop] = self
+        # Call either coro.throw(exc) or coro.send(value).
+        try:
+            if exc is not None:
+                result = coro.throw(exc)
+            elif value is not None:
+                result = coro.send(value)
+            else:
+                result = next(coro)
+        except StopIteration as exc:
+            self.set_result(exc.value)
+        except futures.CancelledError as exc:
+            super().cancel()  # I.e., Future.cancel(self).
+        except Exception as exc:
+            self.set_exception(exc)
+        except BaseException as exc:
+            self.set_exception(exc)
+            raise
+        else:
+            if isinstance(result, futures.Future):
+                # Yielded Future must come from Future.__iter__().
+                if result._blocking:
+                    result._blocking = False
+                    result.add_done_callback(self._wakeup)
+                    self._fut_waiter = result
+                    if self._must_cancel:
+                        if self._fut_waiter.cancel():
+                            self._must_cancel = False
+                else:
+                    self._loop.call_soon(
+                        self._step, None,
+                        RuntimeError(
+                            'yield was used instead of yield from '
+                            'in task {!r} with {!r}'.format(self, result)))
+            elif result is None:
+                # Bare yield relinquishes control for one event loop iteration.
+                self._loop.call_soon(self._step)
+            elif inspect.isgenerator(result):
+                # Yielding a generator is just wrong.
+                self._loop.call_soon(
+                    self._step, None,
+                    RuntimeError(
+                        'yield was used instead of yield from for '
+                        'generator in task {!r} with {}'.format(
+                            self, result)))
+            else:
+                # Yielding something else is an error.
+                self._loop.call_soon(
+                    self._step, None,
+                    RuntimeError(
+                        'Task got bad yield: {!r}'.format(result)))
+        finally:
+            self.__class__._current_tasks.pop(self._loop)
+            self = None  # Needed to break cycles when an exception occurs.
+
+    def _wakeup(self, future):
+        try:
+            value = future.result()
+        except Exception as exc:
+            # This may also be a cancellation.
+            self._step(None, exc)
+        else:
+            self._step(value, None)
+        self = None  # Needed to break cycles when an exception occurs.
+
+
+# wait() and as_completed() similar to those in PEP 3148.
+
+FIRST_COMPLETED = concurrent.futures.FIRST_COMPLETED
+FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION
+ALL_COMPLETED = concurrent.futures.ALL_COMPLETED
+
+
+@coroutine
+def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
+    """Wait for the Futures and coroutines given by fs to complete.
+
+    The sequence of futures must not be empty.
+
+    Coroutines will be wrapped in Tasks.
+
+    Returns two sets of Future: (done, pending).
+
+    Usage:
+
+        done, pending = yield from asyncio.wait(fs)
+
+    Note: This does not raise TimeoutError! Futures that aren't done
+    when the timeout occurs are returned in the second set.
+    """
+    if isinstance(fs, futures.Future) or coroutines.iscoroutine(fs):
+        raise TypeError("expect a list of futures, not %s" % type(fs).__name__)
+    if not fs:
+        raise ValueError('Set of coroutines/Futures is empty.')
+    if return_when not in (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED):
+        raise ValueError('Invalid return_when value: {}'.format(return_when))
+
+    if loop is None:
+        loop = events.get_event_loop()
+
+    fs = {async(f, loop=loop) for f in set(fs)}
+
+    return (yield from _wait(fs, timeout, return_when, loop))
+
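+# Hedged example (not part of the original source; `fetch` is a hypothetical
+# coroutine).  wait() never raises TimeoutError: tasks unfinished at the
+# timeout come back in the `pending` set and usually need explicit
+# cancellation:
+#
+#     done, pending = yield from wait(
+#         [fetch(url) for url in urls], timeout=5.0,
+#         return_when=FIRST_COMPLETED)
+#     for task in pending:
+#         task.cancel()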
+
+def _release_waiter(waiter, *args):
+    if not waiter.done():
+        waiter.set_result(None)
+
+
+@coroutine
+def wait_for(fut, timeout, *, loop=None):
+    """Wait for the single Future or coroutine to complete, with timeout.
+
+    Coroutine will be wrapped in Task.
+
+    Returns result of the Future or coroutine.  When a timeout occurs,
+    it cancels the task and raises TimeoutError.  To avoid the task
+    cancellation, wrap it in shield().
+
+    If the wait is cancelled, the task is also cancelled.
+
+    This function is a coroutine.
+    """
+    if loop is None:
+        loop = events.get_event_loop()
+
+    if timeout is None:
+        return (yield from fut)
+
+    waiter = futures.Future(loop=loop)
+    timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
+    cb = functools.partial(_release_waiter, waiter)
+
+    fut = async(fut, loop=loop)
+    fut.add_done_callback(cb)
+
+    try:
+        # wait until the future completes or the timeout
+        try:
+            yield from waiter
+        except futures.CancelledError:
+            fut.remove_done_callback(cb)
+            fut.cancel()
+            raise
+
+        if fut.done():
+            return fut.result()
+        else:
+            fut.remove_done_callback(cb)
+            fut.cancel()
+            raise futures.TimeoutError()
+    finally:
+        timeout_handle.cancel()
+
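+# Hedged example (not part of the original source; `job` is a hypothetical
+# coroutine).  Unlike wait(), wait_for() raises TimeoutError and cancels the
+# task on timeout; wrapping the awaitable in shield() keeps it running:
+#
+#     try:
+#         result = yield from wait_for(shield(job()), 1.0)
+#     except futures.TimeoutError:
+#         pass  # job() keeps running; only this wait was abandoned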
+
+@coroutine
+def _wait(fs, timeout, return_when, loop):
+    """Internal helper for wait() and _wait_for().
+
+    The fs argument must be a collection of Futures.
+    """
+    assert fs, 'Set of Futures is empty.'
+    waiter = futures.Future(loop=loop)
+    timeout_handle = None
+    if timeout is not None:
+        timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
+    counter = len(fs)
+
+    def _on_completion(f):
+        nonlocal counter
+        counter -= 1
+        if (counter <= 0 or
+            return_when == FIRST_COMPLETED or
+            return_when == FIRST_EXCEPTION and (not f.cancelled() and
+                                                f.exception() is not None)):
+            if timeout_handle is not None:
+                timeout_handle.cancel()
+            if not waiter.done():
+                waiter.set_result(None)
+
+    for f in fs:
+        f.add_done_callback(_on_completion)
+
+    try:
+        yield from waiter
+    finally:
+        if timeout_handle is not None:
+            timeout_handle.cancel()
+
+    done, pending = set(), set()
+    for f in fs:
+        f.remove_done_callback(_on_completion)
+        if f.done():
+            done.add(f)
+        else:
+            pending.add(f)
+    return done, pending
+
+
+# This is *not* a @coroutine!  It is just an iterator (yielding Futures).
+def as_completed(fs, *, loop=None, timeout=None):
+    """Return an iterator whose values are coroutines.
+
+    When waiting for the yielded coroutines you'll get the results (or
+    exceptions!) of the original Futures (or coroutines), in the order
+    in which and as soon as they complete.
+
+    This differs from PEP 3148; the proper way to use this is:
+
+        for f in as_completed(fs):
+            result = yield from f  # The 'yield from' may raise.
+            # Use result.
+
+    If a timeout is specified, the 'yield from' will raise
+    TimeoutError when the timeout occurs before all Futures are done.
+
+    Note: The futures 'f' are not necessarily members of fs.
+    """
+    if isinstance(fs, futures.Future) or coroutines.iscoroutine(fs):
+        raise TypeError("expect a list of futures, not %s" % type(fs).__name__)
+    loop = loop if loop is not None else events.get_event_loop()
+    todo = {async(f, loop=loop) for f in set(fs)}
+    from .queues import Queue  # Import here to avoid circular import problem.
+    done = Queue(loop=loop)
+    timeout_handle = None
+
+    def _on_timeout():
+        for f in todo:
+            f.remove_done_callback(_on_completion)
+            done.put_nowait(None)  # Queue a dummy value for _wait_for_one().
+        todo.clear()  # Can't do todo.remove(f) in the loop.
+
+    def _on_completion(f):
+        if not todo:
+            return  # _on_timeout() was here first.
+        todo.remove(f)
+        done.put_nowait(f)
+        if not todo and timeout_handle is not None:
+            timeout_handle.cancel()
+
+    @coroutine
+    def _wait_for_one():
+        f = yield from done.get()
+        if f is None:
+            # Dummy value from _on_timeout().
+            raise futures.TimeoutError
+        return f.result()  # May raise f.exception().
+
+    for f in todo:
+        f.add_done_callback(_on_completion)
+    if todo and timeout is not None:
+        timeout_handle = loop.call_later(timeout, _on_timeout)
+    for _ in range(len(todo)):
+        yield _wait_for_one()
+
+
+@coroutine
+def sleep(delay, result=None, *, loop=None):
+    """Coroutine that completes after a given time (in seconds)."""
+    future = futures.Future(loop=loop)
+    h = future._loop.call_later(delay,
+                                future._set_result_unless_cancelled, result)
+    try:
+        return (yield from future)
+    finally:
+        h.cancel()
+
+
+def async(coro_or_future, *, loop=None):
+    """Wrap a coroutine in a future.
+
+    If the argument is a Future, it is returned directly.
+    """
+    if isinstance(coro_or_future, futures.Future):
+        if loop is not None and loop is not coro_or_future._loop:
+            raise ValueError('loop argument must agree with Future')
+        return coro_or_future
+    elif coroutines.iscoroutine(coro_or_future):
+        if loop is None:
+            loop = events.get_event_loop()
+        task = loop.create_task(coro_or_future)
+        if task._source_traceback:
+            del task._source_traceback[-1]
+        return task
+    else:
+        raise TypeError('A Future or coroutine is required')
+
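+# Hedged note (not part of the original source): `async` later became a
+# reserved word, and this helper was renamed ensure_future() in Python
+# 3.4.4+.  For a coroutine it is equivalent to loop.create_task():
+#
+#     task = async(my_coro(), loop=loop)   # my_coro is hypothetical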
+
+class _GatheringFuture(futures.Future):
+    """Helper for gather().
+
+    This overrides cancel() to cancel all the children and act more
+    like Task.cancel(), which doesn't immediately mark itself as
+    cancelled.
+    """
+
+    def __init__(self, children, *, loop=None):
+        super().__init__(loop=loop)
+        self._children = children
+
+    def cancel(self):
+        if self.done():
+            return False
+        for child in self._children:
+            child.cancel()
+        return True
+
+
+def gather(*coros_or_futures, loop=None, return_exceptions=False):
+    """Return a future aggregating results from the given coroutines
+    or futures.
+
+    All futures must share the same event loop.  If all the tasks are
+    done successfully, the returned future's result is the list of
+    results (in the order of the original sequence, not necessarily
+    the order of results arrival).  If *return_exceptions* is True,
+    exceptions in the tasks are treated the same as successful
+    results, and gathered in the result list; otherwise, the first
+    raised exception will be immediately propagated to the returned
+    future.
+
+    Cancellation: if the outer Future is cancelled, all children (that
+    have not completed yet) are also cancelled.  If any child is
+    cancelled, this is treated as if it raised CancelledError --
+    the outer Future is *not* cancelled in this case.  (This is to
+    the outer Future is *not* cancelled in this case.  (This is to
+    prevent the cancellation of one child from causing other children
+    to be cancelled.)
+    if not coros_or_futures:
+        outer = futures.Future(loop=loop)
+        outer.set_result([])
+        return outer
+
+    arg_to_fut = {}
+    for arg in set(coros_or_futures):
+        if not isinstance(arg, futures.Future):
+            fut = async(arg, loop=loop)
+            if loop is None:
+                loop = fut._loop
+            # The caller cannot control this future, the "destroy pending task"
+            # warning should not be emitted.
+            fut._log_destroy_pending = False
+        else:
+            fut = arg
+            if loop is None:
+                loop = fut._loop
+            elif fut._loop is not loop:
+                raise ValueError("futures are tied to different event loops")
+        arg_to_fut[arg] = fut
+
+    children = [arg_to_fut[arg] for arg in coros_or_futures]
+    nchildren = len(children)
+    outer = _GatheringFuture(children, loop=loop)
+    nfinished = 0
+    results = [None] * nchildren
+
+    def _done_callback(i, fut):
+        nonlocal nfinished
+        if outer.done():
+            if not fut.cancelled():
+                # Mark exception retrieved.
+                fut.exception()
+            return
+
+        if fut.cancelled():
+            res = futures.CancelledError()
+            if not return_exceptions:
+                outer.set_exception(res)
+                return
+        elif fut._exception is not None:
+            res = fut.exception()  # Mark exception retrieved.
+            if not return_exceptions:
+                outer.set_exception(res)
+                return
+        else:
+            res = fut._result
+        results[i] = res
+        nfinished += 1
+        if nfinished == nchildren:
+            outer.set_result(results)
+
+    for i, fut in enumerate(children):
+        fut.add_done_callback(functools.partial(_done_callback, i))
+    return outer
+
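+# Hedged example (not part of the original source; f1/f2 are hypothetical
+# coroutines).  With return_exceptions=True, failures are delivered in-place
+# in the result list instead of propagating to the caller:
+#
+#     results = yield from gather(f1(), f2(), return_exceptions=True)
+#     errors = [r for r in results if isinstance(r, Exception)]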
+
+def shield(arg, *, loop=None):
+    """Wait for a future, shielding it from cancellation.
+
+    The statement
+
+        res = yield from shield(something())
+
+    is exactly equivalent to the statement
+
+        res = yield from something()
+
+    *except* that if the coroutine containing it is cancelled, the
+    task running in something() is not cancelled.  From the POV of
+    something(), the cancellation did not happen.  But its caller is
+    still cancelled, so the yield-from expression still raises
+    CancelledError.  Note: If something() is cancelled by other means
+    this will still cancel shield().
+
+    If you want to completely ignore cancellation (not recommended)
+    you can combine shield() with a try/except clause, as follows:
+
+        try:
+            res = yield from shield(something())
+        except CancelledError:
+            res = None
+    """
+    inner = async(arg, loop=loop)
+    if inner.done():
+        # Shortcut.
+        return inner
+    loop = inner._loop
+    outer = futures.Future(loop=loop)
+
+    def _done_callback(inner):
+        if outer.cancelled():
+            if not inner.cancelled():
+                # Mark inner's result as retrieved.
+                inner.exception()
+            return
+
+        if inner.cancelled():
+            outer.cancel()
+        else:
+            exc = inner.exception()
+            if exc is not None:
+                outer.set_exception(exc)
+            else:
+                outer.set_result(inner.result())
+
+    inner.add_done_callback(_done_callback)
+    return outer

+ 305 - 0
env/Lib/site-packages/asyncio/test_support.py

@@ -0,0 +1,305 @@
+# Subset of test.support from CPython 3.5, just what we need to run asyncio
+# test suite. The code is copied from CPython 3.5 to not depend on the test
+# module because it is rarely installed.
+
+# Ignore symbol TEST_HOME_DIR: test_events works without it
+
+import functools
+import gc
+import os
+import platform
+import re
+import socket
+import subprocess
+import sys
+import time
+import unittest
+
+
+# A constant likely larger than the underlying OS pipe buffer size, to
+# make writes blocking.
+# Windows limit seems to be around 512 B, and many Unix kernels have a
+# 64 KiB pipe buffer size or 16 * PAGE_SIZE: take a few megs to be sure.
+# (see issue #17835 for a discussion of this number).
+PIPE_MAX_SIZE = 4 * 1024 * 1024 + 1
+
+def strip_python_stderr(stderr):
+    """Strip the stderr of a Python process from potential debug output
+    emitted by the interpreter.
+
+    This will typically be run on the result of the communicate() method
+    of a subprocess.Popen object.
+    """
+    stderr = re.sub(br"\[\d+ refs, \d+ blocks\]\r?\n?", b"", stderr).strip()
+    return stderr
+
+
+# Executing the interpreter in a subprocess
+def _assert_python(expected_success, *args, **env_vars):
+    if '__isolated' in env_vars:
+        isolated = env_vars.pop('__isolated')
+    else:
+        isolated = not env_vars
+    cmd_line = [sys.executable, '-X', 'faulthandler']
+    if isolated and sys.version_info >= (3, 4):
+        # isolated mode: ignore Python environment variables, ignore user
+        # site-packages, and don't add the current directory to sys.path
+        cmd_line.append('-I')
+    elif not env_vars:
+        # ignore Python environment variables
+        cmd_line.append('-E')
+    # Need to preserve the original environment, for in-place testing of
+    # shared library builds.
+    env = os.environ.copy()
+    # But a special flag can be set to override this -- in that case, the
+    # caller is responsible for passing the full environment.
+    if env_vars.pop('__cleanenv', None):
+        env = {}
+    env.update(env_vars)
+    cmd_line.extend(args)
+    p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
+                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                         env=env)
+    try:
+        out, err = p.communicate()
+    finally:
+        subprocess._cleanup()
+        p.stdout.close()
+        p.stderr.close()
+    rc = p.returncode
+    err = strip_python_stderr(err)
+    if (rc and expected_success) or (not rc and not expected_success):
+        raise AssertionError(
+            "Process return code is %d, "
+            "stderr follows:\n%s" % (rc, err.decode('ascii', 'ignore')))
+    return rc, out, err
+
+
+def assert_python_ok(*args, **env_vars):
+    """
+    Assert that running the interpreter with `args` and optional environment
+    variables `env_vars` succeeds (rc == 0) and returns a (return code, stdout,
+    stderr) tuple.
+
+    If the __cleanenv keyword is set, env_vars is used as a fresh environment.
+
+    Python is started in isolated mode (command line option -I),
+    except if the __isolated keyword is set to False.
+    """
+    return _assert_python(True, *args, **env_vars)
+
+
+is_jython = sys.platform.startswith('java')
+
+def gc_collect():
+    """Force as many objects as possible to be collected.
+
+    In non-CPython implementations of Python, this is needed because timely
+    deallocation is not guaranteed by the garbage collector.  (Even in CPython
+    this can happen with reference cycles.)  This means that __del__
+    methods may be called later than expected and weakrefs may remain alive for
+    longer than expected.  This function tries its best to force all garbage
+    objects to disappear.
+    """
+    gc.collect()
+    if is_jython:
+        time.sleep(0.1)
+    gc.collect()
+    gc.collect()
+
+
+HOST = "127.0.0.1"
+HOSTv6 = "::1"
+
+
+def _is_ipv6_enabled():
+    """Check whether IPv6 is enabled on this host."""
+    if socket.has_ipv6:
+        sock = None
+        try:
+            sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+            sock.bind((HOSTv6, 0))
+            return True
+        except OSError:
+            pass
+        finally:
+            if sock:
+                sock.close()
+    return False
+
+IPV6_ENABLED = _is_ipv6_enabled()
+
+
+def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
+    """Returns an unused port that should be suitable for binding.  This is
+    achieved by creating a temporary socket with the given family and type
+    (default is AF_INET, SOCK_STREAM), and binding it to
+    the specified host address (defaults to 0.0.0.0) with the port set to 0,
+    eliciting an unused ephemeral port from the OS.  The temporary socket is
+    then closed and deleted, and the ephemeral port is returned.
+
+    Either this method or bind_port() should be used for any tests where a
+    server socket needs to be bound to a particular port for the duration of
+    the test.  Which one to use depends on whether the calling code is creating
+    a python socket, or if an unused port needs to be provided in a constructor
+    or passed to an external program (i.e. the -accept argument to openssl's
+    s_server mode).  Always prefer bind_port() over find_unused_port() where
+    possible.  Hard coded ports should *NEVER* be used.  As soon as a server
+    socket is bound to a hard coded port, the ability to run multiple instances
+    of the test simultaneously on the same host is compromised, which makes the
+    test a ticking time bomb in a buildbot environment. On Unix buildbots, this
+    may simply manifest as a failed test, which can be recovered from without
+    intervention in most cases, but on Windows, the entire python process can
+    completely and utterly wedge, requiring someone to log in to the buildbot
+    and manually kill the affected process.
+
+    (This is easy to reproduce on Windows, unfortunately, and can be traced to
+    the SO_REUSEADDR socket option having different semantics on Windows versus
+    Unix/Linux.  On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
+    listen and then accept connections on identical host/ports.  An EADDRINUSE
+    OSError will be raised at some point (depending on the platform and
+    the order bind and listen were called on each socket).
+
+    However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
+    will ever be raised when attempting to bind two identical host/ports. When
+    accept() is called on each socket, the second caller's process will steal
+    the port from the first caller, leaving them both in an awkwardly wedged
+    state where they'll no longer respond to any signals or graceful kills, and
+    must be forcibly killed via OpenProcess()/TerminateProcess().
+
+    The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
+    instead of SO_REUSEADDR, which effectively affords the same semantics as
+    SO_REUSEADDR on Unix.  Given the propensity of Unix developers in the Open
+    Source world compared to Windows ones, this is a common mistake.  A quick
+    look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
+    openssl.exe is called with the 's_server' option, for example. See
+    http://bugs.python.org/issue2550 for more info.  The following site also
+    has a very thorough description about the implications of both REUSEADDR
+    and EXCLUSIVEADDRUSE on Windows:
+    http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)
+
+    XXX: although this approach is a vast improvement on previous attempts to
+    elicit unused ports, it rests heavily on the assumption that the ephemeral
+    port returned to us by the OS won't immediately be dished back out to some
+    other process when we close and delete our temporary socket but before our
+    calling code has a chance to bind the returned port.  We can deal with this
+    issue if/when we come across it.
+    """
+
+    tempsock = socket.socket(family, socktype)
+    port = bind_port(tempsock)
+    tempsock.close()
+    del tempsock
+    return port
+
+def bind_port(sock, host=HOST):
+    """Bind the socket to a free port and return the port number.  Relies on
+    ephemeral ports in order to ensure we are using an unbound port.  This is
+    important as many tests may be running simultaneously, especially in a
+    buildbot environment.  This method raises an exception if the sock.family
+    is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
+    or SO_REUSEPORT set on it.  Tests should *never* set these socket options
+    for TCP/IP sockets.  The only case for setting these options is testing
+    multicasting via multiple UDP sockets.
+
+    Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
+    on Windows), it will be set on the socket.  This will prevent anyone else
+    from bind()'ing to our host/port for the duration of the test.
+    """
+
+    if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
+        if hasattr(socket, 'SO_REUSEADDR'):
+            if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
+                raise TestFailed("tests should never set the SO_REUSEADDR "
+                                 "socket option on TCP/IP sockets!")
+        if hasattr(socket, 'SO_REUSEPORT'):
+            try:
+                reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
+                if reuse == 1:
+                    raise TestFailed("tests should never set the SO_REUSEPORT "
+                                     "socket option on TCP/IP sockets!")
+            except OSError:
+                # Python's socket module was compiled using modern headers
+                # thus defining SO_REUSEPORT but this process is running
+                # under an older kernel that does not support SO_REUSEPORT.
+                pass
+        if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
+            sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
+
+    sock.bind((host, 0))
+    port = sock.getsockname()[1]
+    return port
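+
+# Hedged usage sketch (not part of the original source):
+#
+#     sock = socket.socket()       # AF_INET / SOCK_STREAM by default
+#     port = bind_port(sock)       # bound to (HOST, <ephemeral port>)
+#     sock.listen(1)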
+
+def requires_mac_ver(*min_version):
+    """Decorator raising SkipTest if the OS is Mac OS X and the OS X
+    version is less than min_version.
+
+    For example, @requires_mac_ver(10, 5) raises SkipTest if the OS X version
+    is less than 10.5.
+    """
+    def decorator(func):
+        @functools.wraps(func)
+        def wrapper(*args, **kw):
+            if sys.platform == 'darwin':
+                version_txt = platform.mac_ver()[0]
+                try:
+                    version = tuple(map(int, version_txt.split('.')))
+                except ValueError:
+                    pass
+                else:
+                    if version < min_version:
+                        min_version_txt = '.'.join(map(str, min_version))
+                        raise unittest.SkipTest(
+                            "Mac OS X %s or higher required, not %s"
+                            % (min_version_txt, version_txt))
+            return func(*args, **kw)
+        wrapper.min_version = min_version
+        return wrapper
+    return decorator
+
+def _requires_unix_version(sysname, min_version):
+    """Decorator raising SkipTest if the OS is `sysname` and the version is
+    less than `min_version`.
+
+    For example, @_requires_unix_version('FreeBSD', (7, 2)) raises SkipTest if
+    the FreeBSD version is less than 7.2.
+    """
+    def decorator(func):
+        @functools.wraps(func)
+        def wrapper(*args, **kw):
+            if platform.system() == sysname:
+                version_txt = platform.release().split('-', 1)[0]
+                try:
+                    version = tuple(map(int, version_txt.split('.')))
+                except ValueError:
+                    pass
+                else:
+                    if version < min_version:
+                        min_version_txt = '.'.join(map(str, min_version))
+                        raise unittest.SkipTest(
+                            "%s version %s or higher required, not %s"
+                            % (sysname, min_version_txt, version_txt))
+            return func(*args, **kw)
+        wrapper.min_version = min_version
+        return wrapper
+    return decorator
+
+def requires_freebsd_version(*min_version):
+    """Decorator raising SkipTest if the OS is FreeBSD and the FreeBSD version
+    is less than `min_version`.
+
+    For example, @requires_freebsd_version(7, 2) raises SkipTest if the FreeBSD
+    version is less than 7.2.
+    """
+    return _requires_unix_version('FreeBSD', min_version)
+
+# Use test.support if available
+try:
+    from test.support import *
+except ImportError:
+    pass
+
+# Use test.script_helper if available
+try:
+    from test.script_helper import assert_python_ok
+except ImportError:
+    pass

+ 446 - 0
env/Lib/site-packages/asyncio/test_utils.py

@@ -0,0 +1,446 @@
+"""Utilities shared by tests."""
+
+import collections
+import contextlib
+import io
+import logging
+import os
+import re
+import socket
+import socketserver
+import sys
+import tempfile
+import threading
+import time
+import unittest
+from unittest import mock
+
+from http.server import HTTPServer
+from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
+
+try:
+    import ssl
+except ImportError:  # pragma: no cover
+    ssl = None
+
+from . import base_events
+from . import events
+from . import futures
+from . import selectors
+from . import tasks
+from .coroutines import coroutine
+from .log import logger
+
+
+if sys.platform == 'win32':  # pragma: no cover
+    from .windows_utils import socketpair
+else:
+    from socket import socketpair  # pragma: no cover
+
+
+def dummy_ssl_context():
+    if ssl is None:
+        return None
+    else:
+        return ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+
+
+def run_briefly(loop):
+    @coroutine
+    def once():
+        pass
+    gen = once()
+    t = loop.create_task(gen)
+    # Don't log a warning if the task is not done after run_until_complete().
+    # It occurs if the loop is stopped or if a task raises a BaseException.
+    t._log_destroy_pending = False
+    try:
+        loop.run_until_complete(t)
+    finally:
+        gen.close()
+
+
+def run_until(loop, pred, timeout=30):
+    deadline = time.time() + timeout
+    while not pred():
+        if timeout is not None:
+            timeout = deadline - time.time()
+            if timeout <= 0:
+                raise futures.TimeoutError()
+        loop.run_until_complete(tasks.sleep(0.001, loop=loop))
+
+
+def run_once(loop):
+    """loop.stop() schedules _raise_stop_error()
+    and run_forever() runs until _raise_stop_error() callback.
+    this wont work if test waits for some IO events, because
+    _raise_stop_error() runs before any of io events callbacks.
+    """
+    loop.stop()
+    loop.run_forever()
+
+
+class SilentWSGIRequestHandler(WSGIRequestHandler):
+
+    def get_stderr(self):
+        return io.StringIO()
+
+    def log_message(self, format, *args):
+        pass
+
+
+class SilentWSGIServer(WSGIServer):
+
+    request_timeout = 2
+
+    def get_request(self):
+        request, client_addr = super().get_request()
+        request.settimeout(self.request_timeout)
+        return request, client_addr
+
+    def handle_error(self, request, client_address):
+        pass
+
+
+class SSLWSGIServerMixin:
+
+    def finish_request(self, request, client_address):
+        # The relative location of our test directory (which
+        # contains the ssl key and certificate files) differs
+        # between the stdlib and stand-alone asyncio.
+        # Prefer our own if we can find it.
+        here = os.path.join(os.path.dirname(__file__), '..', 'tests')
+        if not os.path.isdir(here):
+            here = os.path.join(os.path.dirname(os.__file__),
+                                'test', 'test_asyncio')
+        keyfile = os.path.join(here, 'ssl_key.pem')
+        certfile = os.path.join(here, 'ssl_cert.pem')
+        ssock = ssl.wrap_socket(request,
+                                keyfile=keyfile,
+                                certfile=certfile,
+                                server_side=True)
+        try:
+            self.RequestHandlerClass(ssock, client_address, self)
+            ssock.close()
+        except OSError:
+            # maybe socket has been closed by peer
+            pass
+
+
+class SSLWSGIServer(SSLWSGIServerMixin, SilentWSGIServer):
+    pass
+
+
+def _run_test_server(*, address, use_ssl=False, server_cls, server_ssl_cls):
+
+    def app(environ, start_response):
+        status = '200 OK'
+        headers = [('Content-type', 'text/plain')]
+        start_response(status, headers)
+        return [b'Test message']
+
+    # Run the test WSGI server in a separate thread in order not to
+    # interfere with event handling in the main thread
+    server_class = server_ssl_cls if use_ssl else server_cls
+    httpd = server_class(address, SilentWSGIRequestHandler)
+    httpd.set_app(app)
+    httpd.address = httpd.server_address
+    server_thread = threading.Thread(
+        target=lambda: httpd.serve_forever(poll_interval=0.05))
+    server_thread.start()
+    try:
+        yield httpd
+    finally:
+        httpd.shutdown()
+        httpd.server_close()
+        server_thread.join()
+
+
+if hasattr(socket, 'AF_UNIX'):
+
+    class UnixHTTPServer(socketserver.UnixStreamServer, HTTPServer):
+
+        def server_bind(self):
+            socketserver.UnixStreamServer.server_bind(self)
+            self.server_name = '127.0.0.1'
+            self.server_port = 80
+
+
+    class UnixWSGIServer(UnixHTTPServer, WSGIServer):
+
+        request_timeout = 2
+
+        def server_bind(self):
+            UnixHTTPServer.server_bind(self)
+            self.setup_environ()
+
+        def get_request(self):
+            request, client_addr = super().get_request()
+            request.settimeout(self.request_timeout)
+            # Code in the stdlib expects that get_request
+            # will return a socket and a tuple (host, port).
+            # However, this isn't true for UNIX sockets,
+            # as the second return value will be a path;
+            # hence we return some fake data sufficient
+            # to get the tests going
+            return request, ('127.0.0.1', '')
+
+
+    class SilentUnixWSGIServer(UnixWSGIServer):
+
+        def handle_error(self, request, client_address):
+            pass
+
+
+    class UnixSSLWSGIServer(SSLWSGIServerMixin, SilentUnixWSGIServer):
+        pass
+
+
+    def gen_unix_socket_path():
+        with tempfile.NamedTemporaryFile() as file:
+            return file.name
+
+
+    @contextlib.contextmanager
+    def unix_socket_path():
+        path = gen_unix_socket_path()
+        try:
+            yield path
+        finally:
+            try:
+                os.unlink(path)
+            except OSError:
+                pass
+
+
+    @contextlib.contextmanager
+    def run_test_unix_server(*, use_ssl=False):
+        with unix_socket_path() as path:
+            yield from _run_test_server(address=path, use_ssl=use_ssl,
+                                        server_cls=SilentUnixWSGIServer,
+                                        server_ssl_cls=UnixSSLWSGIServer)
+
+
+@contextlib.contextmanager
+def run_test_server(*, host='127.0.0.1', port=0, use_ssl=False):
+    yield from _run_test_server(address=(host, port), use_ssl=use_ssl,
+                                server_cls=SilentWSGIServer,
+                                server_ssl_cls=SSLWSGIServer)
+
+
+def make_test_protocol(base):
+    dct = {}
+    for name in dir(base):
+        if name.startswith('__') and name.endswith('__'):
+            # skip magic names
+            continue
+        dct[name] = MockCallback(return_value=None)
+    return type('TestProtocol', (base,) + base.__bases__, dct)()
+
+
+class TestSelector(selectors.BaseSelector):
+
+    def __init__(self):
+        self.keys = {}
+
+    def register(self, fileobj, events, data=None):
+        key = selectors.SelectorKey(fileobj, 0, events, data)
+        self.keys[fileobj] = key
+        return key
+
+    def unregister(self, fileobj):
+        return self.keys.pop(fileobj)
+
+    def select(self, timeout):
+        return []
+
+    def get_map(self):
+        return self.keys
+
+
+class TestLoop(base_events.BaseEventLoop):
+    """Loop for unittests.
+
+    It manages its own (virtual) time directly.  When something is
+    scheduled to be executed later, then on the next loop iteration,
+    after all ready handlers have run, the generator passed to
+    __init__ is resumed.
+
+    The generator should look like this:
+
+        def gen():
+            ...
+            when = yield ...
+            ... = yield time_advance
+
+    The value returned by yield is the absolute time of the next
+    scheduled handler.  The value passed to yield is the time advance
+    used to move the loop's time forward.
+    """
+
+    def __init__(self, gen=None):
+        super().__init__()
+
+        if gen is None:
+            def gen():
+                yield
+            self._check_on_close = False
+        else:
+            self._check_on_close = True
+
+        self._gen = gen()
+        next(self._gen)
+        self._time = 0
+        self._clock_resolution = 1e-9
+        self._timers = []
+        self._selector = TestSelector()
+
+        self.readers = {}
+        self.writers = {}
+        self.reset_counters()
+
+    def time(self):
+        return self._time
+
+    def advance_time(self, advance):
+        """Move test time forward."""
+        if advance:
+            self._time += advance
+
+    def close(self):
+        super().close()
+        if self._check_on_close:
+            try:
+                self._gen.send(0)
+            except StopIteration:
+                pass
+            else:  # pragma: no cover
+                raise AssertionError("Time generator is not finished")
+
+    def add_reader(self, fd, callback, *args):
+        self.readers[fd] = events.Handle(callback, args, self)
+
+    def remove_reader(self, fd):
+        self.remove_reader_count[fd] += 1
+        if fd in self.readers:
+            del self.readers[fd]
+            return True
+        else:
+            return False
+
+    def assert_reader(self, fd, callback, *args):
+        assert fd in self.readers, 'fd {} is not registered'.format(fd)
+        handle = self.readers[fd]
+        assert handle._callback == callback, '{!r} != {!r}'.format(
+            handle._callback, callback)
+        assert handle._args == args, '{!r} != {!r}'.format(
+            handle._args, args)
+
+    def add_writer(self, fd, callback, *args):
+        self.writers[fd] = events.Handle(callback, args, self)
+
+    def remove_writer(self, fd):
+        self.remove_writer_count[fd] += 1
+        if fd in self.writers:
+            del self.writers[fd]
+            return True
+        else:
+            return False
+
+    def assert_writer(self, fd, callback, *args):
+        assert fd in self.writers, 'fd {} is not registered'.format(fd)
+        handle = self.writers[fd]
+        assert handle._callback == callback, '{!r} != {!r}'.format(
+            handle._callback, callback)
+        assert handle._args == args, '{!r} != {!r}'.format(
+            handle._args, args)
+
+    def reset_counters(self):
+        self.remove_reader_count = collections.defaultdict(int)
+        self.remove_writer_count = collections.defaultdict(int)
+
+    def _run_once(self):
+        super()._run_once()
+        for when in self._timers:
+            advance = self._gen.send(when)
+            self.advance_time(advance)
+        self._timers = []
+
+    def call_at(self, when, callback, *args):
+        self._timers.append(when)
+        return super().call_at(when, callback, *args)
+
+    def _process_events(self, event_list):
+        return
+
+    def _write_to_self(self):
+        pass
+
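+# Hedged example of a time generator for TestLoop (not part of the original
+# source).  The loop sends each scheduled deadline into the generator, which
+# yields back how far to advance the virtual clock (here, straight to the
+# deadline, assuming the clock starts at 0):
+#
+#     def gen():
+#         when = yield     # receives the first call_at() deadline
+#         yield when       # advance the clock by that amount
+#
+#     loop = TestLoop(gen)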
+
+def MockCallback(**kwargs):
+    return mock.Mock(spec=['__call__'], **kwargs)
+
+
+class MockPattern(str):
+    """A regex based str with a fuzzy __eq__.
+
+    Use this helper with 'mock.assert_called_with', or anywhere
+    where a regex comparison between strings is needed.
+
+    For instance:
+       mock_call.assert_called_with(MockPattern('spam.*ham'))
+    """
+    def __eq__(self, other):
+        return bool(re.search(str(self), other, re.S))
+
+
+def get_function_source(func):
+    source = events._get_function_source(func)
+    if source is None:
+        raise ValueError("unable to get the source of %r" % (func,))
+    return source
+
+
+class TestCase(unittest.TestCase):
+    def set_event_loop(self, loop, *, cleanup=True):
+        assert loop is not None
+        # ensure that the event loop is passed explicitly in asyncio
+        events.set_event_loop(None)
+        if cleanup:
+            self.addCleanup(loop.close)
+
+    def new_test_loop(self, gen=None):
+        loop = TestLoop(gen)
+        self.set_event_loop(loop)
+        return loop
+
+    def tearDown(self):
+        events.set_event_loop(None)
+
+        # Detect CPython bug #23353: ensure that yield/yield-from is not used
+        # in an except block of a generator
+        self.assertEqual(sys.exc_info(), (None, None, None))
+
+
+@contextlib.contextmanager
+def disable_logger():
+    """Context manager to disable asyncio logger.
+
+    For example, it can be used to ignore warnings in debug mode.
+    """
+    old_level = logger.level
+    try:
+        logger.setLevel(logging.CRITICAL+1)
+        yield
+    finally:
+        logger.setLevel(old_level)
+
+
+def mock_nonblocking_socket():
+    """Create a mock of a non-blocking socket."""
+    sock = mock.Mock(socket.socket)
+    sock.gettimeout.return_value = 0.0
+    return sock
+
+
+def force_legacy_ssl_support():
+    return mock.patch('asyncio.sslproto._is_sslproto_available',
+                      return_value=False)

+ 300 - 0
env/Lib/site-packages/asyncio/transports.py

@@ -0,0 +1,300 @@
+"""Abstract Transport class."""
+
+import sys
+
+_PY34 = sys.version_info >= (3, 4)
+
+__all__ = ['BaseTransport', 'ReadTransport', 'WriteTransport',
+           'Transport', 'DatagramTransport', 'SubprocessTransport',
+           ]
+
+
+class BaseTransport:
+    """Base class for transports."""
+
+    def __init__(self, extra=None):
+        if extra is None:
+            extra = {}
+        self._extra = extra
+
+    def get_extra_info(self, name, default=None):
+        """Get optional transport information."""
+        return self._extra.get(name, default)
+
+    def close(self):
+        """Close the transport.
+
+        Buffered data will be flushed asynchronously.  No more data
+        will be received.  After all buffered data is flushed, the
+        protocol's connection_lost() method will (eventually) be called
+        with None as its argument.
+        """
+        raise NotImplementedError
+
+
+class ReadTransport(BaseTransport):
+    """Interface for read-only transports."""
+
+    def pause_reading(self):
+        """Pause the receiving end.
+
+        No data will be passed to the protocol's data_received()
+        method until resume_reading() is called.
+        """
+        raise NotImplementedError
+
+    def resume_reading(self):
+        """Resume the receiving end.
+
+        Data received will once again be passed to the protocol's
+        data_received() method.
+        """
+        raise NotImplementedError
+
+
+class WriteTransport(BaseTransport):
+    """Interface for write-only transports."""
+
+    def set_write_buffer_limits(self, high=None, low=None):
+        """Set the high- and low-water limits for write flow control.
+
+        These two values control when to call the protocol's
+        pause_writing() and resume_writing() methods.  If specified,
+        the low-water limit must be less than or equal to the
+        high-water limit.  Neither value can be negative.
+
+        The defaults are implementation-specific.  If only the
+        high-water limit is given, the low-water limit defaults to an
+        implementation-specific value less than or equal to the
+        high-water limit.  Setting high to zero forces low to zero as
+        well, and causes pause_writing() to be called whenever the
+        buffer becomes non-empty.  Setting low to zero causes
+        resume_writing() to be called only once the buffer is empty.
+        Use of zero for either limit is generally sub-optimal as it
+        reduces opportunities for doing I/O and computation
+        concurrently.
+        """
+        raise NotImplementedError
+
+    def get_write_buffer_size(self):
+        """Return the current size of the write buffer."""
+        raise NotImplementedError
+
+    def write(self, data):
+        """Write some data bytes to the transport.
+
+        This does not block; it buffers the data and arranges for it
+        to be sent out asynchronously.
+        """
+        raise NotImplementedError
+
+    def writelines(self, list_of_data):
+        """Write a list (or any iterable) of data bytes to the transport.
+
+        The default implementation concatenates the arguments and
+        calls write() on the result.
+        """
+        if not _PY34:
+            # In Python 3.3, bytes.join() doesn't handle memoryview.
+            list_of_data = (
+                bytes(data) if isinstance(data, memoryview) else data
+                for data in list_of_data)
+        self.write(b''.join(list_of_data))
+
+    def write_eof(self):
+        """Close the write end after flushing buffered data.
+
+        (This is like typing ^D into a UNIX program reading from stdin.)
+
+        Data may still be received.
+        """
+        raise NotImplementedError
+
+    def can_write_eof(self):
+        """Return True if this transport supports write_eof(), False if not."""
+        raise NotImplementedError
+
+    def abort(self):
+        """Close the transport immediately.
+
+        Buffered data will be lost.  No more data will be received.
+        The protocol's connection_lost() method will (eventually) be
+        called with None as its argument.
+        """
+        raise NotImplementedError
+
+
+class Transport(ReadTransport, WriteTransport):
+    """Interface representing a bidirectional transport.
+
+    There may be several implementations, but typically, the user does
+    not implement new transports; rather, the platform provides some
+    useful transports that are implemented using the platform's best
+    practices.
+
+    The user never instantiates a transport directly; they call a
+    utility function, passing it a protocol factory and other
+    information necessary to create the transport and protocol.  (E.g.
+    EventLoop.create_connection() or EventLoop.create_server().)
+
+    The utility function will asynchronously create a transport and a
+    protocol and hook them up by calling the protocol's
+    connection_made() method, passing it the transport.
+
+    The implementation here raises NotImplementedError for every method
+    except writelines(), which calls write() in a loop.
+    """
+
+
+class DatagramTransport(BaseTransport):
+    """Interface for datagram (UDP) transports."""
+
+    def sendto(self, data, addr=None):
+        """Send data to the transport.
+
+        This does not block; it buffers the data and arranges for it
+        to be sent out asynchronously.
+        addr is the target socket address.  If addr is None, the target
+        address given on transport creation is used.
+        """
+        raise NotImplementedError
+
+    def abort(self):
+        """Close the transport immediately.
+
+        Buffered data will be lost.  No more data will be received.
+        The protocol's connection_lost() method will (eventually) be
+        called with None as its argument.
+        """
+        raise NotImplementedError
+
+
+class SubprocessTransport(BaseTransport):
+
+    def get_pid(self):
+        """Get subprocess id."""
+        raise NotImplementedError
+
+    def get_returncode(self):
+        """Get subprocess returncode.
+
+        See also
+        http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode
+        """
+        raise NotImplementedError
+
+    def get_pipe_transport(self, fd):
+        """Get transport for pipe with number fd."""
+        raise NotImplementedError
+
+    def send_signal(self, signal):
+        """Send signal to subprocess.
+
+        See also:
+        docs.python.org/3/library/subprocess#subprocess.Popen.send_signal
+        """
+        raise NotImplementedError
+
+    def terminate(self):
+        """Stop the subprocess.
+
+        Alias for close() method.
+
+        On Posix OSs the method sends SIGTERM to the subprocess.
+        On Windows the Win32 API function TerminateProcess()
+        is called to stop the subprocess.
+
+        See also:
+        http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate
+        """
+        raise NotImplementedError
+
+    def kill(self):
+        """Kill the subprocess.
+
+        On Posix OSs the function sends SIGKILL to the subprocess.
+        On Windows kill() is an alias for terminate().
+
+        See also:
+        http://docs.python.org/3/library/subprocess#subprocess.Popen.kill
+        """
+        raise NotImplementedError
+
+
+class _FlowControlMixin(Transport):
+    """All the logic for (write) flow control in a mix-in base class.
+
+    The subclass must implement get_write_buffer_size().  It must call
+    _maybe_pause_protocol() whenever the write buffer size increases,
+    and _maybe_resume_protocol() whenever it decreases.  It may also
+    override set_write_buffer_limits() (e.g. to specify different
+    defaults).
+
+    The subclass constructor must call super().__init__(extra).  This
+    will call set_write_buffer_limits().
+
+    The user may call set_write_buffer_limits() and
+    get_write_buffer_size(), and their protocol's pause_writing() and
+    resume_writing() may be called.
+    """
+
+    def __init__(self, extra=None, loop=None):
+        super().__init__(extra)
+        assert loop is not None
+        self._loop = loop
+        self._protocol_paused = False
+        self._set_write_buffer_limits()
+
+    def _maybe_pause_protocol(self):
+        size = self.get_write_buffer_size()
+        if size <= self._high_water:
+            return
+        if not self._protocol_paused:
+            self._protocol_paused = True
+            try:
+                self._protocol.pause_writing()
+            except Exception as exc:
+                self._loop.call_exception_handler({
+                    'message': 'protocol.pause_writing() failed',
+                    'exception': exc,
+                    'transport': self,
+                    'protocol': self._protocol,
+                })
+
+    def _maybe_resume_protocol(self):
+        if (self._protocol_paused and
+            self.get_write_buffer_size() <= self._low_water):
+            self._protocol_paused = False
+            try:
+                self._protocol.resume_writing()
+            except Exception as exc:
+                self._loop.call_exception_handler({
+                    'message': 'protocol.resume_writing() failed',
+                    'exception': exc,
+                    'transport': self,
+                    'protocol': self._protocol,
+                })
+
+    def get_write_buffer_limits(self):
+        return (self._low_water, self._high_water)
+
+    def _set_write_buffer_limits(self, high=None, low=None):
+        if high is None:
+            if low is None:
+                high = 64*1024
+            else:
+                high = 4*low
+        if low is None:
+            low = high // 4
+        if not high >= low >= 0:
+            raise ValueError('high (%r) must be >= low (%r) must be >= 0' %
+                             (high, low))
+        self._high_water = high
+        self._low_water = low
+
+    def set_write_buffer_limits(self, high=None, low=None):
+        self._set_write_buffer_limits(high=high, low=low)
+        self._maybe_pause_protocol()
+
+    def get_write_buffer_size(self):
+        raise NotImplementedError
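+
+
+# Hedged worked example (not part of the original source): with the defaults
+# computed in _set_write_buffer_limits(), high is 64 KiB (65536) and low is
+# high // 4 (16384), so _maybe_pause_protocol() calls pause_writing() once
+# more than 65536 bytes are buffered, and _maybe_resume_protocol() calls
+# resume_writing() once the buffer drains to 16384 bytes or fewer.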

+ 998 - 0
env/Lib/site-packages/asyncio/unix_events.py

@@ -0,0 +1,998 @@
+"""Selector event loop for Unix with signal handling."""
+
+import errno
+import os
+import signal
+import socket
+import stat
+import subprocess
+import sys
+import threading
+import warnings
+
+
+from . import base_events
+from . import base_subprocess
+from . import constants
+from . import coroutines
+from . import events
+from . import futures
+from . import selector_events
+from . import selectors
+from . import transports
+from .coroutines import coroutine
+from .log import logger
+
+
+__all__ = ['SelectorEventLoop',
+           'AbstractChildWatcher', 'SafeChildWatcher',
+           'FastChildWatcher', 'DefaultEventLoopPolicy',
+           ]
+
+if sys.platform == 'win32':  # pragma: no cover
+    raise ImportError('Signals are not really supported on Windows')
+
+
+def _sighandler_noop(signum, frame):
+    """Dummy signal handler."""
+    pass
+
+
+class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
+    """Unix event loop.
+
+    Adds signal handling and UNIX Domain Socket support to SelectorEventLoop.
+    """
+
+    def __init__(self, selector=None):
+        super().__init__(selector)
+        self._signal_handlers = {}
+
+    def _socketpair(self):
+        return socket.socketpair()
+
+    def close(self):
+        super().close()
+        for sig in list(self._signal_handlers):
+            self.remove_signal_handler(sig)
+
+    def _process_self_data(self, data):
+        for signum in data:
+            if not signum:
+                # ignore null bytes written by _write_to_self()
+                continue
+            self._handle_signal(signum)
+
+    def add_signal_handler(self, sig, callback, *args):
+        """Add a handler for a signal.  UNIX only.
+
+        Raise ValueError if the signal number is invalid or uncatchable.
+        Raise RuntimeError if there is a problem setting up the handler.
+        """
+        if (coroutines.iscoroutine(callback) or
+                coroutines.iscoroutinefunction(callback)):
+            raise TypeError("coroutines cannot be used "
+                            "with add_signal_handler()")
+        self._check_signal(sig)
+        self._check_closed()
+        try:
+            # set_wakeup_fd() raises ValueError if this is not the
+            # main thread.  By calling it early we ensure that an
+            # event loop running in another thread cannot add a signal
+            # handler.
+            signal.set_wakeup_fd(self._csock.fileno())
+        except (ValueError, OSError) as exc:
+            raise RuntimeError(str(exc))
+
+        handle = events.Handle(callback, args, self)
+        self._signal_handlers[sig] = handle
+
+        try:
+            # Register a dummy signal handler to ask Python to write the signal
+            # number in the wakeup file descriptor. _process_self_data() will
+            # read signal numbers from this file descriptor to handle signals.
+            signal.signal(sig, _sighandler_noop)
+
+            # Set SA_RESTART to limit EINTR occurrences.
+            signal.siginterrupt(sig, False)
+        except OSError as exc:
+            del self._signal_handlers[sig]
+            if not self._signal_handlers:
+                try:
+                    signal.set_wakeup_fd(-1)
+                except (ValueError, OSError) as nexc:
+                    logger.info('set_wakeup_fd(-1) failed: %s', nexc)
+
+            if exc.errno == errno.EINVAL:
+                raise RuntimeError('sig {} cannot be caught'.format(sig))
+            else:
+                raise
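+
+    # Hedged usage sketch (not part of the original source):
+    #
+    #     loop.add_signal_handler(signal.SIGINT, loop.stop)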
+
+    def _handle_signal(self, sig):
+        """Internal helper that is the actual signal handler."""
+        handle = self._signal_handlers.get(sig)
+        if handle is None:
+            return  # Assume it's some race condition.
+        if handle._cancelled:
+            self.remove_signal_handler(sig)  # Remove it properly.
+        else:
+            self._add_callback_signalsafe(handle)
+
+    def remove_signal_handler(self, sig):
+        """Remove a handler for a signal.  UNIX only.
+
+        Return True if a signal handler was removed, False if not.
+        """
+        self._check_signal(sig)
+        try:
+            del self._signal_handlers[sig]
+        except KeyError:
+            return False
+
+        if sig == signal.SIGINT:
+            handler = signal.default_int_handler
+        else:
+            handler = signal.SIG_DFL
+
+        try:
+            signal.signal(sig, handler)
+        except OSError as exc:
+            if exc.errno == errno.EINVAL:
+                raise RuntimeError('sig {} cannot be caught'.format(sig))
+            else:
+                raise
+
+        if not self._signal_handlers:
+            try:
+                signal.set_wakeup_fd(-1)
+            except (ValueError, OSError) as exc:
+                logger.info('set_wakeup_fd(-1) failed: %s', exc)
+
+        return True
+
+    def _check_signal(self, sig):
+        """Internal helper to validate a signal.
+
+        Raise ValueError if the signal number is invalid or uncatchable.
+        Raise RuntimeError if there is a problem setting up the handler.
+        """
+        if not isinstance(sig, int):
+            raise TypeError('sig must be an int, not {!r}'.format(sig))
+
+        if not (1 <= sig < signal.NSIG):
+            raise ValueError(
+                'sig {} out of range(1, {})'.format(sig, signal.NSIG))
+
+    def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
+                                  extra=None):
+        return _UnixReadPipeTransport(self, pipe, protocol, waiter, extra)
+
+    def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
+                                   extra=None):
+        return _UnixWritePipeTransport(self, pipe, protocol, waiter, extra)
+
+    @coroutine
+    def _make_subprocess_transport(self, protocol, args, shell,
+                                   stdin, stdout, stderr, bufsize,
+                                   extra=None, **kwargs):
+        with events.get_child_watcher() as watcher:
+            waiter = futures.Future(loop=self)
+            transp = _UnixSubprocessTransport(self, protocol, args, shell,
+                                              stdin, stdout, stderr, bufsize,
+                                              waiter=waiter, extra=extra,
+                                              **kwargs)
+
+            watcher.add_child_handler(transp.get_pid(),
+                                      self._child_watcher_callback, transp)
+            try:
+                yield from waiter
+            except Exception as exc:
+                # Workaround CPython bug #23353: using yield/yield-from in an
+                # except block of a generator doesn't properly clear
+                # sys.exc_info()
+                err = exc
+            else:
+                err = None
+
+            if err is not None:
+                transp.close()
+                yield from transp._wait()
+                raise err
+
+        return transp
+
+    def _child_watcher_callback(self, pid, returncode, transp):
+        self.call_soon_threadsafe(transp._process_exited, returncode)
+
+    @coroutine
+    def create_unix_connection(self, protocol_factory, path, *,
+                               ssl=None, sock=None,
+                               server_hostname=None):
+        assert server_hostname is None or isinstance(server_hostname, str)
+        if ssl:
+            if server_hostname is None:
+                raise ValueError(
+                    'you have to pass server_hostname when using ssl')
+        else:
+            if server_hostname is not None:
+                raise ValueError('server_hostname is only meaningful with ssl')
+
+        if path is not None:
+            if sock is not None:
+                raise ValueError(
+                    'path and sock can not be specified at the same time')
+
+            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
+            try:
+                sock.setblocking(False)
+                yield from self.sock_connect(sock, path)
+            except:
+                sock.close()
+                raise
+
+        else:
+            if sock is None:
+                raise ValueError('no path and sock were specified')
+            sock.setblocking(False)
+
+        transport, protocol = yield from self._create_connection_transport(
+            sock, protocol_factory, ssl, server_hostname)
+        return transport, protocol
+
+    @coroutine
+    def create_unix_server(self, protocol_factory, path=None, *,
+                           sock=None, backlog=100, ssl=None):
+        if isinstance(ssl, bool):
+            raise TypeError('ssl argument must be an SSLContext or None')
+
+        if path is not None:
+            if sock is not None:
+                raise ValueError(
+                    'path and sock can not be specified at the same time')
+
+            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+
+            try:
+                sock.bind(path)
+            except OSError as exc:
+                sock.close()
+                if exc.errno == errno.EADDRINUSE:
+                    # Let's improve the error message by indicating
+                    # the exact address on which it occurred.
+                    msg = 'Address {!r} is already in use'.format(path)
+                    raise OSError(errno.EADDRINUSE, msg) from None
+                else:
+                    raise
+            except:
+                sock.close()
+                raise
+        else:
+            if sock is None:
+                raise ValueError(
+                    'path was not specified, and no sock specified')
+
+            if sock.family != socket.AF_UNIX:
+                raise ValueError(
+                    'A UNIX Domain Socket was expected, got {!r}'.format(sock))
+
+        server = base_events.Server(self, [sock])
+        sock.listen(backlog)
+        sock.setblocking(False)
+        self._start_serving(protocol_factory, sock, ssl, server)
+        return server
+
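The machinery above is the Unix self-pipe trick: signal.set_wakeup_fd() makes the C-level handler write the signal number to the loop's self-pipe, and _process_self_data() dispatches it on the loop side, which is why only plain callables registered from the main thread are accepted. A minimal usage sketch (assuming a Unix platform and this 3.4-era API; the handler is illustrative):

    import asyncio
    import signal

    loop = asyncio.get_event_loop()

    def shutdown():
        # Runs in the event loop, not in the C-level signal handler.
        print('got SIGTERM, stopping the loop')
        loop.stop()

    # Coroutines are rejected by add_signal_handler(); pass a plain callable.
    loop.add_signal_handler(signal.SIGTERM, shutdown)
    try:
        loop.run_forever()
    finally:
        loop.remove_signal_handler(signal.SIGTERM)
        loop.close()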
+
+if hasattr(os, 'set_blocking'):
+    def _set_nonblocking(fd):
+        os.set_blocking(fd, False)
+else:
+    import fcntl
+
+    def _set_nonblocking(fd):
+        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
+        flags = flags | os.O_NONBLOCK
+        fcntl.fcntl(fd, fcntl.F_SETFL, flags)
+
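A quick sanity check of the fcntl fallback above, assuming a Unix pipe; the assertion reads back the same O_NONBLOCK flag the helper sets:

    import fcntl
    import os

    r, w = os.pipe()
    _set_nonblocking(r)  # module-private helper defined above
    assert fcntl.fcntl(r, fcntl.F_GETFL) & os.O_NONBLOCK
    os.close(r)
    os.close(w)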
+
+class _UnixReadPipeTransport(transports.ReadTransport):
+
+    max_size = 256 * 1024  # max bytes we read in one event loop iteration
+
+    def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
+        super().__init__(extra)
+        self._extra['pipe'] = pipe
+        self._loop = loop
+        self._pipe = pipe
+        self._fileno = pipe.fileno()
+        mode = os.fstat(self._fileno).st_mode
+        if not (stat.S_ISFIFO(mode) or
+                stat.S_ISSOCK(mode) or
+                stat.S_ISCHR(mode)):
+            raise ValueError("Pipe transport is for pipes/sockets only.")
+        _set_nonblocking(self._fileno)
+        self._protocol = protocol
+        self._closing = False
+        self._loop.call_soon(self._protocol.connection_made, self)
+        # only start reading when connection_made() has been called
+        self._loop.call_soon(self._loop.add_reader,
+                             self._fileno, self._read_ready)
+        if waiter is not None:
+            # only wake up the waiter when connection_made() has been called
+            self._loop.call_soon(waiter._set_result_unless_cancelled, None)
+
+    def __repr__(self):
+        info = [self.__class__.__name__]
+        if self._pipe is None:
+            info.append('closed')
+        elif self._closing:
+            info.append('closing')
+        info.append('fd=%s' % self._fileno)
+        if self._pipe is not None:
+            polling = selector_events._test_selector_event(
+                          self._loop._selector,
+                          self._fileno, selectors.EVENT_READ)
+            if polling:
+                info.append('polling')
+            else:
+                info.append('idle')
+        else:
+            info.append('closed')
+        return '<%s>' % ' '.join(info)
+
+    def _read_ready(self):
+        try:
+            data = os.read(self._fileno, self.max_size)
+        except (BlockingIOError, InterruptedError):
+            pass
+        except OSError as exc:
+            self._fatal_error(exc, 'Fatal read error on pipe transport')
+        else:
+            if data:
+                self._protocol.data_received(data)
+            else:
+                if self._loop.get_debug():
+                    logger.info("%r was closed by peer", self)
+                self._closing = True
+                self._loop.remove_reader(self._fileno)
+                self._loop.call_soon(self._protocol.eof_received)
+                self._loop.call_soon(self._call_connection_lost, None)
+
+    def pause_reading(self):
+        self._loop.remove_reader(self._fileno)
+
+    def resume_reading(self):
+        self._loop.add_reader(self._fileno, self._read_ready)
+
+    def close(self):
+        if not self._closing:
+            self._close(None)
+
+    # On Python 3.3 and older, objects with a destructor that are part of a
+    # reference cycle are never destroyed. This is no longer the case on
+    # Python 3.4, thanks to PEP 442.
+    if sys.version_info >= (3, 4):
+        def __del__(self):
+            if self._pipe is not None:
+                warnings.warn("unclosed transport %r" % self, ResourceWarning)
+                self._pipe.close()
+
+    def _fatal_error(self, exc, message='Fatal error on pipe transport'):
+        # should be called by exception handler only
+        if (isinstance(exc, OSError) and exc.errno == errno.EIO):
+            if self._loop.get_debug():
+                logger.debug("%r: %s", self, message, exc_info=True)
+        else:
+            self._loop.call_exception_handler({
+                'message': message,
+                'exception': exc,
+                'transport': self,
+                'protocol': self._protocol,
+            })
+        self._close(exc)
+
+    def _close(self, exc):
+        self._closing = True
+        self._loop.remove_reader(self._fileno)
+        self._loop.call_soon(self._call_connection_lost, exc)
+
+    def _call_connection_lost(self, exc):
+        try:
+            self._protocol.connection_lost(exc)
+        finally:
+            self._pipe.close()
+            self._pipe = None
+            self._protocol = None
+            self._loop = None
+
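Applications never instantiate _UnixReadPipeTransport directly; it is created by loop.connect_read_pipe(). A minimal sketch, assuming a Unix platform (the protocol and variable names are illustrative):

    import asyncio
    import os

    class PipeReader(asyncio.Protocol):
        def __init__(self, done):
            self.done = done

        def data_received(self, data):
            print('read: %r' % (data,))

        def connection_lost(self, exc):
            self.done.set_result(None)

    @asyncio.coroutine
    def demo(loop):
        rfd, wfd = os.pipe()
        done = asyncio.Future(loop=loop)
        transport, protocol = yield from loop.connect_read_pipe(
            lambda: PipeReader(done), os.fdopen(rfd, 'rb'))
        os.write(wfd, b'hello')
        os.close(wfd)  # EOF: eof_received(), then connection_lost()
        yield from done
        transport.close()

    loop = asyncio.get_event_loop()
    loop.run_until_complete(demo(loop))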
+
+class _UnixWritePipeTransport(transports._FlowControlMixin,
+                              transports.WriteTransport):
+
+    def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
+        super().__init__(extra, loop)
+        self._extra['pipe'] = pipe
+        self._pipe = pipe
+        self._fileno = pipe.fileno()
+        mode = os.fstat(self._fileno).st_mode
+        is_socket = stat.S_ISSOCK(mode)
+        if not (is_socket or
+                stat.S_ISFIFO(mode) or
+                stat.S_ISCHR(mode)):
+            raise ValueError("Pipe transport is only for "
+                             "pipes, sockets and character devices")
+        _set_nonblocking(self._fileno)
+        self._protocol = protocol
+        self._buffer = []
+        self._conn_lost = 0
+        self._closing = False  # Set when close() or write_eof() called.
+
+        self._loop.call_soon(self._protocol.connection_made, self)
+
+        # On AIX, the reader trick (to be notified when the read end of the
+        # socket is closed) only works for sockets. On other platforms it
+        # works for pipes and sockets. (Exception: OS X 10.4?  Issue #19294.)
+        if is_socket or not sys.platform.startswith("aix"):
+            # only start reading when connection_made() has been called
+            self._loop.call_soon(self._loop.add_reader,
+                                 self._fileno, self._read_ready)
+
+        if waiter is not None:
+            # only wake up the waiter when connection_made() has been called
+            self._loop.call_soon(waiter._set_result_unless_cancelled, None)
+
+    def __repr__(self):
+        info = [self.__class__.__name__]
+        if self._pipe is None:
+            info.append('closed')
+        elif self._closing:
+            info.append('closing')
+        info.append('fd=%s' % self._fileno)
+        if self._pipe is not None:
+            polling = selector_events._test_selector_event(
+                          self._loop._selector,
+                          self._fileno, selectors.EVENT_WRITE)
+            if polling:
+                info.append('polling')
+            else:
+                info.append('idle')
+
+            bufsize = self.get_write_buffer_size()
+            info.append('bufsize=%s' % bufsize)
+        else:
+            info.append('closed')
+        return '<%s>' % ' '.join(info)
+
+    def get_write_buffer_size(self):
+        return sum(len(data) for data in self._buffer)
+
+    def _read_ready(self):
+        # Pipe was closed by peer.
+        if self._loop.get_debug():
+            logger.info("%r was closed by peer", self)
+        if self._buffer:
+            self._close(BrokenPipeError())
+        else:
+            self._close()
+
+    def write(self, data):
+        assert isinstance(data, (bytes, bytearray, memoryview)), repr(data)
+        if isinstance(data, bytearray):
+            data = memoryview(data)
+        if not data:
+            return
+
+        if self._conn_lost or self._closing:
+            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
+                logger.warning('pipe closed by peer or '
+                               'os.write(pipe, data) raised exception.')
+            self._conn_lost += 1
+            return
+
+        if not self._buffer:
+            # Attempt to send it right away first.
+            try:
+                n = os.write(self._fileno, data)
+            except (BlockingIOError, InterruptedError):
+                n = 0
+            except Exception as exc:
+                self._conn_lost += 1
+                self._fatal_error(exc, 'Fatal write error on pipe transport')
+                return
+            if n == len(data):
+                return
+            elif n > 0:
+                data = data[n:]
+            self._loop.add_writer(self._fileno, self._write_ready)
+
+        self._buffer.append(data)
+        self._maybe_pause_protocol()
+
+    def _write_ready(self):
+        data = b''.join(self._buffer)
+        assert data, 'Data should not be empty'
+
+        self._buffer.clear()
+        try:
+            n = os.write(self._fileno, data)
+        except (BlockingIOError, InterruptedError):
+            self._buffer.append(data)
+        except Exception as exc:
+            self._conn_lost += 1
+            # Remove the writer here: _fatal_error() won't do it
+            # because _buffer is empty.
+            self._loop.remove_writer(self._fileno)
+            self._fatal_error(exc, 'Fatal write error on pipe transport')
+        else:
+            if n == len(data):
+                self._loop.remove_writer(self._fileno)
+                self._maybe_resume_protocol()  # May append to buffer.
+                if not self._buffer and self._closing:
+                    self._loop.remove_reader(self._fileno)
+                    self._call_connection_lost(None)
+                return
+            elif n > 0:
+                data = data[n:]
+
+            self._buffer.append(data)  # Try again later.
+
+    def can_write_eof(self):
+        return True
+
+    def write_eof(self):
+        if self._closing:
+            return
+        assert self._pipe
+        self._closing = True
+        if not self._buffer:
+            self._loop.remove_reader(self._fileno)
+            self._loop.call_soon(self._call_connection_lost, None)
+
+    def close(self):
+        if self._pipe is not None and not self._closing:
+            # write_eof() is all we need to close the write pipe
+            self.write_eof()
+
+    # On Python 3.3 and older, objects with a destructor that are part of a
+    # reference cycle are never destroyed. This is no longer the case on
+    # Python 3.4, thanks to PEP 442.
+    if sys.version_info >= (3, 4):
+        def __del__(self):
+            if self._pipe is not None:
+                warnings.warn("unclosed transport %r" % self, ResourceWarning)
+                self._pipe.close()
+
+    def abort(self):
+        self._close(None)
+
+    def _fatal_error(self, exc, message='Fatal error on pipe transport'):
+        # should be called by exception handler only
+        if isinstance(exc, (BrokenPipeError, ConnectionResetError)):
+            if self._loop.get_debug():
+                logger.debug("%r: %s", self, message, exc_info=True)
+        else:
+            self._loop.call_exception_handler({
+                'message': message,
+                'exception': exc,
+                'transport': self,
+                'protocol': self._protocol,
+            })
+        self._close(exc)
+
+    def _close(self, exc=None):
+        self._closing = True
+        if self._buffer:
+            self._loop.remove_writer(self._fileno)
+        self._buffer.clear()
+        self._loop.remove_reader(self._fileno)
+        self._loop.call_soon(self._call_connection_lost, exc)
+
+    def _call_connection_lost(self, exc):
+        try:
+            self._protocol.connection_lost(exc)
+        finally:
+            self._pipe.close()
+            self._pipe = None
+            self._protocol = None
+            self._loop = None
+
+
+if hasattr(os, 'set_inheritable'):
+    # Python 3.4 and newer
+    _set_inheritable = os.set_inheritable
+else:
+    import fcntl
+
+    def _set_inheritable(fd, inheritable):
+        cloexec_flag = getattr(fcntl, 'FD_CLOEXEC', 1)
+
+        old = fcntl.fcntl(fd, fcntl.F_GETFD)
+        if not inheritable:
+            fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
+        else:
+            fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)
+
+
+class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport):
+
+    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
+        stdin_w = None
+        if stdin == subprocess.PIPE:
+            # Use a socket pair for stdin, since not all platforms
+            # support selecting read events on the write end of a
+            # socket (which we use in order to detect closing of the
+            # other end).  Notably this is needed on AIX, and works
+            # just fine on other platforms.
+            stdin, stdin_w = self._loop._socketpair()
+
+            # Mark the write end of the stdin pipe as non-inheritable,
+            # needed by close_fds=False on Python 3.3 and older
+            # (Python 3.4 implements the PEP 446, socketpair returns
+            # non-inheritable sockets)
+            _set_inheritable(stdin_w.fileno(), False)
+        self._proc = subprocess.Popen(
+            args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
+            universal_newlines=False, bufsize=bufsize, **kwargs)
+        if stdin_w is not None:
+            stdin.close()
+            self._proc.stdin = open(stdin_w.detach(), 'wb', buffering=bufsize)
+
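The socketpair trick above is invisible to callers; it is exercised through the ordinary subprocess API. A minimal sketch, assuming a Unix main thread with a child watcher attached (the default policy below provides one):

    import asyncio
    from asyncio import subprocess

    @asyncio.coroutine
    def run():
        proc = yield from asyncio.create_subprocess_exec(
            'echo', 'hello', stdout=subprocess.PIPE)
        out = yield from proc.stdout.read()  # read until EOF
        yield from proc.wait()
        return out

    loop = asyncio.get_event_loop()
    print(loop.run_until_complete(run()))  # b'hello\n'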
+
+class AbstractChildWatcher:
+    """Abstract base class for monitoring child processes.
+
+    Objects derived from this class monitor a collection of subprocesses and
+    report their termination or interruption by a signal.
+
+    New callbacks are registered with .add_child_handler(). Starting a new
+    process must be done within a 'with' block to allow the watcher to suspend
+    its activity until the new process is fully registered (this is needed to
+    prevent a race condition in some implementations).
+
+    Example:
+        with watcher:
+            proc = subprocess.Popen(["sleep", "1"])
+            watcher.add_child_handler(proc.pid, callback)
+
+    Notes:
+        Implementations of this class must be thread-safe.
+
+        Since child watcher objects may catch the SIGCHLD signal and call
+        waitpid(-1), there should be only one active object per process.
+    """
+
+    def add_child_handler(self, pid, callback, *args):
+        """Register a new child handler.
+
+        Arrange for callback(pid, returncode, *args) to be called when
+        process 'pid' terminates. Specifying another callback for the same
+        process replaces the previous handler.
+
+        Note: callback() must be thread-safe.
+        """
+        raise NotImplementedError()
+
+    def remove_child_handler(self, pid):
+        """Removes the handler for process 'pid'.
+
+        The function returns True if the handler was successfully removed,
+        False if there was nothing to remove."""
+
+        raise NotImplementedError()
+
+    def attach_loop(self, loop):
+        """Attach the watcher to an event loop.
+
+        If the watcher was previously attached to an event loop, then it is
+        first detached before attaching to the new loop.
+
+        Note: loop may be None.
+        """
+        raise NotImplementedError()
+
+    def close(self):
+        """Close the watcher.
+
+        This must be called to make sure that any underlying resource is freed.
+        """
+        raise NotImplementedError()
+
+    def __enter__(self):
+        """Enter the watcher's context and allow starting new processes
+
+        This function must return self"""
+        raise NotImplementedError()
+
+    def __exit__(self, a, b, c):
+        """Exit the watcher's context"""
+        raise NotImplementedError()
+
+
+class BaseChildWatcher(AbstractChildWatcher):
+
+    def __init__(self):
+        self._loop = None
+
+    def close(self):
+        self.attach_loop(None)
+
+    def _do_waitpid(self, expected_pid):
+        raise NotImplementedError()
+
+    def _do_waitpid_all(self):
+        raise NotImplementedError()
+
+    def attach_loop(self, loop):
+        assert loop is None or isinstance(loop, events.AbstractEventLoop)
+
+        if self._loop is not None:
+            self._loop.remove_signal_handler(signal.SIGCHLD)
+
+        self._loop = loop
+        if loop is not None:
+            loop.add_signal_handler(signal.SIGCHLD, self._sig_chld)
+
+            # Prevent a race condition in case a child terminated
+            # during the switch.
+            self._do_waitpid_all()
+
+    def _sig_chld(self):
+        try:
+            self._do_waitpid_all()
+        except Exception as exc:
+            # self._loop should always be available here
+            # as '_sig_chld' is added as a signal handler
+            # in 'attach_loop'
+            self._loop.call_exception_handler({
+                'message': 'Unknown exception in SIGCHLD handler',
+                'exception': exc,
+            })
+
+    def _compute_returncode(self, status):
+        if os.WIFSIGNALED(status):
+            # The child process died because of a signal.
+            return -os.WTERMSIG(status)
+        elif os.WIFEXITED(status):
+            # The child process exited (e.g. sys.exit()).
+            return os.WEXITSTATUS(status)
+        else:
+            # The child exited, but we don't understand its status.
+            # This shouldn't happen, but if it does, let's just
+            # return that status; perhaps that helps debug it.
+            return status
+
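_compute_returncode() follows the standard POSIX conventions for the status word returned by waitpid(). A small Unix-only demonstration of those conventions (the exit code 7 is arbitrary):

    import os
    import signal

    pid = os.fork()
    if pid == 0:
        os._exit(7)  # child: normal exit
    _, status = os.waitpid(pid, 0)
    assert os.WIFEXITED(status) and os.WEXITSTATUS(status) == 7

    pid = os.fork()
    if pid == 0:
        os.kill(os.getpid(), signal.SIGKILL)  # child: killed by a signal
    _, status = os.waitpid(pid, 0)
    assert os.WIFSIGNALED(status) and os.WTERMSIG(status) == signal.SIGKILL
    # _compute_returncode() reports this case as -signal.SIGKILL, i.e. -9.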
+
+class SafeChildWatcher(BaseChildWatcher):
+    """'Safe' child watcher implementation.
+
+    This implementation avoids disrupting other code spawning processes by
+    polling explicitly each process in the SIGCHLD handler instead of calling
+    os.waitpid(-1).
+
+    This is a safe solution, but it has significant overhead when handling a
+    large number of children (O(n) each time SIGCHLD is raised).
+    """
+
+    def __init__(self):
+        super().__init__()
+        self._callbacks = {}
+
+    def close(self):
+        self._callbacks.clear()
+        super().close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, a, b, c):
+        pass
+
+    def add_child_handler(self, pid, callback, *args):
+        self._callbacks[pid] = (callback, args)
+
+        # Prevent a race condition in case the child is already terminated.
+        self._do_waitpid(pid)
+
+    def remove_child_handler(self, pid):
+        try:
+            del self._callbacks[pid]
+            return True
+        except KeyError:
+            return False
+
+    def _do_waitpid_all(self):
+
+        for pid in list(self._callbacks):
+            self._do_waitpid(pid)
+
+    def _do_waitpid(self, expected_pid):
+        assert expected_pid > 0
+
+        try:
+            pid, status = os.waitpid(expected_pid, os.WNOHANG)
+        except ChildProcessError:
+            # The child process is already reaped
+            # (may happen if waitpid() is called elsewhere).
+            pid = expected_pid
+            returncode = 255
+            logger.warning(
+                "Unknown child process pid %d, will report returncode 255",
+                pid)
+        else:
+            if pid == 0:
+                # The child process is still alive.
+                return
+
+            returncode = self._compute_returncode(status)
+            if self._loop.get_debug():
+                logger.debug('process %s exited with returncode %s',
+                             expected_pid, returncode)
+
+        try:
+            callback, args = self._callbacks.pop(pid)
+        except KeyError:  # pragma: no cover
+            # May happen if .remove_child_handler() is called
+            # after os.waitpid() returns.
+            if self._loop.get_debug():
+                logger.warning("Child watcher got an unexpected pid: %r",
+                               pid, exc_info=True)
+        else:
+            callback(pid, returncode, *args)
+
+
+class FastChildWatcher(BaseChildWatcher):
+    """'Fast' child watcher implementation.
+
+    This implementation reaps every terminated process by calling
+    os.waitpid(-1) directly, possibly breaking other code spawning processes
+    and waiting for their termination.
+
+    There is no noticeable overhead when handling a large number of children
+    (O(1) each time a child terminates).
+    """
+    def __init__(self):
+        super().__init__()
+        self._callbacks = {}
+        self._lock = threading.Lock()
+        self._zombies = {}
+        self._forks = 0
+
+    def close(self):
+        self._callbacks.clear()
+        self._zombies.clear()
+        super().close()
+
+    def __enter__(self):
+        with self._lock:
+            self._forks += 1
+
+            return self
+
+    def __exit__(self, a, b, c):
+        with self._lock:
+            self._forks -= 1
+
+            if self._forks or not self._zombies:
+                return
+
+            collateral_victims = str(self._zombies)
+            self._zombies.clear()
+
+        logger.warning(
+            "Caught subprocesses termination from unknown pids: %s",
+            collateral_victims)
+
+    def add_child_handler(self, pid, callback, *args):
+        assert self._forks, "Must use the context manager"
+        with self._lock:
+            try:
+                returncode = self._zombies.pop(pid)
+            except KeyError:
+                # The child is running.
+                self._callbacks[pid] = callback, args
+                return
+
+        # The child is dead already. We can fire the callback.
+        callback(pid, returncode, *args)
+
+    def remove_child_handler(self, pid):
+        try:
+            del self._callbacks[pid]
+            return True
+        except KeyError:
+            return False
+
+    def _do_waitpid_all(self):
+        # Because of signal coalescing, we must keep calling waitpid() as
+        # long as we're able to reap a child.
+        while True:
+            try:
+                pid, status = os.waitpid(-1, os.WNOHANG)
+            except ChildProcessError:
+                # No more child processes exist.
+                return
+            else:
+                if pid == 0:
+                    # A child process is still alive.
+                    return
+
+                returncode = self._compute_returncode(status)
+
+            with self._lock:
+                try:
+                    callback, args = self._callbacks.pop(pid)
+                except KeyError:
+                    # unknown child
+                    if self._forks:
+                        # It may not be registered yet.
+                        self._zombies[pid] = returncode
+                        if self._loop.get_debug():
+                            logger.debug('unknown process %s exited '
+                                         'with returncode %s',
+                                         pid, returncode)
+                        continue
+                    callback = None
+                else:
+                    if self._loop.get_debug():
+                        logger.debug('process %s exited with returncode %s',
+                                     pid, returncode)
+
+            if callback is None:
+                logger.warning(
+                    "Caught subprocess termination from unknown pid: "
+                    "%d -> %d", pid, returncode)
+            else:
+                callback(pid, returncode, *args)
+
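Because FastChildWatcher reaps with waitpid(-1), a child can be reaped before its handler is registered; the mandatory 'with' block lets the watcher park such early exits in _zombies instead of reporting unknown pids. A usage sketch (Unix main thread; names illustrative):

    import asyncio
    import subprocess

    loop = asyncio.get_event_loop()
    watcher = asyncio.FastChildWatcher()
    watcher.attach_loop(loop)
    asyncio.set_child_watcher(watcher)

    def on_exit(pid, returncode):
        print('pid %s exited with %s' % (pid, returncode))
        loop.stop()

    # add_child_handler() asserts that a 'with' block is active.
    with watcher:
        proc = subprocess.Popen(['sleep', '1'])
        watcher.add_child_handler(proc.pid, on_exit)

    loop.run_forever()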
+
+class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
+    """UNIX event loop policy with a watcher for child processes."""
+    _loop_factory = _UnixSelectorEventLoop
+
+    def __init__(self):
+        super().__init__()
+        self._watcher = None
+
+    def _init_watcher(self):
+        with events._lock:
+            if self._watcher is None:  # pragma: no branch
+                self._watcher = SafeChildWatcher()
+                if isinstance(threading.current_thread(),
+                              threading._MainThread):
+                    self._watcher.attach_loop(self._local._loop)
+
+    def set_event_loop(self, loop):
+        """Set the event loop.
+
+        As a side effect, if a child watcher was set before, then calling
+        .set_event_loop() from the main thread will call .attach_loop(loop) on
+        the child watcher.
+        """
+
+        super().set_event_loop(loop)
+
+        if self._watcher is not None and \
+            isinstance(threading.current_thread(), threading._MainThread):
+            self._watcher.attach_loop(loop)
+
+    def get_child_watcher(self):
+        """Get the watcher for child processes.
+
+        If not yet set, a SafeChildWatcher object is automatically created.
+        """
+        if self._watcher is None:
+            self._init_watcher()
+
+        return self._watcher
+
+    def set_child_watcher(self, watcher):
+        """Set the watcher for child processes."""
+
+        assert watcher is None or isinstance(watcher, AbstractChildWatcher)
+
+        if self._watcher is not None:
+            self._watcher.close()
+
+        self._watcher = watcher
+
+SelectorEventLoop = _UnixSelectorEventLoop
+DefaultEventLoopPolicy = _UnixDefaultEventLoopPolicy
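On Unix the policy creates a SafeChildWatcher lazily, the first time one is requested:

    import asyncio

    # get_child_watcher() delegates to the policy; see _init_watcher() above.
    watcher = asyncio.get_child_watcher()
    assert isinstance(watcher, asyncio.SafeChildWatcher)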

+ 774 - 0
env/Lib/site-packages/asyncio/windows_events.py

@@ -0,0 +1,774 @@
+"""Selector and proactor event loops for Windows."""
+
+import _winapi
+import errno
+import math
+import socket
+import struct
+import weakref
+
+from . import events
+from . import base_subprocess
+from . import futures
+from . import proactor_events
+from . import selector_events
+from . import tasks
+from . import windows_utils
+from . import _overlapped
+from .coroutines import coroutine
+from .log import logger
+
+
+__all__ = ['SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor',
+           'DefaultEventLoopPolicy',
+           ]
+
+
+NULL = 0
+INFINITE = 0xffffffff
+ERROR_CONNECTION_REFUSED = 1225
+ERROR_CONNECTION_ABORTED = 1236
+
+# Initial delay in seconds for connect_pipe() before retrying to connect
+CONNECT_PIPE_INIT_DELAY = 0.001
+
+# Maximum delay in seconds for connect_pipe() before retrying to connect
+CONNECT_PIPE_MAX_DELAY = 0.100
+
+
+class _OverlappedFuture(futures.Future):
+    """Subclass of Future which represents an overlapped operation.
+
+    Cancelling it will immediately cancel the overlapped operation.
+    """
+
+    def __init__(self, ov, *, loop=None):
+        super().__init__(loop=loop)
+        if self._source_traceback:
+            del self._source_traceback[-1]
+        self._ov = ov
+
+    def _repr_info(self):
+        info = super()._repr_info()
+        if self._ov is not None:
+            state = 'pending' if self._ov.pending else 'completed'
+            info.insert(1, 'overlapped=<%s, %#x>' % (state, self._ov.address))
+        return info
+
+    def _cancel_overlapped(self):
+        if self._ov is None:
+            return
+        try:
+            self._ov.cancel()
+        except OSError as exc:
+            context = {
+                'message': 'Cancelling an overlapped future failed',
+                'exception': exc,
+                'future': self,
+            }
+            if self._source_traceback:
+                context['source_traceback'] = self._source_traceback
+            self._loop.call_exception_handler(context)
+        self._ov = None
+
+    def cancel(self):
+        self._cancel_overlapped()
+        return super().cancel()
+
+    def set_exception(self, exception):
+        super().set_exception(exception)
+        self._cancel_overlapped()
+
+    def set_result(self, result):
+        super().set_result(result)
+        self._ov = None
+
+
+class _BaseWaitHandleFuture(futures.Future):
+    """Subclass of Future which represents a wait handle."""
+
+    def __init__(self, ov, handle, wait_handle, *, loop=None):
+        super().__init__(loop=loop)
+        if self._source_traceback:
+            del self._source_traceback[-1]
+        # Keep a reference to the Overlapped object to keep it alive until the
+        # wait is unregistered
+        self._ov = ov
+        self._handle = handle
+        self._wait_handle = wait_handle
+
+        # Should we call UnregisterWaitEx() if the wait completes
+        # or is cancelled?
+        self._registered = True
+
+    def _poll(self):
+        # non-blocking wait: use a timeout of zero milliseconds
+        return (_winapi.WaitForSingleObject(self._handle, 0) ==
+                _winapi.WAIT_OBJECT_0)
+
+    def _repr_info(self):
+        info = super()._repr_info()
+        info.append('handle=%#x' % self._handle)
+        if self._handle is not None:
+            state = 'signaled' if self._poll() else 'waiting'
+            info.append(state)
+        if self._wait_handle is not None:
+            info.append('wait_handle=%#x' % self._wait_handle)
+        return info
+
+    def _unregister_wait_cb(self, fut):
+        # The wait was unregistered: it's not safe to destroy the Overlapped
+        # object
+        self._ov = None
+
+    def _unregister_wait(self):
+        if not self._registered:
+            return
+        self._registered = False
+
+        wait_handle = self._wait_handle
+        self._wait_handle = None
+        try:
+            _overlapped.UnregisterWait(wait_handle)
+        except OSError as exc:
+            if exc.winerror != _overlapped.ERROR_IO_PENDING:
+                context = {
+                    'message': 'Failed to unregister the wait handle',
+                    'exception': exc,
+                    'future': self,
+                }
+                if self._source_traceback:
+                    context['source_traceback'] = self._source_traceback
+                self._loop.call_exception_handler(context)
+                return
+            # ERROR_IO_PENDING means that the unregister is pending
+
+        self._unregister_wait_cb(None)
+
+    def cancel(self):
+        self._unregister_wait()
+        return super().cancel()
+
+    def set_exception(self, exception):
+        self._unregister_wait()
+        super().set_exception(exception)
+
+    def set_result(self, result):
+        self._unregister_wait()
+        super().set_result(result)
+
+
+class _WaitCancelFuture(_BaseWaitHandleFuture):
+    """Subclass of Future which represents a wait for the cancellation of a
+    _WaitHandleFuture using an event.
+    """
+
+    def __init__(self, ov, event, wait_handle, *, loop=None):
+        super().__init__(ov, event, wait_handle, loop=loop)
+
+        self._done_callback = None
+
+    def cancel(self):
+        raise RuntimeError("_WaitCancelFuture must not be cancelled")
+
+    def _schedule_callbacks(self):
+        super(_WaitCancelFuture, self)._schedule_callbacks()
+        if self._done_callback is not None:
+            self._done_callback(self)
+
+
+class _WaitHandleFuture(_BaseWaitHandleFuture):
+    def __init__(self, ov, handle, wait_handle, proactor, *, loop=None):
+        super().__init__(ov, handle, wait_handle, loop=loop)
+        self._proactor = proactor
+        self._unregister_proactor = True
+        self._event = _overlapped.CreateEvent(None, True, False, None)
+        self._event_fut = None
+
+    def _unregister_wait_cb(self, fut):
+        if self._event is not None:
+            _winapi.CloseHandle(self._event)
+            self._event = None
+            self._event_fut = None
+
+        # If the wait was cancelled, the wait may never be signalled, so
+        # it's required to unregister it. Otherwise, IocpProactor.close() will
+        # wait forever for an event which will never come.
+        #
+        # If the IocpProactor already received the event, it's safe to call
+        # _unregister() because we kept a reference to the Overlapped object
+        # which is used as a unique key.
+        self._proactor._unregister(self._ov)
+        self._proactor = None
+
+        super()._unregister_wait_cb(fut)
+
+    def _unregister_wait(self):
+        if not self._registered:
+            return
+        self._registered = False
+
+        wait_handle = self._wait_handle
+        self._wait_handle = None
+        try:
+            _overlapped.UnregisterWaitEx(wait_handle, self._event)
+        except OSError as exc:
+            if exc.winerror != _overlapped.ERROR_IO_PENDING:
+                context = {
+                    'message': 'Failed to unregister the wait handle',
+                    'exception': exc,
+                    'future': self,
+                }
+                if self._source_traceback:
+                    context['source_traceback'] = self._source_traceback
+                self._loop.call_exception_handler(context)
+                return
+            # ERROR_IO_PENDING is not an error, the wait was unregistered
+
+        self._event_fut = self._proactor._wait_cancel(self._event,
+                                                      self._unregister_wait_cb)
+
+
+class PipeServer(object):
+    """Class representing a pipe server.
+
+    This is much like a bound, listening socket.
+    """
+    def __init__(self, address):
+        self._address = address
+        self._free_instances = weakref.WeakSet()
+        # initialize the pipe attribute before calling _server_pipe_handle()
+        # because this function can raise an exception and the destructor calls
+        # the close() method
+        self._pipe = None
+        self._accept_pipe_future = None
+        self._pipe = self._server_pipe_handle(True)
+
+    def _get_unconnected_pipe(self):
+        # Create a new instance and return the previous one.  This ensures
+        # that (until the server is closed) there is always at least
+        # one pipe handle for the address, so a client attempting
+        # to connect will not fail with FileNotFoundError.
+        tmp, self._pipe = self._pipe, self._server_pipe_handle(False)
+        return tmp
+
+    def _server_pipe_handle(self, first):
+        # Return a wrapper for a new pipe handle.
+        if self.closed():
+            return None
+        flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
+        if first:
+            flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
+        h = _winapi.CreateNamedPipe(
+            self._address, flags,
+            _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
+            _winapi.PIPE_WAIT,
+            _winapi.PIPE_UNLIMITED_INSTANCES,
+            windows_utils.BUFSIZE, windows_utils.BUFSIZE,
+            _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)
+        pipe = windows_utils.PipeHandle(h)
+        self._free_instances.add(pipe)
+        return pipe
+
+    def closed(self):
+        return (self._address is None)
+
+    def close(self):
+        if self._accept_pipe_future is not None:
+            self._accept_pipe_future.cancel()
+            self._accept_pipe_future = None
+        # Close all instances which have not been connected to by a client.
+        if self._address is not None:
+            for pipe in self._free_instances:
+                pipe.close()
+            self._pipe = None
+            self._address = None
+            self._free_instances.clear()
+
+    __del__ = close
+
+
+class _WindowsSelectorEventLoop(selector_events.BaseSelectorEventLoop):
+    """Windows version of selector event loop."""
+
+    def _socketpair(self):
+        return windows_utils.socketpair()
+
+
+class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
+    """Windows version of proactor event loop using IOCP."""
+
+    def __init__(self, proactor=None):
+        if proactor is None:
+            proactor = IocpProactor()
+        super().__init__(proactor)
+
+    def _socketpair(self):
+        return windows_utils.socketpair()
+
+    @coroutine
+    def create_pipe_connection(self, protocol_factory, address):
+        f = self._proactor.connect_pipe(address)
+        pipe = yield from f
+        protocol = protocol_factory()
+        trans = self._make_duplex_pipe_transport(pipe, protocol,
+                                                 extra={'addr': address})
+        return trans, protocol
+
+    @coroutine
+    def start_serving_pipe(self, protocol_factory, address):
+        server = PipeServer(address)
+
+        def loop_accept_pipe(f=None):
+            pipe = None
+            try:
+                if f:
+                    pipe = f.result()
+                    server._free_instances.discard(pipe)
+
+                    if server.closed():
+                        # A client connected before the server was closed:
+                        # drop the client (close the pipe) and exit
+                        pipe.close()
+                        return
+
+                    protocol = protocol_factory()
+                    self._make_duplex_pipe_transport(
+                        pipe, protocol, extra={'addr': address})
+
+                pipe = server._get_unconnected_pipe()
+                if pipe is None:
+                    return
+
+                f = self._proactor.accept_pipe(pipe)
+            except OSError as exc:
+                if pipe and pipe.fileno() != -1:
+                    self.call_exception_handler({
+                        'message': 'Pipe accept failed',
+                        'exception': exc,
+                        'pipe': pipe,
+                    })
+                    pipe.close()
+                elif self._debug:
+                    logger.warning("Accept pipe failed on pipe %r",
+                                   pipe, exc_info=True)
+            except futures.CancelledError:
+                if pipe:
+                    pipe.close()
+            else:
+                server._accept_pipe_future = f
+                f.add_done_callback(loop_accept_pipe)
+
+        self.call_soon(loop_accept_pipe)
+        return [server]
+
+    @coroutine
+    def _make_subprocess_transport(self, protocol, args, shell,
+                                   stdin, stdout, stderr, bufsize,
+                                   extra=None, **kwargs):
+        waiter = futures.Future(loop=self)
+        transp = _WindowsSubprocessTransport(self, protocol, args, shell,
+                                             stdin, stdout, stderr, bufsize,
+                                             waiter=waiter, extra=extra,
+                                             **kwargs)
+        try:
+            yield from waiter
+        except Exception as exc:
+            # Workaround CPython bug #23353: using yield/yield-from in an
+            # except block of a generator doesn't properly clear sys.exc_info()
+            err = exc
+        else:
+            err = None
+
+        if err is not None:
+            transp.close()
+            yield from transp._wait()
+            raise err
+
+        return transp
+
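A sketch of the named-pipe APIs above, assuming Windows and this 3.4-era API (the pipe address is a made-up example):

    import asyncio

    ADDRESS = r'\\.\pipe\asyncio-demo'

    class Echo(asyncio.Protocol):
        def connection_made(self, transport):
            self.transport = transport

        def data_received(self, data):
            self.transport.write(data)

    loop = asyncio.ProactorEventLoop()
    asyncio.set_event_loop(loop)

    @asyncio.coroutine
    def main():
        [server] = yield from loop.start_serving_pipe(Echo, ADDRESS)
        transport, protocol = yield from loop.create_pipe_connection(
            asyncio.Protocol, ADDRESS)
        transport.write(b'ping')
        yield from asyncio.sleep(0.1)
        transport.close()
        server.close()

    loop.run_until_complete(main())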
+
+class IocpProactor:
+    """Proactor implementation using IOCP."""
+
+    def __init__(self, concurrency=0xffffffff):
+        self._loop = None
+        self._results = []
+        self._iocp = _overlapped.CreateIoCompletionPort(
+            _overlapped.INVALID_HANDLE_VALUE, NULL, 0, concurrency)
+        self._cache = {}
+        self._registered = weakref.WeakSet()
+        self._unregistered = []
+        self._stopped_serving = weakref.WeakSet()
+
+    def __repr__(self):
+        return ('<%s overlapped#=%s result#=%s>'
+                % (self.__class__.__name__, len(self._cache),
+                   len(self._results)))
+
+    def set_loop(self, loop):
+        self._loop = loop
+
+    def select(self, timeout=None):
+        if not self._results:
+            self._poll(timeout)
+        tmp = self._results
+        self._results = []
+        return tmp
+
+    def _result(self, value):
+        fut = futures.Future(loop=self._loop)
+        fut.set_result(value)
+        return fut
+
+    def recv(self, conn, nbytes, flags=0):
+        self._register_with_iocp(conn)
+        ov = _overlapped.Overlapped(NULL)
+        try:
+            if isinstance(conn, socket.socket):
+                ov.WSARecv(conn.fileno(), nbytes, flags)
+            else:
+                ov.ReadFile(conn.fileno(), nbytes)
+        except BrokenPipeError:
+            return self._result(b'')
+
+        def finish_recv(trans, key, ov):
+            try:
+                return ov.getresult()
+            except OSError as exc:
+                if exc.winerror == _overlapped.ERROR_NETNAME_DELETED:
+                    raise ConnectionResetError(*exc.args)
+                else:
+                    raise
+
+        return self._register(ov, conn, finish_recv)
+
+    def send(self, conn, buf, flags=0):
+        self._register_with_iocp(conn)
+        ov = _overlapped.Overlapped(NULL)
+        if isinstance(conn, socket.socket):
+            ov.WSASend(conn.fileno(), buf, flags)
+        else:
+            ov.WriteFile(conn.fileno(), buf)
+
+        def finish_send(trans, key, ov):
+            try:
+                return ov.getresult()
+            except OSError as exc:
+                if exc.winerror == _overlapped.ERROR_NETNAME_DELETED:
+                    raise ConnectionResetError(*exc.args)
+                else:
+                    raise
+
+        return self._register(ov, conn, finish_send)
+
+    def accept(self, listener):
+        self._register_with_iocp(listener)
+        conn = self._get_accept_socket(listener.family)
+        ov = _overlapped.Overlapped(NULL)
+        ov.AcceptEx(listener.fileno(), conn.fileno())
+
+        def finish_accept(trans, key, ov):
+            ov.getresult()
+            # Use SO_UPDATE_ACCEPT_CONTEXT so getsockname() etc work.
+            buf = struct.pack('@P', listener.fileno())
+            conn.setsockopt(socket.SOL_SOCKET,
+                            _overlapped.SO_UPDATE_ACCEPT_CONTEXT, buf)
+            conn.settimeout(listener.gettimeout())
+            return conn, conn.getpeername()
+
+        @coroutine
+        def accept_coro(future, conn):
+            # Coroutine closing the accept socket if the future is cancelled
+            try:
+                yield from future
+            except futures.CancelledError:
+                conn.close()
+                raise
+
+        future = self._register(ov, listener, finish_accept)
+        coro = accept_coro(future, conn)
+        tasks.async(coro, loop=self._loop)
+        return future
+
+    def connect(self, conn, address):
+        self._register_with_iocp(conn)
+        # The socket needs to be locally bound before we call ConnectEx().
+        try:
+            _overlapped.BindLocal(conn.fileno(), conn.family)
+        except OSError as e:
+            if e.winerror != errno.WSAEINVAL:
+                raise
+            # Probably already locally bound; check using getsockname().
+            if conn.getsockname()[1] == 0:
+                raise
+        ov = _overlapped.Overlapped(NULL)
+        ov.ConnectEx(conn.fileno(), address)
+
+        def finish_connect(trans, key, ov):
+            ov.getresult()
+            # Use SO_UPDATE_CONNECT_CONTEXT so getsockname() etc work.
+            conn.setsockopt(socket.SOL_SOCKET,
+                            _overlapped.SO_UPDATE_CONNECT_CONTEXT, 0)
+            return conn
+
+        return self._register(ov, conn, finish_connect)
+
+    def accept_pipe(self, pipe):
+        self._register_with_iocp(pipe)
+        ov = _overlapped.Overlapped(NULL)
+        connected = ov.ConnectNamedPipe(pipe.fileno())
+
+        if connected:
+            # ConnectNamedPipe() failed with ERROR_PIPE_CONNECTED, which means
+            # that the pipe is already connected. There is no need to wait for
+            # the completion of the connection.
+            return self._result(pipe)
+
+        def finish_accept_pipe(trans, key, ov):
+            ov.getresult()
+            return pipe
+
+        return self._register(ov, pipe, finish_accept_pipe)
+
+    @coroutine
+    def connect_pipe(self, address):
+        delay = CONNECT_PIPE_INIT_DELAY
+        while True:
+            # Unfortunately there is no way to do an overlapped connect to
+            # a pipe.  Call CreateFile() in a loop until it no longer fails
+            # with ERROR_PIPE_BUSY.
+            try:
+                handle = _overlapped.ConnectPipe(address)
+                break
+            except OSError as exc:
+                if exc.winerror != _overlapped.ERROR_PIPE_BUSY:
+                    raise
+
+            # ConnectPipe() failed with ERROR_PIPE_BUSY: retry later
+            delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY)
+            yield from tasks.sleep(delay, loop=self._loop)
+
+        return windows_utils.PipeHandle(handle)
+
+    def wait_for_handle(self, handle, timeout=None):
+        """Wait for a handle.
+
+        Return a Future object. The result of the future is True if the wait
+        completed, or False if the wait did not complete (on timeout).
+        """
+        return self._wait_for_handle(handle, timeout, False)
+
+    def _wait_cancel(self, event, done_callback):
+        fut = self._wait_for_handle(event, None, True)
+        # add_done_callback() cannot be used because the wait may only complete
+        # in IocpProactor.close(), while the event loop is not running.
+        fut._done_callback = done_callback
+        return fut
+
+    def _wait_for_handle(self, handle, timeout, _is_cancel):
+        if timeout is None:
+            ms = _winapi.INFINITE
+        else:
+            # RegisterWaitForSingleObject() has a resolution of 1 millisecond,
+            # round away from zero to wait *at least* timeout seconds.
+            ms = math.ceil(timeout * 1e3)
+
+        # We only create ov so we can use ov.address as a key for the cache.
+        ov = _overlapped.Overlapped(NULL)
+        wait_handle = _overlapped.RegisterWaitWithQueue(
+            handle, self._iocp, ov.address, ms)
+        if _is_cancel:
+            f = _WaitCancelFuture(ov, handle, wait_handle, loop=self._loop)
+        else:
+            f = _WaitHandleFuture(ov, handle, wait_handle, self,
+                                  loop=self._loop)
+        if f._source_traceback:
+            del f._source_traceback[-1]
+
+        def finish_wait_for_handle(trans, key, ov):
+            # Note that this second wait means that we should only use
+            # this with handle types where a successful wait has no
+            # effect.  So events or processes are all right, but locks
+            # or semaphores are not.  Also note if the handle is
+            # signalled and then quickly reset, then we may return
+            # False even though we have not timed out.
+            return f._poll()
+
+        self._cache[ov.address] = (f, ov, 0, finish_wait_for_handle)
+        return f
+
+    def _register_with_iocp(self, obj):
+        # To get notifications of finished ops on this object sent to the
+        # completion port, we must register the handle.
+        if obj not in self._registered:
+            self._registered.add(obj)
+            _overlapped.CreateIoCompletionPort(obj.fileno(), self._iocp, 0, 0)
+            # XXX We could also use SetFileCompletionNotificationModes()
+            # to avoid sending notifications to completion port of ops
+            # that succeed immediately.
+
+    def _register(self, ov, obj, callback):
+        # Return a future which will be set with the result of the
+        # operation when it completes.  The future's value is actually
+        # the value returned by callback().
+        f = _OverlappedFuture(ov, loop=self._loop)
+        if f._source_traceback:
+            del f._source_traceback[-1]
+        if not ov.pending:
+            # The operation has completed, so no need to postpone the
+            # work.  We cannot take this short cut if we need the
+            # NumberOfBytes, CompletionKey values returned by
+            # PostQueuedCompletionStatus().
+            try:
+                value = callback(None, None, ov)
+            except OSError as e:
+                f.set_exception(e)
+            else:
+                f.set_result(value)
+            # Even if GetOverlappedResult() was called, we have to wait for the
+            # notification of the completion in GetQueuedCompletionStatus().
+            # Register the overlapped operation to keep a reference to the
+            # OVERLAPPED object, otherwise the memory is freed and Windows may
+            # read uninitialized memory.
+
+        # Register the overlapped operation for later.  Note that
+        # we only store obj to prevent it from being garbage
+        # collected too early.
+        self._cache[ov.address] = (f, ov, obj, callback)
+        return f
+
+    def _unregister(self, ov):
+        """Unregister an overlapped object.
+
+        Call this method when its future has been cancelled. The event can
+        already be signalled (pending in the proactor event queue). It is also
+        safe if the event is never signalled (because it was cancelled).
+        """
+        self._unregistered.append(ov)
+
+    def _get_accept_socket(self, family):
+        s = socket.socket(family)
+        s.settimeout(0)
+        return s
+
+    def _poll(self, timeout=None):
+        if timeout is None:
+            ms = INFINITE
+        elif timeout < 0:
+            raise ValueError("negative timeout")
+        else:
+            # GetQueuedCompletionStatus() has a resolution of 1 millisecond,
+            # round away from zero to wait *at least* timeout seconds.
+            ms = math.ceil(timeout * 1e3)
+            if ms >= INFINITE:
+                raise ValueError("timeout too big")
+
+        while True:
+            status = _overlapped.GetQueuedCompletionStatus(self._iocp, ms)
+            if status is None:
+                break
+            ms = 0
+
+            err, transferred, key, address = status
+            try:
+                f, ov, obj, callback = self._cache.pop(address)
+            except KeyError:
+                if self._loop.get_debug():
+                    self._loop.call_exception_handler({
+                        'message': ('GetQueuedCompletionStatus() returned an '
+                                    'unexpected event'),
+                        'status': ('err=%s transferred=%s key=%#x address=%#x'
+                                   % (err, transferred, key, address)),
+                    })
+
+                # key is either zero, or it is used to return a pipe
+                # handle which should be closed to avoid a leak.
+                if key not in (0, _overlapped.INVALID_HANDLE_VALUE):
+                    _winapi.CloseHandle(key)
+                continue
+
+            if obj in self._stopped_serving:
+                f.cancel()
+            # Don't call the callback if _register() already read the result or
+            # if the overlapped has been cancelled
+            elif not f.done():
+                try:
+                    value = callback(transferred, key, ov)
+                except OSError as e:
+                    f.set_exception(e)
+                    self._results.append(f)
+                else:
+                    f.set_result(value)
+                    self._results.append(f)
+
+        # Remove unregistered futures
+        for ov in self._unregistered:
+            self._cache.pop(ov.address, None)
+        self._unregistered.clear()
+
+    def _stop_serving(self, obj):
+        # obj is a socket or pipe handle.  It will be closed in
+        # BaseProactorEventLoop._stop_serving() which will make any
+        # pending operations fail quickly.
+        self._stopped_serving.add(obj)
+
+    def close(self):
+        # Cancel remaining registered operations.
+        for address, (fut, ov, obj, callback) in list(self._cache.items()):
+            if fut.cancelled():
+                # Nothing to do with cancelled futures
+                pass
+            elif isinstance(fut, _WaitCancelFuture):
+                # _WaitCancelFuture must not be cancelled
+                pass
+            else:
+                try:
+                    fut.cancel()
+                except OSError as exc:
+                    if self._loop is not None:
+                        context = {
+                            'message': 'Cancelling a future failed',
+                            'exception': exc,
+                            'future': fut,
+                        }
+                        if fut._source_traceback:
+                            context['source_traceback'] = fut._source_traceback
+                        self._loop.call_exception_handler(context)
+
+        while self._cache:
+            if not self._poll(1):
+                logger.debug('taking a long time to close proactor')
+
+        self._results = []
+        if self._iocp is not None:
+            _winapi.CloseHandle(self._iocp)
+            self._iocp = None
+
+    def __del__(self):
+        self.close()
+
+
+class _WindowsSubprocessTransport(base_subprocess.BaseSubprocessTransport):
+
+    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
+        self._proc = windows_utils.Popen(
+            args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
+            bufsize=bufsize, **kwargs)
+
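+        # The proactor registers a wait on the process handle below; once the
+        # handle is signalled, the future completes and poll() returns the
+        # exit code without blocking.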
+        def callback(f):
+            returncode = self._proc.poll()
+            self._process_exited(returncode)
+
+        f = self._loop._proactor.wait_for_handle(int(self._proc._handle))
+        f.add_done_callback(callback)
+
+
+SelectorEventLoop = _WindowsSelectorEventLoop
+
+
+class _WindowsDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
+    _loop_factory = SelectorEventLoop
+
+
+DefaultEventLoopPolicy = _WindowsDefaultEventLoopPolicy
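
With the policy above, the default loop on Windows is the selector loop; a
minimal sketch of opting into the IOCP proactor loop instead (assuming this
vendored package is the asyncio actually being imported):

    import asyncio
    import sys

    if sys.platform == 'win32':
        # DefaultEventLoopPolicy creates a SelectorEventLoop; the proactor
        # loop is required for the overlapped pipe and subprocess support
        # implemented above.
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)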

+ 223 - 0
env/Lib/site-packages/asyncio/windows_utils.py

@@ -0,0 +1,223 @@
+"""
+Various Windows specific bits and pieces
+"""
+
+import sys
+
+if sys.platform != 'win32':  # pragma: no cover
+    raise ImportError('win32 only')
+
+import _winapi
+import itertools
+import msvcrt
+import os
+import socket
+import subprocess
+import tempfile
+import warnings
+
+
+__all__ = ['socketpair', 'pipe', 'Popen', 'PIPE', 'PipeHandle']
+
+
+# Constants/globals
+
+
+BUFSIZE = 8192
+PIPE = subprocess.PIPE
+STDOUT = subprocess.STDOUT
+_mmap_counter = itertools.count()
+
+
+if hasattr(socket, 'socketpair'):
+    # Since Python 3.5, socket.socketpair() is now also available on Windows
+    socketpair = socket.socketpair
+else:
+    # Replacement for socket.socketpair()
+    def socketpair(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0):
+        """A socket pair usable as a self-pipe, for Windows.
+
+        Origin: https://gist.github.com/4325783, by Geert Jansen.
+        Public domain.
+        """
+        if family == socket.AF_INET:
+            host = '127.0.0.1'
+        elif family == socket.AF_INET6:
+            host = '::1'
+        else:
+            raise ValueError("Only AF_INET and AF_INET6 socket address "
+                             "families are supported")
+        if type != socket.SOCK_STREAM:
+            raise ValueError("Only SOCK_STREAM socket type is supported")
+        if proto != 0:
+            raise ValueError("Only protocol zero is supported")
+
+        # We create a connected TCP socket pair. Note the trick with
+        # setblocking(False) that prevents us from having to create a thread.
+        lsock = socket.socket(family, type, proto)
+        try:
+            lsock.bind((host, 0))
+            lsock.listen(1)
+            # On IPv6, ignore flow_info and scope_id
+            addr, port = lsock.getsockname()[:2]
+            csock = socket.socket(family, type, proto)
+            try:
+                csock.setblocking(False)
+                try:
+                    csock.connect((addr, port))
+                except (BlockingIOError, InterruptedError):
+                    pass
+                csock.setblocking(True)
+                ssock, _ = lsock.accept()
+            except:
+                csock.close()
+                raise
+        finally:
+            lsock.close()
+        return (ssock, csock)
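
A quick sanity check of either branch above (a minimal sketch; win32 only,
since importing this module elsewhere raises ImportError):

    from asyncio.windows_utils import socketpair

    a, b = socketpair()
    try:
        a.sendall(b'ping')           # an ordinary connected TCP pair
        assert b.recv(4) == b'ping'
    finally:
        a.close()
        b.close()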
+
+
+# Replacement for os.pipe() using handles instead of fds
+
+
+def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
+    """Like os.pipe() but with overlapped support and using handles not fds."""
+    address = tempfile.mktemp(prefix=r'\\.\pipe\python-pipe-%d-%d-' %
+                              (os.getpid(), next(_mmap_counter)))
+
+    if duplex:
+        openmode = _winapi.PIPE_ACCESS_DUPLEX
+        access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
+        obsize, ibsize = bufsize, bufsize
+    else:
+        openmode = _winapi.PIPE_ACCESS_INBOUND
+        access = _winapi.GENERIC_WRITE
+        obsize, ibsize = 0, bufsize
+
+    openmode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
+
+    if overlapped[0]:
+        openmode |= _winapi.FILE_FLAG_OVERLAPPED
+
+    if overlapped[1]:
+        flags_and_attribs = _winapi.FILE_FLAG_OVERLAPPED
+    else:
+        flags_and_attribs = 0
+
+    h1 = h2 = None
+    try:
+        h1 = _winapi.CreateNamedPipe(
+            address, openmode, _winapi.PIPE_WAIT,
+            1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)
+
+        h2 = _winapi.CreateFile(
+            address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
+            flags_and_attribs, _winapi.NULL)
+
+        ov = _winapi.ConnectNamedPipe(h1, overlapped=True)
+        ov.GetOverlappedResult(True)
+        return h1, h2
+    except:
+        if h1 is not None:
+            _winapi.CloseHandle(h1)
+        if h2 is not None:
+            _winapi.CloseHandle(h2)
+        raise
+
+
+# Wrapper for a pipe handle
+
+
+class PipeHandle:
+    """Wrapper for an overlapped pipe handle which is vaguely file-object like.
+
+    The IOCP event loop can use these instead of socket objects.
+    """
+    def __init__(self, handle):
+        self._handle = handle
+
+    def __repr__(self):
+        if self._handle is not None:
+            handle = 'handle=%r' % self._handle
+        else:
+            handle = 'closed'
+        return '<%s %s>' % (self.__class__.__name__, handle)
+
+    @property
+    def handle(self):
+        return self._handle
+
+    def fileno(self):
+        if self._handle is None:
+            raise ValueError("I/O operation on closed pipe")
+        return self._handle
+
+    def close(self, *, CloseHandle=_winapi.CloseHandle):
+        if self._handle is not None:
+            CloseHandle(self._handle)
+            self._handle = None
+
+    def __del__(self):
+        if self._handle is not None:
+            warnings.warn("unclosed %r" % self, ResourceWarning)
+            self.close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, t, v, tb):
+        self.close()
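
Tying pipe() and PipeHandle together (a minimal sketch, win32 only): pipe()
returns raw integer handles, and PipeHandle adds deterministic cleanup through
the context-manager protocol, avoiding the ResourceWarning from __del__:

    from asyncio.windows_utils import pipe, PipeHandle

    # With duplex=False (the default), h1 is the inbound/read end and h2
    # the write end.
    h1, h2 = pipe(overlapped=(True, True))
    with PipeHandle(h1) as read_end, PipeHandle(h2) as write_end:
        print(read_end.fileno())     # the raw handle, not a CRT descriptor
    # both handles are closed here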
+
+
+# Replacement for subprocess.Popen using overlapped pipe handles
+
+
+class Popen(subprocess.Popen):
+    """Replacement for subprocess.Popen using overlapped pipe handles.
+
+    The stdin, stdout, stderr are None or instances of PipeHandle.
+    """
+    def __init__(self, args, stdin=None, stdout=None, stderr=None, **kwds):
+        assert not kwds.get('universal_newlines')
+        assert kwds.get('bufsize', 0) == 0
+        stdin_rfd = stdout_wfd = stderr_wfd = None
+        stdin_wh = stdout_rh = stderr_rh = None
+        if stdin == PIPE:
+            stdin_rh, stdin_wh = pipe(overlapped=(False, True), duplex=True)
+            stdin_rfd = msvcrt.open_osfhandle(stdin_rh, os.O_RDONLY)
+        else:
+            stdin_rfd = stdin
+        if stdout == PIPE:
+            stdout_rh, stdout_wh = pipe(overlapped=(True, False))
+            stdout_wfd = msvcrt.open_osfhandle(stdout_wh, 0)
+        else:
+            stdout_wfd = stdout
+        if stderr == PIPE:
+            stderr_rh, stderr_wh = pipe(overlapped=(True, False))
+            stderr_wfd = msvcrt.open_osfhandle(stderr_wh, 0)
+        elif stderr == STDOUT:
+            stderr_wfd = stdout_wfd
+        else:
+            stderr_wfd = stderr
+        try:
+            super().__init__(args, stdin=stdin_rfd, stdout=stdout_wfd,
+                             stderr=stderr_wfd, **kwds)
+        except:
+            for h in (stdin_wh, stdout_rh, stderr_rh):
+                if h is not None:
+                    _winapi.CloseHandle(h)
+            raise
+        else:
+            if stdin_wh is not None:
+                self.stdin = PipeHandle(stdin_wh)
+            if stdout_rh is not None:
+                self.stdout = PipeHandle(stdout_rh)
+            if stderr_rh is not None:
+                self.stderr = PipeHandle(stderr_rh)
+        finally:
+            if stdin == PIPE:
+                os.close(stdin_rfd)
+            if stdout == PIPE:
+                os.close(stdout_wfd)
+            if stderr == PIPE:
+                os.close(stderr_wfd)

+ 5 - 0
env/Lib/site-packages/easy_install.py

@@ -0,0 +1,5 @@
+"""Run the EasyInstall command"""
+
+if __name__ == '__main__':
+    from setuptools.command.easy_install import main
+    main()

+ 1 - 0
env/Lib/site-packages/pip-23.0.1.dist-info/INSTALLER

@@ -0,0 +1 @@
+pip

+ 20 - 0
env/Lib/site-packages/pip-23.0.1.dist-info/LICENSE.txt

@@ -0,0 +1,20 @@
+Copyright (c) 2008-present The pip developers (see AUTHORS.txt file)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 88 - 0
env/Lib/site-packages/pip-23.0.1.dist-info/METADATA

@@ -0,0 +1,88 @@
+Metadata-Version: 2.1
+Name: pip
+Version: 23.0.1
+Summary: The PyPA recommended tool for installing Python packages.
+Home-page: https://pip.pypa.io/
+Author: The pip developers
+Author-email: distutils-sig@python.org
+License: MIT
+Project-URL: Documentation, https://pip.pypa.io
+Project-URL: Source, https://github.com/pypa/pip
+Project-URL: Changelog, https://pip.pypa.io/en/stable/news/
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Topic :: Software Development :: Build Tools
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=3.7
+License-File: LICENSE.txt
+
+pip - The Python Package Installer
+==================================
+
+.. image:: https://img.shields.io/pypi/v/pip.svg
+   :target: https://pypi.org/project/pip/
+
+.. image:: https://readthedocs.org/projects/pip/badge/?version=latest
+   :target: https://pip.pypa.io/en/latest
+
+pip is the `package installer`_ for Python. You can use pip to install packages from the `Python Package Index`_ and other indexes.
+
+Please take a look at our documentation for how to install and use pip:
+
+* `Installation`_
+* `Usage`_
+
+We release updates regularly, with a new version every 3 months. Find more details in our documentation:
+
+* `Release notes`_
+* `Release process`_
+
+In pip 20.3, we've `made a big improvement to the heart of pip`_; `learn more`_. We want your input, so `sign up for our user experience research studies`_ to help us do it right.
+
+**Note**: pip 21.0, in January 2021, removed Python 2 support, per pip's `Python 2 support policy`_. Please migrate to Python 3.
+
+If you find bugs, need help, or want to talk to the developers, please use our mailing lists or chat rooms:
+
+* `Issue tracking`_
+* `Discourse channel`_
+* `User IRC`_
+
+If you want to get involved head over to GitHub to get the source code, look at our development documentation and feel free to jump on the developer mailing lists and chat rooms:
+
+* `GitHub page`_
+* `Development documentation`_
+* `Development IRC`_
+
+Code of Conduct
+---------------
+
+Everyone interacting in the pip project's codebases, issue trackers, chat
+rooms, and mailing lists is expected to follow the `PSF Code of Conduct`_.
+
+.. _package installer: https://packaging.python.org/guides/tool-recommendations/
+.. _Python Package Index: https://pypi.org
+.. _Installation: https://pip.pypa.io/en/stable/installation/
+.. _Usage: https://pip.pypa.io/en/stable/
+.. _Release notes: https://pip.pypa.io/en/stable/news.html
+.. _Release process: https://pip.pypa.io/en/latest/development/release-process/
+.. _GitHub page: https://github.com/pypa/pip
+.. _Development documentation: https://pip.pypa.io/en/latest/development
+.. _made a big improvement to the heart of pip: https://pyfound.blogspot.com/2020/11/pip-20-3-new-resolver.html
+.. _learn more: https://pip.pypa.io/en/latest/user_guide/#changes-to-the-pip-dependency-resolver-in-20-3-2020
+.. _sign up for our user experience research studies: https://pyfound.blogspot.com/2020/03/new-pip-resolver-to-roll-out-this-year.html
+.. _Python 2 support policy: https://pip.pypa.io/en/latest/development/release-process/#python-2-support
+.. _Issue tracking: https://github.com/pypa/pip/issues
+.. _Discourse channel: https://discuss.python.org/c/packaging
+.. _User IRC: https://kiwiirc.com/nextclient/#ircs://irc.libera.chat:+6697/pypa
+.. _Development IRC: https://kiwiirc.com/nextclient/#ircs://irc.libera.chat:+6697/pypa-dev
+.. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md

+ 1002 - 0
env/Lib/site-packages/pip-23.0.1.dist-info/RECORD

@@ -0,0 +1,1002 @@
+../../Scripts/pip.exe,sha256=_kTJYn0Yr8T7gXkBw84Q1-YeKU07hg-d9VsDR8u7bWk,108416
+../../Scripts/pip3.9.exe,sha256=_kTJYn0Yr8T7gXkBw84Q1-YeKU07hg-d9VsDR8u7bWk,108416
+../../Scripts/pip3.exe,sha256=_kTJYn0Yr8T7gXkBw84Q1-YeKU07hg-d9VsDR8u7bWk,108416
+pip-23.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pip-23.0.1.dist-info/LICENSE.txt,sha256=Y0MApmnUmurmWxLGxIySTFGkzfPR_whtw0VtyLyqIQQ,1093
+pip-23.0.1.dist-info/METADATA,sha256=POh89utz-H1e0K-xDY9CL9gs-x0MjH-AWxbhJG3aaVE,4072
+pip-23.0.1.dist-info/RECORD,,
+pip-23.0.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip-23.0.1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pip-23.0.1.dist-info/entry_points.txt,sha256=w694mjHYSfmSoUVVSaHoQ9UkOBBdtKKIJbyDRLdKju8,124
+pip-23.0.1.dist-info/top_level.txt,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pip/__init__.py,sha256=5yroedzc2dKKbcynDrHX8vBoLxqU27KmFvvHmdqQN9w,357
+pip/__main__.py,sha256=mXwWDftNLMKfwVqKFWGE_uuBZvGSIiUELhLkeysIuZc,1198
+pip/__pip-runner__.py,sha256=EnrfKmKMzWAdqg_JicLCOP9Y95Ux7zHh4ObvqLtQcjo,1444
+pip/__pycache__/__init__.cpython-39.pyc,,
+pip/__pycache__/__main__.cpython-39.pyc,,
+pip/__pycache__/__pip-runner__.cpython-39.pyc,,
+pip/_internal/__init__.py,sha256=nnFCuxrPMgALrIDxSoy-H6Zj4W4UY60D-uL1aJyq0pc,573
+pip/_internal/__pycache__/__init__.cpython-39.pyc,,
+pip/_internal/__pycache__/build_env.cpython-39.pyc,,
+pip/_internal/__pycache__/cache.cpython-39.pyc,,
+pip/_internal/__pycache__/configuration.cpython-39.pyc,,
+pip/_internal/__pycache__/exceptions.cpython-39.pyc,,
+pip/_internal/__pycache__/main.cpython-39.pyc,,
+pip/_internal/__pycache__/pyproject.cpython-39.pyc,,
+pip/_internal/__pycache__/self_outdated_check.cpython-39.pyc,,
+pip/_internal/__pycache__/wheel_builder.cpython-39.pyc,,
+pip/_internal/build_env.py,sha256=1ESpqw0iupS_K7phZK5zshVE5Czy9BtGLFU4W6Enva8,10243
+pip/_internal/cache.py,sha256=C3n78VnBga9rjPXZqht_4A4d-T25poC7K0qBM7FHDhU,10734
+pip/_internal/cli/__init__.py,sha256=FkHBgpxxb-_gd6r1FjnNhfMOzAUYyXoXKJ6abijfcFU,132
+pip/_internal/cli/__pycache__/__init__.cpython-39.pyc,,
+pip/_internal/cli/__pycache__/autocompletion.cpython-39.pyc,,
+pip/_internal/cli/__pycache__/base_command.cpython-39.pyc,,
+pip/_internal/cli/__pycache__/cmdoptions.cpython-39.pyc,,
+pip/_internal/cli/__pycache__/command_context.cpython-39.pyc,,
+pip/_internal/cli/__pycache__/main.cpython-39.pyc,,
+pip/_internal/cli/__pycache__/main_parser.cpython-39.pyc,,
+pip/_internal/cli/__pycache__/parser.cpython-39.pyc,,
+pip/_internal/cli/__pycache__/progress_bars.cpython-39.pyc,,
+pip/_internal/cli/__pycache__/req_command.cpython-39.pyc,,
+pip/_internal/cli/__pycache__/spinners.cpython-39.pyc,,
+pip/_internal/cli/__pycache__/status_codes.cpython-39.pyc,,
+pip/_internal/cli/autocompletion.py,sha256=wY2JPZY2Eji1vhR7bVo-yCBPJ9LCy6P80iOAhZD1Vi8,6676
+pip/_internal/cli/base_command.py,sha256=t1D5x40Hfn9HnPnMt-iSxvqL14nht2olBCacW74pc-k,7842
+pip/_internal/cli/cmdoptions.py,sha256=0OHXkgnppCtC4QyF28ZL8FBosVUXG5pWj2uzO1CgWhM,29497
+pip/_internal/cli/command_context.py,sha256=RHgIPwtObh5KhMrd3YZTkl8zbVG-6Okml7YbFX4Ehg0,774
+pip/_internal/cli/main.py,sha256=ioJ8IVlb2K1qLOxR-tXkee9lURhYV89CDM71MKag7YY,2472
+pip/_internal/cli/main_parser.py,sha256=laDpsuBDl6kyfywp9eMMA9s84jfH2TJJn-vmL0GG90w,4338
+pip/_internal/cli/parser.py,sha256=tWP-K1uSxnJyXu3WE0kkH3niAYRBeuUaxeydhzOdhL4,10817
+pip/_internal/cli/progress_bars.py,sha256=So4mPoSjXkXiSHiTzzquH3VVyVD_njXlHJSExYPXAow,1968
+pip/_internal/cli/req_command.py,sha256=ypTutLv4j_efxC2f6C6aCQufxre-zaJdi5m_tWlLeBk,18172
+pip/_internal/cli/spinners.py,sha256=hIJ83GerdFgFCdobIA23Jggetegl_uC4Sp586nzFbPE,5118
+pip/_internal/cli/status_codes.py,sha256=sEFHUaUJbqv8iArL3HAtcztWZmGOFX01hTesSytDEh0,116
+pip/_internal/commands/__init__.py,sha256=5oRO9O3dM2vGuh0bFw4HOVletryrz5HHMmmPWwJrH9U,3882
+pip/_internal/commands/__pycache__/__init__.cpython-39.pyc,,
+pip/_internal/commands/__pycache__/cache.cpython-39.pyc,,
+pip/_internal/commands/__pycache__/check.cpython-39.pyc,,
+pip/_internal/commands/__pycache__/completion.cpython-39.pyc,,
+pip/_internal/commands/__pycache__/configuration.cpython-39.pyc,,
+pip/_internal/commands/__pycache__/debug.cpython-39.pyc,,
+pip/_internal/commands/__pycache__/download.cpython-39.pyc,,
+pip/_internal/commands/__pycache__/freeze.cpython-39.pyc,,
+pip/_internal/commands/__pycache__/hash.cpython-39.pyc,,
+pip/_internal/commands/__pycache__/help.cpython-39.pyc,,
+pip/_internal/commands/__pycache__/index.cpython-39.pyc,,
+pip/_internal/commands/__pycache__/inspect.cpython-39.pyc,,
+pip/_internal/commands/__pycache__/install.cpython-39.pyc,,
+pip/_internal/commands/__pycache__/list.cpython-39.pyc,,
+pip/_internal/commands/__pycache__/search.cpython-39.pyc,,
+pip/_internal/commands/__pycache__/show.cpython-39.pyc,,
+pip/_internal/commands/__pycache__/uninstall.cpython-39.pyc,,
+pip/_internal/commands/__pycache__/wheel.cpython-39.pyc,,
+pip/_internal/commands/cache.py,sha256=muaT0mbL-ZUpn6AaushVAipzTiMwE4nV2BLbJBwt_KQ,7582
+pip/_internal/commands/check.py,sha256=0gjXR7j36xJT5cs2heYU_dfOfpnFfzX8OoPNNoKhqdM,1685
+pip/_internal/commands/completion.py,sha256=H0TJvGrdsoleuIyQKzJbicLFppYx2OZA0BLNpQDeFjI,4129
+pip/_internal/commands/configuration.py,sha256=NB5uf8HIX8-li95YLoZO09nALIWlLCHDF5aifSKcBn8,9815
+pip/_internal/commands/debug.py,sha256=AesEID-4gPFDWTwPiPaGZuD4twdT-imaGuMR5ZfSn8s,6591
+pip/_internal/commands/download.py,sha256=LwKEyYMG2L67nQRyGo8hQdNEeMU2bmGWqJfcB8JDXas,5289
+pip/_internal/commands/freeze.py,sha256=gCjoD6foBZPBAAYx5t8zZLkJhsF_ZRtnb3dPuD7beO8,2951
+pip/_internal/commands/hash.py,sha256=EVVOuvGtoPEdFi8SNnmdqlCQrhCxV-kJsdwtdcCnXGQ,1703
+pip/_internal/commands/help.py,sha256=gcc6QDkcgHMOuAn5UxaZwAStsRBrnGSn_yxjS57JIoM,1132
+pip/_internal/commands/index.py,sha256=cGQVSA5dAs7caQ9sz4kllYvaI4ZpGiq1WhCgaImXNSA,4793
+pip/_internal/commands/inspect.py,sha256=2wSPt9yfr3r6g-s2S5L6PvRtaHNVyb4TuodMStJ39cw,3188
+pip/_internal/commands/install.py,sha256=3vT9tnHOV-p6dPMaKDqzivqmcq_kPAI-jVkxOEwN5C4,32389
+pip/_internal/commands/list.py,sha256=Fk1TSxB33NlRS4qlLQ0xwnytnF9-zkQJbKQYv2xc4Q4,12343
+pip/_internal/commands/search.py,sha256=sbBZiARRc050QquOKcCvOr2K3XLsoYebLKZGRi__iUI,5697
+pip/_internal/commands/show.py,sha256=t5jia4zcYJRJZy4U_Von7zMl03hJmmcofj6oDNTnj7Y,6419
+pip/_internal/commands/uninstall.py,sha256=OIqO9tqadY8kM4HwhFf1Q62fUIp7v8KDrTRo8yWMz7Y,3886
+pip/_internal/commands/wheel.py,sha256=mbFJd4dmUfrVFJkQbK8n2zHyRcD3AI91f7EUo9l3KYg,7396
+pip/_internal/configuration.py,sha256=uBKTus43pDIO6IzT2mLWQeROmHhtnoabhniKNjPYvD0,13529
+pip/_internal/distributions/__init__.py,sha256=Hq6kt6gXBgjNit5hTTWLAzeCNOKoB-N0pGYSqehrli8,858
+pip/_internal/distributions/__pycache__/__init__.cpython-39.pyc,,
+pip/_internal/distributions/__pycache__/base.cpython-39.pyc,,
+pip/_internal/distributions/__pycache__/installed.cpython-39.pyc,,
+pip/_internal/distributions/__pycache__/sdist.cpython-39.pyc,,
+pip/_internal/distributions/__pycache__/wheel.cpython-39.pyc,,
+pip/_internal/distributions/base.py,sha256=jrF1Vi7eGyqFqMHrieh1PIOrGU7KeCxhYPZnbvtmvGY,1221
+pip/_internal/distributions/installed.py,sha256=NI2OgsgH9iBq9l5vB-56vOg5YsybOy-AU4VE5CSCO2I,729
+pip/_internal/distributions/sdist.py,sha256=SQBdkatXSigKGG_SaD0U0p1Jwdfrg26UCNcHgkXZfdA,6494
+pip/_internal/distributions/wheel.py,sha256=m-J4XO-gvFerlYsFzzSXYDvrx8tLZlJFTCgDxctn8ig,1164
+pip/_internal/exceptions.py,sha256=cU4dz7x-1uFGrf2A1_Np9tKcy599bRJKRJkikgARxW4,24244
+pip/_internal/index/__init__.py,sha256=vpt-JeTZefh8a-FC22ZeBSXFVbuBcXSGiILhQZJaNpQ,30
+pip/_internal/index/__pycache__/__init__.cpython-39.pyc,,
+pip/_internal/index/__pycache__/collector.cpython-39.pyc,,
+pip/_internal/index/__pycache__/package_finder.cpython-39.pyc,,
+pip/_internal/index/__pycache__/sources.cpython-39.pyc,,
+pip/_internal/index/collector.py,sha256=3OmYZ3tCoRPGOrELSgQWG-03M-bQHa2-VCA3R_nJAaU,16504
+pip/_internal/index/package_finder.py,sha256=rrUw4vj7QE_eMt022jw--wQiKznMaUgVBkJ1UCrVUxo,37873
+pip/_internal/index/sources.py,sha256=SVyPitv08-Qalh2_Bk5diAJ9GAA_d-a93koouQodAG0,6557
+pip/_internal/locations/__init__.py,sha256=Dh8LJWG8LRlDK4JIj9sfRF96TREzE--N_AIlx7Tqoe4,15365
+pip/_internal/locations/__pycache__/__init__.cpython-39.pyc,,
+pip/_internal/locations/__pycache__/_distutils.cpython-39.pyc,,
+pip/_internal/locations/__pycache__/_sysconfig.cpython-39.pyc,,
+pip/_internal/locations/__pycache__/base.cpython-39.pyc,,
+pip/_internal/locations/_distutils.py,sha256=cmi6h63xYNXhQe7KEWEMaANjHFy5yQOPt_1_RCWyXMY,6100
+pip/_internal/locations/_sysconfig.py,sha256=jyNVtUfMIf0mtyY-Xp1m9yQ8iwECozSVVFmjkN9a2yw,7680
+pip/_internal/locations/base.py,sha256=RQiPi1d4FVM2Bxk04dQhXZ2PqkeljEL2fZZ9SYqIQ78,2556
+pip/_internal/main.py,sha256=r-UnUe8HLo5XFJz8inTcOOTiu_sxNhgHb6VwlGUllOI,340
+pip/_internal/metadata/__init__.py,sha256=84j1dPJaIoz5Q2ZTPi0uB1iaDAHiUNfKtYSGQCfFKpo,4280
+pip/_internal/metadata/__pycache__/__init__.cpython-39.pyc,,
+pip/_internal/metadata/__pycache__/_json.cpython-39.pyc,,
+pip/_internal/metadata/__pycache__/base.cpython-39.pyc,,
+pip/_internal/metadata/__pycache__/pkg_resources.cpython-39.pyc,,
+pip/_internal/metadata/_json.py,sha256=BTkWfFDrWFwuSodImjtbAh8wCL3isecbnjTb5E6UUDI,2595
+pip/_internal/metadata/base.py,sha256=vIwIo1BtoqegehWMAXhNrpLGYBq245rcaCNkBMPnTU8,25277
+pip/_internal/metadata/importlib/__init__.py,sha256=9ZVO8BoE7NEZPmoHp5Ap_NJo0HgNIezXXg-TFTtt3Z4,107
+pip/_internal/metadata/importlib/__pycache__/__init__.cpython-39.pyc,,
+pip/_internal/metadata/importlib/__pycache__/_compat.cpython-39.pyc,,
+pip/_internal/metadata/importlib/__pycache__/_dists.cpython-39.pyc,,
+pip/_internal/metadata/importlib/__pycache__/_envs.cpython-39.pyc,,
+pip/_internal/metadata/importlib/_compat.py,sha256=GAe_prIfCE4iUylrnr_2dJRlkkBVRUbOidEoID7LPoE,1882
+pip/_internal/metadata/importlib/_dists.py,sha256=BUV8y6D0PePZrEN3vfJL-m1FDqZ6YPRgAiBeBinHhNg,8181
+pip/_internal/metadata/importlib/_envs.py,sha256=7BxanCh3T7arusys__O2ZHJdnmDhQXFmfU7x1-jB5xI,7457
+pip/_internal/metadata/pkg_resources.py,sha256=WjwiNdRsvxqxL4MA5Tb5a_q3Q3sUhdpbZF8wGLtPMI0,9773
+pip/_internal/models/__init__.py,sha256=3DHUd_qxpPozfzouoqa9g9ts1Czr5qaHfFxbnxriepM,63
+pip/_internal/models/__pycache__/__init__.cpython-39.pyc,,
+pip/_internal/models/__pycache__/candidate.cpython-39.pyc,,
+pip/_internal/models/__pycache__/direct_url.cpython-39.pyc,,
+pip/_internal/models/__pycache__/format_control.cpython-39.pyc,,
+pip/_internal/models/__pycache__/index.cpython-39.pyc,,
+pip/_internal/models/__pycache__/installation_report.cpython-39.pyc,,
+pip/_internal/models/__pycache__/link.cpython-39.pyc,,
+pip/_internal/models/__pycache__/scheme.cpython-39.pyc,,
+pip/_internal/models/__pycache__/search_scope.cpython-39.pyc,,
+pip/_internal/models/__pycache__/selection_prefs.cpython-39.pyc,,
+pip/_internal/models/__pycache__/target_python.cpython-39.pyc,,
+pip/_internal/models/__pycache__/wheel.cpython-39.pyc,,
+pip/_internal/models/candidate.py,sha256=6pcABsaR7CfIHlbJbr2_kMkVJFL_yrYjTx6SVWUnCPQ,990
+pip/_internal/models/direct_url.py,sha256=f3WiKUwWPdBkT1xm7DlolS32ZAMYh3jbkkVH-BUON5A,6626
+pip/_internal/models/format_control.py,sha256=DJpMYjxeYKKQdwNcML2_F0vtAh-qnKTYe-CpTxQe-4g,2520
+pip/_internal/models/index.py,sha256=tYnL8oxGi4aSNWur0mG8DAP7rC6yuha_MwJO8xw0crI,1030
+pip/_internal/models/installation_report.py,sha256=Hymmzv9-e3WhtewYm2NIOeMyAB6lXp736mpYqb9scZ0,2617
+pip/_internal/models/link.py,sha256=nfybVSpXgVHeU0MkC8hMkN2IgMup8Pdaudg74_sQEC8,18602
+pip/_internal/models/scheme.py,sha256=3EFQp_ICu_shH1-TBqhl0QAusKCPDFOlgHFeN4XowWs,738
+pip/_internal/models/search_scope.py,sha256=iGPQQ6a4Lau8oGQ_FWj8aRLik8A21o03SMO5KnSt-Cg,4644
+pip/_internal/models/selection_prefs.py,sha256=KZdi66gsR-_RUXUr9uejssk3rmTHrQVJWeNA2sV-VSY,1907
+pip/_internal/models/target_python.py,sha256=qKpZox7J8NAaPmDs5C_aniwfPDxzvpkrCKqfwndG87k,3858
+pip/_internal/models/wheel.py,sha256=YqazoIZyma_Q1ejFa1C7NHKQRRWlvWkdK96VRKmDBeI,3600
+pip/_internal/network/__init__.py,sha256=jf6Tt5nV_7zkARBrKojIXItgejvoegVJVKUbhAa5Ioc,50
+pip/_internal/network/__pycache__/__init__.cpython-39.pyc,,
+pip/_internal/network/__pycache__/auth.cpython-39.pyc,,
+pip/_internal/network/__pycache__/cache.cpython-39.pyc,,
+pip/_internal/network/__pycache__/download.cpython-39.pyc,,
+pip/_internal/network/__pycache__/lazy_wheel.cpython-39.pyc,,
+pip/_internal/network/__pycache__/session.cpython-39.pyc,,
+pip/_internal/network/__pycache__/utils.cpython-39.pyc,,
+pip/_internal/network/__pycache__/xmlrpc.cpython-39.pyc,,
+pip/_internal/network/auth.py,sha256=MQVP0k4hUXk8ReYEfsGQ5t7_TS7cNHQuaHJuBlJLHxU,16507
+pip/_internal/network/cache.py,sha256=hgXftU-eau4MWxHSLquTMzepYq5BPC2zhCkhN3glBy8,2145
+pip/_internal/network/download.py,sha256=HvDDq9bVqaN3jcS3DyVJHP7uTqFzbShdkf7NFSoHfkw,6096
+pip/_internal/network/lazy_wheel.py,sha256=PbPyuleNhtEq6b2S7rufoGXZWMD15FAGL4XeiAQ8FxA,7638
+pip/_internal/network/session.py,sha256=BpDOJ7_Xw5VkgPYWsePzcaqOfcyRZcB2AW7W0HGBST0,18443
+pip/_internal/network/utils.py,sha256=6A5SrUJEEUHxbGtbscwU2NpCyz-3ztiDlGWHpRRhsJ8,4073
+pip/_internal/network/xmlrpc.py,sha256=AzQgG4GgS152_cqmGr_Oz2MIXsCal-xfsis7fA7nmU0,1791
+pip/_internal/operations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/operations/__pycache__/__init__.cpython-39.pyc,,
+pip/_internal/operations/__pycache__/check.cpython-39.pyc,,
+pip/_internal/operations/__pycache__/freeze.cpython-39.pyc,,
+pip/_internal/operations/__pycache__/prepare.cpython-39.pyc,,
+pip/_internal/operations/build/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/operations/build/__pycache__/__init__.cpython-39.pyc,,
+pip/_internal/operations/build/__pycache__/build_tracker.cpython-39.pyc,,
+pip/_internal/operations/build/__pycache__/metadata.cpython-39.pyc,,
+pip/_internal/operations/build/__pycache__/metadata_editable.cpython-39.pyc,,
+pip/_internal/operations/build/__pycache__/metadata_legacy.cpython-39.pyc,,
+pip/_internal/operations/build/__pycache__/wheel.cpython-39.pyc,,
+pip/_internal/operations/build/__pycache__/wheel_editable.cpython-39.pyc,,
+pip/_internal/operations/build/__pycache__/wheel_legacy.cpython-39.pyc,,
+pip/_internal/operations/build/build_tracker.py,sha256=vf81EwomN3xe9G8qRJED0VGqNikmRQRQoobNsxi5Xrs,4133
+pip/_internal/operations/build/metadata.py,sha256=9S0CUD8U3QqZeXp-Zyt8HxwU90lE4QrnYDgrqZDzBnc,1422
+pip/_internal/operations/build/metadata_editable.py,sha256=VLL7LvntKE8qxdhUdEJhcotFzUsOSI8NNS043xULKew,1474
+pip/_internal/operations/build/metadata_legacy.py,sha256=o-eU21As175hDC7dluM1fJJ_FqokTIShyWpjKaIpHZw,2198
+pip/_internal/operations/build/wheel.py,sha256=sT12FBLAxDC6wyrDorh8kvcZ1jG5qInCRWzzP-UkJiQ,1075
+pip/_internal/operations/build/wheel_editable.py,sha256=yOtoH6zpAkoKYEUtr8FhzrYnkNHQaQBjWQ2HYae1MQg,1417
+pip/_internal/operations/build/wheel_legacy.py,sha256=C9j6rukgQI1n_JeQLoZGuDdfUwzCXShyIdPTp6edbMQ,3064
+pip/_internal/operations/check.py,sha256=WsN7z0_QSgJjw0JsWWcqOHj4wWTaFv0J7mxgUByDCOg,5122
+pip/_internal/operations/freeze.py,sha256=mwTZ2uML8aQgo3k8MR79a7SZmmmvdAJqdyaknKbavmg,9784
+pip/_internal/operations/install/__init__.py,sha256=mX7hyD2GNBO2mFGokDQ30r_GXv7Y_PLdtxcUv144e-s,51
+pip/_internal/operations/install/__pycache__/__init__.cpython-39.pyc,,
+pip/_internal/operations/install/__pycache__/editable_legacy.cpython-39.pyc,,
+pip/_internal/operations/install/__pycache__/legacy.cpython-39.pyc,,
+pip/_internal/operations/install/__pycache__/wheel.cpython-39.pyc,,
+pip/_internal/operations/install/editable_legacy.py,sha256=ee4kfJHNuzTdKItbfAsNOSEwq_vD7DRPGkBdK48yBhU,1354
+pip/_internal/operations/install/legacy.py,sha256=cHdcHebyzf8w7OaOLwcsTNSMSSV8WBoAPFLay_9CjE8,4105
+pip/_internal/operations/install/wheel.py,sha256=CxzEg2wTPX4SxNTPIx0ozTqF1X7LhpCyP3iM2FjcKUE,27407
+pip/_internal/operations/prepare.py,sha256=BeYXrLFpRoV5XBnRXQHxRA2plyC36kK9Pms5D9wjCo4,25091
+pip/_internal/pyproject.py,sha256=QqSZR5AGwtf3HTa8NdbDq2yj9T2r9S2h9gnU4aX2Kvg,6987
+pip/_internal/req/__init__.py,sha256=rUQ9d_Sh3E5kNYqX9pkN0D06YL-LrtcbJQ-LiIonq08,2807
+pip/_internal/req/__pycache__/__init__.cpython-39.pyc,,
+pip/_internal/req/__pycache__/constructors.cpython-39.pyc,,
+pip/_internal/req/__pycache__/req_file.cpython-39.pyc,,
+pip/_internal/req/__pycache__/req_install.cpython-39.pyc,,
+pip/_internal/req/__pycache__/req_set.cpython-39.pyc,,
+pip/_internal/req/__pycache__/req_uninstall.cpython-39.pyc,,
+pip/_internal/req/constructors.py,sha256=ypjtq1mOQ3d2mFkFPMf_6Mr8SLKeHQk3tUKHA1ddG0U,16611
+pip/_internal/req/req_file.py,sha256=N6lPO3c0to_G73YyGAnk7VUYmed5jV4Qxgmt1xtlXVg,17646
+pip/_internal/req/req_install.py,sha256=X4WNQlTtvkeATwWdSiJcNLihwbYI_EnGDgE99p-Aa00,35763
+pip/_internal/req/req_set.py,sha256=j3esG0s6SzoVReX9rWn4rpYNtyET_fwxbwJPRimvRxo,2858
+pip/_internal/req/req_uninstall.py,sha256=ZFQfgSNz6H1BMsgl87nQNr2iaQCcbFcmXpW8rKVQcic,24045
+pip/_internal/resolution/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/resolution/__pycache__/__init__.cpython-39.pyc,,
+pip/_internal/resolution/__pycache__/base.cpython-39.pyc,,
+pip/_internal/resolution/base.py,sha256=qlmh325SBVfvG6Me9gc5Nsh5sdwHBwzHBq6aEXtKsLA,583
+pip/_internal/resolution/legacy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/resolution/legacy/__pycache__/__init__.cpython-39.pyc,,
+pip/_internal/resolution/legacy/__pycache__/resolver.cpython-39.pyc,,
+pip/_internal/resolution/legacy/resolver.py,sha256=9em8D5TcSsEN4xZM1WreaRShOnyM4LlvhMSHpUPsocE,24129
+pip/_internal/resolution/resolvelib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-39.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/base.cpython-39.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-39.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-39.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-39.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-39.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-39.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-39.pyc,,
+pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-39.pyc,,
+pip/_internal/resolution/resolvelib/base.py,sha256=u1O4fkvCO4mhmu5i32xrDv9AX5NgUci_eYVyBDQhTIM,5220
+pip/_internal/resolution/resolvelib/candidates.py,sha256=6kQZeMzwibnL4lO6bW0hUQQjNEvXfADdFphRRkRvOtc,18963
+pip/_internal/resolution/resolvelib/factory.py,sha256=OnjkLIgyk5Tol7uOOqapA1D4qiRHWmPU18DF1yN5N8o,27878
+pip/_internal/resolution/resolvelib/found_candidates.py,sha256=hvL3Hoa9VaYo-qEOZkBi2Iqw251UDxPz-uMHVaWmLpE,5705
+pip/_internal/resolution/resolvelib/provider.py,sha256=Vd4jW_NnyifB-HMkPYtZIO70M3_RM0MbL5YV6XyBM-w,9914
+pip/_internal/resolution/resolvelib/reporter.py,sha256=3ZVVYrs5PqvLFJkGLcuXoMK5mTInFzl31xjUpDBpZZk,2526
+pip/_internal/resolution/resolvelib/requirements.py,sha256=B1ndvKPSuyyyTEXt9sKhbwminViSWnBrJa7qO2ln4Z0,5455
+pip/_internal/resolution/resolvelib/resolver.py,sha256=nYZ9bTFXj5c1ILKnkSgU7tUCTYyo5V5J-J0sKoA7Wzg,11533
+pip/_internal/self_outdated_check.py,sha256=pnqBuKKZQ8OxKP0MaUUiDHl3AtyoMJHHG4rMQ7YcYXY,8167
+pip/_internal/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/utils/__pycache__/__init__.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/_log.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/appdirs.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/compat.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/compatibility_tags.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/datetime.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/deprecation.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/direct_url_helpers.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/distutils_args.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/egg_link.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/encoding.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/entrypoints.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/filesystem.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/filetypes.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/glibc.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/hashes.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/inject_securetransport.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/logging.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/misc.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/models.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/packaging.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/setuptools_build.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/subprocess.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/temp_dir.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/unpacking.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/urls.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/virtualenv.cpython-39.pyc,,
+pip/_internal/utils/__pycache__/wheel.cpython-39.pyc,,
+pip/_internal/utils/_log.py,sha256=-jHLOE_THaZz5BFcCnoSL9EYAtJ0nXem49s9of4jvKw,1015
+pip/_internal/utils/appdirs.py,sha256=swgcTKOm3daLeXTW6v5BUS2Ti2RvEnGRQYH_yDXklAo,1665
+pip/_internal/utils/compat.py,sha256=ACyBfLgj3_XG-iA5omEDrXqDM0cQKzi8h8HRBInzG6Q,1884
+pip/_internal/utils/compatibility_tags.py,sha256=ydin8QG8BHqYRsPY4OL6cmb44CbqXl1T0xxS97VhHkk,5377
+pip/_internal/utils/datetime.py,sha256=m21Y3wAtQc-ji6Veb6k_M5g6A0ZyFI4egchTdnwh-pQ,242
+pip/_internal/utils/deprecation.py,sha256=OLc7GzDwPob9y8jscDYCKUNBV-9CWwqFplBOJPLOpBM,5764
+pip/_internal/utils/direct_url_helpers.py,sha256=6F1tc2rcKaCZmgfVwsE6ObIe_Pux23mUVYA-2D9wCFc,3206
+pip/_internal/utils/distutils_args.py,sha256=bYUt4wfFJRaeGO4VHia6FNaA8HlYXMcKuEq1zYijY5g,1115
+pip/_internal/utils/egg_link.py,sha256=ZryCchR_yQSCsdsMkCpxQjjLbQxObA5GDtLG0RR5mGc,2118
+pip/_internal/utils/encoding.py,sha256=qqsXDtiwMIjXMEiIVSaOjwH5YmirCaK-dIzb6-XJsL0,1169
+pip/_internal/utils/entrypoints.py,sha256=YlhLTRl2oHBAuqhc-zmL7USS67TPWVHImjeAQHreZTQ,3064
+pip/_internal/utils/filesystem.py,sha256=RhMIXUaNVMGjc3rhsDahWQ4MavvEQDdqXqgq-F6fpw8,5122
+pip/_internal/utils/filetypes.py,sha256=i8XAQ0eFCog26Fw9yV0Yb1ygAqKYB1w9Cz9n0fj8gZU,716
+pip/_internal/utils/glibc.py,sha256=tDfwVYnJCOC0BNVpItpy8CGLP9BjkxFHdl0mTS0J7fc,3110
+pip/_internal/utils/hashes.py,sha256=1WhkVNIHNfuYLafBHThIjVKGplxFJXSlQtuG2mXNlJI,4831
+pip/_internal/utils/inject_securetransport.py,sha256=o-QRVMGiENrTJxw3fAhA7uxpdEdw6M41TjHYtSVRrcg,795
+pip/_internal/utils/logging.py,sha256=U2q0i1n8hPS2gQh8qcocAg5dovGAa_bR24akmXMzrk4,11632
+pip/_internal/utils/misc.py,sha256=XLtMDOmy8mWiNLuPIhxPdO1bWIleLdN6JnWDZsXfTgE,22253
+pip/_internal/utils/models.py,sha256=5GoYU586SrxURMvDn_jBMJInitviJg4O5-iOU-6I0WY,1193
+pip/_internal/utils/packaging.py,sha256=5Wm6_x7lKrlqVjPI5MBN_RurcRHwVYoQ7Ksrs84de7s,2108
+pip/_internal/utils/setuptools_build.py,sha256=4i3CuS34yNrkePnZ73rR47pyDzpZBo-SX9V5PNDSSHY,5662
+pip/_internal/utils/subprocess.py,sha256=0EMhgfPGFk8FZn6Qq7Hp9PN6YHuQNWiVby4DXcTCON4,9200
+pip/_internal/utils/temp_dir.py,sha256=aCX489gRa4Nu0dMKRFyGhV6maJr60uEynu5uCbKR4Qg,7702
+pip/_internal/utils/unpacking.py,sha256=SBb2iV1crb89MDRTEKY86R4A_UOWApTQn9VQVcMDOlE,8821
+pip/_internal/utils/urls.py,sha256=AhaesUGl-9it6uvG6fsFPOr9ynFpGaTMk4t5XTX7Z_Q,1759
+pip/_internal/utils/virtualenv.py,sha256=S6f7csYorRpiD6cvn3jISZYc3I8PJC43H5iMFpRAEDU,3456
+pip/_internal/utils/wheel.py,sha256=lXOgZyTlOm5HmK8tw5iw0A3_5A6wRzsXHOaQkIvvloU,4549
+pip/_internal/vcs/__init__.py,sha256=UAqvzpbi0VbZo3Ub6skEeZAw-ooIZR-zX_WpCbxyCoU,596
+pip/_internal/vcs/__pycache__/__init__.cpython-39.pyc,,
+pip/_internal/vcs/__pycache__/bazaar.cpython-39.pyc,,
+pip/_internal/vcs/__pycache__/git.cpython-39.pyc,,
+pip/_internal/vcs/__pycache__/mercurial.cpython-39.pyc,,
+pip/_internal/vcs/__pycache__/subversion.cpython-39.pyc,,
+pip/_internal/vcs/__pycache__/versioncontrol.cpython-39.pyc,,
+pip/_internal/vcs/bazaar.py,sha256=j0oin0fpGRHcCFCxEcpPCQoFEvA-DMLULKdGP8Nv76o,3519
+pip/_internal/vcs/git.py,sha256=mjhwudCx9WlLNkxZ6_kOKmueF0rLoU2i1xeASKF6yiQ,18116
+pip/_internal/vcs/mercurial.py,sha256=Bzbd518Jsx-EJI0IhIobiQqiRsUv5TWYnrmRIFWE0Gw,5238
+pip/_internal/vcs/subversion.py,sha256=vhZs8L-TNggXqM1bbhl-FpbxE3TrIB6Tgnx8fh3S2HE,11729
+pip/_internal/vcs/versioncontrol.py,sha256=KUOc-hN51em9jrqxKwUR3JnkgSE-xSOqMiiJcSaL6B8,22811
+pip/_internal/wheel_builder.py,sha256=8cObBCu4mIsMJqZM7xXI9DO3vldiAnRNa1Gt6izPPTs,13079
+pip/_vendor/__init__.py,sha256=fNxOSVD0auElsD8fN9tuq5psfgMQ-RFBtD4X5gjlRkg,4966
+pip/_vendor/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/__pycache__/six.cpython-39.pyc,,
+pip/_vendor/__pycache__/typing_extensions.cpython-39.pyc,,
+pip/_vendor/cachecontrol/__init__.py,sha256=hrxlv3q7upsfyMw8k3gQ9vagBax1pYHSGGqYlZ0Zk0M,465
+pip/_vendor/cachecontrol/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-39.pyc,,
+pip/_vendor/cachecontrol/__pycache__/adapter.cpython-39.pyc,,
+pip/_vendor/cachecontrol/__pycache__/cache.cpython-39.pyc,,
+pip/_vendor/cachecontrol/__pycache__/compat.cpython-39.pyc,,
+pip/_vendor/cachecontrol/__pycache__/controller.cpython-39.pyc,,
+pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-39.pyc,,
+pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-39.pyc,,
+pip/_vendor/cachecontrol/__pycache__/serialize.cpython-39.pyc,,
+pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-39.pyc,,
+pip/_vendor/cachecontrol/_cmd.py,sha256=lxUXqfNTVx84zf6tcWbkLZHA6WVBRtJRpfeA9ZqhaAY,1379
+pip/_vendor/cachecontrol/adapter.py,sha256=ew9OYEQHEOjvGl06ZsuX8W3DAvHWsQKHwWAxISyGug8,5033
+pip/_vendor/cachecontrol/cache.py,sha256=Tty45fOjH40fColTGkqKQvQQmbYsMpk-nCyfLcv2vG4,1535
+pip/_vendor/cachecontrol/caches/__init__.py,sha256=h-1cUmOz6mhLsjTjOrJ8iPejpGdLCyG4lzTftfGZvLg,242
+pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-39.pyc,,
+pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-39.pyc,,
+pip/_vendor/cachecontrol/caches/file_cache.py,sha256=GpexcE29LoY4MaZwPUTcUBZaDdcsjqyLxZFznk8Hbr4,5271
+pip/_vendor/cachecontrol/caches/redis_cache.py,sha256=mp-QWonP40I3xJGK3XVO-Gs9a3UjzlqqEmp9iLJH9F4,1033
+pip/_vendor/cachecontrol/compat.py,sha256=LNx7vqBndYdHU8YuJt53ab_8rzMGTXVrvMb7CZJkxG0,778
+pip/_vendor/cachecontrol/controller.py,sha256=bAYrt7x_VH4toNpI066LQxbHpYGpY1MxxmZAhspplvw,16416
+pip/_vendor/cachecontrol/filewrapper.py,sha256=X4BAQOO26GNOR7nH_fhTzAfeuct2rBQcx_15MyFBpcs,3946
+pip/_vendor/cachecontrol/heuristics.py,sha256=8kAyuZLSCyEIgQr6vbUwfhpqg9ows4mM0IV6DWazevI,4154
+pip/_vendor/cachecontrol/serialize.py,sha256=_U1NU_C-SDgFzkbAxAsPDgMTHeTWZZaHCQnZN_jh0U8,7105
+pip/_vendor/cachecontrol/wrapper.py,sha256=X3-KMZ20Ho3VtqyVaXclpeQpFzokR5NE8tZSfvKVaB8,774
+pip/_vendor/certifi/__init__.py,sha256=bK_nm9bLJzNvWZc2oZdiTwg2KWD4HSPBWGaM0zUDvMw,94
+pip/_vendor/certifi/__main__.py,sha256=1k3Cr95vCxxGRGDljrW3wMdpZdL3Nhf0u1n-k2qdsCY,255
+pip/_vendor/certifi/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/certifi/__pycache__/__main__.cpython-39.pyc,,
+pip/_vendor/certifi/__pycache__/core.cpython-39.pyc,,
+pip/_vendor/certifi/cacert.pem,sha256=LBHDzgj_xA05AxnHK8ENT5COnGNElNZe0svFUHMf1SQ,275233
+pip/_vendor/certifi/core.py,sha256=ZwiOsv-sD_ouU1ft8wy_xZ3LQ7UbcVzyqj2XNyrsZis,4279
+pip/_vendor/chardet/__init__.py,sha256=57R-HSxj0PWmILMN0GFmUNqEMfrEVSamXyjD-W6_fbs,4797
+pip/_vendor/chardet/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/big5freq.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/big5prober.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/chardistribution.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/charsetgroupprober.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/charsetprober.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/codingstatemachine.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/codingstatemachinedict.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/cp949prober.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/enums.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/escprober.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/escsm.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/eucjpprober.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/euckrfreq.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/euckrprober.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/euctwfreq.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/euctwprober.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/gb2312freq.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/gb2312prober.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/hebrewprober.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/jisfreq.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/johabfreq.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/johabprober.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/jpcntx.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/langbulgarianmodel.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/langgreekmodel.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/langhebrewmodel.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/langhungarianmodel.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/langrussianmodel.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/langthaimodel.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/langturkishmodel.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/latin1prober.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/macromanprober.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/mbcharsetprober.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/mbcsgroupprober.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/mbcssm.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/resultdict.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/sbcharsetprober.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/sbcsgroupprober.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/sjisprober.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/universaldetector.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/utf1632prober.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/utf8prober.cpython-39.pyc,,
+pip/_vendor/chardet/__pycache__/version.cpython-39.pyc,,
+pip/_vendor/chardet/big5freq.py,sha256=ltcfP-3PjlNHCoo5e4a7C4z-2DhBTXRfY6jbMbB7P30,31274
+pip/_vendor/chardet/big5prober.py,sha256=lPMfwCX6v2AaPgvFh_cSWZcgLDbWiFCHLZ_p9RQ9uxE,1763
+pip/_vendor/chardet/chardistribution.py,sha256=13B8XUG4oXDuLdXvfbIWwLFeR-ZU21AqTS1zcdON8bU,10032
+pip/_vendor/chardet/charsetgroupprober.py,sha256=UKK3SaIZB2PCdKSIS0gnvMtLR9JJX62M-fZJu3OlWyg,3915
+pip/_vendor/chardet/charsetprober.py,sha256=L3t8_wIOov8em-vZWOcbkdsrwe43N6_gqNh5pH7WPd4,5420
+pip/_vendor/chardet/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/chardet/cli/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/chardet/cli/__pycache__/chardetect.cpython-39.pyc,,
+pip/_vendor/chardet/cli/chardetect.py,sha256=zibMVg5RpKb-ME9_7EYG4ZM2Sf07NHcQzZ12U-rYJho,3242
+pip/_vendor/chardet/codingstatemachine.py,sha256=K7k69sw3jY5DmTXoSJQVsUtFIQKYPQVOSJJhBuGv_yE,3732
+pip/_vendor/chardet/codingstatemachinedict.py,sha256=0GY3Hi2qIZvDrOOJ3AtqppM1RsYxr_66ER4EHjuMiMc,542
+pip/_vendor/chardet/cp949prober.py,sha256=0jKRV7fECuWI16rNnks0ZECKA1iZYCIEaP8A1ZvjUSI,1860
+pip/_vendor/chardet/enums.py,sha256=TzECiZoCKNMqgwU76cPCeKWFBqaWvAdLMev5_bCkhY8,1683
+pip/_vendor/chardet/escprober.py,sha256=Kho48X65xE0scFylIdeJjM2bcbvRvv0h0WUbMWrJD3A,4006
+pip/_vendor/chardet/escsm.py,sha256=AqyXpA2FQFD7k-buBty_7itGEYkhmVa8X09NLRul3QM,12176
+pip/_vendor/chardet/eucjpprober.py,sha256=5KYaM9fsxkRYzw1b5k0fL-j_-ezIw-ij9r97a9MHxLY,3934
+pip/_vendor/chardet/euckrfreq.py,sha256=3mHuRvXfsq_QcQysDQFb8qSudvTiol71C6Ic2w57tKM,13566
+pip/_vendor/chardet/euckrprober.py,sha256=hiFT6wM174GIwRvqDsIcuOc-dDsq2uPKMKbyV8-1Xnc,1753
+pip/_vendor/chardet/euctwfreq.py,sha256=2alILE1Lh5eqiFJZjzRkMQXolNJRHY5oBQd-vmZYFFM,36913
+pip/_vendor/chardet/euctwprober.py,sha256=NxbpNdBtU0VFI0bKfGfDkpP7S2_8_6FlO87dVH0ogws,1753
+pip/_vendor/chardet/gb2312freq.py,sha256=49OrdXzD-HXqwavkqjo8Z7gvs58hONNzDhAyMENNkvY,20735
+pip/_vendor/chardet/gb2312prober.py,sha256=KPEBueaSLSvBpFeINMu0D6TgHcR90e5PaQawifzF4o0,1759
+pip/_vendor/chardet/hebrewprober.py,sha256=96T_Lj_OmW-fK7JrSHojYjyG3fsGgbzkoTNleZ3kfYE,14537
+pip/_vendor/chardet/jisfreq.py,sha256=mm8tfrwqhpOd3wzZKS4NJqkYBQVcDfTM2JiQ5aW932E,25796
+pip/_vendor/chardet/johabfreq.py,sha256=dBpOYG34GRX6SL8k_LbS9rxZPMjLjoMlgZ03Pz5Hmqc,42498
+pip/_vendor/chardet/johabprober.py,sha256=O1Qw9nVzRnun7vZp4UZM7wvJSv9W941mEU9uDMnY3DU,1752
+pip/_vendor/chardet/jpcntx.py,sha256=uhHrYWkLxE_rF5OkHKInm0HUsrjgKHHVQvtt3UcvotA,27055
+pip/_vendor/chardet/langbulgarianmodel.py,sha256=vmbvYFP8SZkSxoBvLkFqKiH1sjma5ihk3PTpdy71Rr4,104562
+pip/_vendor/chardet/langgreekmodel.py,sha256=JfB7bupjjJH2w3X_mYnQr9cJA_7EuITC2cRW13fUjeI,98484
+pip/_vendor/chardet/langhebrewmodel.py,sha256=3HXHaLQPNAGcXnJjkIJfozNZLTvTJmf4W5Awi6zRRKc,98196
+pip/_vendor/chardet/langhungarianmodel.py,sha256=WxbeQIxkv8YtApiNqxQcvj-tMycsoI4Xy-fwkDHpP_Y,101363
+pip/_vendor/chardet/langrussianmodel.py,sha256=s395bTZ87ESTrZCOdgXbEjZ9P1iGPwCl_8xSsac_DLY,128035
+pip/_vendor/chardet/langthaimodel.py,sha256=7bJlQitRpTnVGABmbSznHnJwOHDy3InkTvtFUx13WQI,102774
+pip/_vendor/chardet/langturkishmodel.py,sha256=XY0eGdTIy4eQ9Xg1LVPZacb-UBhHBR-cq0IpPVHowKc,95372
+pip/_vendor/chardet/latin1prober.py,sha256=p15EEmFbmQUwbKLC7lOJVGHEZwcG45ubEZYTGu01J5g,5380
+pip/_vendor/chardet/macromanprober.py,sha256=9anfzmY6TBfUPDyBDOdY07kqmTHpZ1tK0jL-p1JWcOY,6077
+pip/_vendor/chardet/mbcharsetprober.py,sha256=Wr04WNI4F3X_VxEverNG-H25g7u-MDDKlNt-JGj-_uU,3715
+pip/_vendor/chardet/mbcsgroupprober.py,sha256=iRpaNBjV0DNwYPu_z6TiHgRpwYahiM7ztI_4kZ4Uz9A,2131
+pip/_vendor/chardet/mbcssm.py,sha256=hUtPvDYgWDaA2dWdgLsshbwRfm3Q5YRlRogdmeRUNQw,30391
+pip/_vendor/chardet/metadata/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/chardet/metadata/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/chardet/metadata/__pycache__/languages.cpython-39.pyc,,
+pip/_vendor/chardet/metadata/languages.py,sha256=FhvBIdZFxRQ-dTwkb_0madRKgVBCaUMQz9I5xqjE5iQ,13560
+pip/_vendor/chardet/resultdict.py,sha256=ez4FRvN5KaSosJeJ2WzUyKdDdg35HDy_SSLPXKCdt5M,402
+pip/_vendor/chardet/sbcharsetprober.py,sha256=-nd3F90i7GpXLjehLVHqVBE0KlWzGvQUPETLBNn4o6U,6400
+pip/_vendor/chardet/sbcsgroupprober.py,sha256=gcgI0fOfgw_3YTClpbra_MNxwyEyJ3eUXraoLHYb59E,4137
+pip/_vendor/chardet/sjisprober.py,sha256=aqQufMzRw46ZpFlzmYaYeT2-nzmKb-hmcrApppJ862k,4007
+pip/_vendor/chardet/universaldetector.py,sha256=xYBrg4x0dd9WnT8qclfADVD9ondrUNkqPmvte1pa520,14848
+pip/_vendor/chardet/utf1632prober.py,sha256=pw1epGdMj1hDGiCu1AHqqzOEfjX8MVdiW7O1BlT8-eQ,8505
+pip/_vendor/chardet/utf8prober.py,sha256=8m08Ub5490H4jQ6LYXvFysGtgKoKsHUd2zH_i8_TnVw,2812
+pip/_vendor/chardet/version.py,sha256=lGtJcxGM44Qz4Cbk4rbbmrKxnNr1-97U25TameLehZw,244
+pip/_vendor/colorama/__init__.py,sha256=wePQA4U20tKgYARySLEC047ucNX-g8pRLpYBuiHlLb8,266
+pip/_vendor/colorama/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/colorama/__pycache__/ansi.cpython-39.pyc,,
+pip/_vendor/colorama/__pycache__/ansitowin32.cpython-39.pyc,,
+pip/_vendor/colorama/__pycache__/initialise.cpython-39.pyc,,
+pip/_vendor/colorama/__pycache__/win32.cpython-39.pyc,,
+pip/_vendor/colorama/__pycache__/winterm.cpython-39.pyc,,
+pip/_vendor/colorama/ansi.py,sha256=Top4EeEuaQdBWdteKMEcGOTeKeF19Q-Wo_6_Cj5kOzQ,2522
+pip/_vendor/colorama/ansitowin32.py,sha256=vPNYa3OZbxjbuFyaVo0Tmhmy1FZ1lKMWCnT7odXpItk,11128
+pip/_vendor/colorama/initialise.py,sha256=-hIny86ClXo39ixh5iSCfUIa2f_h_bgKRDW7gqs-KLU,3325
+pip/_vendor/colorama/tests/__init__.py,sha256=MkgPAEzGQd-Rq0w0PZXSX2LadRWhUECcisJY8lSrm4Q,75
+pip/_vendor/colorama/tests/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/colorama/tests/__pycache__/ansi_test.cpython-39.pyc,,
+pip/_vendor/colorama/tests/__pycache__/ansitowin32_test.cpython-39.pyc,,
+pip/_vendor/colorama/tests/__pycache__/initialise_test.cpython-39.pyc,,
+pip/_vendor/colorama/tests/__pycache__/isatty_test.cpython-39.pyc,,
+pip/_vendor/colorama/tests/__pycache__/utils.cpython-39.pyc,,
+pip/_vendor/colorama/tests/__pycache__/winterm_test.cpython-39.pyc,,
+pip/_vendor/colorama/tests/ansi_test.py,sha256=FeViDrUINIZcr505PAxvU4AjXz1asEiALs9GXMhwRaE,2839
+pip/_vendor/colorama/tests/ansitowin32_test.py,sha256=RN7AIhMJ5EqDsYaCjVo-o4u8JzDD4ukJbmevWKS70rY,10678
+pip/_vendor/colorama/tests/initialise_test.py,sha256=BbPy-XfyHwJ6zKozuQOvNvQZzsx9vdb_0bYXn7hsBTc,6741
+pip/_vendor/colorama/tests/isatty_test.py,sha256=Pg26LRpv0yQDB5Ac-sxgVXG7hsA1NYvapFgApZfYzZg,1866
+pip/_vendor/colorama/tests/utils.py,sha256=1IIRylG39z5-dzq09R_ngufxyPZxgldNbrxKxUGwGKE,1079
+pip/_vendor/colorama/tests/winterm_test.py,sha256=qoWFPEjym5gm2RuMwpf3pOis3a5r_PJZFCzK254JL8A,3709
+pip/_vendor/colorama/win32.py,sha256=YQOKwMTwtGBbsY4dL5HYTvwTeP9wIQra5MvPNddpxZs,6181
+pip/_vendor/colorama/winterm.py,sha256=XCQFDHjPi6AHYNdZwy0tA02H-Jh48Jp-HvCjeLeLp3U,7134
+pip/_vendor/distlib/__init__.py,sha256=acgfseOC55dNrVAzaBKpUiH3Z6V7Q1CaxsiQ3K7pC-E,581
+pip/_vendor/distlib/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/distlib/__pycache__/compat.cpython-39.pyc,,
+pip/_vendor/distlib/__pycache__/database.cpython-39.pyc,,
+pip/_vendor/distlib/__pycache__/index.cpython-39.pyc,,
+pip/_vendor/distlib/__pycache__/locators.cpython-39.pyc,,
+pip/_vendor/distlib/__pycache__/manifest.cpython-39.pyc,,
+pip/_vendor/distlib/__pycache__/markers.cpython-39.pyc,,
+pip/_vendor/distlib/__pycache__/metadata.cpython-39.pyc,,
+pip/_vendor/distlib/__pycache__/resources.cpython-39.pyc,,
+pip/_vendor/distlib/__pycache__/scripts.cpython-39.pyc,,
+pip/_vendor/distlib/__pycache__/util.cpython-39.pyc,,
+pip/_vendor/distlib/__pycache__/version.cpython-39.pyc,,
+pip/_vendor/distlib/__pycache__/wheel.cpython-39.pyc,,
+pip/_vendor/distlib/compat.py,sha256=tfoMrj6tujk7G4UC2owL6ArgDuCKabgBxuJRGZSmpko,41259
+pip/_vendor/distlib/database.py,sha256=o_mw0fAr93NDAHHHfqG54Y1Hi9Rkfrp2BX15XWZYK50,51697
+pip/_vendor/distlib/index.py,sha256=HFiDG7LMoaBs829WuotrfIwcErOOExUOR_AeBtw_TCU,20834
+pip/_vendor/distlib/locators.py,sha256=wNzG-zERzS_XGls-nBPVVyLRHa2skUlkn0-5n0trMWA,51991
+pip/_vendor/distlib/manifest.py,sha256=nQEhYmgoreaBZzyFzwYsXxJARu3fo4EkunU163U16iE,14811
+pip/_vendor/distlib/markers.py,sha256=TpHHHLgkzyT7YHbwj-2i6weRaq-Ivy2-MUnrDkjau-U,5058
+pip/_vendor/distlib/metadata.py,sha256=g_DIiu8nBXRzA-mWPRpatHGbmFZqaFoss7z9TG7QSUU,39801
+pip/_vendor/distlib/resources.py,sha256=LwbPksc0A1JMbi6XnuPdMBUn83X7BPuFNWqPGEKI698,10820
+pip/_vendor/distlib/scripts.py,sha256=BmkTKmiTk4m2cj-iueliatwz3ut_9SsABBW51vnQnZU,18102
+pip/_vendor/distlib/t32.exe,sha256=a0GV5kCoWsMutvliiCKmIgV98eRZ33wXoS-XrqvJQVs,97792
+pip/_vendor/distlib/t64-arm.exe,sha256=68TAa32V504xVBnufojh0PcenpR3U4wAqTqf-MZqbPw,182784
+pip/_vendor/distlib/t64.exe,sha256=gaYY8hy4fbkHYTTnA4i26ct8IQZzkBG2pRdy0iyuBrc,108032
+pip/_vendor/distlib/util.py,sha256=31dPXn3Rfat0xZLeVoFpuniyhe6vsbl9_QN-qd9Lhlk,66262
+pip/_vendor/distlib/version.py,sha256=WG__LyAa2GwmA6qSoEJtvJE8REA1LZpbSizy8WvhJLk,23513
+pip/_vendor/distlib/w32.exe,sha256=R4csx3-OGM9kL4aPIzQKRo5TfmRSHZo6QWyLhDhNBks,91648
+pip/_vendor/distlib/w64-arm.exe,sha256=xdyYhKj0WDcVUOCb05blQYvzdYIKMbmJn2SZvzkcey4,168448
+pip/_vendor/distlib/w64.exe,sha256=ejGf-rojoBfXseGLpya6bFTFPWRG21X5KvU8J5iU-K0,101888
+pip/_vendor/distlib/wheel.py,sha256=Rgqs658VsJ3R2845qwnZD8XQryV2CzWw2mghwLvxxsI,43898
+pip/_vendor/distro/__init__.py,sha256=2fHjF-SfgPvjyNZ1iHh_wjqWdR_Yo5ODHwZC0jLBPhc,981
+pip/_vendor/distro/__main__.py,sha256=bu9d3TifoKciZFcqRBuygV3GSuThnVD_m2IK4cz96Vs,64
+pip/_vendor/distro/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/distro/__pycache__/__main__.cpython-39.pyc,,
+pip/_vendor/distro/__pycache__/distro.cpython-39.pyc,,
+pip/_vendor/distro/distro.py,sha256=UZO1LjIhtFCMdlbiz39gj3raV-Amf3SBwzGzfApiMHw,49330
+pip/_vendor/idna/__init__.py,sha256=KJQN1eQBr8iIK5SKrJ47lXvxG0BJ7Lm38W4zT0v_8lk,849
+pip/_vendor/idna/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/idna/__pycache__/codec.cpython-39.pyc,,
+pip/_vendor/idna/__pycache__/compat.cpython-39.pyc,,
+pip/_vendor/idna/__pycache__/core.cpython-39.pyc,,
+pip/_vendor/idna/__pycache__/idnadata.cpython-39.pyc,,
+pip/_vendor/idna/__pycache__/intranges.cpython-39.pyc,,
+pip/_vendor/idna/__pycache__/package_data.cpython-39.pyc,,
+pip/_vendor/idna/__pycache__/uts46data.cpython-39.pyc,,
+pip/_vendor/idna/codec.py,sha256=6ly5odKfqrytKT9_7UrlGklHnf1DSK2r9C6cSM4sa28,3374
+pip/_vendor/idna/compat.py,sha256=0_sOEUMT4CVw9doD3vyRhX80X19PwqFoUBs7gWsFME4,321
+pip/_vendor/idna/core.py,sha256=1JxchwKzkxBSn7R_oCE12oBu3eVux0VzdxolmIad24M,12950
+pip/_vendor/idna/idnadata.py,sha256=xUjqKqiJV8Ho_XzBpAtv5JFoVPSupK-SUXvtjygUHqw,44375
+pip/_vendor/idna/intranges.py,sha256=YBr4fRYuWH7kTKS2tXlFjM24ZF1Pdvcir-aywniInqg,1881
+pip/_vendor/idna/package_data.py,sha256=C_jHJzmX8PI4xq0jpzmcTMxpb5lDsq4o5VyxQzlVrZE,21
+pip/_vendor/idna/uts46data.py,sha256=zvjZU24s58_uAS850Mcd0NnD0X7_gCMAMjzWNIeUJdc,206539
+pip/_vendor/msgpack/__init__.py,sha256=NryGaKLDk_Egd58ZxXpnuI7OWO27AXz7S6CBFRM3sAY,1132
+pip/_vendor/msgpack/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/msgpack/__pycache__/exceptions.cpython-39.pyc,,
+pip/_vendor/msgpack/__pycache__/ext.cpython-39.pyc,,
+pip/_vendor/msgpack/__pycache__/fallback.cpython-39.pyc,,
+pip/_vendor/msgpack/exceptions.py,sha256=dCTWei8dpkrMsQDcjQk74ATl9HsIBH0ybt8zOPNqMYc,1081
+pip/_vendor/msgpack/ext.py,sha256=TuldJPkYu8Wo_Xh0tFGL2l06-gY88NSR8tOje9fo2Wg,6080
+pip/_vendor/msgpack/fallback.py,sha256=OORDn86-fHBPlu-rPlMdM10KzkH6S_Rx9CHN1b7o4cg,34557
+pip/_vendor/packaging/__about__.py,sha256=ugASIO2w1oUyH8_COqQ2X_s0rDhjbhQC3yJocD03h2c,661
+pip/_vendor/packaging/__init__.py,sha256=b9Kk5MF7KxhhLgcDmiUWukN-LatWFxPdNug0joPhHSk,497
+pip/_vendor/packaging/__pycache__/__about__.cpython-39.pyc,,
+pip/_vendor/packaging/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/packaging/__pycache__/_manylinux.cpython-39.pyc,,
+pip/_vendor/packaging/__pycache__/_musllinux.cpython-39.pyc,,
+pip/_vendor/packaging/__pycache__/_structures.cpython-39.pyc,,
+pip/_vendor/packaging/__pycache__/markers.cpython-39.pyc,,
+pip/_vendor/packaging/__pycache__/requirements.cpython-39.pyc,,
+pip/_vendor/packaging/__pycache__/specifiers.cpython-39.pyc,,
+pip/_vendor/packaging/__pycache__/tags.cpython-39.pyc,,
+pip/_vendor/packaging/__pycache__/utils.cpython-39.pyc,,
+pip/_vendor/packaging/__pycache__/version.cpython-39.pyc,,
+pip/_vendor/packaging/_manylinux.py,sha256=XcbiXB-qcjv3bcohp6N98TMpOP4_j3m-iOA8ptK2GWY,11488
+pip/_vendor/packaging/_musllinux.py,sha256=_KGgY_qc7vhMGpoqss25n2hiLCNKRtvz9mCrS7gkqyc,4378
+pip/_vendor/packaging/_structures.py,sha256=q3eVNmbWJGG_S0Dit_S3Ao8qQqz_5PYTXFAKBZe5yr4,1431
+pip/_vendor/packaging/markers.py,sha256=AJBOcY8Oq0kYc570KuuPTkvuqjAlhufaE2c9sCUbm64,8487
+pip/_vendor/packaging/requirements.py,sha256=NtDlPBtojpn1IUC85iMjPNsUmufjpSlwnNA-Xb4m5NA,4676
+pip/_vendor/packaging/specifiers.py,sha256=LRQ0kFsHrl5qfcFNEEJrIFYsnIHQUJXY9fIsakTrrqE,30110
+pip/_vendor/packaging/tags.py,sha256=lmsnGNiJ8C4D_Pf9PbM0qgbZvD9kmB9lpZBQUZa3R_Y,15699
+pip/_vendor/packaging/utils.py,sha256=dJjeat3BS-TYn1RrUFVwufUMasbtzLfYRoy_HXENeFQ,4200
+pip/_vendor/packaging/version.py,sha256=_fLRNrFrxYcHVfyo8vk9j8s6JM8N_xsSxVFr6RJyco8,14665
+pip/_vendor/pkg_resources/__init__.py,sha256=NnpQ3g6BCHzpMgOR_OLBmYtniY4oOzdKpwqghfq_6ug,108287
+pip/_vendor/pkg_resources/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/pkg_resources/__pycache__/py31compat.cpython-39.pyc,,
+pip/_vendor/pkg_resources/py31compat.py,sha256=CRk8fkiPRDLsbi5pZcKsHI__Pbmh_94L8mr9Qy9Ab2U,562
+pip/_vendor/platformdirs/__init__.py,sha256=9iY4Z8iJDZB0djln6zHHwrPVWpB54TCygcnh--MujU0,12936
+pip/_vendor/platformdirs/__main__.py,sha256=ZmsnTxEOxtTvwa-Y_Vfab_JN3X4XCVeN8X0yyy9-qnc,1176
+pip/_vendor/platformdirs/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/platformdirs/__pycache__/__main__.cpython-39.pyc,,
+pip/_vendor/platformdirs/__pycache__/android.cpython-39.pyc,,
+pip/_vendor/platformdirs/__pycache__/api.cpython-39.pyc,,
+pip/_vendor/platformdirs/__pycache__/macos.cpython-39.pyc,,
+pip/_vendor/platformdirs/__pycache__/unix.cpython-39.pyc,,
+pip/_vendor/platformdirs/__pycache__/version.cpython-39.pyc,,
+pip/_vendor/platformdirs/__pycache__/windows.cpython-39.pyc,,
+pip/_vendor/platformdirs/android.py,sha256=GKizhyS7ESRiU67u8UnBJLm46goau9937EchXWbPBlk,4068
+pip/_vendor/platformdirs/api.py,sha256=MXKHXOL3eh_-trSok-JUTjAR_zjmmKF3rjREVABjP8s,4910
+pip/_vendor/platformdirs/macos.py,sha256=-3UXQewbT0yMhMdkzRXfXGAntmLIH7Qt4a9Hlf8I5_Y,2655
+pip/_vendor/platformdirs/unix.py,sha256=P-WQjSSieE38DXjMDa1t4XHnKJQ5idEaKT0PyXwm8KQ,6911
+pip/_vendor/platformdirs/version.py,sha256=qaN-fw_htIgKUVXoAuAEVgKxQu3tZ9qE2eiKkWIS7LA,160
+pip/_vendor/platformdirs/windows.py,sha256=LOrXLgI0CjQldDo2zhOZYGYZ6g4e_cJOCB_pF9aMRWQ,6596
+pip/_vendor/pygments/__init__.py,sha256=5oLcMLXD0cTG8YcHBPITtK1fS0JBASILEvEnWkTezgE,2999
+pip/_vendor/pygments/__main__.py,sha256=p0_rz3JZmNZMNZBOqDojaEx1cr9wmA9FQZX_TYl74lQ,353
+pip/_vendor/pygments/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/pygments/__pycache__/__main__.cpython-39.pyc,,
+pip/_vendor/pygments/__pycache__/cmdline.cpython-39.pyc,,
+pip/_vendor/pygments/__pycache__/console.cpython-39.pyc,,
+pip/_vendor/pygments/__pycache__/filter.cpython-39.pyc,,
+pip/_vendor/pygments/__pycache__/formatter.cpython-39.pyc,,
+pip/_vendor/pygments/__pycache__/lexer.cpython-39.pyc,,
+pip/_vendor/pygments/__pycache__/modeline.cpython-39.pyc,,
+pip/_vendor/pygments/__pycache__/plugin.cpython-39.pyc,,
+pip/_vendor/pygments/__pycache__/regexopt.cpython-39.pyc,,
+pip/_vendor/pygments/__pycache__/scanner.cpython-39.pyc,,
+pip/_vendor/pygments/__pycache__/sphinxext.cpython-39.pyc,,
+pip/_vendor/pygments/__pycache__/style.cpython-39.pyc,,
+pip/_vendor/pygments/__pycache__/token.cpython-39.pyc,,
+pip/_vendor/pygments/__pycache__/unistring.cpython-39.pyc,,
+pip/_vendor/pygments/__pycache__/util.cpython-39.pyc,,
+pip/_vendor/pygments/cmdline.py,sha256=rc0fah4eknRqFgn1wKNEwkq0yWnSqYOGaA4PaIeOxVY,23685
+pip/_vendor/pygments/console.py,sha256=hQfqCFuOlGk7DW2lPQYepsw-wkOH1iNt9ylNA1eRymM,1697
+pip/_vendor/pygments/filter.py,sha256=NglMmMPTRRv-zuRSE_QbWid7JXd2J4AvwjCW2yWALXU,1938
+pip/_vendor/pygments/filters/__init__.py,sha256=b5YuXB9rampSy2-cMtKxGQoMDfrG4_DcvVwZrzTlB6w,40386
+pip/_vendor/pygments/filters/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/pygments/formatter.py,sha256=6-TS2Y8pUMeWIUolWwr1O8ruC-U6HydWDwOdbAiJgJQ,2917
+pip/_vendor/pygments/formatters/__init__.py,sha256=YTqGeHS17fNXCLMZpf7oCxBCKLB9YLsZ8IAsjGhawyg,4810
+pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-39.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/bbcode.cpython-39.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/groff.cpython-39.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/html.cpython-39.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/img.cpython-39.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/irc.cpython-39.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/latex.cpython-39.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/other.cpython-39.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/pangomarkup.cpython-39.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/rtf.cpython-39.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/svg.cpython-39.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/terminal.cpython-39.pyc,,
+pip/_vendor/pygments/formatters/__pycache__/terminal256.cpython-39.pyc,,
+pip/_vendor/pygments/formatters/_mapping.py,sha256=fCZgvsM6UEuZUG7J6lr47eVss5owKd_JyaNbDfxeqmQ,4104
+pip/_vendor/pygments/formatters/bbcode.py,sha256=JrL4ITjN-KzPcuQpPMBf1pm33eW2sDUNr8WzSoAJsJA,3314
+pip/_vendor/pygments/formatters/groff.py,sha256=xrOFoLbafSA9uHsSLRogy79_Zc4GWJ8tMK2hCdTJRsw,5086
+pip/_vendor/pygments/formatters/html.py,sha256=QNt9prPgxmbKx2M-nfDwoR1bIg06-sNouQuWnE434Wc,35441
+pip/_vendor/pygments/formatters/img.py,sha256=h75Y7IRZLZxDEIwyoOsdRLTwm7kLVPbODKkgEiJ0iKI,21938
+pip/_vendor/pygments/formatters/irc.py,sha256=iwk5tDJOxbCV64SCmOFyvk__x6RD60ay0nUn7ko9n7U,5871
+pip/_vendor/pygments/formatters/latex.py,sha256=thPbytJCIs2AUXsO3NZwqKtXJ-upOlcXP4CXsx94G4w,19351
+pip/_vendor/pygments/formatters/other.py,sha256=PczqK1Rms43lz6iucOLPeBMxIncPKOGBt-195w1ynII,5073
+pip/_vendor/pygments/formatters/pangomarkup.py,sha256=ZZzMsKJKXrsDniFeMTkIpe7aQ4VZYRHu0idWmSiUJ2U,2212
+pip/_vendor/pygments/formatters/rtf.py,sha256=abrKlWjipBkQvhIICxtjYTUNv6WME0iJJObFvqVuudE,5014
+pip/_vendor/pygments/formatters/svg.py,sha256=6MM9YyO8NhU42RTQfTWBiagWMnsf9iG5gwhqSriHORE,7335
+pip/_vendor/pygments/formatters/terminal.py,sha256=NpEGvwkC6LgMLQTjVzGrJXji3XcET1sb5JCunSCzoRo,4674
+pip/_vendor/pygments/formatters/terminal256.py,sha256=4v4OVizvsxtwWBpIy_Po30zeOzE5oJg_mOc1-rCjMDk,11753
+pip/_vendor/pygments/lexer.py,sha256=ZPB_TGn_qzrXodRFwEdPzzJk6LZBo9BlfSy3lacc6zg,32005
+pip/_vendor/pygments/lexers/__init__.py,sha256=8d80-XfL5UKDCC1wRD1a_ZBZDkZ2HOe7Zul8SsnNYFE,11174
+pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-39.pyc,,
+pip/_vendor/pygments/lexers/__pycache__/python.cpython-39.pyc,,
+pip/_vendor/pygments/lexers/_mapping.py,sha256=zEiCV5FPiBioMJQJjw9kk7IJ5Y9GwknS4VJPYlcNchs,70232
+pip/_vendor/pygments/lexers/python.py,sha256=gZROs9iNSOA18YyVghP1cUCD0OwYZ04a6PCwgSOCeSA,53376
+pip/_vendor/pygments/modeline.py,sha256=gIbMSYrjSWPk0oATz7W9vMBYkUyTK2OcdVyKjioDRvA,986
+pip/_vendor/pygments/plugin.py,sha256=5rPxEoB_89qQMpOs0nI4KyLOzAHNlbQiwEMOKxqNmv8,2591
+pip/_vendor/pygments/regexopt.py,sha256=c6xcXGpGgvCET_3VWawJJqAnOp0QttFpQEdOPNY2Py0,3072
+pip/_vendor/pygments/scanner.py,sha256=F2T2G6cpkj-yZtzGQr-sOBw5w5-96UrJWveZN6va2aM,3092
+pip/_vendor/pygments/sphinxext.py,sha256=F8L0211sPnXaiWutN0lkSUajWBwlgDMIEFFAbMWOvZY,4630
+pip/_vendor/pygments/style.py,sha256=RRnussX1YiK9Z7HipIvKorImxu3-HnkdpPCO4u925T0,6257
+pip/_vendor/pygments/styles/__init__.py,sha256=iZDZ7PBKb55SpGlE1--cx9cbmWx5lVTH4bXO87t2Vok,3419
+pip/_vendor/pygments/styles/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/pygments/token.py,sha256=vA2yNHGJBHfq4jNQSah7C9DmIOp34MmYHPA8P-cYAHI,6184
+pip/_vendor/pygments/unistring.py,sha256=gP3gK-6C4oAFjjo9HvoahsqzuV4Qz0jl0E0OxfDerHI,63187
+pip/_vendor/pygments/util.py,sha256=KgwpWWC3By5AiNwxGTI7oI9aXupH2TyZWukafBJe0Mg,9110
+pip/_vendor/pyparsing/__init__.py,sha256=ZPdI7pPo4IYXcABw-51AcqOzsxVvDtqnQbyn_qYWZvo,9171
+pip/_vendor/pyparsing/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/pyparsing/__pycache__/actions.cpython-39.pyc,,
+pip/_vendor/pyparsing/__pycache__/common.cpython-39.pyc,,
+pip/_vendor/pyparsing/__pycache__/core.cpython-39.pyc,,
+pip/_vendor/pyparsing/__pycache__/exceptions.cpython-39.pyc,,
+pip/_vendor/pyparsing/__pycache__/helpers.cpython-39.pyc,,
+pip/_vendor/pyparsing/__pycache__/results.cpython-39.pyc,,
+pip/_vendor/pyparsing/__pycache__/testing.cpython-39.pyc,,
+pip/_vendor/pyparsing/__pycache__/unicode.cpython-39.pyc,,
+pip/_vendor/pyparsing/__pycache__/util.cpython-39.pyc,,
+pip/_vendor/pyparsing/actions.py,sha256=wU9i32e0y1ymxKE3OUwSHO-SFIrt1h_wv6Ws0GQjpNU,6426
+pip/_vendor/pyparsing/common.py,sha256=lFL97ooIeR75CmW5hjURZqwDCTgruqltcTCZ-ulLO2Q,12936
+pip/_vendor/pyparsing/core.py,sha256=AzTm1KFT1FIhiw2zvXZJmrpQoAwB0wOmeDCiR6SYytw,213344
+pip/_vendor/pyparsing/diagram/__init__.py,sha256=KW0PV_TvWKnL7jysz0pQbZ24nzWWu2ZfNaeyUIIywIg,23685
+pip/_vendor/pyparsing/diagram/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/pyparsing/exceptions.py,sha256=3LbSafD32NYb1Tzt85GHNkhEAU1eZkTtNSk24cPMemo,9023
+pip/_vendor/pyparsing/helpers.py,sha256=QpUOjW0-psvueMwWb9bQpU2noqKCv98_wnw1VSzSdVo,39129
+pip/_vendor/pyparsing/results.py,sha256=HgNvWVXBdQP-Q6PtJfoCEeOJk2nwEvG-2KVKC5sGA30,25341
+pip/_vendor/pyparsing/testing.py,sha256=7tu4Abp4uSeJV0N_yEPRmmNUhpd18ZQP3CrX41DM814,13402
+pip/_vendor/pyparsing/unicode.py,sha256=fwuhMj30SQ165Cv7HJpu-rSxGbRm93kN9L4Ei7VGc1Y,10787
+pip/_vendor/pyparsing/util.py,sha256=kq772O5YSeXOSdP-M31EWpbH_ayj7BMHImBYo9xPD5M,6805
+pip/_vendor/pyproject_hooks/__init__.py,sha256=kCehmy0UaBa9oVMD7ZIZrnswfnP3LXZ5lvnNJAL5JBM,491
+pip/_vendor/pyproject_hooks/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/pyproject_hooks/__pycache__/_compat.cpython-39.pyc,,
+pip/_vendor/pyproject_hooks/__pycache__/_impl.cpython-39.pyc,,
+pip/_vendor/pyproject_hooks/_compat.py,sha256=by6evrYnqkisiM-MQcvOKs5bgDMzlOSgZqRHNqf04zE,138
+pip/_vendor/pyproject_hooks/_impl.py,sha256=61GJxzQip0IInhuO69ZI5GbNQ82XEDUB_1Gg5_KtUoc,11920
+pip/_vendor/pyproject_hooks/_in_process/__init__.py,sha256=9gQATptbFkelkIy0OfWFEACzqxXJMQDWCH9rBOAZVwQ,546
+pip/_vendor/pyproject_hooks/_in_process/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/pyproject_hooks/_in_process/__pycache__/_in_process.cpython-39.pyc,,
+pip/_vendor/pyproject_hooks/_in_process/_in_process.py,sha256=m2b34c917IW5o-Q_6TYIHlsK9lSUlNiyrITTUH_zwew,10927
+pip/_vendor/requests/__init__.py,sha256=64HgJ8cke-XyNrj1ErwNq0F9SqyAThUTh5lV6m7-YkI,5178
+pip/_vendor/requests/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/requests/__pycache__/__version__.cpython-39.pyc,,
+pip/_vendor/requests/__pycache__/_internal_utils.cpython-39.pyc,,
+pip/_vendor/requests/__pycache__/adapters.cpython-39.pyc,,
+pip/_vendor/requests/__pycache__/api.cpython-39.pyc,,
+pip/_vendor/requests/__pycache__/auth.cpython-39.pyc,,
+pip/_vendor/requests/__pycache__/certs.cpython-39.pyc,,
+pip/_vendor/requests/__pycache__/compat.cpython-39.pyc,,
+pip/_vendor/requests/__pycache__/cookies.cpython-39.pyc,,
+pip/_vendor/requests/__pycache__/exceptions.cpython-39.pyc,,
+pip/_vendor/requests/__pycache__/help.cpython-39.pyc,,
+pip/_vendor/requests/__pycache__/hooks.cpython-39.pyc,,
+pip/_vendor/requests/__pycache__/models.cpython-39.pyc,,
+pip/_vendor/requests/__pycache__/packages.cpython-39.pyc,,
+pip/_vendor/requests/__pycache__/sessions.cpython-39.pyc,,
+pip/_vendor/requests/__pycache__/status_codes.cpython-39.pyc,,
+pip/_vendor/requests/__pycache__/structures.cpython-39.pyc,,
+pip/_vendor/requests/__pycache__/utils.cpython-39.pyc,,
+pip/_vendor/requests/__version__.py,sha256=h48zn-oFukaXrYHocdadp_hIszWyd_PGrS8Eiii6aoc,435
+pip/_vendor/requests/_internal_utils.py,sha256=aSPlF4uDhtfKxEayZJJ7KkAxtormeTfpwKSBSwtmAUw,1397
+pip/_vendor/requests/adapters.py,sha256=GFEz5koZaMZD86v0SHXKVB5SE9MgslEjkCQzldkNwVM,21443
+pip/_vendor/requests/api.py,sha256=dyvkDd5itC9z2g0wHl_YfD1yf6YwpGWLO7__8e21nks,6377
+pip/_vendor/requests/auth.py,sha256=h-HLlVx9j8rKV5hfSAycP2ApOSglTz77R0tz7qCbbEE,10187
+pip/_vendor/requests/certs.py,sha256=PVPooB0jP5hkZEULSCwC074532UFbR2Ptgu0I5zwmCs,575
+pip/_vendor/requests/compat.py,sha256=IhK9quyX0RRuWTNcg6d2JGSAOUbM6mym2p_2XjLTwf4,1286
+pip/_vendor/requests/cookies.py,sha256=kD3kNEcCj-mxbtf5fJsSaT86eGoEYpD3X0CSgpzl7BM,18560
+pip/_vendor/requests/exceptions.py,sha256=FA-_kVwBZ2jhXauRctN_ewHVK25b-fj0Azyz1THQ0Kk,3823
+pip/_vendor/requests/help.py,sha256=FnAAklv8MGm_qb2UilDQgS6l0cUttiCFKUjx0zn2XNA,3879
+pip/_vendor/requests/hooks.py,sha256=CiuysiHA39V5UfcCBXFIx83IrDpuwfN9RcTUgv28ftQ,733
+pip/_vendor/requests/models.py,sha256=dDZ-iThotky-Noq9yy97cUEJhr3wnY6mv-xR_ePg_lk,35288
+pip/_vendor/requests/packages.py,sha256=njJmVifY4aSctuW3PP5EFRCxjEwMRDO6J_feG2dKWsI,695
+pip/_vendor/requests/sessions.py,sha256=KUqJcRRLovNefUs7ScOXSUVCcfSayTFWtbiJ7gOSlTI,30180
+pip/_vendor/requests/status_codes.py,sha256=FvHmT5uH-_uimtRz5hH9VCbt7VV-Nei2J9upbej6j8g,4235
+pip/_vendor/requests/structures.py,sha256=-IbmhVz06S-5aPSZuUthZ6-6D9XOjRuTXHOabY041XM,2912
+pip/_vendor/requests/utils.py,sha256=0gzSOcx9Ya4liAbHnHuwt4jM78lzCZZoDFgkmsInNUg,33240
+pip/_vendor/resolvelib/__init__.py,sha256=UL-B2BDI0_TRIqkfGwLHKLxY-LjBlomz7941wDqzB1I,537
+pip/_vendor/resolvelib/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/resolvelib/__pycache__/providers.cpython-39.pyc,,
+pip/_vendor/resolvelib/__pycache__/reporters.cpython-39.pyc,,
+pip/_vendor/resolvelib/__pycache__/resolvers.cpython-39.pyc,,
+pip/_vendor/resolvelib/__pycache__/structs.cpython-39.pyc,,
+pip/_vendor/resolvelib/compat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/resolvelib/compat/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/resolvelib/compat/__pycache__/collections_abc.cpython-39.pyc,,
+pip/_vendor/resolvelib/compat/collections_abc.py,sha256=uy8xUZ-NDEw916tugUXm8HgwCGiMO0f-RcdnpkfXfOs,156
+pip/_vendor/resolvelib/providers.py,sha256=roVmFBItQJ0TkhNua65h8LdNny7rmeqVEXZu90QiP4o,5872
+pip/_vendor/resolvelib/reporters.py,sha256=fW91NKf-lK8XN7i6Yd_rczL5QeOT3sc6AKhpaTEnP3E,1583
+pip/_vendor/resolvelib/resolvers.py,sha256=2wYzVGBGerbmcIpH8cFmgSKgLSETz8jmwBMGjCBMHG4,17592
+pip/_vendor/resolvelib/structs.py,sha256=IVIYof6sA_N4ZEiE1C1UhzTX495brCNnyCdgq6CYq28,4794
+pip/_vendor/rich/__init__.py,sha256=dRxjIL-SbFVY0q3IjSMrfgBTHrm1LZDgLOygVBwiYZc,6090
+pip/_vendor/rich/__main__.py,sha256=TT8sb9PTnsnKhhrGuHkLN0jdN0dtKhtPkEr9CidDbPM,8478
+pip/_vendor/rich/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/__main__.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/_cell_widths.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/_emoji_codes.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/_emoji_replace.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/_export_format.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/_extension.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/_inspect.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/_log_render.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/_loop.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/_null_file.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/_palettes.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/_pick.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/_ratio.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/_spinners.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/_stack.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/_timer.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/_win32_console.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/_windows.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/_windows_renderer.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/_wrap.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/abc.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/align.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/ansi.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/bar.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/box.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/cells.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/color.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/color_triplet.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/columns.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/console.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/constrain.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/containers.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/control.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/default_styles.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/diagnose.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/emoji.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/errors.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/file_proxy.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/filesize.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/highlighter.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/json.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/jupyter.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/layout.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/live.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/live_render.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/logging.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/markup.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/measure.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/padding.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/pager.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/palette.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/panel.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/pretty.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/progress.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/progress_bar.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/prompt.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/protocol.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/region.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/repr.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/rule.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/scope.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/screen.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/segment.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/spinner.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/status.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/style.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/styled.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/syntax.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/table.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/terminal_theme.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/text.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/theme.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/themes.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/traceback.cpython-39.pyc,,
+pip/_vendor/rich/__pycache__/tree.cpython-39.pyc,,
+pip/_vendor/rich/_cell_widths.py,sha256=2n4EiJi3X9sqIq0O16kUZ_zy6UYMd3xFfChlKfnW1Hc,10096
+pip/_vendor/rich/_emoji_codes.py,sha256=hu1VL9nbVdppJrVoijVshRlcRRe_v3dju3Mmd2sKZdY,140235
+pip/_vendor/rich/_emoji_replace.py,sha256=n-kcetsEUx2ZUmhQrfeMNc-teeGhpuSQ5F8VPBsyvDo,1064
+pip/_vendor/rich/_export_format.py,sha256=nHArqOljIlYn6NruhWsAsh-fHo7oJC3y9BDJyAa-QYQ,2114
+pip/_vendor/rich/_extension.py,sha256=Xt47QacCKwYruzjDi-gOBq724JReDj9Cm9xUi5fr-34,265
+pip/_vendor/rich/_inspect.py,sha256=oZJGw31e64dwXSCmrDnvZbwVb1ZKhWfU8wI3VWohjJk,9695
+pip/_vendor/rich/_log_render.py,sha256=1ByI0PA1ZpxZY3CGJOK54hjlq4X-Bz_boIjIqCd8Kns,3225
+pip/_vendor/rich/_loop.py,sha256=hV_6CLdoPm0va22Wpw4zKqM0RYsz3TZxXj0PoS-9eDQ,1236
+pip/_vendor/rich/_null_file.py,sha256=cTaTCU_xuDXGGa9iqK-kZ0uddZCSvM-RgM2aGMuMiHs,1643
+pip/_vendor/rich/_palettes.py,sha256=cdev1JQKZ0JvlguV9ipHgznTdnvlIzUFDBb0It2PzjI,7063
+pip/_vendor/rich/_pick.py,sha256=evDt8QN4lF5CiwrUIXlOJCntitBCOsI3ZLPEIAVRLJU,423
+pip/_vendor/rich/_ratio.py,sha256=2lLSliL025Y-YMfdfGbutkQDevhcyDqc-DtUYW9mU70,5472
+pip/_vendor/rich/_spinners.py,sha256=U2r1_g_1zSjsjiUdAESc2iAMc3i4ri_S8PYP6kQ5z1I,19919
+pip/_vendor/rich/_stack.py,sha256=-C8OK7rxn3sIUdVwxZBBpeHhIzX0eI-VM3MemYfaXm0,351
+pip/_vendor/rich/_timer.py,sha256=zelxbT6oPFZnNrwWPpc1ktUeAT-Vc4fuFcRZLQGLtMI,417
+pip/_vendor/rich/_win32_console.py,sha256=P0vxI2fcndym1UU1S37XAzQzQnkyY7YqAKmxm24_gug,22820
+pip/_vendor/rich/_windows.py,sha256=dvNl9TmfPzNVxiKk5WDFihErZ5796g2UC9-KGGyfXmk,1926
+pip/_vendor/rich/_windows_renderer.py,sha256=t74ZL3xuDCP3nmTp9pH1L5LiI2cakJuQRQleHCJerlk,2783
+pip/_vendor/rich/_wrap.py,sha256=xfV_9t0Sg6rzimmrDru8fCVmUlalYAcHLDfrJZnbbwQ,1840
+pip/_vendor/rich/abc.py,sha256=ON-E-ZqSSheZ88VrKX2M3PXpFbGEUUZPMa_Af0l-4f0,890
+pip/_vendor/rich/align.py,sha256=FV6_GS-8uhIyViMng3hkIWSFaTgMohK1Oqyjl8I8mGE,10368
+pip/_vendor/rich/ansi.py,sha256=THex7-qjc82-ZRtmDPAYlVEObYOEE_ARB1692Fk-JHs,6819
+pip/_vendor/rich/bar.py,sha256=a7UD303BccRCrEhGjfMElpv5RFYIinaAhAuqYqhUvmw,3264
+pip/_vendor/rich/box.py,sha256=FJ6nI3jD7h2XNFU138bJUt2HYmWOlRbltoCEuIAZhew,9842
+pip/_vendor/rich/cells.py,sha256=zMjFI15wCpgjLR14lHdfFMVC6qMDi5OsKIB0PYZBBMk,4503
+pip/_vendor/rich/color.py,sha256=GTITgffj47On3YK1v_I5T2CPZJGSnyWipPID_YkYXqw,18015
+pip/_vendor/rich/color_triplet.py,sha256=3lhQkdJbvWPoLDO-AnYImAWmJvV5dlgYNCVZ97ORaN4,1054
+pip/_vendor/rich/columns.py,sha256=HUX0KcMm9dsKNi11fTbiM_h2iDtl8ySCaVcxlalEzq8,7131
+pip/_vendor/rich/console.py,sha256=w3tJfrILZpS359wrNqaldGmyk3PEhEmV8Pg2g2GjXWI,97992
+pip/_vendor/rich/constrain.py,sha256=1VIPuC8AgtKWrcncQrjBdYqA3JVWysu6jZo1rrh7c7Q,1288
+pip/_vendor/rich/containers.py,sha256=aKgm5UDHn5Nmui6IJaKdsZhbHClh_X7D-_Wg8Ehrr7s,5497
+pip/_vendor/rich/control.py,sha256=DSkHTUQLorfSERAKE_oTAEUFefZnZp4bQb4q8rHbKws,6630
+pip/_vendor/rich/default_styles.py,sha256=WqVh-RPNEsx0Wxf3fhS_fCn-wVqgJ6Qfo-Zg7CoCsLE,7954
+pip/_vendor/rich/diagnose.py,sha256=an6uouwhKPAlvQhYpNNpGq9EJysfMIOvvCbO3oSoR24,972
+pip/_vendor/rich/emoji.py,sha256=omTF9asaAnsM4yLY94eR_9dgRRSm1lHUszX20D1yYCQ,2501
+pip/_vendor/rich/errors.py,sha256=5pP3Kc5d4QJ_c0KFsxrfyhjiPVe7J1zOqSFbFAzcV-Y,642
+pip/_vendor/rich/file_proxy.py,sha256=4gCbGRXg0rW35Plaf0UVvj3dfENHuzc_n8I_dBqxI7o,1616
+pip/_vendor/rich/filesize.py,sha256=9fTLAPCAwHmBXdRv7KZU194jSgNrRb6Wx7RIoBgqeKY,2508
+pip/_vendor/rich/highlighter.py,sha256=3WW6PACGlq0e3YDjfqiMBQ0dYZwu7pcoFYUgJy01nb0,9585
+pip/_vendor/rich/json.py,sha256=TmeFm96Utaov-Ff5miavBPNo51HRooM8S78HEwrYEjA,5053
+pip/_vendor/rich/jupyter.py,sha256=QyoKoE_8IdCbrtiSHp9TsTSNyTHY0FO5whE7jOTd9UE,3252
+pip/_vendor/rich/layout.py,sha256=RFYL6HdCFsHf9WRpcvi3w-fpj-8O5dMZ8W96VdKNdbI,14007
+pip/_vendor/rich/live.py,sha256=emVaLUua-FKSYqZXmtJJjBIstO99CqMOuA6vMAKVkO0,14172
+pip/_vendor/rich/live_render.py,sha256=zElm3PrfSIvjOce28zETHMIUf9pFYSUA5o0AflgUP64,3667
+pip/_vendor/rich/logging.py,sha256=uB-cB-3Q4bmXDLLpbOWkmFviw-Fde39zyMV6tKJ2WHQ,11903
+pip/_vendor/rich/markup.py,sha256=xzF4uAafiEeEYDJYt_vUnJOGoTU8RrH-PH7WcWYXjCg,8198
+pip/_vendor/rich/measure.py,sha256=HmrIJX8sWRTHbgh8MxEay_83VkqNW_70s8aKP5ZcYI8,5305
+pip/_vendor/rich/padding.py,sha256=kTFGsdGe0os7tXLnHKpwTI90CXEvrceeZGCshmJy5zw,4970
+pip/_vendor/rich/pager.py,sha256=SO_ETBFKbg3n_AgOzXm41Sv36YxXAyI3_R-KOY2_uSc,828
+pip/_vendor/rich/palette.py,sha256=lInvR1ODDT2f3UZMfL1grq7dY_pDdKHw4bdUgOGaM4Y,3396
+pip/_vendor/rich/panel.py,sha256=wGMe40J8KCGgQoM0LyjRErmGIkv2bsYA71RCXThD0xE,10574
+pip/_vendor/rich/pretty.py,sha256=dAbLqSF3jJnyfBLJ7QjQ3B2J-WGyBnAdGXeuBVIyMyA,37414
+pip/_vendor/rich/progress.py,sha256=eg-OURdfZW3n3bib1-zP3SZl6cIm2VZup1pr_96CyLk,59836
+pip/_vendor/rich/progress_bar.py,sha256=cEoBfkc3lLwqba4XKsUpy4vSQKDh2QQ5J2J94-ACFoo,8165
+pip/_vendor/rich/prompt.py,sha256=x0mW-pIPodJM4ry6grgmmLrl8VZp99kqcmdnBe70YYA,11303
+pip/_vendor/rich/protocol.py,sha256=5hHHDDNHckdk8iWH5zEbi-zuIVSF5hbU2jIo47R7lTE,1391
+pip/_vendor/rich/region.py,sha256=rNT9xZrVZTYIXZC0NYn41CJQwYNbR-KecPOxTgQvB8Y,166
+pip/_vendor/rich/repr.py,sha256=eJObQe6_c5pUjRM85sZ2rrW47_iF9HT3Z8DrgVjvOl8,4436
+pip/_vendor/rich/rule.py,sha256=V6AWI0wCb6DB0rvN967FRMlQrdlG7HoZdfEAHyeG8CM,4773
+pip/_vendor/rich/scope.py,sha256=TMUU8qo17thyqQCPqjDLYpg_UU1k5qVd-WwiJvnJVas,2843
+pip/_vendor/rich/screen.py,sha256=YoeReESUhx74grqb0mSSb9lghhysWmFHYhsbMVQjXO8,1591
+pip/_vendor/rich/segment.py,sha256=6XdX0MfL18tUCaUWDWncIqx0wpq3GiaqzhYP779JvRA,24224
+pip/_vendor/rich/spinner.py,sha256=7b8MCleS4fa46HX0AzF98zfu6ZM6fAL0UgYzPOoakF4,4374
+pip/_vendor/rich/status.py,sha256=gJsIXIZeSo3urOyxRUjs6VrhX5CZrA0NxIQ-dxhCnwo,4425
+pip/_vendor/rich/style.py,sha256=odBbAlrgdEbAj7pmtPbQtWJNS8upyNhhy--Ks6KwAKk,26332
+pip/_vendor/rich/styled.py,sha256=eZNnzGrI4ki_54pgY3Oj0T-x3lxdXTYh4_ryDB24wBU,1258
+pip/_vendor/rich/syntax.py,sha256=W1xtdBA1-EVP-weYofKXusUlV5zghCOv1nWMHHfNmiY,34995
+pip/_vendor/rich/table.py,sha256=-WzesL-VJKsaiDU3uyczpJMHy6VCaSewBYJwx8RudI8,39684
+pip/_vendor/rich/terminal_theme.py,sha256=1j5-ufJfnvlAo5Qsi_ACZiXDmwMXzqgmFByObT9-yJY,3370
+pip/_vendor/rich/text.py,sha256=andXaxWW_wBveMiZZpd5viQwucWo7SPopcM3ZCQeO0c,45686
+pip/_vendor/rich/theme.py,sha256=GKNtQhDBZKAzDaY0vQVQQFzbc0uWfFe6CJXA-syT7zQ,3627
+pip/_vendor/rich/themes.py,sha256=0xgTLozfabebYtcJtDdC5QkX5IVUEaviqDUJJh4YVFk,102
+pip/_vendor/rich/traceback.py,sha256=6LkGguCEAxKv8v8xmKfMeYPPJ1UXUEHDv4726To6FiQ,26070
+pip/_vendor/rich/tree.py,sha256=BMbUYNjS9uodNPfvtY_odmU09GA5QzcMbQ5cJZhllQI,9169
+pip/_vendor/six.py,sha256=TOOfQi7nFGfMrIvtdr6wX4wyHH8M7aknmuLfo2cBBrM,34549
+pip/_vendor/tenacity/__init__.py,sha256=rjcWJVq5PcNJNC42rt-TAGGskM-RUEkZbDKu1ra7IPo,18364
+pip/_vendor/tenacity/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/tenacity/__pycache__/_asyncio.cpython-39.pyc,,
+pip/_vendor/tenacity/__pycache__/_utils.cpython-39.pyc,,
+pip/_vendor/tenacity/__pycache__/after.cpython-39.pyc,,
+pip/_vendor/tenacity/__pycache__/before.cpython-39.pyc,,
+pip/_vendor/tenacity/__pycache__/before_sleep.cpython-39.pyc,,
+pip/_vendor/tenacity/__pycache__/nap.cpython-39.pyc,,
+pip/_vendor/tenacity/__pycache__/retry.cpython-39.pyc,,
+pip/_vendor/tenacity/__pycache__/stop.cpython-39.pyc,,
+pip/_vendor/tenacity/__pycache__/tornadoweb.cpython-39.pyc,,
+pip/_vendor/tenacity/__pycache__/wait.cpython-39.pyc,,
+pip/_vendor/tenacity/_asyncio.py,sha256=HEb0BVJEeBJE9P-m9XBxh1KcaF96BwoeqkJCL5sbVcQ,3314
+pip/_vendor/tenacity/_utils.py,sha256=-y68scDcyoqvTJuJJ0GTfjdSCljEYlbCYvgk7nM4NdM,1944
+pip/_vendor/tenacity/after.py,sha256=dlmyxxFy2uqpLXDr838DiEd7jgv2AGthsWHGYcGYsaI,1496
+pip/_vendor/tenacity/before.py,sha256=7XtvRmO0dRWUp8SVn24OvIiGFj8-4OP5muQRUiWgLh0,1376
+pip/_vendor/tenacity/before_sleep.py,sha256=ThyDvqKU5yle_IvYQz_b6Tp6UjUS0PhVp6zgqYl9U6Y,1908
+pip/_vendor/tenacity/nap.py,sha256=fRWvnz1aIzbIq9Ap3gAkAZgDH6oo5zxMrU6ZOVByq0I,1383
+pip/_vendor/tenacity/retry.py,sha256=Cy504Ss3UrRV7lnYgvymF66WD1wJ2dbM869kDcjuDes,7550
+pip/_vendor/tenacity/stop.py,sha256=sKHmHaoSaW6sKu3dTxUVKr1-stVkY7lw4Y9yjZU30zQ,2790
+pip/_vendor/tenacity/tornadoweb.py,sha256=E8lWO2nwe6dJgoB-N2HhQprYLDLB_UdSgFnv-EN6wKE,2145
+pip/_vendor/tenacity/wait.py,sha256=tdLTESRm5E237VHG0SxCDXRa0DHKPKVq285kslHVURc,8011
+pip/_vendor/tomli/__init__.py,sha256=JhUwV66DB1g4Hvt1UQCVMdfCu-IgAV8FXmvDU9onxd4,396
+pip/_vendor/tomli/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/tomli/__pycache__/_parser.cpython-39.pyc,,
+pip/_vendor/tomli/__pycache__/_re.cpython-39.pyc,,
+pip/_vendor/tomli/__pycache__/_types.cpython-39.pyc,,
+pip/_vendor/tomli/_parser.py,sha256=g9-ENaALS-B8dokYpCuzUFalWlog7T-SIYMjLZSWrtM,22633
+pip/_vendor/tomli/_re.py,sha256=dbjg5ChZT23Ka9z9DHOXfdtSpPwUfdgMXnj8NOoly-w,2943
+pip/_vendor/tomli/_types.py,sha256=-GTG2VUqkpxwMqzmVO4F7ybKddIbAnuAHXfmWQcTi3Q,254
+pip/_vendor/typing_extensions.py,sha256=VKZ_nHsuzDbKOVUY2CTdavwBgfZ2EXRyluZHRzUYAbg,80114
+pip/_vendor/urllib3/__init__.py,sha256=iXLcYiJySn0GNbWOOZDDApgBL1JgP44EZ8i1760S8Mc,3333
+pip/_vendor/urllib3/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/urllib3/__pycache__/_collections.cpython-39.pyc,,
+pip/_vendor/urllib3/__pycache__/_version.cpython-39.pyc,,
+pip/_vendor/urllib3/__pycache__/connection.cpython-39.pyc,,
+pip/_vendor/urllib3/__pycache__/connectionpool.cpython-39.pyc,,
+pip/_vendor/urllib3/__pycache__/exceptions.cpython-39.pyc,,
+pip/_vendor/urllib3/__pycache__/fields.cpython-39.pyc,,
+pip/_vendor/urllib3/__pycache__/filepost.cpython-39.pyc,,
+pip/_vendor/urllib3/__pycache__/poolmanager.cpython-39.pyc,,
+pip/_vendor/urllib3/__pycache__/request.cpython-39.pyc,,
+pip/_vendor/urllib3/__pycache__/response.cpython-39.pyc,,
+pip/_vendor/urllib3/_collections.py,sha256=Rp1mVyBgc_UlAcp6M3at1skJBXR5J43NawRTvW2g_XY,10811
+pip/_vendor/urllib3/_version.py,sha256=JWE--BUVy7--9FsXILONIpQ43irftKGjT9j2H_fdF2M,64
+pip/_vendor/urllib3/connection.py,sha256=8976wL6sGeVMW0JnXvx5mD00yXu87uQjxtB9_VL8dx8,20070
+pip/_vendor/urllib3/connectionpool.py,sha256=vS4UaHLoR9_5aGLXSQ776y_jTxgqqjx0YsjkYksWGOo,39095
+pip/_vendor/urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-39.pyc,,
+pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-39.pyc,,
+pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-39.pyc,,
+pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-39.pyc,,
+pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-39.pyc,,
+pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-39.pyc,,
+pip/_vendor/urllib3/contrib/_appengine_environ.py,sha256=bDbyOEhW2CKLJcQqAKAyrEHN-aklsyHFKq6vF8ZFsmk,957
+pip/_vendor/urllib3/contrib/_securetransport/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-39.pyc,,
+pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-39.pyc,,
+pip/_vendor/urllib3/contrib/_securetransport/bindings.py,sha256=4Xk64qIkPBt09A5q-RIFUuDhNc9mXilVapm7WnYnzRw,17632
+pip/_vendor/urllib3/contrib/_securetransport/low_level.py,sha256=B2JBB2_NRP02xK6DCa1Pa9IuxrPwxzDzZbixQkb7U9M,13922
+pip/_vendor/urllib3/contrib/appengine.py,sha256=VR68eAVE137lxTgjBDwCna5UiBZTOKa01Aj_-5BaCz4,11036
+pip/_vendor/urllib3/contrib/ntlmpool.py,sha256=NlfkW7WMdW8ziqudopjHoW299og1BTWi0IeIibquFwk,4528
+pip/_vendor/urllib3/contrib/pyopenssl.py,sha256=hDJh4MhyY_p-oKlFcYcQaVQRDv6GMmBGuW9yjxyeejM,17081
+pip/_vendor/urllib3/contrib/securetransport.py,sha256=yhZdmVjY6PI6EeFbp7qYOp6-vp1Rkv2NMuOGaEj7pmc,34448
+pip/_vendor/urllib3/contrib/socks.py,sha256=aRi9eWXo9ZEb95XUxef4Z21CFlnnjbEiAo9HOseoMt4,7097
+pip/_vendor/urllib3/exceptions.py,sha256=0Mnno3KHTNfXRfY7638NufOPkUb6mXOm-Lqj-4x2w8A,8217
+pip/_vendor/urllib3/fields.py,sha256=kvLDCg_JmH1lLjUUEY_FLS8UhY7hBvDPuVETbY8mdrM,8579
+pip/_vendor/urllib3/filepost.py,sha256=5b_qqgRHVlL7uLtdAYBzBh-GHmU5AfJVt_2N0XS3PeY,2440
+pip/_vendor/urllib3/packages/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/urllib3/packages/__pycache__/six.cpython-39.pyc,,
+pip/_vendor/urllib3/packages/backports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-39.pyc,,
+pip/_vendor/urllib3/packages/backports/makefile.py,sha256=nbzt3i0agPVP07jqqgjhaYjMmuAi_W5E0EywZivVO8E,1417
+pip/_vendor/urllib3/packages/six.py,sha256=b9LM0wBXv7E7SrbCjAm4wwN-hrH-iNxv18LgWNMMKPo,34665
+pip/_vendor/urllib3/poolmanager.py,sha256=0KOOJECoeLYVjUHvv-0h4Oq3FFQQ2yb-Fnjkbj8gJO0,19786
+pip/_vendor/urllib3/request.py,sha256=ZFSIqX0C6WizixecChZ3_okyu7BEv0lZu1VT0s6h4SM,5985
+pip/_vendor/urllib3/response.py,sha256=fmDJAFkG71uFTn-sVSTh2Iw0WmcXQYqkbRjihvwBjU8,30641
+pip/_vendor/urllib3/util/__init__.py,sha256=JEmSmmqqLyaw8P51gUImZh8Gwg9i1zSe-DoqAitn2nc,1155
+pip/_vendor/urllib3/util/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/urllib3/util/__pycache__/connection.cpython-39.pyc,,
+pip/_vendor/urllib3/util/__pycache__/proxy.cpython-39.pyc,,
+pip/_vendor/urllib3/util/__pycache__/queue.cpython-39.pyc,,
+pip/_vendor/urllib3/util/__pycache__/request.cpython-39.pyc,,
+pip/_vendor/urllib3/util/__pycache__/response.cpython-39.pyc,,
+pip/_vendor/urllib3/util/__pycache__/retry.cpython-39.pyc,,
+pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-39.pyc,,
+pip/_vendor/urllib3/util/__pycache__/ssl_match_hostname.cpython-39.pyc,,
+pip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-39.pyc,,
+pip/_vendor/urllib3/util/__pycache__/timeout.cpython-39.pyc,,
+pip/_vendor/urllib3/util/__pycache__/url.cpython-39.pyc,,
+pip/_vendor/urllib3/util/__pycache__/wait.cpython-39.pyc,,
+pip/_vendor/urllib3/util/connection.py,sha256=5Lx2B1PW29KxBn2T0xkN1CBgRBa3gGVJBKoQoRogEVk,4901
+pip/_vendor/urllib3/util/proxy.py,sha256=zUvPPCJrp6dOF0N4GAVbOcl6o-4uXKSrGiTkkr5vUS4,1605
+pip/_vendor/urllib3/util/queue.py,sha256=nRgX8_eX-_VkvxoX096QWoz8Ps0QHUAExILCY_7PncM,498
+pip/_vendor/urllib3/util/request.py,sha256=C0OUt2tcU6LRiQJ7YYNP9GvPrSvl7ziIBekQ-5nlBZk,3997
+pip/_vendor/urllib3/util/response.py,sha256=GJpg3Egi9qaJXRwBh5wv-MNuRWan5BIu40oReoxWP28,3510
+pip/_vendor/urllib3/util/retry.py,sha256=4laWh0HpwGijLiBmdBIYtbhYekQnNzzhx2W9uys0RHA,22003
+pip/_vendor/urllib3/util/ssl_.py,sha256=X4-AqW91aYPhPx6-xbf66yHFQKbqqfC_5Zt4WkLX1Hc,17177
+pip/_vendor/urllib3/util/ssl_match_hostname.py,sha256=Ir4cZVEjmAk8gUAIHWSi7wtOO83UCYABY2xFD1Ql_WA,5758
+pip/_vendor/urllib3/util/ssltransport.py,sha256=NA-u5rMTrDFDFC8QzRKUEKMG0561hOD4qBTr3Z4pv6E,6895
+pip/_vendor/urllib3/util/timeout.py,sha256=QSbBUNOB9yh6AnDn61SrLQ0hg5oz0I9-uXEG91AJuIg,10003
+pip/_vendor/urllib3/util/url.py,sha256=HLCLEKt8D-QMioTNbneZSzGTGyUkns4w_lSJP1UzE2E,14298
+pip/_vendor/urllib3/util/wait.py,sha256=fOX0_faozG2P7iVojQoE1mbydweNyTcm-hXEfFrTtLI,5403
+pip/_vendor/vendor.txt,sha256=3i3Zr7_kRDD9UEva0I8YOMroCZ8xuZ9OWd_Q4jmazqE,476
+pip/_vendor/webencodings/__init__.py,sha256=qOBJIuPy_4ByYH6W_bNgJF-qYQ2DoU-dKsDu5yRWCXg,10579
+pip/_vendor/webencodings/__pycache__/__init__.cpython-39.pyc,,
+pip/_vendor/webencodings/__pycache__/labels.cpython-39.pyc,,
+pip/_vendor/webencodings/__pycache__/mklabels.cpython-39.pyc,,
+pip/_vendor/webencodings/__pycache__/tests.cpython-39.pyc,,
+pip/_vendor/webencodings/__pycache__/x_user_defined.cpython-39.pyc,,
+pip/_vendor/webencodings/labels.py,sha256=4AO_KxTddqGtrL9ns7kAPjb0CcN6xsCIxbK37HY9r3E,8979
+pip/_vendor/webencodings/mklabels.py,sha256=GYIeywnpaLnP0GSic8LFWgd0UVvO_l1Nc6YoF-87R_4,1305
+pip/_vendor/webencodings/tests.py,sha256=OtGLyjhNY1fvkW1GvLJ_FV9ZoqC9Anyjr7q3kxTbzNs,6563
+pip/_vendor/webencodings/x_user_defined.py,sha256=yOqWSdmpytGfUgh_Z6JYgDNhoc-BAHyyeeT15Fr42tM,4307
+pip/py.typed,sha256=EBVvvPRTn_eIpz5e5QztSCdrMX7Qwd7VP93RSoIlZ2I,286

+ 0 - 0
env/Lib/site-packages/pip-23.0.1.dist-info/REQUESTED


+ 5 - 0
env/Lib/site-packages/pip-23.0.1.dist-info/WHEEL

@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.38.4)
+Root-Is-Purelib: true
+Tag: py3-none-any
+

+ 4 - 0
env/Lib/site-packages/pip-23.0.1.dist-info/entry_points.txt

@@ -0,0 +1,4 @@
+[console_scripts]
+pip = pip._internal.cli.main:main
+pip3 = pip._internal.cli.main:main
+pip3.9 = pip._internal.cli.main:main

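The entry_points.txt above is what makes the pip, pip3 and pip3.9 console commands resolve to pip._internal.cli.main:main. As a hedged aside, the same mapping can be read back at runtime from pip's installed metadata via importlib.metadata (Python 3.8+); a minimal sketch:

    from importlib.metadata import distribution

    # Inspect pip's own console-script entry points (pip must be installed).
    for ep in distribution("pip").entry_points:
        if ep.group == "console_scripts":
            print(ep.name, "->", ep.value)
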
+ 1 - 0
env/Lib/site-packages/pip-23.0.1.dist-info/top_level.txt

@@ -0,0 +1 @@
+pip

+ 13 - 0
env/Lib/site-packages/pip/__init__.py

@@ -0,0 +1,13 @@
+from typing import List, Optional
+
+__version__ = "23.0.1"
+
+
+def main(args: Optional[List[str]] = None) -> int:
+    """This is an internal API only meant for use by pip's own console scripts.
+
+    For additional details, see https://github.com/pypa/pip/issues/7498.
+    """
+    from pip._internal.utils.entrypoints import _wrapper
+
+    return _wrapper(args)

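Note on the docstring above: pip.main() is an internal API kept only for pip's own console scripts. The approach pip's documentation recommends for driving pip from Python is a subprocess against the current interpreter; a minimal sketch (the package name is illustrative):

    import subprocess
    import sys

    # Run pip as a child process instead of calling pip.main() in-process,
    # which pip does not support as a public API.
    result = subprocess.run(
        [sys.executable, "-m", "pip", "install", "requests"],
        check=False,
    )
    print("pip exited with", result.returncode)
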
+ 31 - 0
env/Lib/site-packages/pip/__main__.py

@@ -0,0 +1,31 @@
+import os
+import sys
+import warnings
+
+# Remove '' and the current working directory from the first entry
+# of sys.path, if present, to avoid using the current directory
+# in the pip commands check, freeze, install, list and show
+# when invoked as python -m pip <command>
+if sys.path[0] in ("", os.getcwd()):
+    sys.path.pop(0)
+
+# If we are running from a wheel, add the wheel to sys.path.
+# This allows the usage: python pip-*.whl/pip install pip-*.whl
+if __package__ == "":
+    # __file__ is pip-*.whl/pip/__main__.py
+    # The first dirname call strips off '/__main__.py', the second strips off '/pip'.
+    # The resulting path is the path of the wheel itself.
+    # Add that to sys.path so we can import pip.
+    path = os.path.dirname(os.path.dirname(__file__))
+    sys.path.insert(0, path)
+
+if __name__ == "__main__":
+    # Work around the error reported in #9540, pending a proper fix.
+    # Note: It is essential the warning filter is set *before* importing
+    #       pip, as the deprecation happens at import time, not runtime.
+    warnings.filterwarnings(
+        "ignore", category=DeprecationWarning, module=".*packaging\\.version"
+    )
+    from pip._internal.cli.main import main as _main
+
+    sys.exit(_main())

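The sys.path guard at the top of __main__.py exists because "python -m" prepends the current working directory to sys.path, so files in the invocation directory could shadow installed modules while pip inspects the environment. A quick standalone way to observe what the guard checks for (a sketch, not pip code):

    import os
    import sys

    # Under "python -m some_module", sys.path[0] is '' or the current working
    # directory; under "python script.py" it is the script's directory instead.
    print("sys.path[0]:", repr(sys.path[0]))
    print("points at cwd:", sys.path[0] in ("", os.getcwd()))
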
+ 50 - 0
env/Lib/site-packages/pip/__pip-runner__.py

@@ -0,0 +1,50 @@
+"""Execute exactly this copy of pip, within a different environment.
+
+This file is named as it is to ensure that this module can't be imported via
+an import statement.
+"""
+
+# /!\ This version compatibility check section must be Python 2 compatible. /!\
+
+import sys
+
+# Copied from setup.py
+PYTHON_REQUIRES = (3, 7)
+
+
+def version_str(version):  # type: ignore
+    return ".".join(str(v) for v in version)
+
+
+if sys.version_info[:2] < PYTHON_REQUIRES:
+    raise SystemExit(
+        "This version of pip does not support python {} (requires >={}).".format(
+            version_str(sys.version_info[:2]), version_str(PYTHON_REQUIRES)
+        )
+    )
+
+# From here on, we can use Python 3 features, but the syntax must remain
+# Python 2 compatible.
+
+import runpy  # noqa: E402
+from importlib.machinery import PathFinder  # noqa: E402
+from os.path import dirname  # noqa: E402
+
+PIP_SOURCES_ROOT = dirname(dirname(__file__))
+
+
+class PipImportRedirectingFinder:
+    @classmethod
+    def find_spec(cls, fullname, path=None, target=None):  # type: ignore
+        if fullname != "pip":
+            return None
+
+        spec = PathFinder.find_spec(fullname, [PIP_SOURCES_ROOT], target)
+        assert spec, (PIP_SOURCES_ROOT, fullname)
+        return spec
+
+
+sys.meta_path.insert(0, PipImportRedirectingFinder())
+
+assert __name__ == "__main__", "Cannot run __pip-runner__.py as a non-main module"
+runpy.run_module("pip", run_name="__main__", alter_sys=True)

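PipImportRedirectingFinder pins every import of "pip" to this copy's sources by sitting first on sys.meta_path. The same redirect technique in isolation, pinning a hypothetical package mypkg to a chosen directory (the names and path are illustrative, not pip's):

    import sys
    from importlib.machinery import PathFinder

    PINNED_ROOT = "/path/to/vendored/sources"  # hypothetical location


    class RedirectingFinder:
        @classmethod
        def find_spec(cls, fullname, path=None, target=None):
            # Only intercept the one package being pinned; defer everything else.
            if fullname != "mypkg":
                return None
            # Resolve "mypkg" against the pinned root instead of normal sys.path.
            return PathFinder.find_spec(fullname, [PINNED_ROOT], target)


    # Insert at the front so this finder wins over the default path finder.
    sys.meta_path.insert(0, RedirectingFinder())
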
BIN=BIN
env/Lib/site-packages/pip/__pycache__/__init__.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/pip/__pycache__/__main__.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/pip/__pycache__/__pip-runner__.cpython-39.pyc


+ 19 - 0
env/Lib/site-packages/pip/_internal/__init__.py

@@ -0,0 +1,19 @@
+from typing import List, Optional
+
+import pip._internal.utils.inject_securetransport  # noqa
+from pip._internal.utils import _log
+
+# init_logging() must be called before any call to logging.getLogger()
+# which happens at import of most modules.
+_log.init_logging()
+
+
+def main(args: Optional[List[str]] = None) -> int:
+    """This is preserved for old console scripts that may still be referencing
+    it.
+
+    For additional details, see https://github.com/pypa/pip/issues/7498.
+    """
+    from pip._internal.utils.entrypoints import _wrapper
+
+    return _wrapper(args)

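The ordering comment above matters because logging.getLogger() caches logger objects with whatever logger class is active when they are first created. A standalone illustration of that constraint (the custom class here is illustrative; pip's own _log module defines its own):

    import logging


    class CustomLogger(logging.Logger):
        def verbose(self, msg, *args, **kwargs):
            self.log(15, msg, *args, **kwargs)


    # setLoggerClass only affects loggers created afterwards; loggers already
    # returned by getLogger() keep their original class. Hence: configure first.
    logging.setLoggerClass(CustomLogger)
    logger = logging.getLogger("demo")
    assert isinstance(logger, CustomLogger)
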
BIN=BIN
env/Lib/site-packages/pip/_internal/__pycache__/__init__.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/pip/_internal/__pycache__/build_env.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/pip/_internal/__pycache__/cache.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/pip/_internal/__pycache__/configuration.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/pip/_internal/__pycache__/exceptions.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/pip/_internal/__pycache__/main.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/pip/_internal/__pycache__/pyproject.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-39.pyc


+ 311 - 0
env/Lib/site-packages/pip/_internal/build_env.py

@@ -0,0 +1,311 @@
+"""Build Environment used for isolation during sdist building
+"""
+
+import logging
+import os
+import pathlib
+import site
+import sys
+import textwrap
+from collections import OrderedDict
+from types import TracebackType
+from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple, Type, Union
+
+from pip._vendor.certifi import where
+from pip._vendor.packaging.requirements import Requirement
+from pip._vendor.packaging.version import Version
+
+from pip import __file__ as pip_location
+from pip._internal.cli.spinners import open_spinner
+from pip._internal.locations import get_platlib, get_purelib, get_scheme
+from pip._internal.metadata import get_default_environment, get_environment
+from pip._internal.utils.subprocess import call_subprocess
+from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds
+
+if TYPE_CHECKING:
+    from pip._internal.index.package_finder import PackageFinder
+
+logger = logging.getLogger(__name__)
+
+
+def _dedup(a: str, b: str) -> Union[Tuple[str], Tuple[str, str]]:
+    return (a, b) if a != b else (a,)
+
+
+class _Prefix:
+    def __init__(self, path: str) -> None:
+        self.path = path
+        self.setup = False
+        scheme = get_scheme("", prefix=path)
+        self.bin_dir = scheme.scripts
+        self.lib_dirs = _dedup(scheme.purelib, scheme.platlib)
+
+
+def get_runnable_pip() -> str:
+    """Get a file to pass to a Python executable, to run the currently-running pip.
+
+    This is used to run a pip subprocess, for installing requirements into the build
+    environment.
+    """
+    source = pathlib.Path(pip_location).resolve().parent
+
+    if not source.is_dir():
+        # This would happen if someone is using pip from inside a zip file. In that
+        # case, we can use that directly.
+        return str(source)
+
+    return os.fsdecode(source / "__pip-runner__.py")
+
+
+def _get_system_sitepackages() -> Set[str]:
+    """Get system site packages
+
+    Usually from site.getsitepackages,
+    but falls back on `get_purelib()`/`get_platlib()` if unavailable
+    (e.g. in a virtualenv created by virtualenv<20).
+
+    Returns a normalized set of strings.
+    """
+    if hasattr(site, "getsitepackages"):
+        system_sites = site.getsitepackages()
+    else:
+        # virtualenv < 20 overwrites site.py without getsitepackages;
+        # fall back on get_purelib/get_platlib.
+        # This is known to miss things, but shouldn't in the cases
+        # where getsitepackages() has been removed (inside a virtualenv).
+        system_sites = [get_purelib(), get_platlib()]
+    return {os.path.normcase(path) for path in system_sites}
+
+
+class BuildEnvironment:
+    """Creates and manages an isolated environment to install build deps"""
+
+    def __init__(self) -> None:
+        temp_dir = TempDirectory(kind=tempdir_kinds.BUILD_ENV, globally_managed=True)
+
+        self._prefixes = OrderedDict(
+            (name, _Prefix(os.path.join(temp_dir.path, name)))
+            for name in ("normal", "overlay")
+        )
+
+        self._bin_dirs: List[str] = []
+        self._lib_dirs: List[str] = []
+        for prefix in reversed(list(self._prefixes.values())):
+            self._bin_dirs.append(prefix.bin_dir)
+            self._lib_dirs.extend(prefix.lib_dirs)
+
+        # Customize site to:
+        # - ensure .pth files are honored
+        # - prevent access to system site packages
+        system_sites = _get_system_sitepackages()
+
+        self._site_dir = os.path.join(temp_dir.path, "site")
+        if not os.path.exists(self._site_dir):
+            os.mkdir(self._site_dir)
+        with open(
+            os.path.join(self._site_dir, "sitecustomize.py"), "w", encoding="utf-8"
+        ) as fp:
+            fp.write(
+                textwrap.dedent(
+                    """
+                import os, site, sys
+
+                # First, drop system-sites related paths.
+                original_sys_path = sys.path[:]
+                known_paths = set()
+                for path in {system_sites!r}:
+                    site.addsitedir(path, known_paths=known_paths)
+                system_paths = set(
+                    os.path.normcase(path)
+                    for path in sys.path[len(original_sys_path):]
+                )
+                original_sys_path = [
+                    path for path in original_sys_path
+                    if os.path.normcase(path) not in system_paths
+                ]
+                sys.path = original_sys_path
+
+                # Second, add lib directories,
+                # ensuring .pth files are processed.
+                for path in {lib_dirs!r}:
+                    assert path not in sys.path
+                    site.addsitedir(path)
+                """
+                ).format(system_sites=system_sites, lib_dirs=self._lib_dirs)
+            )
+
+    def __enter__(self) -> None:
+        self._save_env = {
+            name: os.environ.get(name, None)
+            for name in ("PATH", "PYTHONNOUSERSITE", "PYTHONPATH")
+        }
+
+        path = self._bin_dirs[:]
+        old_path = self._save_env["PATH"]
+        if old_path:
+            path.extend(old_path.split(os.pathsep))
+
+        pythonpath = [self._site_dir]
+
+        os.environ.update(
+            {
+                "PATH": os.pathsep.join(path),
+                "PYTHONNOUSERSITE": "1",
+                "PYTHONPATH": os.pathsep.join(pythonpath),
+            }
+        )
+
+    def __exit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_val: Optional[BaseException],
+        exc_tb: Optional[TracebackType],
+    ) -> None:
+        for varname, old_value in self._save_env.items():
+            if old_value is None:
+                os.environ.pop(varname, None)
+            else:
+                os.environ[varname] = old_value
+
+    def check_requirements(
+        self, reqs: Iterable[str]
+    ) -> Tuple[Set[Tuple[str, str]], Set[str]]:
+        """Return 2 sets:
+        - conflicting requirements: set of (installed, wanted) reqs tuples
+        - missing requirements: set of reqs
+        """
+        missing = set()
+        conflicting = set()
+        if reqs:
+            env = (
+                get_environment(self._lib_dirs)
+                if hasattr(self, "_lib_dirs")
+                else get_default_environment()
+            )
+            for req_str in reqs:
+                req = Requirement(req_str)
+                # We're explicitly evaluating with an empty extra value, since build
+                # environments are not provided any mechanism to select specific extras.
+                if req.marker is not None and not req.marker.evaluate({"extra": ""}):
+                    continue
+                dist = env.get_distribution(req.name)
+                if not dist:
+                    missing.add(req_str)
+                    continue
+                if isinstance(dist.version, Version):
+                    installed_req_str = f"{req.name}=={dist.version}"
+                else:
+                    installed_req_str = f"{req.name}==={dist.version}"
+                if not req.specifier.contains(dist.version, prereleases=True):
+                    conflicting.add((installed_req_str, req_str))
+                # FIXME: Consider direct URL?
+        return conflicting, missing
+
+    def install_requirements(
+        self,
+        finder: "PackageFinder",
+        requirements: Iterable[str],
+        prefix_as_string: str,
+        *,
+        kind: str,
+    ) -> None:
+        prefix = self._prefixes[prefix_as_string]
+        assert not prefix.setup
+        prefix.setup = True
+        if not requirements:
+            return
+        self._install_requirements(
+            get_runnable_pip(),
+            finder,
+            requirements,
+            prefix,
+            kind=kind,
+        )
+
+    @staticmethod
+    def _install_requirements(
+        pip_runnable: str,
+        finder: "PackageFinder",
+        requirements: Iterable[str],
+        prefix: _Prefix,
+        *,
+        kind: str,
+    ) -> None:
+        args: List[str] = [
+            sys.executable,
+            pip_runnable,
+            "install",
+            "--ignore-installed",
+            "--no-user",
+            "--prefix",
+            prefix.path,
+            "--no-warn-script-location",
+        ]
+        if logger.getEffectiveLevel() <= logging.DEBUG:
+            args.append("-v")
+        for format_control in ("no_binary", "only_binary"):
+            formats = getattr(finder.format_control, format_control)
+            args.extend(
+                (
+                    "--" + format_control.replace("_", "-"),
+                    ",".join(sorted(formats or {":none:"})),
+                )
+            )
+
+        index_urls = finder.index_urls
+        if index_urls:
+            args.extend(["-i", index_urls[0]])
+            for extra_index in index_urls[1:]:
+                args.extend(["--extra-index-url", extra_index])
+        else:
+            args.append("--no-index")
+        for link in finder.find_links:
+            args.extend(["--find-links", link])
+
+        for host in finder.trusted_hosts:
+            args.extend(["--trusted-host", host])
+        if finder.allow_all_prereleases:
+            args.append("--pre")
+        if finder.prefer_binary:
+            args.append("--prefer-binary")
+        args.append("--")
+        args.extend(requirements)
+        extra_environ = {"_PIP_STANDALONE_CERT": where()}
+        with open_spinner(f"Installing {kind}") as spinner:
+            call_subprocess(
+                args,
+                command_desc=f"pip subprocess to install {kind}",
+                spinner=spinner,
+                extra_environ=extra_environ,
+            )
+
+
+class NoOpBuildEnvironment(BuildEnvironment):
+    """A no-op drop-in replacement for BuildEnvironment"""
+
+    def __init__(self) -> None:
+        pass
+
+    def __enter__(self) -> None:
+        pass
+
+    def __exit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_val: Optional[BaseException],
+        exc_tb: Optional[TracebackType],
+    ) -> None:
+        pass
+
+    def cleanup(self) -> None:
+        pass
+
+    def install_requirements(
+        self,
+        finder: "PackageFinder",
+        requirements: Iterable[str],
+        prefix_as_string: str,
+        *,
+        kind: str,
+    ) -> None:
+        raise NotImplementedError()

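BuildEnvironment activates itself purely through environment variables: __enter__ saves PATH, PYTHONNOUSERSITE and PYTHONPATH before overriding them, and __exit__ restores or removes each one. That save-and-restore pattern, extracted into a self-contained context manager (a sketch under the assumption that only whole variables need restoring):

    import os
    from contextlib import contextmanager
    from typing import Iterator


    @contextmanager
    def scoped_environ(**overrides: str) -> Iterator[None]:
        """Temporarily set environment variables, restoring old values on exit."""
        saved = {name: os.environ.get(name) for name in overrides}
        os.environ.update(overrides)
        try:
            yield
        finally:
            for name, old in saved.items():
                if old is None:
                    os.environ.pop(name, None)  # variable did not exist before
                else:
                    os.environ[name] = old


    with scoped_environ(PYTHONNOUSERSITE="1"):
        print(os.environ["PYTHONNOUSERSITE"])  # "1" inside the block only
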
+ 293 - 0
env/Lib/site-packages/pip/_internal/cache.py

@@ -0,0 +1,293 @@
+"""Cache Management
+"""
+
+import hashlib
+import json
+import logging
+import os
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Set
+
+from pip._vendor.packaging.tags import Tag, interpreter_name, interpreter_version
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.exceptions import InvalidWheelFilename
+from pip._internal.models.direct_url import DirectUrl
+from pip._internal.models.format_control import FormatControl
+from pip._internal.models.link import Link
+from pip._internal.models.wheel import Wheel
+from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds
+from pip._internal.utils.urls import path_to_url
+
+logger = logging.getLogger(__name__)
+
+ORIGIN_JSON_NAME = "origin.json"
+
+
+def _hash_dict(d: Dict[str, str]) -> str:
+    """Return a stable sha224 of a dictionary."""
+    s = json.dumps(d, sort_keys=True, separators=(",", ":"), ensure_ascii=True)
+    return hashlib.sha224(s.encode("ascii")).hexdigest()
+
+
+class Cache:
+    """An abstract class - provides cache directories for data from links
+
+
+    :param cache_dir: The root of the cache.
+    :param format_control: An object of FormatControl class to limit
+        binaries being read from the cache.
+    :param allowed_formats: which formats of files the cache should store.
+        ('binary' and 'source' are the only allowed values)
+    """
+
+    def __init__(
+        self, cache_dir: str, format_control: FormatControl, allowed_formats: Set[str]
+    ) -> None:
+        super().__init__()
+        assert not cache_dir or os.path.isabs(cache_dir)
+        self.cache_dir = cache_dir or None
+        self.format_control = format_control
+        self.allowed_formats = allowed_formats
+
+        _valid_formats = {"source", "binary"}
+        assert self.allowed_formats.union(_valid_formats) == _valid_formats
+
+    def _get_cache_path_parts(self, link: Link) -> List[str]:
+        """Get parts of part that must be os.path.joined with cache_dir"""
+
+        # We want to generate a URL to use as our cache key; we don't want to
+        # just re-use the URL because it might have other items in the fragment,
+        # and we don't care about those.
+        key_parts = {"url": link.url_without_fragment}
+        if link.hash_name is not None and link.hash is not None:
+            key_parts[link.hash_name] = link.hash
+        if link.subdirectory_fragment:
+            key_parts["subdirectory"] = link.subdirectory_fragment
+
+        # Include interpreter name, major and minor version in cache key
+        # to cope with ill-behaved sdists that build a different wheel
+        # depending on the python version their setup.py is being run on,
+        # and don't encode the difference in compatibility tags.
+        # https://github.com/pypa/pip/issues/7296
+        key_parts["interpreter_name"] = interpreter_name()
+        key_parts["interpreter_version"] = interpreter_version()
+
+        # Encode our key URL with sha224; we use it because it has similar
+        # security properties to sha256 but a shorter total output (and is
+        # thus less secure). However, the difference doesn't matter for
+        # our use case here.
+        hashed = _hash_dict(key_parts)
+
+        # We nest the directories to avoid creating a huge number of top-level
+        # directories, where we might run out of subdirectories on some
+        # filesystems.
+        parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]
+
+        return parts
+
+    def _get_candidates(self, link: Link, canonical_package_name: str) -> List[Any]:
+        can_not_cache = not self.cache_dir or not canonical_package_name or not link
+        if can_not_cache:
+            return []
+
+        formats = self.format_control.get_allowed_formats(canonical_package_name)
+        if not self.allowed_formats.intersection(formats):
+            return []
+
+        candidates = []
+        path = self.get_path_for_link(link)
+        if os.path.isdir(path):
+            for candidate in os.listdir(path):
+                candidates.append((candidate, path))
+        return candidates
+
+    def get_path_for_link(self, link: Link) -> str:
+        """Return a directory to store cached items in for link."""
+        raise NotImplementedError()
+
+    def get(
+        self,
+        link: Link,
+        package_name: Optional[str],
+        supported_tags: List[Tag],
+    ) -> Link:
+        """Returns a link to a cached item if it exists, otherwise returns the
+        passed link.
+        """
+        raise NotImplementedError()
+
+
+class SimpleWheelCache(Cache):
+    """A cache of wheels for future installs."""
+
+    def __init__(self, cache_dir: str, format_control: FormatControl) -> None:
+        super().__init__(cache_dir, format_control, {"binary"})
+
+    def get_path_for_link(self, link: Link) -> str:
+        """Return a directory to store cached wheels for link
+
+        Because there are M wheels for any one sdist, we provide a directory
+        to cache them in, and then consult that directory when looking up
+        cache hits.
+
+        We only insert things into the cache if they have plausible version
+        numbers, so that we don't contaminate the cache with things that were
+        not unique. E.g. ./package might have dozens of installs done for it
+        and build a version of 0.0...and if we built and cached a wheel, we'd
+        end up using the same wheel even if the source has been edited.
+
+        :param link: The link of the sdist for which this will cache wheels.
+        """
+        parts = self._get_cache_path_parts(link)
+        assert self.cache_dir
+        # Store wheels within the root cache_dir
+        return os.path.join(self.cache_dir, "wheels", *parts)
+
+    def get(
+        self,
+        link: Link,
+        package_name: Optional[str],
+        supported_tags: List[Tag],
+    ) -> Link:
+        candidates = []
+
+        if not package_name:
+            return link
+
+        canonical_package_name = canonicalize_name(package_name)
+        for wheel_name, wheel_dir in self._get_candidates(link, canonical_package_name):
+            try:
+                wheel = Wheel(wheel_name)
+            except InvalidWheelFilename:
+                continue
+            if canonicalize_name(wheel.name) != canonical_package_name:
+                logger.debug(
+                    "Ignoring cached wheel %s for %s as it "
+                    "does not match the expected distribution name %s.",
+                    wheel_name,
+                    link,
+                    package_name,
+                )
+                continue
+            if not wheel.supported(supported_tags):
+                # Built for a different python/arch/etc
+                continue
+            candidates.append(
+                (
+                    wheel.support_index_min(supported_tags),
+                    wheel_name,
+                    wheel_dir,
+                )
+            )
+
+        if not candidates:
+            return link
+
+        _, wheel_name, wheel_dir = min(candidates)
+        return Link(path_to_url(os.path.join(wheel_dir, wheel_name)))
+
+
+class EphemWheelCache(SimpleWheelCache):
+    """A SimpleWheelCache that creates it's own temporary cache directory"""
+
+    def __init__(self, format_control: FormatControl) -> None:
+        self._temp_dir = TempDirectory(
+            kind=tempdir_kinds.EPHEM_WHEEL_CACHE,
+            globally_managed=True,
+        )
+
+        super().__init__(self._temp_dir.path, format_control)
+
+
+class CacheEntry:
+    def __init__(
+        self,
+        link: Link,
+        persistent: bool,
+    ):
+        self.link = link
+        self.persistent = persistent
+        self.origin: Optional[DirectUrl] = None
+        origin_direct_url_path = Path(self.link.file_path).parent / ORIGIN_JSON_NAME
+        if origin_direct_url_path.exists():
+            self.origin = DirectUrl.from_json(origin_direct_url_path.read_text())
+
+
+class WheelCache(Cache):
+    """Wraps EphemWheelCache and SimpleWheelCache into a single Cache
+
+    This Cache allows for graceful degradation: the simple wheel cache is
+    consulted first, and the ephem wheel cache is used as a fallback when a
+    link is not found there.
+    """
+
+    def __init__(
+        self, cache_dir: str, format_control: Optional[FormatControl] = None
+    ) -> None:
+        if format_control is None:
+            format_control = FormatControl()
+        super().__init__(cache_dir, format_control, {"binary"})
+        self._wheel_cache = SimpleWheelCache(cache_dir, format_control)
+        self._ephem_cache = EphemWheelCache(format_control)
+
+    def get_path_for_link(self, link: Link) -> str:
+        return self._wheel_cache.get_path_for_link(link)
+
+    def get_ephem_path_for_link(self, link: Link) -> str:
+        return self._ephem_cache.get_path_for_link(link)
+
+    def get(
+        self,
+        link: Link,
+        package_name: Optional[str],
+        supported_tags: List[Tag],
+    ) -> Link:
+        cache_entry = self.get_cache_entry(link, package_name, supported_tags)
+        if cache_entry is None:
+            return link
+        return cache_entry.link
+
+    def get_cache_entry(
+        self,
+        link: Link,
+        package_name: Optional[str],
+        supported_tags: List[Tag],
+    ) -> Optional[CacheEntry]:
+        """Returns a CacheEntry with a link to a cached item if it exists or
+        None. The cache entry indicates if the item was found in the persistent
+        or ephemeral cache.
+        """
+        retval = self._wheel_cache.get(
+            link=link,
+            package_name=package_name,
+            supported_tags=supported_tags,
+        )
+        if retval is not link:
+            return CacheEntry(retval, persistent=True)
+
+        retval = self._ephem_cache.get(
+            link=link,
+            package_name=package_name,
+            supported_tags=supported_tags,
+        )
+        if retval is not link:
+            return CacheEntry(retval, persistent=False)
+
+        return None
+
+    @staticmethod
+    def record_download_origin(cache_dir: str, download_info: DirectUrl) -> None:
+        origin_path = Path(cache_dir) / ORIGIN_JSON_NAME
+        if origin_path.is_file():
+            origin = DirectUrl.from_json(origin_path.read_text())
+            # TODO: use DirectUrl.equivalent when https://github.com/pypa/pip/pull/10564
+            # is merged.
+            if origin.url != download_info.url:
+                logger.warning(
+                    "Origin URL %s in cache entry %s does not match download URL %s. "
+                    "This is likely a pip bug or a cache corruption issue.",
+                    origin.url,
+                    cache_dir,
+                    download_info.url,
+                )
+        origin_path.write_text(download_info.to_json(), encoding="utf-8")
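
To make the directory sharding above concrete, here is a minimal sketch of how
a hashed cache key becomes a nested path. The hash_key_parts helper is a
hypothetical stand-in for the _hash_dict call in _get_cache_path_parts,
assuming a sha224-over-canonical-JSON scheme consistent with the comments above.

import hashlib
import json
import os


def hash_key_parts(key_parts: dict) -> str:
    # Serialize deterministically, then hash with sha224 as described above.
    payload = json.dumps(key_parts, sort_keys=True, separators=(",", ":"))
    return hashlib.sha224(payload.encode("ascii")).hexdigest()


hashed = hash_key_parts({"url": "https://files.example/pkg-1.0.tar.gz"})
# Shard into 2/2/2/remainder so no single directory grows huge.
parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]
print(os.path.join("wheels", *parts))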

+ 4 - 0
env/Lib/site-packages/pip/_internal/cli/__init__.py

@@ -0,0 +1,4 @@
+"""Subpackage containing all of pip's command line interface related code
+"""
+
+# This file intentionally does not import submodules

BIN=BIN
env/Lib/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/pip/_internal/cli/__pycache__/main.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/pip/_internal/cli/__pycache__/parser.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-39.pyc


BIN=BIN
env/Lib/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-39.pyc


+ 171 - 0
env/Lib/site-packages/pip/_internal/cli/autocompletion.py

@@ -0,0 +1,171 @@
+"""Logic that powers autocompletion installed by ``pip completion``.
+"""
+
+import optparse
+import os
+import sys
+from itertools import chain
+from typing import Any, Iterable, List, Optional
+
+from pip._internal.cli.main_parser import create_main_parser
+from pip._internal.commands import commands_dict, create_command
+from pip._internal.metadata import get_default_environment
+
+
+def autocomplete() -> None:
+    """Entry Point for completion of main and subcommand options."""
+    # Don't complete if user hasn't sourced bash_completion file.
+    if "PIP_AUTO_COMPLETE" not in os.environ:
+        return
+    cwords = os.environ["COMP_WORDS"].split()[1:]
+    cword = int(os.environ["COMP_CWORD"])
+    try:
+        current = cwords[cword - 1]
+    except IndexError:
+        current = ""
+
+    parser = create_main_parser()
+    subcommands = list(commands_dict)
+    options = []
+
+    # subcommand
+    subcommand_name: Optional[str] = None
+    for word in cwords:
+        if word in subcommands:
+            subcommand_name = word
+            break
+    # subcommand options
+    if subcommand_name is not None:
+        # special case: 'help' subcommand has no options
+        if subcommand_name == "help":
+            sys.exit(1)
+        # special case: list locally installed dists for show and uninstall
+        should_list_installed = not current.startswith("-") and subcommand_name in [
+            "show",
+            "uninstall",
+        ]
+        if should_list_installed:
+            env = get_default_environment()
+            lc = current.lower()
+            installed = [
+                dist.canonical_name
+                for dist in env.iter_installed_distributions(local_only=True)
+                if dist.canonical_name.startswith(lc)
+                and dist.canonical_name not in cwords[1:]
+            ]
+            # if there are no dists installed, fall back to option completion
+            if installed:
+                for dist in installed:
+                    print(dist)
+                sys.exit(1)
+
+        should_list_installables = (
+            not current.startswith("-") and subcommand_name == "install"
+        )
+        if should_list_installables:
+            for path in auto_complete_paths(current, "path"):
+                print(path)
+            sys.exit(1)
+
+        subcommand = create_command(subcommand_name)
+
+        for opt in subcommand.parser.option_list_all:
+            if opt.help != optparse.SUPPRESS_HELP:
+                for opt_str in opt._long_opts + opt._short_opts:
+                    options.append((opt_str, opt.nargs))
+
+        # filter out previously specified options from available options
+        prev_opts = [x.split("=")[0] for x in cwords[1 : cword - 1]]
+        options = [(x, v) for (x, v) in options if x not in prev_opts]
+        # filter options by current input
+        options = [(k, v) for k, v in options if k.startswith(current)]
+        # get completion type given cwords and available subcommand options
+        completion_type = get_path_completion_type(
+            cwords,
+            cword,
+            subcommand.parser.option_list_all,
+        )
+        # get completion files and directories if ``completion_type`` is
+        # ``<file>``, ``<dir>`` or ``<path>``
+        if completion_type:
+            paths = auto_complete_paths(current, completion_type)
+            options = [(path, 0) for path in paths]
+        for option in options:
+            opt_label = option[0]
+            # append '=' to options which require args
+            if option[1] and option[0][:2] == "--":
+                opt_label += "="
+            print(opt_label)
+    else:
+        # show main parser options only when necessary
+
+        opts = [i.option_list for i in parser.option_groups]
+        opts.append(parser.option_list)
+        flattened_opts = chain.from_iterable(opts)
+        if current.startswith("-"):
+            for opt in flattened_opts:
+                if opt.help != optparse.SUPPRESS_HELP:
+                    subcommands += opt._long_opts + opt._short_opts
+        else:
+            # get completion type given cwords and all available options
+            completion_type = get_path_completion_type(cwords, cword, flattened_opts)
+            if completion_type:
+                subcommands = list(auto_complete_paths(current, completion_type))
+
+        print(" ".join([x for x in subcommands if x.startswith(current)]))
+    sys.exit(1)
+
+
+def get_path_completion_type(
+    cwords: List[str], cword: int, opts: Iterable[Any]
+) -> Optional[str]:
+    """Get the type of path completion (``file``, ``dir``, ``path`` or None)
+
+    :param cwords: same as the environment variable ``COMP_WORDS``
+    :param cword: same as the environment variable ``COMP_CWORD``
+    :param opts: The available options to check
+    :return: path completion type (``file``, ``dir``, ``path`` or None)
+    """
+    if cword < 2 or not cwords[cword - 2].startswith("-"):
+        return None
+    for opt in opts:
+        if opt.help == optparse.SUPPRESS_HELP:
+            continue
+        for o in str(opt).split("/"):
+            if cwords[cword - 2].split("=")[0] == o:
+                if not opt.metavar or any(
+                    x in ("path", "file", "dir") for x in opt.metavar.split("/")
+                ):
+                    return opt.metavar
+    return None
+
+
+def auto_complete_paths(current: str, completion_type: str) -> Iterable[str]:
+    """If ``completion_type`` is ``file`` or ``path``, list all regular files
+    and directories starting with ``current``; otherwise only list directories
+    starting with ``current``.
+
+    :param current: The word to be completed
+    :param completion_type: path completion type (``file``, ``path`` or ``dir``)
+    :return: A generator of regular files and/or directories
+    """
+    directory, filename = os.path.split(current)
+    current_path = os.path.abspath(directory)
+    # Don't complete paths if they can't be accessed
+    if not os.access(current_path, os.R_OK):
+        return
+    filename = os.path.normcase(filename)
+    # list all files that start with ``filename``
+    file_list = (
+        x for x in os.listdir(current_path) if os.path.normcase(x).startswith(filename)
+    )
+    for f in file_list:
+        opt = os.path.join(current_path, f)
+        comp_file = os.path.normcase(os.path.join(directory, f))
+        # Complete regular files when the option does not expect ``<dir>``;
+        # complete directories when it expects ``<file>``, ``<path>`` or
+        # ``<dir>``.
+        if completion_type != "dir" and os.path.isfile(opt):
+            yield comp_file
+        elif os.path.isdir(opt):
+            yield os.path.join(comp_file, "")
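
As a rough illustration of the protocol autocomplete() implements: the shell
hook generated by ``pip completion`` exports COMP_WORDS, COMP_CWORD and
PIP_AUTO_COMPLETE before re-invoking pip, which prints one candidate per line
and exits. A hedged driver sketch, assuming a pip executable on PATH:

import os
import subprocess

env = dict(os.environ)
env.update(
    {
        "PIP_AUTO_COMPLETE": "1",   # gate checked at the top of autocomplete()
        "COMP_WORDS": "pip insta",  # the command line typed so far
        "COMP_CWORD": "1",          # index of the word being completed
    }
)
# autocomplete() prints its matches and then calls sys.exit(1), so ignore
# the return code and read stdout instead.
result = subprocess.run(["pip"], env=env, capture_output=True, text=True)
print(result.stdout)  # expected to include "install"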

+ 216 - 0
env/Lib/site-packages/pip/_internal/cli/base_command.py

@@ -0,0 +1,216 @@
+"""Base Command class, and related routines"""
+
+import functools
+import logging
+import logging.config
+import optparse
+import os
+import sys
+import traceback
+from optparse import Values
+from typing import Any, Callable, List, Optional, Tuple
+
+from pip._vendor.rich import traceback as rich_traceback
+
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.command_context import CommandContextMixIn
+from pip._internal.cli.parser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
+from pip._internal.cli.status_codes import (
+    ERROR,
+    PREVIOUS_BUILD_DIR_ERROR,
+    UNKNOWN_ERROR,
+    VIRTUALENV_NOT_FOUND,
+)
+from pip._internal.exceptions import (
+    BadCommand,
+    CommandError,
+    DiagnosticPipError,
+    InstallationError,
+    NetworkConnectionError,
+    PreviousBuildDirError,
+    UninstallationError,
+)
+from pip._internal.utils.filesystem import check_path_owner
+from pip._internal.utils.logging import BrokenStdoutLoggingError, setup_logging
+from pip._internal.utils.misc import get_prog, normalize_path
+from pip._internal.utils.temp_dir import TempDirectoryTypeRegistry as TempDirRegistry
+from pip._internal.utils.temp_dir import global_tempdir_manager, tempdir_registry
+from pip._internal.utils.virtualenv import running_under_virtualenv
+
+__all__ = ["Command"]
+
+logger = logging.getLogger(__name__)
+
+
+class Command(CommandContextMixIn):
+    usage: str = ""
+    ignore_require_venv: bool = False
+
+    def __init__(self, name: str, summary: str, isolated: bool = False) -> None:
+        super().__init__()
+
+        self.name = name
+        self.summary = summary
+        self.parser = ConfigOptionParser(
+            usage=self.usage,
+            prog=f"{get_prog()} {name}",
+            formatter=UpdatingDefaultsHelpFormatter(),
+            add_help_option=False,
+            name=name,
+            description=self.__doc__,
+            isolated=isolated,
+        )
+
+        self.tempdir_registry: Optional[TempDirRegistry] = None
+
+        # Commands should add options to this option group
+        optgroup_name = f"{self.name.capitalize()} Options"
+        self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
+
+        # Add the general options
+        gen_opts = cmdoptions.make_option_group(
+            cmdoptions.general_group,
+            self.parser,
+        )
+        self.parser.add_option_group(gen_opts)
+
+        self.add_options()
+
+    def add_options(self) -> None:
+        pass
+
+    def handle_pip_version_check(self, options: Values) -> None:
+        """
+        This is a no-op so that commands by default do not do the pip version
+        check.
+        """
+        # Make sure we do the pip version check if the index_group options
+        # are present.
+        assert not hasattr(options, "no_index")
+
+    def run(self, options: Values, args: List[str]) -> int:
+        raise NotImplementedError
+
+    def parse_args(self, args: List[str]) -> Tuple[Values, List[str]]:
+        # factored out for testability
+        return self.parser.parse_args(args)
+
+    def main(self, args: List[str]) -> int:
+        try:
+            with self.main_context():
+                return self._main(args)
+        finally:
+            logging.shutdown()
+
+    def _main(self, args: List[str]) -> int:
+        # We must initialize this before the tempdir manager, otherwise the
+        # configuration would not be accessible by the time we clean up the
+        # tempdir manager.
+        self.tempdir_registry = self.enter_context(tempdir_registry())
+        # Intentionally set as early as possible so globally-managed temporary
+        # directories are available to the rest of the code.
+        self.enter_context(global_tempdir_manager())
+
+        options, args = self.parse_args(args)
+
+        # Set verbosity so that it can be used elsewhere.
+        self.verbosity = options.verbose - options.quiet
+
+        level_number = setup_logging(
+            verbosity=self.verbosity,
+            no_color=options.no_color,
+            user_log_file=options.log,
+        )
+
+        # TODO: Try to get these passed down from the command,
+        #       without resorting to os.environ to hold them.
+        #       This also affects isolated builds, and it should.
+
+        if options.no_input:
+            os.environ["PIP_NO_INPUT"] = "1"
+
+        if options.exists_action:
+            os.environ["PIP_EXISTS_ACTION"] = " ".join(options.exists_action)
+
+        if options.require_venv and not self.ignore_require_venv:
+            # If a venv is required check if it can really be found
+            if not running_under_virtualenv():
+                logger.critical("Could not find an activated virtualenv (required).")
+                sys.exit(VIRTUALENV_NOT_FOUND)
+
+        if options.cache_dir:
+            options.cache_dir = normalize_path(options.cache_dir)
+            if not check_path_owner(options.cache_dir):
+                logger.warning(
+                    "The directory '%s' or its parent directory is not owned "
+                    "or is not writable by the current user. The cache "
+                    "has been disabled. Check the permissions and owner of "
+                    "that directory. If executing pip with sudo, you should "
+                    "use sudo's -H flag.",
+                    options.cache_dir,
+                )
+                options.cache_dir = None
+
+        def intercepts_unhandled_exc(
+            run_func: Callable[..., int]
+        ) -> Callable[..., int]:
+            @functools.wraps(run_func)
+            def exc_logging_wrapper(*args: Any) -> int:
+                try:
+                    status = run_func(*args)
+                    assert isinstance(status, int)
+                    return status
+                except DiagnosticPipError as exc:
+                    logger.error("[present-rich] %s", exc)
+                    logger.debug("Exception information:", exc_info=True)
+
+                    return ERROR
+                except PreviousBuildDirError as exc:
+                    logger.critical(str(exc))
+                    logger.debug("Exception information:", exc_info=True)
+
+                    return PREVIOUS_BUILD_DIR_ERROR
+                except (
+                    InstallationError,
+                    UninstallationError,
+                    BadCommand,
+                    NetworkConnectionError,
+                ) as exc:
+                    logger.critical(str(exc))
+                    logger.debug("Exception information:", exc_info=True)
+
+                    return ERROR
+                except CommandError as exc:
+                    logger.critical("%s", exc)
+                    logger.debug("Exception information:", exc_info=True)
+
+                    return ERROR
+                except BrokenStdoutLoggingError:
+                    # Bypass our logger and write any remaining messages to
+                    # stderr because stdout no longer works.
+                    print("ERROR: Pipe to stdout was broken", file=sys.stderr)
+                    if level_number <= logging.DEBUG:
+                        traceback.print_exc(file=sys.stderr)
+
+                    return ERROR
+                except KeyboardInterrupt:
+                    logger.critical("Operation cancelled by user")
+                    logger.debug("Exception information:", exc_info=True)
+
+                    return ERROR
+                except BaseException:
+                    logger.critical("Exception:", exc_info=True)
+
+                    return UNKNOWN_ERROR
+
+            return exc_logging_wrapper
+
+        try:
+            if not options.debug_mode:
+                run = intercepts_unhandled_exc(self.run)
+            else:
+                run = self.run
+                rich_traceback.install(show_locals=True)
+            return run(options, args)
+        finally:
+            self.handle_pip_version_check(options)
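
For orientation, a minimal hypothetical subclass of the Command base class
above; pip's real commands (see pip._internal.commands) follow the same shape.
HelloCommand and its --name option are invented for illustration, SUCCESS
comes from the status_codes module, and insert_option_group is assumed to be
available on ConfigOptionParser as pip's own commands use it.

from optparse import Values
from typing import List

from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import SUCCESS


class HelloCommand(Command):
    """Print a greeting."""

    usage = "%prog [options]"
    ignore_require_venv = True  # do not require an active virtualenv

    def add_options(self) -> None:
        # Options registered here land in the "<Name> Options" group.
        self.cmd_opts.add_option(
            "--name", dest="name", default="world", help="Who to greet."
        )
        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options: Values, args: List[str]) -> int:
        print(f"Hello, {options.name}!")
        return SUCCESS


# main() wires up logging, tempdirs and error handling before calling run():
print(HelloCommand("hello", "Print a greeting.").main(["--name", "pip"]))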

+ 1055 - 0
env/Lib/site-packages/pip/_internal/cli/cmdoptions.py

@@ -0,0 +1,1055 @@
+"""
+shared options and groups
+
+The principle here is to define options once, but *not* instantiate them
+globally. One reason being that options with action='append' can carry state
+between parses. pip parses general options twice internally, and shouldn't
+pass on state. To be consistent, all options will follow this design.
+"""
+
+# The following comment should be removed at some point in the future.
+# mypy: strict-optional=False
+
+import importlib.util
+import logging
+import os
+import textwrap
+from functools import partial
+from optparse import SUPPRESS_HELP, Option, OptionGroup, OptionParser, Values
+from textwrap import dedent
+from typing import Any, Callable, Dict, Optional, Tuple
+
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.cli.parser import ConfigOptionParser
+from pip._internal.exceptions import CommandError
+from pip._internal.locations import USER_CACHE_DIR, get_src_prefix
+from pip._internal.models.format_control import FormatControl
+from pip._internal.models.index import PyPI
+from pip._internal.models.target_python import TargetPython
+from pip._internal.utils.hashes import STRONG_HASHES
+from pip._internal.utils.misc import strtobool
+
+logger = logging.getLogger(__name__)
+
+
+def raise_option_error(parser: OptionParser, option: Option, msg: str) -> None:
+    """
+    Raise an option parsing error using parser.error().
+
+    Args:
+      parser: an OptionParser instance.
+      option: an Option instance.
+      msg: the error text.
+    """
+    msg = f"{option} error: {msg}"
+    msg = textwrap.fill(" ".join(msg.split()))
+    parser.error(msg)
+
+
+def make_option_group(group: Dict[str, Any], parser: ConfigOptionParser) -> OptionGroup:
+    """
+    Return an OptionGroup object
+    group  -- assumed to be dict with 'name' and 'options' keys
+    parser -- an optparse Parser
+    """
+    option_group = OptionGroup(parser, group["name"])
+    for option in group["options"]:
+        option_group.add_option(option())
+    return option_group
+
+
+def check_dist_restriction(options: Values, check_target: bool = False) -> None:
+    """Function for determining if custom platform options are allowed.
+
+    :param options: The OptionParser options.
+    :param check_target: Whether or not to check if --target is being used.
+    """
+    dist_restriction_set = any(
+        [
+            options.python_version,
+            options.platforms,
+            options.abis,
+            options.implementation,
+        ]
+    )
+
+    binary_only = FormatControl(set(), {":all:"})
+    sdist_dependencies_allowed = (
+        options.format_control != binary_only and not options.ignore_dependencies
+    )
+
+    # Installations or downloads using dist restrictions must not combine
+    # source distributions and dist-specific wheels, as they are not
+    # guaranteed to be locally compatible.
+    if dist_restriction_set and sdist_dependencies_allowed:
+        raise CommandError(
+            "When restricting platform and interpreter constraints using "
+            "--python-version, --platform, --abi, or --implementation, "
+            "either --no-deps must be set, or --only-binary=:all: must be "
+            "set and --no-binary must not be set (or must be set to "
+            ":none:)."
+        )
+
+    if check_target:
+        if dist_restriction_set and not options.target_dir:
+            raise CommandError(
+                "Can not use any platform or abi specific options unless "
+                "installing via '--target'"
+            )
+
+
+def _path_option_check(option: Option, opt: str, value: str) -> str:
+    return os.path.expanduser(value)
+
+
+def _package_name_option_check(option: Option, opt: str, value: str) -> str:
+    return canonicalize_name(value)
+
+
+class PipOption(Option):
+    TYPES = Option.TYPES + ("path", "package_name")
+    TYPE_CHECKER = Option.TYPE_CHECKER.copy()
+    TYPE_CHECKER["package_name"] = _package_name_option_check
+    TYPE_CHECKER["path"] = _path_option_check
+
+
+###########
+# options #
+###########
+
+help_: Callable[..., Option] = partial(
+    Option,
+    "-h",
+    "--help",
+    dest="help",
+    action="help",
+    help="Show help.",
+)
+
+debug_mode: Callable[..., Option] = partial(
+    Option,
+    "--debug",
+    dest="debug_mode",
+    action="store_true",
+    default=False,
+    help=(
+        "Let unhandled exceptions propagate outside the main subroutine, "
+        "instead of logging them to stderr."
+    ),
+)
+
+isolated_mode: Callable[..., Option] = partial(
+    Option,
+    "--isolated",
+    dest="isolated_mode",
+    action="store_true",
+    default=False,
+    help=(
+        "Run pip in an isolated mode, ignoring environment variables and user "
+        "configuration."
+    ),
+)
+
+require_virtualenv: Callable[..., Option] = partial(
+    Option,
+    "--require-virtualenv",
+    "--require-venv",
+    dest="require_venv",
+    action="store_true",
+    default=False,
+    help=(
+        "Allow pip to only run in a virtual environment; "
+        "exit with an error otherwise."
+    ),
+)
+
+override_externally_managed: Callable[..., Option] = partial(
+    Option,
+    "--break-system-packages",
+    dest="override_externally_managed",
+    action="store_true",
+    help="Allow pip to modify an EXTERNALLY-MANAGED Python installation",
+)
+
+python: Callable[..., Option] = partial(
+    Option,
+    "--python",
+    dest="python",
+    help="Run pip with the specified Python interpreter.",
+)
+
+verbose: Callable[..., Option] = partial(
+    Option,
+    "-v",
+    "--verbose",
+    dest="verbose",
+    action="count",
+    default=0,
+    help="Give more output. Option is additive, and can be used up to 3 times.",
+)
+
+no_color: Callable[..., Option] = partial(
+    Option,
+    "--no-color",
+    dest="no_color",
+    action="store_true",
+    default=False,
+    help="Suppress colored output.",
+)
+
+version: Callable[..., Option] = partial(
+    Option,
+    "-V",
+    "--version",
+    dest="version",
+    action="store_true",
+    help="Show version and exit.",
+)
+
+quiet: Callable[..., Option] = partial(
+    Option,
+    "-q",
+    "--quiet",
+    dest="quiet",
+    action="count",
+    default=0,
+    help=(
+        "Give less output. Option is additive, and can be used up to 3"
+        " times (corresponding to WARNING, ERROR, and CRITICAL logging"
+        " levels)."
+    ),
+)
+
+progress_bar: Callable[..., Option] = partial(
+    Option,
+    "--progress-bar",
+    dest="progress_bar",
+    type="choice",
+    choices=["on", "off"],
+    default="on",
+    help="Specify whether the progress bar should be used [on, off] (default: on)",
+)
+
+log: Callable[..., Option] = partial(
+    PipOption,
+    "--log",
+    "--log-file",
+    "--local-log",
+    dest="log",
+    metavar="path",
+    type="path",
+    help="Path to a verbose appending log.",
+)
+
+no_input: Callable[..., Option] = partial(
+    Option,
+    # Don't ask for input
+    "--no-input",
+    dest="no_input",
+    action="store_true",
+    default=False,
+    help="Disable prompting for input.",
+)
+
+proxy: Callable[..., Option] = partial(
+    Option,
+    "--proxy",
+    dest="proxy",
+    type="str",
+    default="",
+    help="Specify a proxy in the form scheme://[user:passwd@]proxy.server:port.",
+)
+
+retries: Callable[..., Option] = partial(
+    Option,
+    "--retries",
+    dest="retries",
+    type="int",
+    default=5,
+    help="Maximum number of retries each connection should attempt "
+    "(default %default times).",
+)
+
+timeout: Callable[..., Option] = partial(
+    Option,
+    "--timeout",
+    "--default-timeout",
+    metavar="sec",
+    dest="timeout",
+    type="float",
+    default=15,
+    help="Set the socket timeout (default %default seconds).",
+)
+
+
+def exists_action() -> Option:
+    return Option(
+        # Option when path already exist
+        "--exists-action",
+        dest="exists_action",
+        type="choice",
+        choices=["s", "i", "w", "b", "a"],
+        default=[],
+        action="append",
+        metavar="action",
+        help="Default action when a path already exists: "
+        "(s)witch, (i)gnore, (w)ipe, (b)ackup, (a)bort.",
+    )
+
+
+cert: Callable[..., Option] = partial(
+    PipOption,
+    "--cert",
+    dest="cert",
+    type="path",
+    metavar="path",
+    help=(
+        "Path to PEM-encoded CA certificate bundle. "
+        "If provided, overrides the default. "
+        "See 'SSL Certificate Verification' in pip documentation "
+        "for more information."
+    ),
+)
+
+client_cert: Callable[..., Option] = partial(
+    PipOption,
+    "--client-cert",
+    dest="client_cert",
+    type="path",
+    default=None,
+    metavar="path",
+    help="Path to SSL client certificate, a single file containing the "
+    "private key and the certificate in PEM format.",
+)
+
+index_url: Callable[..., Option] = partial(
+    Option,
+    "-i",
+    "--index-url",
+    "--pypi-url",
+    dest="index_url",
+    metavar="URL",
+    default=PyPI.simple_url,
+    help="Base URL of the Python Package Index (default %default). "
+    "This should point to a repository compliant with PEP 503 "
+    "(the simple repository API) or a local directory laid out "
+    "in the same format.",
+)
+
+
+def extra_index_url() -> Option:
+    return Option(
+        "--extra-index-url",
+        dest="extra_index_urls",
+        metavar="URL",
+        action="append",
+        default=[],
+        help="Extra URLs of package indexes to use in addition to "
+        "--index-url. Should follow the same rules as "
+        "--index-url.",
+    )
+
+
+no_index: Callable[..., Option] = partial(
+    Option,
+    "--no-index",
+    dest="no_index",
+    action="store_true",
+    default=False,
+    help="Ignore package index (only looking at --find-links URLs instead).",
+)
+
+
+def find_links() -> Option:
+    return Option(
+        "-f",
+        "--find-links",
+        dest="find_links",
+        action="append",
+        default=[],
+        metavar="url",
+        help="If a URL or path to an html file, then parse for links to "
+        "archives such as sdist (.tar.gz) or wheel (.whl) files. "
+        "If a local path or file:// URL that's a directory, "
+        "then look for archives in the directory listing. "
+        "Links to VCS project URLs are not supported.",
+    )
+
+
+def trusted_host() -> Option:
+    return Option(
+        "--trusted-host",
+        dest="trusted_hosts",
+        action="append",
+        metavar="HOSTNAME",
+        default=[],
+        help="Mark this host or host:port pair as trusted, even though it "
+        "does not have valid or any HTTPS.",
+    )
+
+
+def constraints() -> Option:
+    return Option(
+        "-c",
+        "--constraint",
+        dest="constraints",
+        action="append",
+        default=[],
+        metavar="file",
+        help="Constrain versions using the given constraints file. "
+        "This option can be used multiple times.",
+    )
+
+
+def requirements() -> Option:
+    return Option(
+        "-r",
+        "--requirement",
+        dest="requirements",
+        action="append",
+        default=[],
+        metavar="file",
+        help="Install from the given requirements file. "
+        "This option can be used multiple times.",
+    )
+
+
+def editable() -> Option:
+    return Option(
+        "-e",
+        "--editable",
+        dest="editables",
+        action="append",
+        default=[],
+        metavar="path/url",
+        help=(
+            "Install a project in editable mode (i.e. setuptools "
+            '"develop mode") from a local project path or a VCS url.'
+        ),
+    )
+
+
+def _handle_src(option: Option, opt_str: str, value: str, parser: OptionParser) -> None:
+    value = os.path.abspath(value)
+    setattr(parser.values, option.dest, value)
+
+
+src: Callable[..., Option] = partial(
+    PipOption,
+    "--src",
+    "--source",
+    "--source-dir",
+    "--source-directory",
+    dest="src_dir",
+    type="path",
+    metavar="dir",
+    default=get_src_prefix(),
+    action="callback",
+    callback=_handle_src,
+    help="Directory to check out editable projects into. "
+    'The default in a virtualenv is "<venv path>/src". '
+    'The default for global installs is "<current dir>/src".',
+)
+
+
+def _get_format_control(values: Values, option: Option) -> Any:
+    """Get a format_control object."""
+    return getattr(values, option.dest)
+
+
+def _handle_no_binary(
+    option: Option, opt_str: str, value: str, parser: OptionParser
+) -> None:
+    existing = _get_format_control(parser.values, option)
+    FormatControl.handle_mutual_excludes(
+        value,
+        existing.no_binary,
+        existing.only_binary,
+    )
+
+
+def _handle_only_binary(
+    option: Option, opt_str: str, value: str, parser: OptionParser
+) -> None:
+    existing = _get_format_control(parser.values, option)
+    FormatControl.handle_mutual_excludes(
+        value,
+        existing.only_binary,
+        existing.no_binary,
+    )
+
+
+def no_binary() -> Option:
+    format_control = FormatControl(set(), set())
+    return Option(
+        "--no-binary",
+        dest="format_control",
+        action="callback",
+        callback=_handle_no_binary,
+        type="str",
+        default=format_control,
+        help="Do not use binary packages. Can be supplied multiple times, and "
+        'each time adds to the existing value. Accepts either ":all:" to '
+        'disable all binary packages, ":none:" to empty the set (notice '
+        "the colons), or one or more package names with commas between "
+        "them (no colons). Note that some packages are tricky to compile "
+        "and may fail to install when this option is used on them.",
+    )
+
+
+def only_binary() -> Option:
+    format_control = FormatControl(set(), set())
+    return Option(
+        "--only-binary",
+        dest="format_control",
+        action="callback",
+        callback=_handle_only_binary,
+        type="str",
+        default=format_control,
+        help="Do not use source packages. Can be supplied multiple times, and "
+        'each time adds to the existing value. Accepts either ":all:" to '
+        'disable all source packages, ":none:" to empty the set, or one '
+        "or more package names with commas between them. Packages "
+        "without binary distributions will fail to install when this "
+        "option is used on them.",
+    )
+
+
+platforms: Callable[..., Option] = partial(
+    Option,
+    "--platform",
+    dest="platforms",
+    metavar="platform",
+    action="append",
+    default=None,
+    help=(
+        "Only use wheels compatible with <platform>. Defaults to the "
+        "platform of the running system. Use this option multiple times to "
+        "specify multiple platforms supported by the target interpreter."
+    ),
+)
+
+
+# This was made a separate function for unit-testing purposes.
+def _convert_python_version(value: str) -> Tuple[Tuple[int, ...], Optional[str]]:
+    """
+    Convert a version string like "3", "37", or "3.7.3" into a tuple of ints.
+
+    :return: A 2-tuple (version_info, error_msg), where `error_msg` is
+        non-None if and only if there was a parsing error.
+    """
+    if not value:
+        # The empty string is the same as not providing a value.
+        return (None, None)
+
+    parts = value.split(".")
+    if len(parts) > 3:
+        return ((), "at most three version parts are allowed")
+
+    if len(parts) == 1:
+        # Then we are in the case of "3" or "37".
+        value = parts[0]
+        if len(value) > 1:
+            parts = [value[0], value[1:]]
+
+    try:
+        version_info = tuple(int(part) for part in parts)
+    except ValueError:
+        return ((), "each version part must be an integer")
+
+    return (version_info, None)
+
+
+def _handle_python_version(
+    option: Option, opt_str: str, value: str, parser: OptionParser
+) -> None:
+    """
+    Handle a provided --python-version value.
+    """
+    version_info, error_msg = _convert_python_version(value)
+    if error_msg is not None:
+        msg = "invalid --python-version value: {!r}: {}".format(
+            value,
+            error_msg,
+        )
+        raise_option_error(parser, option=option, msg=msg)
+
+    parser.values.python_version = version_info
+
+
+python_version: Callable[..., Option] = partial(
+    Option,
+    "--python-version",
+    dest="python_version",
+    metavar="python_version",
+    action="callback",
+    callback=_handle_python_version,
+    type="str",
+    default=None,
+    help=dedent(
+        """\
+    The Python interpreter version to use for wheel and "Requires-Python"
+    compatibility checks. Defaults to a version derived from the running
+    interpreter. The version can be specified using up to three dot-separated
+    integers (e.g. "3" for 3.0.0, "3.7" for 3.7.0, or "3.7.3"). A major-minor
+    version can also be given as a string without dots (e.g. "37" for 3.7.0).
+    """
+    ),
+)
+
+
+implementation: Callable[..., Option] = partial(
+    Option,
+    "--implementation",
+    dest="implementation",
+    metavar="implementation",
+    default=None,
+    help=(
+        "Only use wheels compatible with Python "
+        "implementation <implementation>, e.g. 'pp', 'jy', 'cp', "
+        " or 'ip'. If not specified, then the current "
+        "interpreter implementation is used.  Use 'py' to force "
+        "implementation-agnostic wheels."
+    ),
+)
+
+
+abis: Callable[..., Option] = partial(
+    Option,
+    "--abi",
+    dest="abis",
+    metavar="abi",
+    action="append",
+    default=None,
+    help=(
+        "Only use wheels compatible with Python abi <abi>, e.g. 'pypy_41'. "
+        "If not specified, then the current interpreter abi tag is used. "
+        "Use this option multiple times to specify multiple abis supported "
+        "by the target interpreter. Generally you will need to specify "
+        "--implementation, --platform, and --python-version when using this "
+        "option."
+    ),
+)
+
+
+def add_target_python_options(cmd_opts: OptionGroup) -> None:
+    cmd_opts.add_option(platforms())
+    cmd_opts.add_option(python_version())
+    cmd_opts.add_option(implementation())
+    cmd_opts.add_option(abis())
+
+
+def make_target_python(options: Values) -> TargetPython:
+    target_python = TargetPython(
+        platforms=options.platforms,
+        py_version_info=options.python_version,
+        abis=options.abis,
+        implementation=options.implementation,
+    )
+
+    return target_python
+
+
+def prefer_binary() -> Option:
+    return Option(
+        "--prefer-binary",
+        dest="prefer_binary",
+        action="store_true",
+        default=False,
+        help="Prefer older binary packages over newer source packages.",
+    )
+
+
+cache_dir: Callable[..., Option] = partial(
+    PipOption,
+    "--cache-dir",
+    dest="cache_dir",
+    default=USER_CACHE_DIR,
+    metavar="dir",
+    type="path",
+    help="Store the cache data in <dir>.",
+)
+
+
+def _handle_no_cache_dir(
+    option: Option, opt: str, value: str, parser: OptionParser
+) -> None:
+    """
+    Process a value provided for the --no-cache-dir option.
+
+    This is an optparse.Option callback for the --no-cache-dir option.
+    """
+    # The value argument will be None if --no-cache-dir is passed via the
+    # command-line, since the option doesn't accept arguments.  However,
+    # the value can be non-None if the option is triggered e.g. by an
+    # environment variable, like PIP_NO_CACHE_DIR=true.
+    if value is not None:
+        # Then parse the string value to get argument error-checking.
+        try:
+            strtobool(value)
+        except ValueError as exc:
+            raise_option_error(parser, option=option, msg=str(exc))
+
+    # Originally, setting PIP_NO_CACHE_DIR to a value that strtobool()
+    # converted to 0 (like "false" or "no") caused cache_dir to be disabled
+    # rather than enabled (logic would say the latter).  Thus, we disable
+    # the cache directory not just on values that parse to True, but (for
+    # backwards compatibility reasons) also on values that parse to False.
+    # In other words, always set it to False if the option is provided in
+    # some (valid) form.
+    parser.values.cache_dir = False
+
+
+no_cache: Callable[..., Option] = partial(
+    Option,
+    "--no-cache-dir",
+    dest="cache_dir",
+    action="callback",
+    callback=_handle_no_cache_dir,
+    help="Disable the cache.",
+)
+
+no_deps: Callable[..., Option] = partial(
+    Option,
+    "--no-deps",
+    "--no-dependencies",
+    dest="ignore_dependencies",
+    action="store_true",
+    default=False,
+    help="Don't install package dependencies.",
+)
+
+ignore_requires_python: Callable[..., Option] = partial(
+    Option,
+    "--ignore-requires-python",
+    dest="ignore_requires_python",
+    action="store_true",
+    help="Ignore the Requires-Python information.",
+)
+
+no_build_isolation: Callable[..., Option] = partial(
+    Option,
+    "--no-build-isolation",
+    dest="build_isolation",
+    action="store_false",
+    default=True,
+    help="Disable isolation when building a modern source distribution. "
+    "Build dependencies specified by PEP 518 must be already installed "
+    "if this option is used.",
+)
+
+check_build_deps: Callable[..., Option] = partial(
+    Option,
+    "--check-build-dependencies",
+    dest="check_build_deps",
+    action="store_true",
+    default=False,
+    help="Check the build dependencies when PEP517 is used.",
+)
+
+
+def _handle_no_use_pep517(
+    option: Option, opt: str, value: str, parser: OptionParser
+) -> None:
+    """
+    Process a value provided for the --no-use-pep517 option.
+
+    This is an optparse.Option callback for the no_use_pep517 option.
+    """
+    # Since --no-use-pep517 doesn't accept arguments, the value argument
+    # will be None if --no-use-pep517 is passed via the command-line.
+    # However, the value can be non-None if the option is triggered e.g.
+    # by an environment variable, for example "PIP_NO_USE_PEP517=true".
+    if value is not None:
+        msg = """A value was passed for --no-use-pep517,
+        probably using either the PIP_NO_USE_PEP517 environment variable
+        or the "no-use-pep517" config file option. Use an appropriate value
+        of the PIP_USE_PEP517 environment variable or the "use-pep517"
+        config file option instead.
+        """
+        raise_option_error(parser, option=option, msg=msg)
+
+    # If user doesn't wish to use pep517, we check if setuptools is installed
+    # and raise error if it is not.
+    if not importlib.util.find_spec("setuptools"):
+        msg = "It is not possible to use --no-use-pep517 without setuptools installed."
+        raise_option_error(parser, option=option, msg=msg)
+
+    # Otherwise, --no-use-pep517 was passed via the command-line.
+    parser.values.use_pep517 = False
+
+
+use_pep517: Any = partial(
+    Option,
+    "--use-pep517",
+    dest="use_pep517",
+    action="store_true",
+    default=None,
+    help="Use PEP 517 for building source distributions "
+    "(use --no-use-pep517 to force legacy behaviour).",
+)
+
+no_use_pep517: Any = partial(
+    Option,
+    "--no-use-pep517",
+    dest="use_pep517",
+    action="callback",
+    callback=_handle_no_use_pep517,
+    default=None,
+    help=SUPPRESS_HELP,
+)
+
+
+def _handle_config_settings(
+    option: Option, opt_str: str, value: str, parser: OptionParser
+) -> None:
+    key, sep, val = value.partition("=")
+    if sep != "=":
+        parser.error(f"Arguments to {opt_str} must be of the form KEY=VAL")  # noqa
+    dest = getattr(parser.values, option.dest)
+    if dest is None:
+        dest = {}
+        setattr(parser.values, option.dest, dest)
+    dest[key] = val
+
+
+config_settings: Callable[..., Option] = partial(
+    Option,
+    "--config-settings",
+    dest="config_settings",
+    type=str,
+    action="callback",
+    callback=_handle_config_settings,
+    metavar="settings",
+    help="Configuration settings to be passed to the PEP 517 build backend. "
+    "Settings take the form KEY=VALUE. Use multiple --config-settings options "
+    "to pass multiple keys to the backend.",
+)
+
+install_options: Callable[..., Option] = partial(
+    Option,
+    "--install-option",
+    dest="install_options",
+    action="append",
+    metavar="options",
+    help="This option is deprecated. Using this option with location-changing "
+    "options may cause unexpected behavior. "
+    "Use pip-level options like --user, --prefix, --root, and --target.",
+)
+
+build_options: Callable[..., Option] = partial(
+    Option,
+    "--build-option",
+    dest="build_options",
+    metavar="options",
+    action="append",
+    help="Extra arguments to be supplied to 'setup.py bdist_wheel'.",
+)
+
+global_options: Callable[..., Option] = partial(
+    Option,
+    "--global-option",
+    dest="global_options",
+    action="append",
+    metavar="options",
+    help="Extra global options to be supplied to the setup.py "
+    "call before the install or bdist_wheel command.",
+)
+
+no_clean: Callable[..., Option] = partial(
+    Option,
+    "--no-clean",
+    action="store_true",
+    default=False,
+    help="Don't clean up build directories.",
+)
+
+pre: Callable[..., Option] = partial(
+    Option,
+    "--pre",
+    action="store_true",
+    default=False,
+    help="Include pre-release and development versions. By default, "
+    "pip only finds stable versions.",
+)
+
+disable_pip_version_check: Callable[..., Option] = partial(
+    Option,
+    "--disable-pip-version-check",
+    dest="disable_pip_version_check",
+    action="store_true",
+    default=False,
+    help="Don't periodically check PyPI to determine whether a new version "
+    "of pip is available for download. Implied with --no-index.",
+)
+
+root_user_action: Callable[..., Option] = partial(
+    Option,
+    "--root-user-action",
+    dest="root_user_action",
+    default="warn",
+    choices=["warn", "ignore"],
+    help="Action if pip is run as a root user. By default, a warning message is shown.",
+)
+
+
+def _handle_merge_hash(
+    option: Option, opt_str: str, value: str, parser: OptionParser
+) -> None:
+    """Given a value spelled "algo:digest", append the digest to a list
+    pointed to in a dict by the algo name."""
+    if not parser.values.hashes:
+        parser.values.hashes = {}
+    try:
+        algo, digest = value.split(":", 1)
+    except ValueError:
+        parser.error(
+            "Arguments to {} must be a hash name "  # noqa
+            "followed by a value, like --hash=sha256:"
+            "abcde...".format(opt_str)
+        )
+    if algo not in STRONG_HASHES:
+        parser.error(
+            "Allowed hash algorithms for {} are {}.".format(  # noqa
+                opt_str, ", ".join(STRONG_HASHES)
+            )
+        )
+    parser.values.hashes.setdefault(algo, []).append(digest)
+
+
+hash: Callable[..., Option] = partial(
+    Option,
+    "--hash",
+    # Hash values eventually end up in InstallRequirement.hashes due to
+    # __dict__ copying in process_line().
+    dest="hashes",
+    action="callback",
+    callback=_handle_merge_hash,
+    type="string",
+    help="Verify that the package's archive matches this "
+    "hash before installing. Example: --hash=sha256:abcdef...",
+)
+
+
+require_hashes: Callable[..., Option] = partial(
+    Option,
+    "--require-hashes",
+    dest="require_hashes",
+    action="store_true",
+    default=False,
+    help="Require a hash to check each requirement against, for "
+    "repeatable installs. This option is implied when any package in a "
+    "requirements file has a --hash option.",
+)
+
+
+list_path: Callable[..., Option] = partial(
+    PipOption,
+    "--path",
+    dest="path",
+    type="path",
+    action="append",
+    help="Restrict to the specified installation path for listing "
+    "packages (can be used multiple times).",
+)
+
+
+def check_list_path_option(options: Values) -> None:
+    if options.path and (options.user or options.local):
+        raise CommandError("Cannot combine '--path' with '--user' or '--local'")
+
+
+list_exclude: Callable[..., Option] = partial(
+    PipOption,
+    "--exclude",
+    dest="excludes",
+    action="append",
+    metavar="package",
+    type="package_name",
+    help="Exclude specified package from the output",
+)
+
+
+no_python_version_warning: Callable[..., Option] = partial(
+    Option,
+    "--no-python-version-warning",
+    dest="no_python_version_warning",
+    action="store_true",
+    default=False,
+    help="Silence deprecation warnings for upcoming unsupported Pythons.",
+)
+
+
+use_new_feature: Callable[..., Option] = partial(
+    Option,
+    "--use-feature",
+    dest="features_enabled",
+    metavar="feature",
+    action="append",
+    default=[],
+    choices=[
+        "fast-deps",
+        "truststore",
+        "no-binary-enable-wheel-cache",
+    ],
+    help="Enable new functionality, that may be backward incompatible.",
+)
+
+use_deprecated_feature: Callable[..., Option] = partial(
+    Option,
+    "--use-deprecated",
+    dest="deprecated_features_enabled",
+    metavar="feature",
+    action="append",
+    default=[],
+    choices=[
+        "legacy-resolver",
+    ],
+    help=("Enable deprecated functionality, that will be removed in the future."),
+)
+
+
+##########
+# groups #
+##########
+
+general_group: Dict[str, Any] = {
+    "name": "General Options",
+    "options": [
+        help_,
+        debug_mode,
+        isolated_mode,
+        require_virtualenv,
+        python,
+        verbose,
+        version,
+        quiet,
+        log,
+        no_input,
+        proxy,
+        retries,
+        timeout,
+        exists_action,
+        trusted_host,
+        cert,
+        client_cert,
+        cache_dir,
+        no_cache,
+        disable_pip_version_check,
+        no_color,
+        no_python_version_warning,
+        use_new_feature,
+        use_deprecated_feature,
+    ],
+}
+
+index_group: Dict[str, Any] = {
+    "name": "Package Index Options",
+    "options": [
+        index_url,
+        extra_index_url,
+        no_index,
+        find_links,
+    ],
+}
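
A short sketch of the pattern the module docstring describes: every entry in a
group is a callable that builds a fresh Option for each parse, and
make_option_group() instantiates them onto a parser. This assumes the
ConfigOptionParser keyword arguments shown here and a readable pip
configuration; it mirrors how base_command.py constructs its parser.

from pip._internal.cli import cmdoptions
from pip._internal.cli.parser import ConfigOptionParser

parser = ConfigOptionParser(name="example", usage="%prog [options]")
parser.add_option_group(
    # general_group["options"] holds callables, not Option instances,
    # so no option state leaks between parses.
    cmdoptions.make_option_group(cmdoptions.general_group, parser)
)
options, args = parser.parse_args(["-v", "--retries", "3"])
print(options.verbose, options.retries)  # 1 3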

+ 27 - 0
env/Lib/site-packages/pip/_internal/cli/command_context.py

@@ -0,0 +1,27 @@
+from contextlib import ExitStack, contextmanager
+from typing import ContextManager, Generator, TypeVar
+
+_T = TypeVar("_T", covariant=True)
+
+
+class CommandContextMixIn:
+    def __init__(self) -> None:
+        super().__init__()
+        self._in_main_context = False
+        self._main_context = ExitStack()
+
+    @contextmanager
+    def main_context(self) -> Generator[None, None, None]:
+        assert not self._in_main_context
+
+        self._in_main_context = True
+        try:
+            with self._main_context:
+                yield
+        finally:
+            self._in_main_context = False
+
+    def enter_context(self, context_provider: ContextManager[_T]) -> _T:
+        assert self._in_main_context
+
+        return self._main_context.enter_context(context_provider)
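
The mixin above is easiest to see with a toy context manager: enter_context()
is only valid while main_context() is open, and everything registered unwinds
in LIFO order when the with-block exits. Demo and resource are illustrative
names only.

from contextlib import contextmanager

from pip._internal.cli.command_context import CommandContextMixIn


@contextmanager
def resource(tag):
    print("open", tag)
    try:
        yield tag
    finally:
        print("close", tag)


class Demo(CommandContextMixIn):
    def work(self) -> None:
        with self.main_context():
            self.enter_context(resource("a"))
            self.enter_context(resource("b"))
            print("working")
        # Both resources are closed here, in LIFO order: b, then a.


Demo().work()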

+ 70 - 0
env/Lib/site-packages/pip/_internal/cli/main.py

@@ -0,0 +1,70 @@
+"""Primary application entrypoint.
+"""
+import locale
+import logging
+import os
+import sys
+from typing import List, Optional
+
+from pip._internal.cli.autocompletion import autocomplete
+from pip._internal.cli.main_parser import parse_command
+from pip._internal.commands import create_command
+from pip._internal.exceptions import PipError
+from pip._internal.utils import deprecation
+
+logger = logging.getLogger(__name__)
+
+
+# Do not import and use main() directly! Using it directly is actively
+# discouraged by pip's maintainers. The name, location and behavior of
+# this function is subject to change, so calling it directly is not
+# portable across different pip versions.
+
+# In addition, running pip in-process is unsupported and unsafe. This is
+# elaborated in detail at
+# https://pip.pypa.io/en/stable/user_guide/#using-pip-from-your-program.
+# That document also provides suggestions that should work for nearly
+# all users that are considering importing and using main() directly.
+
+# However, we know that certain users will still want to invoke pip
+# in-process. If you understand and accept the implications of using pip
+# in an unsupported manner, the best approach is to use runpy to avoid
+# depending on the exact location of this entry point.
+
+# The following example shows how to use runpy to invoke pip in that
+# case:
+#
+#     sys.argv = ["pip", your, args, here]
+#     runpy.run_module("pip", run_name="__main__")
+#
+# Note that this will exit the process after running, unlike a direct
+# call to main. As it is not safe to do any processing after calling
+# main, this should not be an issue in practice.
+
+
+def main(args: Optional[List[str]] = None) -> int:
+    if args is None:
+        args = sys.argv[1:]
+
+    # Configure our deprecation warnings to be sent through loggers
+    deprecation.install_warning_logger()
+
+    autocomplete()
+
+    try:
+        cmd_name, cmd_args = parse_command(args)
+    except PipError as exc:
+        sys.stderr.write(f"ERROR: {exc}")
+        sys.stderr.write(os.linesep)
+        sys.exit(1)
+
+    # Needed for locale.getpreferredencoding(False) to work
+    # in pip._internal.utils.encoding.auto_decode
+    try:
+        locale.setlocale(locale.LC_ALL, "")
+    except locale.Error as e:
+        # setlocale can apparently crash if locales are uninitialized
+        logger.debug("Ignoring error %s when setting locale", e)
+    command = create_command(cmd_name, isolated=("--isolated" in cmd_args))
+
+    return command.main(cmd_args)
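
The runpy approach recommended in the comments above, written out as a
runnable snippet; note that pip's __main__ calls sys.exit() when it finishes,
so nothing placed after run_module() executes.

import runpy
import sys

sys.argv = ["pip", "--version"]  # the arguments pip should see
# Runs pip as if invoked from the command line; exits the process when done.
runpy.run_module("pip", run_name="__main__")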

Some files were not shown because too many files changed in this diff