1
from __future__ import absolute_import
22
from pip._vendor.six.moves.urllib import parse as urllib_parse
23
from pip._vendor.six.moves.urllib import request as urllib_request
27
from pip.exceptions import InstallationError, HashMismatch
28
from pip.models import PyPI
29
from pip.utils import (splitext, rmtree, format_size, display_path,
30
backup_dir, ask_path_exists, unpack_file,
31
ARCHIVE_EXTENSIONS, consume, call_subprocess)
32
from pip.utils.encoding import auto_decode
33
from pip.utils.filesystem import check_path_owner
34
from pip.utils.logging import indent_log
35
from pip.utils.setuptools_build import SETUPTOOLS_SHIM
36
from pip.utils.glibc import libc_ver
37
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
38
from pip.locations import write_delete_marker_file
39
from pip.vcs import vcs
40
from pip._vendor import requests, six
41
from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter
42
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
43
from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
44
from pip._vendor.requests.utils import get_netrc_auth
45
from pip._vendor.requests.structures import CaseInsensitiveDict
46
from pip._vendor.requests.packages import urllib3
47
from pip._vendor.cachecontrol import CacheControlAdapter
48
from pip._vendor.cachecontrol.caches import FileCache
49
from pip._vendor.lockfile import LockError
50
from pip._vendor.six.moves import xmlrpc_client
53
__all__ = ['get_file_content',
54
'is_url', 'url_to_path', 'path_to_url',
55
'is_archive_file', 'unpack_vcs_link',
56
'unpack_file_url', 'is_vcs_url', 'is_file_url',
57
'unpack_http_url', 'unpack_url']
60
logger = logging.getLogger(__name__)
65
def user_agent():
    """
    Return a string representing the user agent.

    Format: ``pip/<version> <json-blob>`` where the JSON blob describes the
    installer, Python implementation, OS/distro and (when available) OpenSSL
    version.  NOTE(review): reconstructed from a mangled source chunk —
    confirm against upstream pip before relying on exact field layout.
    """
    data = {
        "installer": {"name": "pip", "version": pip.__version__},
        "python": platform.python_version(),
        "implementation": {
            "name": platform.python_implementation(),
        },
    }

    if data["implementation"]["name"] == 'CPython':
        data["implementation"]["version"] = platform.python_version()
    elif data["implementation"]["name"] == 'PyPy':
        if sys.pypy_version_info.releaselevel == 'final':
            pypy_version_info = sys.pypy_version_info[:3]
        else:
            pypy_version_info = sys.pypy_version_info
        data["implementation"]["version"] = ".".join(
            [str(x) for x in pypy_version_info]
        )
    elif data["implementation"]["name"] == 'Jython':
        # Complete Guess
        data["implementation"]["version"] = platform.python_version()
    elif data["implementation"]["name"] == 'IronPython':
        # Complete Guess
        data["implementation"]["version"] = platform.python_version()

    if sys.platform.startswith("linux"):
        from pip._vendor import distro
        # Drop empty fields so the JSON stays compact.
        distro_infos = dict(filter(
            lambda x: x[1],
            zip(["name", "version", "id"], distro.linux_distribution()),
        ))
        libc = dict(filter(
            lambda x: x[1],
            zip(["lib", "version"], libc_ver()),
        ))
        if libc:
            distro_infos["libc"] = libc
        if distro_infos:
            data["distro"] = distro_infos

    if sys.platform.startswith("darwin") and platform.mac_ver()[0]:
        data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]}

    if platform.system():
        data.setdefault("system", {})["name"] = platform.system()

    if platform.release():
        data.setdefault("system", {})["release"] = platform.release()

    if platform.machine():
        data["cpu"] = platform.machine()

    # Python 2.6 doesn't have ssl.OPENSSL_VERSION.
    if HAS_TLS and sys.version_info[:2] > (2, 6):
        data["openssl_version"] = ssl.OPENSSL_VERSION

    return "{data[installer][name]}/{data[installer][version]} {json}".format(
        data=data,
        json=json.dumps(data, separators=(",", ":"), sort_keys=True),
    )
129
class MultiDomainBasicAuth(AuthBase):
    """requests auth handler that remembers HTTP Basic credentials per
    netloc and, when ``prompting`` is True, asks the user interactively
    on a 401 response."""

    def __init__(self, prompting=True):
        # Whether we are allowed to prompt the user on a 401.
        self.prompting = prompting
        # netloc -> (username, password) cache.
        self.passwords = {}

    def __call__(self, req):
        parsed = urllib_parse.urlparse(req.url)

        # Get the netloc without any embedded credentials
        netloc = parsed.netloc.rsplit("@", 1)[-1]

        # Set the url of the request to the url without any credentials
        req.url = urllib_parse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])

        # Use any stored credentials that we have for this netloc
        username, password = self.passwords.get(netloc, (None, None))

        # Extract credentials embedded in the url if we have none stored
        if username is None:
            username, password = self.parse_credentials(parsed.netloc)

        # Get creds from netrc if we still don't have them
        if username is None and password is None:
            netrc_auth = get_netrc_auth(req.url)
            username, password = netrc_auth if netrc_auth else (None, None)

        if username or password:
            # Store the username and password
            self.passwords[netloc] = (username, password)

            # Send the basic auth with this request
            req = HTTPBasicAuth(username or "", password or "")(req)

        # Attach a hook to handle 401 responses
        req.register_hook("response", self.handle_401)

        return req

    def handle_401(self, resp, **kwargs):
        # We only care about 401 responses, anything else we want to just
        # pass through the actual response
        if resp.status_code != 401:
            return resp

        # We are not able to prompt the user so simply return the response
        if not self.prompting:
            return resp

        parsed = urllib_parse.urlparse(resp.url)

        # Prompt the user for a new username and password
        username = six.moves.input("User for %s: " % parsed.netloc)
        password = getpass.getpass("Password: ")

        # Store the new username and password to use for future requests
        if username or password:
            self.passwords[parsed.netloc] = (username, password)

        # Consume content and release the original connection to allow our new
        # request to reuse the same one.
        resp.content
        resp.raw.release_conn()

        # Add our new username and password to the request
        req = HTTPBasicAuth(username or "", password or "")(resp.request)

        # Send our new request
        new_resp = resp.connection.send(req, **kwargs)
        new_resp.history.append(resp)

        return new_resp

    def parse_credentials(self, netloc):
        # Return (user, password) parsed from a "user:pass@host" netloc;
        # (user, None) when only a user is present; (None, None) otherwise.
        if "@" in netloc:
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                return userinfo.split(":", 1)
            return userinfo, None
        return None, None
211
class LocalFSAdapter(BaseAdapter):
    """Transport adapter that serves ``file://`` URLs from the local
    filesystem, returning a 404 response when the path cannot be stat'd."""

    def send(self, request, stream=None, timeout=None, verify=None, cert=None,
             proxies=None):
        pathname = url_to_path(request.url)

        resp = Response()
        resp.status_code = 200
        resp.url = request.url

        try:
            stats = os.stat(pathname)
        except OSError as exc:
            # Missing/unreadable file -> synthesize a 404 carrying the error.
            resp.status_code = 404
            resp.raw = exc
        else:
            modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
            content_type = mimetypes.guess_type(pathname)[0] or "text/plain"
            resp.headers = CaseInsensitiveDict({
                "Content-Type": content_type,
                "Content-Length": stats.st_size,
                "Last-Modified": modified,
            })

            # Hand the open file object to requests as the raw stream.
            resp.raw = open(pathname, "rb")
            resp.close = resp.raw.close

        return resp

    def close(self):
        pass
244
class SafeFileCache(FileCache):
    """
    A file based cache which is safe to use even when the target directory may
    not be accessible or writable.
    """

    def __init__(self, *args, **kwargs):
        super(SafeFileCache, self).__init__(*args, **kwargs)

        # Check to ensure that the directory containing our cache directory
        # is owned by the user current executing pip. If it does not exist
        # we will check the parent directory until we find one that does exist.
        # If it is not owned by the user executing pip then we will disable
        # the cache and log a warning.
        if not check_path_owner(self.directory):
            logger.warning(
                "The directory '%s' or its parent directory is not owned by "
                "the current user and the cache has been disabled. Please "
                "check the permissions and owner of that directory. If "
                "executing pip with sudo, you may want sudo's -H flag.",
                self.directory,
            )

            # Set our directory to None to disable the Cache
            self.directory = None

    def get(self, *args, **kwargs):
        # If we don't have a directory, then the cache should be a no-op.
        if self.directory is None:
            return

        try:
            return super(SafeFileCache, self).get(*args, **kwargs)
        except (LockError, OSError, IOError):
            # We intentionally silence this error, if we can't access the cache
            # then we can just skip caching and process the request as if
            # caching wasn't enabled.
            pass

    def set(self, *args, **kwargs):
        # If we don't have a directory, then the cache should be a no-op.
        if self.directory is None:
            return

        try:
            return super(SafeFileCache, self).set(*args, **kwargs)
        except (LockError, OSError, IOError):
            # We intentionally silence this error, if we can't access the cache
            # then we can just skip caching and process the request as if
            # caching wasn't enabled.
            pass

    def delete(self, *args, **kwargs):
        # If we don't have a directory, then the cache should be a no-op.
        if self.directory is None:
            return

        try:
            return super(SafeFileCache, self).delete(*args, **kwargs)
        except (LockError, OSError, IOError):
            # We intentionally silence this error, if we can't access the cache
            # then we can just skip caching and process the request as if
            # caching wasn't enabled.
            pass
310
class InsecureHTTPAdapter(HTTPAdapter):
    """HTTPAdapter that disables TLS certificate verification entirely
    (used for hosts the user explicitly marked as trusted/insecure)."""

    def cert_verify(self, conn, url, verify, cert):
        conn.cert_reqs = 'CERT_NONE'
        conn.ca_certs = None
317
class PipSession(requests.Session):
    """requests.Session preconfigured for pip: custom User-Agent,
    multi-domain basic auth, retries, optional HTTP caching, ``file://``
    support and per-host TLS-verification opt-out."""

    # Default timeout applied to every request unless the caller overrides it.
    timeout = None

    def __init__(self, *args, **kwargs):
        retries = kwargs.pop("retries", 0)
        cache = kwargs.pop("cache", None)
        insecure_hosts = kwargs.pop("insecure_hosts", [])

        super(PipSession, self).__init__(*args, **kwargs)

        # Attach our User Agent to the request
        self.headers["User-Agent"] = user_agent()

        # Attach our Authentication handler to the session
        self.auth = MultiDomainBasicAuth()

        # Create our urllib3.Retry instance which will allow us to customize
        # how we handle retries.
        retries = urllib3.Retry(
            # Set the total number of retries that a particular request can
            # have.
            total=retries,

            # A 503 error from PyPI typically means that the Fastly -> Origin
            # connection got interrupted in some way. A 503 error in general
            # is typically considered a transient error so we'll go ahead and
            # retry it.
            status_forcelist=[503],

            # Add a small amount of back off between failed requests in
            # order to prevent hammering the service.
            backoff_factor=0.25,
        )

        # We want to _only_ cache responses on securely fetched origins. We do
        # this because we can't validate the response of an insecurely fetched
        # origin, and we don't want someone to be able to poison the cache and
        # require manual eviction from the cache to fix it.
        if cache:
            secure_adapter = CacheControlAdapter(
                cache=SafeFileCache(cache, use_dir_lock=True),
                max_retries=retries,
            )
        else:
            secure_adapter = HTTPAdapter(max_retries=retries)

        # Our Insecure HTTPAdapter disables HTTPS validation. It does not
        # support caching (see above) so we'll use it for all http:// URLs as
        # well as any https:// host that we've marked as ignoring TLS errors
        # for.
        insecure_adapter = InsecureHTTPAdapter(max_retries=retries)

        self.mount("https://", secure_adapter)
        self.mount("http://", insecure_adapter)

        # Enable file:// urls
        self.mount("file://", LocalFSAdapter())

        # We want to use a non-validating adapter for any requests which are
        # deemed insecure.
        for host in insecure_hosts:
            self.mount("https://{0}/".format(host), insecure_adapter)

    def request(self, method, url, *args, **kwargs):
        # Allow setting a default timeout on a session
        kwargs.setdefault("timeout", self.timeout)

        # Dispatch the actual request
        return super(PipSession, self).request(method, url, *args, **kwargs)
389
def get_file_content(url, comes_from=None, session=None):
    """Gets the content of a file; it may be a filename, file: URL, or
    http: URL. Returns (location, content). Content is unicode.

    :raises TypeError: when no session is supplied.
    :raises InstallationError: when an http(s)-sourced requirements file
        references a local file: URL, or the file cannot be opened.
    """
    if session is None:
        raise TypeError(
            "get_file_content() missing 1 required keyword argument: 'session'"
        )

    match = _scheme_re.search(url)
    if match:
        scheme = match.group(1).lower()
        # Refuse local file references coming from a remote requirements
        # file -- that would let a remote file read local paths.
        if (scheme == 'file' and comes_from and
                comes_from.startswith('http')):
            raise InstallationError(
                'Requirements file %s references URL %s, which is local'
                % (comes_from, url))
        if scheme == 'file':
            path = url.split(':', 1)[1]
            path = path.replace('\\', '/')
            match = _url_slash_drive_re.match(path)
            if match:
                # Windows drive-letter form "c|/..." -> "c:/..."
                path = match.group(1) + ':' + path.split('|', 1)[1]
            path = urllib_parse.unquote(path)
            if path.startswith('/'):
                path = '/' + path.lstrip('/')
            url = path
        else:
            # FIXME: catch some errors
            resp = session.get(url)
            resp.raise_for_status()
            return resp.url, resp.text
    try:
        with open(url, 'rb') as f:
            content = auto_decode(f.read())
    except IOError as exc:
        raise InstallationError(
            'Could not open requirements file: %s' % str(exc)
        )
    return url, content


_scheme_re = re.compile(r'^(http|https|file):', re.I)
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
435
def is_url(name):
    """Returns true if the name looks like a URL"""
    # Without a ':' there can be no scheme at all.
    if ':' not in name:
        return False
    scheme = name.split(':', 1)[0].lower()
    return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
442
def url_to_path(url):
    """
    Convert a file: URL to a path.
    """
    assert url.startswith('file:'), (
        "You can only turn file: urls into filenames (not %r)" % url)

    _, netloc, path, _, _ = urllib_parse.urlsplit(url)

    # if we have a UNC path, prepend UNC share notation
    if netloc:
        netloc = '\\\\' + netloc

    path = urllib_request.url2pathname(netloc + path)
    return path
459
def path_to_url(path):
    """
    Convert a path to a file: URL. The path will be made absolute and have
    quoted path parts.
    """
    path = os.path.normpath(os.path.abspath(path))
    url = urllib_parse.urljoin('file:', urllib_request.pathname2url(path))
    return url
469
def is_archive_file(name):
    """Return True if `name` is a considered as an archive file."""
    ext = splitext(name)[1].lower()
    # Membership test directly yields the boolean we want.
    return ext in ARCHIVE_EXTENSIONS
477
def unpack_vcs_link(link, location):
    """Check out the VCS link into *location* using its matching backend."""
    _get_used_vcs_backend(link).unpack(location)
482
def _get_used_vcs_backend(link):
    """Return a VCS backend instance for *link*'s scheme.

    Falls through (returning None implicitly) when no registered backend
    claims the scheme.
    """
    for backend in vcs.backends:
        if link.scheme in backend.schemes:
            vcs_backend = backend(link.url)
            return vcs_backend
489
def is_vcs_url(link):
    """Return True when some registered VCS backend handles this link."""
    backend = _get_used_vcs_backend(link)
    return bool(backend)
493
def is_file_url(link):
    """Return True when the link's URL uses the file: scheme
    (case-insensitive)."""
    lowered = link.url.lower()
    return lowered.startswith('file:')
497
def is_dir_url(link):
    """Return whether a file:// Link points to a directory.

    ``link`` must not have any other scheme but file://. Call is_file_url()
    first.
    """
    link_path = url_to_path(link.url_without_fragment)
    return os.path.isdir(link_path)
508
def _progress_indicator(iterable, *args, **kwargs):
512
def _download_url(resp, link, content_file, hashes):
    """Stream *resp*'s body into *content_file*, optionally verifying hashes.

    Shows a progress bar/spinner depending on log level, cache state and
    content length. Raises HashMismatch via ``hashes.check_against_chunks``
    when verification fails.
    """
    try:
        total_length = int(resp.headers['content-length'])
    except (ValueError, KeyError, TypeError):
        total_length = 0

    cached_resp = getattr(resp, "from_cache", False)

    if logger.getEffectiveLevel() > logging.INFO:
        show_progress = False
    elif cached_resp:
        show_progress = False
    elif total_length > (40 * 1000):
        show_progress = True
    elif not total_length:
        show_progress = True
    else:
        show_progress = False

    show_url = link.show_url

    def resp_read(chunk_size):
        try:
            # Special case for urllib3.
            for chunk in resp.raw.stream(
                    chunk_size,
                    # We use decode_content=False here because we don't
                    # want urllib3 to mess with the raw bytes we get
                    # from the server. If we decompress inside of
                    # urllib3 then we cannot verify the checksum
                    # because the checksum will be of the compressed
                    # file. This breakage will only occur if the
                    # server adds a Content-Encoding header, which
                    # depends on how the server was configured:
                    # - Some servers will notice that the file isn't a
                    #   compressible file and will leave the file alone
                    #   and with an empty Content-Encoding
                    # - Some servers will notice that the file is
                    #   already compressed and will leave the file
                    #   alone and will add a Content-Encoding: gzip
                    #   header
                    # - Some servers won't notice anything at all and
                    #   will take a file that's already been compressed
                    #   and compress it again and set the
                    #   Content-Encoding: gzip header
                    #
                    # By setting this not to decode automatically we
                    # hope to eliminate problems with the second case.
                    decode_content=False):
                yield chunk
        except AttributeError:
            # Standard file-like object.
            while True:
                chunk = resp.raw.read(chunk_size)
                if not chunk:
                    break
                yield chunk

    def written_chunks(chunks):
        # Write each chunk and re-yield it so hash checking can consume it.
        for chunk in chunks:
            content_file.write(chunk)
            yield chunk

    progress_indicator = _progress_indicator

    if link.netloc == PyPI.netloc:
        url = show_url
    else:
        url = link.url_without_fragment

    if show_progress:  # We don't show progress on cached responses
        if total_length:
            logger.info("Downloading %s (%s)", url, format_size(total_length))
            progress_indicator = DownloadProgressBar(max=total_length).iter
        else:
            logger.info("Downloading %s", url)
            progress_indicator = DownloadProgressSpinner().iter
    elif cached_resp:
        logger.info("Using cached %s", url)
    else:
        logger.info("Downloading %s", url)

    logger.debug('Downloading from URL %s', link)

    downloaded_chunks = written_chunks(
        progress_indicator(
            resp_read(CONTENT_CHUNK_SIZE),
            CONTENT_CHUNK_SIZE,
        )
    )
    if hashes:
        hashes.check_against_chunks(downloaded_chunks)
    else:
        # No hashes to verify: just drain the generator so the file is written.
        consume(downloaded_chunks)
608
def _copy_file(filename, location, link):
    """Copy *filename* into *location* as ``link.filename``, prompting the
    user when a file with that name already exists ((i)gnore, (w)ipe,
    (b)ackup, (a)bort)."""
    copy = True
    download_location = os.path.join(location, link.filename)
    if os.path.exists(download_location):
        response = ask_path_exists(
            'The file %s exists. (i)gnore, (w)ipe, (b)ackup, (a)abort' %
            display_path(download_location), ('i', 'w', 'b', 'a'))
        if response == 'i':
            # Keep the existing file untouched.
            copy = False
        elif response == 'w':
            logger.warning('Deleting %s', display_path(download_location))
            os.remove(download_location)
        elif response == 'b':
            dest_file = backup_dir(download_location)
            logger.warning(
                'Backing up %s to %s',
                display_path(download_location),
                display_path(dest_file),
            )
            shutil.move(download_location, dest_file)
        elif response == 'a':
            sys.exit(-1)
    if copy:
        shutil.copy(filename, download_location)
        logger.info('Saved %s', display_path(download_location))
635
def unpack_http_url(link, location, download_dir=None,
                    session=None, hashes=None):
    """Fetch an http(s) link (or reuse a previous download) and unpack it
    into *location*; optionally copy the archive into *download_dir*.

    :raises TypeError: when no session is supplied.
    """
    if session is None:
        raise TypeError(
            "unpack_http_url() missing 1 required keyword argument: 'session'"
        )

    temp_dir = tempfile.mkdtemp('-unpack', 'pip-')

    # If a download dir is specified, is the file already downloaded there?
    already_downloaded_path = None
    if download_dir:
        already_downloaded_path = _check_download_dir(link,
                                                      download_dir,
                                                      hashes)

    if already_downloaded_path:
        from_path = already_downloaded_path
        content_type = mimetypes.guess_type(from_path)[0]
    else:
        # let's download to a tmp dir
        from_path, content_type = _download_http_url(link,
                                                     session,
                                                     temp_dir,
                                                     hashes)

    # unpack the archive to the build dir location. even when only downloading
    # archives, they have to be unpacked to parse dependencies
    unpack_file(from_path, location, content_type, link)

    # a download dir is specified; let's copy the archive there
    if download_dir and not already_downloaded_path:
        _copy_file(from_path, download_dir, link)

    # Clean up the freshly-downloaded temp copy and the temp dir itself.
    if not already_downloaded_path:
        os.unlink(from_path)
    rmtree(temp_dir)
674
def unpack_file_url(link, location, download_dir=None, hashes=None):
    """Unpack link into location.

    If download_dir is provided and link points to a file, make a copy
    of the link file inside download_dir.
    """
    link_path = url_to_path(link.url_without_fragment)

    # If it's a url to a local directory
    if is_dir_url(link):
        if os.path.isdir(location):
            rmtree(location)
        shutil.copytree(link_path, location, symlinks=True)
        if download_dir:
            logger.info('Link is a directory, ignoring download_dir')
        return

    # If --require-hashes is off, `hashes` is either empty, the
    # link's embedded hash, or MissingHashes; it is required to
    # match. If --require-hashes is on, we are satisfied by any
    # hash in `hashes` matching: a URL-based or an option-based
    # one; no internet-sourced hash will be in `hashes`.
    if hashes:
        hashes.check_against_path(link_path)

    # If a download dir is specified, is the file already there and valid?
    already_downloaded_path = None
    if download_dir:
        already_downloaded_path = _check_download_dir(link,
                                                      download_dir,
                                                      hashes)

    if already_downloaded_path:
        from_path = already_downloaded_path
    else:
        from_path = link_path

    content_type = mimetypes.guess_type(from_path)[0]

    # unpack the archive to the build dir location. even when only downloading
    # archives, they have to be unpacked to parse dependencies
    unpack_file(from_path, location, content_type, link)

    # a download dir is specified and not already downloaded
    if download_dir and not already_downloaded_path:
        _copy_file(from_path, download_dir, link)
722
def _copy_dist_from_dir(link_path, location):
    """Copy distribution files in `link_path` to `location`.

    Invoked when user requests to install a local directory. E.g.:

        pip install .
        pip install ~/dev/git-repos/python-prompt-toolkit

    """

    # Note: This is currently VERY SLOW if you have a lot of data in the
    # directory, because it copies everything with `shutil.copytree`.
    # What it should really do is build an sdist and install that.
    # See https://github.com/pypa/pip/issues/2195

    if os.path.isdir(location):
        rmtree(location)

    # build an sdist
    setup_py = 'setup.py'
    sdist_args = [sys.executable]
    sdist_args.append('-c')
    sdist_args.append(SETUPTOOLS_SHIM % setup_py)
    sdist_args.append('sdist')
    sdist_args += ['--dist-dir', location]
    logger.info('Running setup.py sdist for %s', link_path)

    with indent_log():
        call_subprocess(sdist_args, cwd=link_path, show_stdout=False)

    # unpack sdist into `location`
    sdist = os.path.join(location, os.listdir(location)[0])
    logger.info('Unpacking sdist %s into %s', sdist, location)
    with indent_log():
        unpack_file(sdist, location, content_type=None, link=None)
758
class PipXmlrpcTransport(xmlrpc_client.Transport):
    """Provide a `xmlrpclib.Transport` implementation via a `PipSession`
    object.
    """

    def __init__(self, index_url, session, use_datetime=False):
        xmlrpc_client.Transport.__init__(self, use_datetime)
        index_parts = urllib_parse.urlparse(index_url)
        # Reuse the index URL's scheme (http/https) for all XML-RPC calls.
        self._scheme = index_parts.scheme
        self._session = session

    def request(self, host, handler, request_body, verbose=False):
        parts = (self._scheme, host, handler, None, None, None)
        url = urllib_parse.urlunparse(parts)
        try:
            headers = {'Content-Type': 'text/xml'}
            response = self._session.post(url, data=request_body,
                                          headers=headers, stream=True)
            response.raise_for_status()
            self.verbose = verbose
            return self.parse_response(response.raw)
        except requests.HTTPError as exc:
            logger.critical(
                "HTTP error %s while getting %s",
                exc.response.status_code, url,
            )
            raise
787
def unpack_url(link, location, download_dir=None,
               only_download=False, session=None, hashes=None):
    """Unpack link.
    If link is a VCS link:
      if only_download, export into download_dir and ignore location
        else unpack into location
    for other types of link:
      - unpack into location
      - if download_dir, copy the file into download_dir
      - if only_download, mark location for deletion

    :param hashes: A Hashes object, one of whose embedded hashes must match,
        or HashMismatch will be raised. If the Hashes is empty, no matches are
        required, and unhashable types of requirements (like VCS ones, which
        would ordinarily raise HashUnsupported) are allowed.
    """
    # non-editable vcs urls
    if is_vcs_url(link):
        unpack_vcs_link(link, location)

    # file urls
    elif is_file_url(link):
        unpack_file_url(link, location, download_dir, hashes=hashes)

    # http urls
    else:
        if session is None:
            session = PipSession()

        unpack_http_url(
            link,
            location,
            download_dir,
            session,
            hashes=hashes
        )
    if only_download:
        write_delete_marker_file(location)
827
def _download_http_url(link, session, temp_dir, hashes):
    """Download link url into temp_dir using provided session.

    Returns (file_path, content_type). The filename is taken from the
    Content-Disposition header when available, falling back to the link's
    filename, with an extension guessed from the content type or final URL.
    """
    target_url = link.url.split('#', 1)[0]
    try:
        resp = session.get(
            target_url,
            # We use Accept-Encoding: identity here because requests
            # defaults to accepting compressed responses. This breaks in
            # a variety of ways depending on how the server is configured.
            # - Some servers will notice that the file isn't a compressible
            #   file and will leave the file alone and with an empty
            #   Content-Encoding
            # - Some servers will notice that the file is already
            #   compressed and will leave the file alone and will add a
            #   Content-Encoding: gzip header
            # - Some servers won't notice anything at all and will take
            #   a file that's already been compressed and compress it again
            #   and set the Content-Encoding: gzip header
            # By setting this to request only the identity encoding We're
            # hoping to eliminate the third case. Hopefully there does not
            # exist a server which when given a file will notice it is
            # already compressed and that you're not asking for a
            # compressed file and will then decompress it before sending
            # because if that's the case I don't think it'll ever be
            # possible to make this work.
            headers={"Accept-Encoding": "identity"},
            stream=True,
        )
        resp.raise_for_status()
    except requests.HTTPError as exc:
        logger.critical(
            "HTTP error %s while getting %s", exc.response.status_code, link,
        )
        raise

    content_type = resp.headers.get('content-type', '')
    filename = link.filename  # fallback
    # Have a look at the Content-Disposition header for a better guess
    content_disposition = resp.headers.get('content-disposition')
    if content_disposition:
        type, params = cgi.parse_header(content_disposition)
        # We use ``or`` here because we don't want to use an "empty" value
        # from the filename param.
        filename = params.get('filename') or filename
    ext = splitext(filename)[1]
    if not ext:
        # Try to derive an extension from the content type, then from the
        # (possibly redirected) final URL.
        ext = mimetypes.guess_extension(content_type)
        if ext:
            filename += ext
    if not ext and link.url != resp.url:
        ext = os.path.splitext(resp.url)[1]
        if ext:
            filename += ext
    file_path = os.path.join(temp_dir, filename)
    with open(file_path, 'wb') as content_file:
        _download_url(resp, link, content_file, hashes)
    return file_path, content_type
886
def _check_download_dir(link, download_dir, hashes):
    """ Check download_dir for previously downloaded file with correct hash
    If a correct file is found return its path else None
    """
    download_path = os.path.join(download_dir, link.filename)
    if os.path.exists(download_path):
        # If already downloaded, does its hash match?
        logger.info('File was already downloaded %s', download_path)
        if hashes:
            try:
                hashes.check_against_path(download_path)
            except HashMismatch:
                # Stale/corrupt file: remove it and force a re-download.
                logger.warning(
                    'Previously-downloaded file %s has bad hash. '
                    'Re-downloading.',
                    download_path
                )
                os.unlink(download_path)
                return None
        return download_path
    return None