1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
|
"""PyPI and direct package downloading"""
import sys, os.path, re, urlparse, urllib2
from pkg_resources import *
from distutils import log
HREF = re.compile(r"""href\s*=\s*['"]?([^'"> ]+)""", re.I)
URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):',re.I).match
EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
__all__ = [
'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
'interpret_distro_name',
]
def parse_bdist_wininst(name):
    """Return (base,pyversion) or (None,None) for possible .exe name"""
    lowered = name.lower()
    if not lowered.endswith('.exe'):
        # not a Windows installer at all
        return None, None
    if lowered.endswith('.win32.exe'):
        # "NAME.win32.exe" -- no Python version encoded in the filename
        return name[:-10], None
    if lowered.startswith('.win32-py', -16):
        # "NAME.win32-pyX.Y.exe" -- last 16 chars carry the Python version
        return name[:-16], name[-7:-4]
    return None, None
def distros_for_url(url, metadata=None):
    """Yield egg or source distribution objects that might be found at a URL"""
    path = urlparse.urlparse(url)[2]
    base = urllib2.unquote(path.split('/')[-1])

    if base.endswith('.egg'):
        # An egg filename fully determines the distribution
        dist = Distribution.from_filename(base, metadata)
        dist.path = url
        return [dist]   # only one, unambiguous interpretation

    if base.endswith('.exe'):
        win_base, py_ver = parse_bdist_wininst(base)
        if win_base is not None:
            # recognized bdist_wininst installer
            return interpret_distro_name(
                url, win_base, metadata, py_ver, BINARY_DIST, "win32"
            )

    # Try source distro extensions (.zip, .tgz, etc.)
    #
    for ext in EXTENSIONS:
        if base.endswith(ext):
            return interpret_distro_name(url, base[:-len(ext)], metadata)

    return []   # no extension matched
def interpret_distro_name(url, base, metadata,
    py_version=None, distro_type=SOURCE_DIST, platform=None
):
    """Generate alternative interpretations of a source distro name

    Some packages are ambiguous as to where the name ends and the version
    begins (e.g. "adns-python-1.1.0", "egenix-mx-commercial"), so every
    possible split is yielded ("adns" + "python-1.1.0", "adns-python" +
    "1.1.0", "adns-python-1.1.0" + no version).  In practice, the spurious
    interpretations should be ignored: in the event there's also an "adns"
    package, the spurious "python-1.1.0" version will compare lower than
    any numeric version number, and is therefore unlikely to match a
    request for it.  It's still a potential problem, though, and in the
    long run PyPI and the distutils should go for "safe" names and versions
    in distribution archive names (sdist and bdist).
    """
    parts = base.split('-')
    for split_at in range(1, len(parts)+1):
        project = '-'.join(parts[:split_at])
        version = '-'.join(parts[split_at:])
        yield Distribution(
            url, metadata, project, version,
            py_version=py_version, distro_type=distro_type,
            platform=platform
        )
class PackageIndex(AvailableDistributions):
"""A distribution index that scans web pages for download URLs"""
def __init__(self,index_url="http://www.python.org/pypi",*args,**kw):
AvailableDistributions.__init__(self,*args,**kw)
self.index_url = index_url + "/"[:not index_url.endswith('/')]
self.scanned_urls = {}
self.fetched_urls = {}
self.package_pages = {}
def process_url(self, url, retrieve=False):
if url in self.scanned_urls and not retrieve:
return
self.scanned_urls[url] = True
dists = list(distros_for_url(url))
if dists: self.debug("Found link: %s", url)
if dists or not retrieve or url in self.fetched_urls:
for dist in dists:
self.add(dist)
# don't need the actual page
return
self.info("Reading %s", url)
f = self.open_url(url)
self.fetched_urls[url] = self.fetched_urls[f.url] = True
if 'html' not in f.headers['content-type'].lower():
f.close() # not html, we can't process it
return
base = f.url # handle redirects
page = f.read()
f.close()
if url.startswith(self.index_url):
self.process_index(url, page)
for match in HREF.finditer(page):
link = urlparse.urljoin(base, match.group(1))
self.process_url(link)
def process_index(self,url,page):
"""Process the contents of a PyPI page"""
def scan(link):
# Process a URL to see if it's for a package page
if link.startswith(self.index_url):
parts = map(
urllib2.unquote, link[len(self.index_url):].split('/')
)
if len(parts)==2:
# it's a package page, sanitize and index it
pkg = safe_name(parts[0])
ver = safe_version(parts[1])
self.package_pages.setdefault(pkg.lower(),{})[link] = True
if url==self.index_url or 'Index of Packages</title>' in page:
# process an index page into the package-page index
for match in HREF.finditer(page):
scan( urlparse.urljoin(url, match.group(1)) )
else:
scan(url) # ensure this page is in the page index
# process individual package page
for tag in ("<th>Home Page", "<th>Download URL"):
pos = page.find(tag)
if pos!=-1:
match = HREF.search(page,pos)
if match:
# Process the found URL
self.scan_url(urlparse.urljoin(url, match.group(1)))
def find_packages(self, requirement):
self.scan_url(self.index_url + requirement.distname+'/')
if not self.package_pages.get(requirement.key):
# We couldn't find the target package, so search the index page too
self.warn(
"Couldn't find index page for %r (maybe misspelled?)",
requirement.distname
)
if self.index_url not in self.fetched_urls:
self.warn(
"Scanning index of all packages (this may take a while)"
)
self.scan_url(self.index_url)
for url in self.package_pages.get(requirement.key,()):
# scan each page that might be related to the desired package
self.scan_url(url)
def obtain(self, requirement, installer=None):
self.find_packages(requirement)
for dist in self.get(requirement.key, ()):
if dist in requirement:
return dist
self.debug("%s does not match %s", requirement, dist)
return super(PackageIndex, self).obtain(requirement,installer)
def download(self, spec, tmpdir):
"""Locate and/or download `spec` to `tmpdir`, returning a local path
`spec` may be a ``Requirement`` object, or a string containing a URL,
an existing local filename, or a project/version requirement spec
(i.e. the string form of a ``Requirement`` object).
If `spec` is a ``Requirement`` object or a string containing a
project/version requirement spec, this method is equivalent to
the ``fetch()`` method. If `spec` is a local, existing file or
directory name, it is simply returned unchanged. If `spec` is a URL,
it is downloaded to a subpath of `tmpdir`, and the local filename is
returned. Various errors may be raised if a problem occurs during
downloading.
"""
if not isinstance(spec,Requirement):
scheme = URL_SCHEME(spec)
if scheme:
# It's a url, download it to tmpdir
return self._download_url(scheme.group(1), spec, tmpdir)
elif os.path.exists(spec):
# Existing file or directory, just return it
return spec
else:
try:
spec = Requirement.parse(spec)
except ValueError:
raise RuntimeError(
"Not a URL, existing file, or requirement spec: %r" %
(spec,)
)
return self.fetch(spec, tmpdir, force_scan)
def fetch(self, requirement, tmpdir, force_scan=False):
"""Obtain a file suitable for fulfilling `requirement`
`requirement` must be a ``pkg_resources.Requirement`` instance.
If necessary, or if the `force_scan` flag is set, the requirement is
searched for in the (online) package index as well as the locally
installed packages. If a distribution matching `requirement` is found,
the return value is the same as if you had called the ``download()``
method with the matching distribution's URL. If no matching
distribution is found, returns ``None``.
"""
# process a Requirement
self.info("Searching for %s", requirement)
if force_scan:
self.find_packages(requirement)
dist = self.best_match(requirement, []) # XXX
if dist is not None:
self.info("Best match: %s", dist)
return self.download(dist.path, tmpdir)
self.warn(
"No local packages or download links found for %s", requirement
)
return None
dl_blocksize = 8192
def _download_to(self, url, filename):
self.info("Downloading %s", url)
# Download the file
fp, tfp = None, None
try:
fp = self.open_url(url)
if isinstance(fp, urllib2.HTTPError):
raise RuntimeError(
"Can't download %s: %s %s" % (url, fp.code,fp.msg)
)
headers = fp.info()
blocknum = 0
bs = self.dl_blocksize
size = -1
if "content-length" in headers:
size = int(headers["Content-Length"])
self.reporthook(url, filename, blocknum, bs, size)
tfp = open(filename,'wb')
while True:
block = fp.read(bs)
if block:
tfp.write(block)
blocknum += 1
self.reporthook(url, filename, blocknum, bs, size)
else:
break
return headers
finally:
if fp: fp.close()
if tfp: tfp.close()
def reporthook(self, url, filename, blocknum, blksize, size):
pass # no-op
def open_url(self, url):
try:
return urllib2.urlopen(url)
except urllib2.HTTPError, v:
return v
except urllib2.URLError, v:
raise RuntimeError("Download error: %s" % v.reason)
def _download_url(self, scheme, url, tmpdir):
# Determine download filename
#
name = filter(None,urlparse.urlparse(url)[2].split('/'))
if name:
name = name[-1]
while '..' in name:
name = name.replace('..','.').replace('\\','_')
else:
name = "__downloaded__" # default if URL has no path contents
filename = os.path.join(tmpdir,name)
# Download the file
#
if scheme=='svn' or scheme.startswith('svn+'):
return self._download_svn(url, filename)
else:
headers = self._download_to(url, filename)
if 'html' in headers['content-type'].lower():
return self._download_html(url, headers, filename, tmpdir)
else:
return filename
def scan_url(self, url):
self.process_url(url, True)
def _download_html(self, url, headers, filename, tmpdir):
# Check for a sourceforge URL
sf_url = url.startswith('http://prdownloads.')
file = open(filename)
for line in file:
if line.strip():
# Check for a subversion index page
if re.search(r'<title>Revision \d+:', line):
# it's a subversion index page:
file.close()
os.unlink(filename)
return self._download_svn(url, filename)
# Check for a SourceForge header
elif sf_url:
if re.search(r'^<HTML><HEAD>', line, re.I):
continue # skip first line
elif re.search(r'<TITLE>Select a Mirror for File:',line):
# Sourceforge mirror page
page = file.read()
file.close()
os.unlink(filename)
return self._download_sourceforge(url, page, tmpdir)
break # not an index page
file.close()
raise RuntimeError("Unexpected HTML page found at "+url)
def _download_svn(self, url, filename):
self.info("Doing subversion checkout from %s to %s", url, filename)
os.system("svn checkout -q %s %s" % (url, filename))
return filename
def debug(self, msg, *args):
log.debug(msg, *args)
def info(self, msg, *args):
log.info(msg, *args)
def warn(self, msg, *args):
log.warn(msg, *args)
def _download_sourceforge(self, source_url, sf_page, tmpdir):
"""Download package from randomly-selected SourceForge mirror"""
self.debug("Processing SourceForge mirror page")
mirror_regex = re.compile(r'HREF=(/.*?\?use_mirror=[^>]*)')
urls = [m.group(1) for m in mirror_regex.finditer(sf_page)]
if not urls:
raise RuntimeError(
"URL looks like a Sourceforge mirror page, but no URLs found"
)
import random
url = urlparse.urljoin(source_url, random.choice(urls))
self.info(
"Requesting redirect to (randomly selected) %r mirror",
url.split('=',1)[-1]
)
f = self.open_url(url)
match = re.search(
r'<META HTTP-EQUIV="refresh" content=".*?URL=(.*?)"',
f.read()
)
f.close()
if match:
download_url = match.group(1)
scheme = URL_SCHEME(download_url)
return self._download_url(scheme.group(1), download_url, tmpdir)
else:
raise RuntimeError(
'No META HTTP-EQUIV="refresh" found in Sourceforge page at %s'
% url
)
|