from __future__ import division

import urllib2
import urlparse
import sys
import struct
import array
import os.path
import cgi
import re
from htmlentitydefs import name2codepoint
from time import sleep
from math import log, floor

from dosage.output import out
from dosage.version import VERSION


class NoMatchError(Exception):
    pass

def getMatchValues(matches):
    """Return the first capture group of each match."""
    return [match.group(1) for match in matches]

def fetchManyMatches(url, regexes):
    '''Returns a list containing lists of matches for each regular expression, in the same order.'''
    out.write('Matching regex(es) %r multiple times against %s...' % ([rex.pattern for rex in regexes], url), 2)
    page = urlopen(url)
    data = page.read()
    matches = [getMatchValues(regex.finditer(data)) for regex in regexes]
    if any(matches):
        out.write('...found %r' % (matches,), 2)
    else:
        out.write('...not found!', 2)
    return matches

def fetchMatches(url, regexes):
    """Return the first capture group of the first match of each regex,
    or None for a regex that does not match, in the same order."""
    out.write('Matching regex(es) %r against %s...' % ([rex.pattern for rex in regexes], url), 2)
    page = urlopen(url)
    data = page.read()
    matches = []
    for regex in regexes:
        match = regex.search(data)
        matches.append(match and match.group(1))
    if any(matches):
        out.write('...found %r' % (matches,), 2)
    else:
        out.write('...not found!', 2)
    return matches

def fetchMatch(url, regex):
    """Return the first capture group of the first match of a single regex."""
    return fetchMatches(url, (regex,))[0]

def fetchUrl(url, regex):
    """Fetch the first match of regex and resolve it relative to the page URL."""
    match = fetchMatch(url, regex)
    return match and urlparse.urljoin(url, match)

baseSearch = re.compile(r'<base\s+href="([^"]*)"', re.IGNORECASE)

def fetchUrls(url, regexes):
    """Fetch one match per regex and resolve each against the page's
    <base href="..."> if present, else against the page URL itself."""
    matches = fetchMatches(url, [baseSearch] + list(regexes))
    baseUrl = matches.pop(0) or url
    return [urlparse.urljoin(baseUrl, match) for match in matches]
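
# Illustrative usage sketch; the URL and regex below are hypothetical, not
# part of this module. fetchUrls prepends baseSearch, so each match is
# resolved against an explicit <base href="..."> when the page sets one:
#
#   imageSearch = re.compile(r'<img[^>]+src="([^"]*)"', re.IGNORECASE)
#   imageUrl, = fetchUrls('http://example.com/comic/', [imageSearch])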

def fetchManyUrls(url, regexes):
    """Like fetchUrls, but return every match of each regex, not just the first."""
    matchGroups = fetchManyMatches(url, [baseSearch] + list(regexes))
    baseUrl = matchGroups.pop(0) or [url]
    baseUrl = baseUrl[0]
    xformedGroups = []
    for matchGroup in matchGroups:
        xformedGroups.append([urlparse.urljoin(baseUrl, match) for match in matchGroup])
    return xformedGroups

def _unescape(text):
    """
    Replace HTML entities and character references.
    """
    def _fixup(m):
        text = m.group(0)
        if text[:2] == "&#":
            # numeric character reference
            try:
                if text[:3] == "&#x":
                    text = unichr(int(text[3:-1], 16))
                else:
                    text = unichr(int(text[2:-1]))
            except ValueError:
                pass
        else:
            # named entity
            try:
                text = unichr(name2codepoint[text[1:-1]])
            except KeyError:
                pass
        if isinstance(text, unicode):
            text = text.encode('utf-8')
        text = urllib2.quote(text, safe=';/?:@&=+$,')
        return text
    return re.sub(r"&#?\w+;", _fixup, text)
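
# Examples of what _unescape yields, computed from the code above (note
# that '&' itself is in the safe set passed to urllib2.quote):
#
#   _unescape('f=1&amp;g=2')  -> 'f=1&g=2'
#   _unescape('a&nbsp;b')     -> 'a%C2%A0b'  (U+00A0 as UTF-8, percent-quoted)
#   _unescape('a&#39;b')      -> 'a%27b'     (apostrophe percent-quoted)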

def normaliseURL(url):
    """
    Remove any leading empty path segments to avoid breaking urllib2; also
    replace HTML entities and character references and quote spaces.
    """
    # XXX: brutal hack
    url = _unescape(url)
    url = url.replace(' ', '%20')
    pu = list(urlparse.urlparse(url))
    segments = pu[2].replace(' ', '%20').split('/')
    while segments and segments[0] == '':
        del segments[0]
    pu[2] = '/' + '/'.join(segments)
    return urlparse.urlunparse(pu)
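
# A sketch of the effect, using made-up URLs:
#
#   normaliseURL('http://example.com//comics/latest')
#       -> 'http://example.com/comics/latest'   (leading empty segment dropped)
#   normaliseURL('http://example.com/a b&amp;c')
#       -> 'http://example.com/a%20b&c'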

def urlopen(url, referrer=None, retries=5):
    # Work around urllib2 brokenness
    url = normaliseURL(url)
    req = urllib2.Request(url)
    if referrer:
        # Send both the correct spelling and the 'Referer' spelling that
        # the HTTP specification uses, since servers may check either.
        req.add_header('Referrer', referrer)
        req.add_header('Referer', referrer)
    req.add_header('User-Agent', 'Dosage %s (http://slipgate.za.net/dosage)' % (VERSION,))
    tries = 0
    while True:
        try:
            return urllib2.urlopen(req)
        except IOError:
            tries += 1
            if tries >= retries:
                # Out of retries; let the last error propagate.
                raise
            delay = 2 ** (tries - 1)
            out.write('URL retrieval failed, sleeping %d seconds and retrying (%d)' % (delay, tries), 2)
            sleep(delay)
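
# With the default retries=5, a persistently failing URL is tried five
# times with sleeps of 1, 2, 4 and 8 seconds in between; the fifth
# failure raises the IOError to the caller instead of sleeping again.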

def getWindowSize():
    """Return the terminal width in columns, or raise NotImplementedError
    if it cannot be determined (no fcntl/termios, or the ioctl fails)."""
    try:
        from fcntl import ioctl
        from termios import TIOCGWINSZ
    except ImportError:
        raise NotImplementedError
    st = 'HHHH'
    names = 'ws_row', 'ws_col', 'ws_xpixel', 'ws_ypixel'
    buf = array.array('b', ' ' * struct.calcsize(st))
    try:
        ioctl(sys.stderr, TIOCGWINSZ, buf, True)
    except IOError:
        raise NotImplementedError
    winsize = dict(zip(names, struct.unpack(st, buf.tostring())))
    return winsize['ws_col']

suffixes = ('B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')

def saneDataSize(size):
    """Format a byte count with a binary (1024-based) unit suffix."""
    if size == 0:
        # A zero size usually means the server did not report a length.
        return 'unk B'
    index = int(floor(log(abs(size), 1024)))
    index = min(index, len(suffixes) - 1)
    index = max(index, 0)
    factor = 1024 ** index
    return '%0.3f %s' % (float(size) / factor, suffixes[index])
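
# Sample values, as computed by the formula above:
#
#   saneDataSize(100)     -> '100.000 B'
#   saneDataSize(2048)    -> '2.000 kB'
#   saneDataSize(5 << 20) -> '5.000 MB'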

def splitpath(path):
    """Split a path into a list of its components."""
    c = []
    head, tail = os.path.split(path)
    while tail:
        c.insert(0, tail)
        head, tail = os.path.split(head)
    return c

def getRelativePath(basepath, path):
    """Return path expressed relative to basepath, using '..' to climb
    out of the non-shared leading components."""
    basepath = splitpath(os.path.abspath(basepath))
    path = splitpath(os.path.abspath(path))
    afterCommon = False
    for c in basepath:
        if afterCommon or path[0] != c:
            path.insert(0, os.path.pardir)
            afterCommon = True
        else:
            del path[0]
    return os.path.join(*path)
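
# Worked example with made-up POSIX paths:
#
#   getRelativePath('/home/user/comics', '/home/user/dosage/foo.gif')
#       -> '../dosage/foo.gif'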

def getQueryParams(url):
    """Return the query parameters of url as a dict of value lists."""
    query = urlparse.urlsplit(url)[3]
    out.write('Extracting query parameters from %r (%r)...' % (url, query), 3)
    return cgi.parse_qs(query)
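
# Minimal smoke test for the pure helpers above; an illustrative sketch,
# not part of the original module. It assumes this file stays importable
# inside the dosage package (for the `out` logger used by getQueryParams).
if __name__ == '__main__':
    print saneDataSize(2048)              # 2.000 kB
    print splitpath('/home/user/comics')  # ['home', 'user', 'comics'] on POSIX
    print getQueryParams('http://example.com/view?page=3&id=42')
    # -> {'page': ['3'], 'id': ['42']} (cgi.parse_qs returns value lists)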