# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import with_statement
7
import glob, logging, os, platform, shutil, subprocess, sys, tempfile, urllib2, zipfile
9
from urlparse import urlparse
# Map of debugging programs to information about them, like default arguments
# and whether or not they are interactive.

# gdb requires that you supply the '--args' flag in order to pass arguments
# after the executable name to the executable.

# valgrind doesn't explain much about leaks unless you set the
# '--leak-check=full' flag.
38
"args": "--leak-check=full"
42
# NOTE(review): this chunk appears to be a garbled paste — the bare integer
# lines look like line numbers from the source file leaked into the text,
# indentation is flattened, and several statements plus the docstring quote
# delimiters are missing.  Comments describe only the visible fragments.
class ZipFileReader(object):
44
Class to read zip files in Python 2.5 and later. Limited to only what we
48
# Open the named archive read-only and keep the zipfile handle.
def __init__(self, filename):
49
self._zipfile = zipfile.ZipFile(filename, "r")
54
# Normalizes/expands 'path' and asserts it is an existing directory; per
# its docstring it should fall back to the current working directory when
# 'path' is None, but that branch (and the return) is not visible here.
def _getnormalizedpath(self, path):
56
Gets a normalized path from 'path' (or the current working directory if
57
'path' is None). Also asserts that the path exists.
61
path = os.path.normpath(os.path.expanduser(path))
62
assert os.path.isdir(path)
65
# Writes archive member 'name' under directory 'path'; the directory-
# creation call promised by the docstring is not visible in this chunk.
def _extractname(self, name, path):
67
Extracts a file with the given name from the zip file to the given path.
68
Also creates any directories needed along the way.
70
filename = os.path.normpath(os.path.join(path, name))
71
if name.endswith("/"):
74
path = os.path.split(filename)[0]
75
if not os.path.isdir(path):
77
with open(filename, "wb") as dest:
78
dest.write(self._zipfile.read(name))
81
# Orphaned return statements: the 'def' lines for what look like
# namelist() and read(name) wrappers around self._zipfile were lost.
return self._zipfile.namelist()
84
return self._zipfile.read(name)
86
# Extract one member: defer to the zipfile object's own extract() when it
# exists, otherwise validate the name and use the manual helper above.
def extract(self, name, path = None):
87
if hasattr(self._zipfile, "extract"):
88
return self._zipfile.extract(name, path)
90
# This will throw if name is not part of the zip file.
91
self._zipfile.getinfo(name)
93
self._extractname(name, self._getnormalizedpath(path))
95
# Extract every member, preferring the zipfile object's native
# extractall() and falling back to looping over namelist().
def extractall(self, path = None):
96
if hasattr(self._zipfile, "extractall"):
97
return self._zipfile.extractall(path)
99
path = self._getnormalizedpath(path)
101
for name in self._zipfile.namelist():
102
self._extractname(name, path)
104
log = logging.getLogger()
107
"""Return True if |thing| looks like a URL."""
108
# We want to download URLs like http://... but not Windows paths like c:\...
109
return len(urlparse(thing).scheme) >= 2
111
# NOTE(review): optparse-based option setup (Python 2 era), garbled by
# extraction — the stray integer lines are artifacts, and at least one line
# appears lost (the "individual scripts will set a sane default" comment
# suggests a 'default = None,' line for --xre-path that is not visible).
# TODO(review): 'defaults={}' is a mutable default argument that is mutated
# below (defaults['SYMBOLS_PATH'] = None); the mutation persists across
# calls — should use defaults=None and copy inside.
def addCommonOptions(parser, defaults={}):
112
parser.add_option("--xre-path",
113
action = "store", type = "string", dest = "xrePath",
114
# individual scripts will set a sane default
116
help = "absolute path to directory containing XRE (probably xulrunner)")
117
# Ensure a SYMBOLS_PATH key exists so the lookup below cannot raise.
if 'SYMBOLS_PATH' not in defaults:
118
defaults['SYMBOLS_PATH'] = None
119
parser.add_option("--symbols-path",
120
action = "store", type = "string", dest = "symbolsPath",
121
default = defaults['SYMBOLS_PATH'],
122
help = "absolute path to directory containing breakpad symbols, or the URL of a zip file containing symbols")
123
parser.add_option("--debugger",
124
action = "store", dest = "debugger",
125
help = "use the given debugger to launch the application")
126
parser.add_option("--debugger-args",
127
action = "store", dest = "debuggerArgs",
128
help = "pass the given args to the debugger _before_ "
129
"the application on the command line")
130
parser.add_option("--debugger-interactive",
131
action = "store_true", dest = "debuggerInteractive",
132
help = "prevents the test harness from redirecting "
133
"stdout and stderr for interactive debuggers")
135
# NOTE(review): garbled fragment of a crash-dump checker (Python 2: print
# statements, urllib2).  Stray integer lines are extraction artifacts and
# large parts are missing — notably the 'if testName is None:' guard, the
# loop header iterating the found dumps (the bare 'd' below is otherwise
# undefined), and the function's return statement.  Annotations describe
# only the visible fragments.
def checkForCrashes(dumpDir, symbolsPath, testName=None):
136
# Stackwalk binary location comes from the environment, not an argument.
stackwalkPath = os.environ.get('MINIDUMP_STACKWALK', None)
137
# try to get the caller's filename if no test name is given
140
testName = os.path.basename(sys._getframe(1).f_code.co_filename)
144
# Check preconditions
145
dumps = glob.glob(os.path.join(dumpDir, '*.dmp'))
150
removeSymbolsPath = False
152
# If our symbols are at a remote URL, download them now
153
if isURL(symbolsPath):
154
print "Downloading symbols from: " + symbolsPath
155
removeSymbolsPath = True
156
# Get the symbols and write them to a temporary zipfile
157
data = urllib2.urlopen(symbolsPath)
158
symbolsFile = tempfile.TemporaryFile()
159
symbolsFile.write(data.read())
160
# extract symbols to a temporary directory (which we'll delete after
161
# processing all crashes)
162
symbolsPath = tempfile.mkdtemp()
163
zfile = ZipFileReader(symbolsFile)
164
zfile.extractall(symbolsPath)
168
# Per-dump processing; the enclosing 'for d in dumps:' header is missing.
log.info("PROCESS-CRASH | %s | application crashed (minidump found)", testName)
169
print "Crash dump filename: " + d
170
if symbolsPath and stackwalkPath and os.path.exists(stackwalkPath):
171
# run minidump stackwalk
172
p = subprocess.Popen([stackwalkPath, d, symbolsPath],
173
stdout=subprocess.PIPE,
174
stderr=subprocess.PIPE)
175
(out, err) = p.communicate()
177
# minidump_stackwalk is chatty, so ignore stderr when it succeeds.
180
print "stderr from minidump_stackwalk:"
182
if p.returncode != 0:
183
print "minidump_stackwalk exited with return code %d" % p.returncode
186
# Diagnostics for why the dump could not be symbolicated.
print "No symbols path given, can't process dump."
187
if not stackwalkPath:
188
print "MINIDUMP_STACKWALK not set, can't process dump."
189
elif stackwalkPath and not os.path.exists(stackwalkPath):
190
print "MINIDUMP_STACKWALK binary not found: %s" % stackwalkPath
191
# Optionally preserve the dump (and its .extra file) for later inspection.
dumpSavePath = os.environ.get('MINIDUMP_SAVE_PATH', None)
193
shutil.move(d, dumpSavePath)
194
print "Saved dump as %s" % os.path.join(dumpSavePath,
198
extra = os.path.splitext(d)[0] + ".extra"
199
if os.path.exists(extra):
203
# Clean up symbols that were downloaded and extracted above.
if removeSymbolsPath:
204
shutil.rmtree(symbolsPath)
208
def getFullPath(directory, path):
  """Get an absolute, normalized path for 'path' relative to 'directory'.

  '~' in 'path' is expanded first; if that (or 'path' itself) is already
  absolute, os.path.join discards 'directory' and the absolute path wins.
  """
  # normpath collapses '.', '..' and doubled separators in the result.
  return os.path.normpath(os.path.join(directory, os.path.expanduser(path)))
212
# NOTE(review): fragment — the 'return newpath' statements after each
# isfile() hit and the final fall-through return are missing from this
# chunk.  Resolves 'path' relative to 'directory' first, then searches the
# directories listed in the PATH environment variable.
def searchPath(directory, path):
213
"Go one step beyond getFullPath and try the various folders in PATH"
214
# Try looking in the current working directory first.
215
newpath = getFullPath(directory, path)
216
if os.path.isfile(newpath):
219
# At this point we have to fail if a directory was given (to prevent cases
220
# like './gdb' from matching '/usr/bin/./gdb').
221
if not os.path.dirname(path):
222
# NOTE(review): 'dir' shadows the builtin; harmless here, worth renaming.
for dir in os.environ['PATH'].split(os.pathsep):
223
newpath = os.path.join(dir, path)
224
if os.path.isfile(newpath):
228
# NOTE(review): fragment — missing pieces include the docstring, the guard
# around the "Error: Path ..." print, the inner helper's fallback
# 'return default', the 'debuggerInfo = {' dict opener, and the final
# 'return debuggerInfo'.  Builds a debugger descriptor (path/interactive/
# args) from the module-level DEBUGGER_INFO table, overridden by the
# caller-supplied arguments.
def getDebuggerInfo(directory, debugger, debuggerArgs, debuggerInteractive = False):
233
debuggerPath = searchPath(directory, debugger)
235
print "Error: Path %s doesn't exist." % debugger
238
debuggerName = os.path.basename(debuggerPath).lower()
240
# Inner helper (shadows the outer function name): looks up a per-debugger
# attribute in DEBUGGER_INFO, falling back to 'default' (fallback return
# not visible here).
def getDebuggerInfo(type, default):
241
if debuggerName in DEBUGGER_INFO and type in DEBUGGER_INFO[debuggerName]:
242
return DEBUGGER_INFO[debuggerName][type]
246
"path": debuggerPath,
247
"interactive" : getDebuggerInfo("interactive", False),
248
"args": getDebuggerInfo("args", "").split()
252
# Caller-supplied args/interactivity override the table defaults.
debuggerInfo["args"] = debuggerArgs.split()
253
if debuggerInteractive:
254
debuggerInfo["interactive"] = debuggerInteractive
259
# NOTE(review): fragment — the docstring's closing quotes, the early
# 'return' after the existence check, the leaks.close() call, and the body
# of the 'if filter ...' branch are not visible in this chunk.
def dumpLeakLog(leakLogFile, filter = False):
260
"""Process the leak log, without parsing it.
262
Use this function if you want the raw log only.
263
Use it preferably with the |XPCOM_MEM_LEAK_LOG| environment variable.
266
# Don't warn (nor "info") if the log file is not there.
267
if not os.path.exists(leakLogFile):
270
# NOTE(review): file handle is read here; its close() is not visible.
leaks = open(leakLogFile, "r")
271
leakReport = leaks.read()
274
# Only |XPCOM_MEM_LEAK_LOG| reports can be actually filtered out.
275
# Only check whether an actual leak was reported.
276
if filter and not "0 TOTAL " in leakReport:
279
# Simply copy the log.
280
log.info(leakReport.rstrip("\n"))
282
# NOTE(review): garbled fragment.  Missing pieces include the docstring
# close, the 'for line in leaks:' headers for both passes over the file,
# several truncated 'if' conditions, the dict opener for the final
# log.info() mapping, and the leaks.close() calls.  're' is used but not
# visibly imported — presumably on a lost import line.  Annotations cover
# only what is visible.
def processSingleLeakFile(leakLogFileName, PID, processType, leakThreshold):
283
"""Process a single leak log, corresponding to the specified
284
process PID and type.
287
# Expected bloat-log table shape that the regex below parses:
# Per-Inst Leaked Total Rem ...
288
# 0 TOTAL 17 192 419115886 2 ...
289
# 833 nsTimerImpl 60 120 24726 2 ...
290
# Captures name, per-instance size, bytes leaked and number leaked;
# negative numbers are accepted so corrupt logs can be detected below.
lineRe = re.compile(r"^\s*\d+\s+(?P<name>\S+)\s+"
291
r"(?P<size>-?\d+)\s+(?P<bytesLeaked>-?\d+)\s+"
292
r"-?\d+\s+(?P<numLeaked>-?\d+)")
295
# Prefix log lines with the child process type/PID when given.
if PID and processType:
296
processString = "| %s process %s " % (processType, PID)
297
# First pass over the log: echo lines (the filter condition is truncated).
leaks = open(leakLogFileName, "r")
299
matches = lineRe.match(line)
301
int(matches.group("numLeaked")) == 0 and
302
matches.group("name") != "TOTAL"):
304
log.info(line.rstrip())
307
# Second pass: parse each leak record and summarize.
leaks = open(leakLogFileName, "r")
309
crashedOnPurpose = False
313
if line.find("purposefully crash") > -1:
314
crashedOnPurpose = True
315
matches = lineRe.match(line)
318
name = matches.group("name")
319
size = int(matches.group("size"))
320
bytesLeaked = int(matches.group("bytesLeaked"))
321
numLeaked = int(matches.group("numLeaked"))
322
# Negative values indicate a corrupt/bogus log: fail loudly.
if size < 0 or bytesLeaked < 0 or numLeaked < 0:
323
log.info("TEST-UNEXPECTED-FAIL %s| automationutils.processLeakLog() | negative leaks caught!" %
327
elif name == "TOTAL":
330
# TOTAL row: pass/fail against the caller-supplied byte threshold.
if bytesLeaked < 0 or bytesLeaked > leakThreshold:
331
prefix = "TEST-UNEXPECTED-FAIL"
332
leakLog = "TEST-UNEXPECTED-FAIL %s| automationutils.processLeakLog() | leaked" \
333
" %d bytes during test execution" % (processString, bytesLeaked)
334
elif bytesLeaked > 0:
335
leakLog = "TEST-PASS %s| automationutils.processLeakLog() | WARNING leaked" \
336
" %d bytes during test execution" % (processString, bytesLeaked)
338
leakLog = "TEST-PASS %s| automationutils.processLeakLog() | no leaks detected!" \
340
# Remind the threshold if it is not 0, which is the default/goal.
341
if leakThreshold != 0:
342
leakLog += " (threshold set at %d bytes)" % leakThreshold
343
# Log the information.
348
# Pluralize "instance" for the per-record summary line.
instance = "instances"
349
rest = " each (%s bytes total)" % matches.group("bytesLeaked")
351
instance = "instance"
355
# don't spam brief tinderbox logs with tons of leak output
357
log.info("%(prefix)s %(process)s| automationutils.processLeakLog() | leaked %(numLeaked)d %(instance)s of %(name)s "
358
"with size %(size)s bytes%(rest)s" %
360
"process": processString,
361
"numLeaked": numLeaked,
362
"instance": instance,
364
"size": matches.group("size"),
368
log.info("INFO | automationutils.processLeakLog() | process %s was " \
369
"deliberately crashed and thus has no leak log" % PID)
371
log.info("TEST-UNEXPECTED-FAIL %s| automationutils.processLeakLog() | missing output line for total leaks!" %
376
# NOTE(review): fragment — the docstring close, the 'return' after the
# missing-file warning, the processSingleLeakFile() call for the main log,
# and the guard around the regex match are not visible in this chunk.
# Scans the leak log's directory for per-child logs named like
# '<base>_<type>_pid<PID>.log' and processes each one.
def processLeakLog(leakLogFile, leakThreshold = 0):
377
"""Process the leak log, including separate leak logs created
380
Use this function if you want an additional PASS/FAIL summary.
381
It must be used with the |XPCOM_MEM_BLOAT_LOG| environment variable.
384
if not os.path.exists(leakLogFile):
385
log.info("WARNING | automationutils.processLeakLog() | refcount logging is off, so leaks can't be detected!")
388
(leakLogFileDir, leakFileBase) = os.path.split(leakLogFile)
389
pidRegExp = re.compile(r".*?_([a-z]*)_pid(\d*)$")
390
# If the base name carries a '.log' suffix, strip it and expect the suffix
# in the per-child file names instead.
if leakFileBase[-4:] == ".log":
391
leakFileBase = leakFileBase[:-4]
392
# NOTE(review): the '.' before 'log' is unescaped, so it matches any char.
pidRegExp = re.compile(r".*?_([a-z]*)_pid(\d*).log$")
394
for fileName in os.listdir(leakLogFileDir):
395
if fileName.find(leakFileBase) != -1:
396
thisFile = os.path.join(leakLogFileDir, fileName)
399
m = pidRegExp.search(fileName)
401
processType = m.group(1)
402
processPID = m.group(2)
403
processSingleLeakFile(thisFile, processPID, processType, leakThreshold)
405
def replaceBackSlashes(input):
  """Return 'input' with every backslash replaced by a forward slash.

  Used to normalize Windows-style paths.  NOTE: the parameter name 'input'
  shadows the builtin but is kept for caller compatibility.
  """
  return input.replace('\\', '/')
408
# NOTE(review): fragment — the docstring quote delimiters and the final
# 'return cmd' fall-through (implied by the trailing comment) are not
# visible here.  Prefixes |cmd| with 'arch -arch i386' on old Mac OS X.
def wrapCommand(cmd):
410
If running on OS X 10.5 or older, wrap |cmd| so that it will
411
be executed as an i386 binary, in case it's a 32-bit/64-bit universal
414
# NOTE(review): lexicographic string comparison of the version string;
# adequate for the 10.x versions this targeted, but not a numeric compare.
if platform.system() == "Darwin" and \
415
hasattr(platform, 'mac_ver') and \
416
platform.mac_ver()[0][:4] < '10.6':
417
return ["arch", "-arch", "i386"] + cmd
418
# otherwise just execute the command normally