class TCImage(TC):
    """
    Image Test Case Class.

    Uses ImageMagick's "compare" program to compare two image files.
    """

    def __init__(self):
        # Metrics understood by ImageMagick's "compare", plus "none" for a
        # plain size-based comparison.
        self.supportedmetrics = ("MAE", "MSE", "PSE", "PSNR", "RMSE", "none")
        self.scratchDir = config.scratchDir
        # Flag for whether the composite (difference) file should be deleted;
        # compare() sets it when it generates a default filename.
        self.deldfile = 0

        # Get the ImageMagick version by parsing its '-version' output.
        IMVer = os.popen('compare -version').readline()
        IMVer = re.match(r'Version: ImageMagick ([0-9.]+) .*', IMVer)
        if IMVer is not None:
            IMVer = IMVer.groups()[0]
            TCImage.IMVersion = IMVer
        else:
            # 'compare' is missing or its output was unparseable.
            raise DependencyNotFoundError("ImageMagick")
101
# Use ImageMagick to compare 2 files
102
def compare(self, label, baseline, undertest, dfile='default', metric='none', threshold=0.0):
104
Calls ImageMagick's "compare" program. Default compares are based on
105
size but metric based comparisons are also supported with a threshold
106
determining pass/fail criteria.
84
def compare(self, label, baseline, undertest):
85
for _file in (baseline, undertest):
86
if type(_file) is not unicode and type(_file) is not str:
87
raise TypeError("Need filenames!")
108
88
self.label = label.strip()
109
89
self.baseline = baseline.strip()
110
90
self.undertest = undertest.strip()
111
self.difference = 0.0
112
self.metric = metric.strip()
113
self.threshold = threshold
115
# Create a default filename and flag it for deletion
116
if dfile == "default":
118
# Remove all whitespace from the label since IM chokes on it
119
splabel = label.split(" ")
120
label = "".join(splabel)
121
self.dfile = self.scratchDir + x.fileStamp(label)
123
else: # or use the supplied one with no deletion
127
# Check to see if the metric type is supported
128
if self.metric in self.supportedmetrics:
129
# This is a bit convoluted and will be until IM completes
130
# implementation of command line chaining. This should be put into
131
# a try also munge together our IM call chain, if we're not doing
133
if self.metric == "none":
134
# Build the comparison string. Redirect STDERR to STDOUT;
135
# IM writes to STDERR and cmd reads STDOUT
136
cmd = ("compare " + self.baseline + " " + self.undertest + " " + self.dfile + " " + "2>&1")
137
# os.popen returns a list; if it is empty then we have passed
138
answer = os.popen(cmd).readlines()
140
self.result = {self.label: "Passed - Images are the same size"}
142
fanswer = answer[0].strip()
143
self.result = {self.label: "Failed - " + fanswer}
144
TC.logger.log(self.result)
146
else: # otherwise run the metric code
148
cmd = ("compare -metric " + self.metric + " " + self.baseline + " " + self.undertest + " " + self.dfile + " " + "2>&1")
149
answer = os.popen(cmd).readlines()
151
# We need to check if the metric comparison failed. Unfortunately we
152
# can only tell this by checking the length of the output of the
153
# command. More unfortunately, ImageMagic changed the length of the
154
# output at version 6.2.4, so we have to work around that.
156
IMVersion = TCImage.IMVersion
157
if IMVersion <= '6.2.3' and len(answer) == 1: metricFailed = False
158
if IMVersion >= '6.2.4' and len(answer) != 1: metricFailed = False
161
self.result = {self.label: "Failed - " + fanswer}
162
else: # grab the metric from answer and convert it to a number
163
fanswer = answer[0].strip()
164
fanswer = fanswer.split(" ")
166
fanswer = float(fanswer)
168
if fanswer == float("inf"): #same under PSNR returns inf dB:
169
self.result = {self.label: "Passed - " + "compare results: " + str(fanswer) + " dB"}
170
elif fanswer > self.threshold:
171
excess = fanswer - self.threshold
172
self.result = {self.label: "Failed - " + "compare result exceeds threshold by: " + str(excess) + " dB"}
174
under = self.threshold - fanswer
175
self.result = {self.label: "Passed - " + "compare results under threshold by: " + str(under) + " dB"}
176
TC.logger.log(self.result)
179
# delete the composite image file if self.dfile is default
180
if self.deldfile == 1:
182
os.remove(self.dfile)
184
debugLogger.log("Could not delete tempfile " + self.dfile)
186
else: # unsupported metric given
187
self.result = {self.label: "Failed - " + self.metric + " is not in the list of supported metrics"}
188
TC.logger.log(self.result)
91
diffName = TimeStamp().fileStamp("diff") + ".png"
92
self.diff = os.path.normpath(
93
os.path.sep.join((config.scratchDir, diffName)))
95
self.baseImage = Image.open(self.baseline)
96
self.testImage = Image.open(self.undertest)
98
if self.baseImage.size != self.testImage.size:
99
self.result = {self.label: "Failed - images are different sizes"}
102
self.diffImage = ImageChops.difference(self.baseImage,
104
self.diffImage.save(self.diff)
106
for stat in ('stddev', 'mean', 'sum2'):
107
for item in getattr(ImageStat.Stat(self.diffImage), stat):
109
self.result = {self.label: "Failed - see %s" %
113
except StopIteration:
116
if result: self.result = {self.label: "Passed"}
118
TC.logger.log(self.result)
193
122
class TCNumber(TC):
237
166
TC.logger.log(self.result)
238
167
return self.result
170
def __init__(self): pass
172
def compare(self, label, _bool):
174
If _bool is True, pass.
175
If _bool is False, fail.
177
if type(_bool) is not bool: raise TypeError
178
if _bool: result = {label: "Passed"}
179
else: result = {label: "Failed"}
180
TC.logger.log(result)
182
from tree import Node
184
def __init__(self): pass
186
def compare(self, label, baseline, undertest):
188
If baseline is None, simply check that undertest is a Node.
189
If baseline is a Node, check that it is equal to undertest.
191
if baseline is not None and not isinstance(baseline, Node):
194
if not isinstance(undertest, Node):
195
result = {label: "Failed - %s is not a Node" % undertest}
196
elif baseline is None:
197
result = {label: "Passed - %s is a Node" % undertest}
198
elif isinstance(baseline, Node):
199
if baseline == undertest:
200
result = {label: "Passed - %s == %s" % (baseline, undertest)}
201
else: result = {label: "Failed - %s != %s" % (baseline, undertest)}
202
TC.logger.log(result)
241
205
if __name__ == '__main__':
242
206
# import the test modules
404
367
# Fire off the compare
405
368
result = case3.compare(label, baseline, undertest)
407
# Print the result Should be label - Failied compare:Images differ in size
370
# Print the result Should be label - Failed
410
# Image compare pass with the metrics option
373
# Compare the same image
411
374
label = "unit test case 3.2"
412
baseline = "../examples/data/20w.png"
413
undertest = "../examples/data/20b.png"
415
metrics = ("MAE", "MSE", "PSE", "PSNR"," RMSE")
417
for i in range(len(metrics)):
418
result = case3.compare(label, baseline, undertest, metric=metrics[i])
422
# Image compare fail metrics
423
label = "unit test case 3.3"
424
baseline = "../examples/data/10w.png"
425
undertest = "../examples/data/10b.png"
427
metrics = ("MAE", "MSE", "PSE", "PSNR"," RMSE")
429
for i in range(len(metrics)):
430
result = case3.compare(label, baseline, undertest, metric=metrics[i])
434
# Image comapre threshold metrics - only PNSR should pass
435
label = "unit test case 3.4"
436
baseline = "../examples/data/10w.png"
437
undertest = "../examples/data/10b.png"
439
metrics = ("MAE", "MSE", "PSE", "PSNR"," RMSE")
442
for i in range(len(metrics)):
443
result = case3.compare(label, baseline, undertest, metric=metrics[i], threshold=bound)
448
label = "unit test case 3.5"
449
baseline = "../examples/data/10w.png"
450
undertest = "../examples/data/10b.png"
454
result = case3.compare(label, baseline, undertest, metric=metrics)
456
# Should be failed - metric unsupported
375
baseline = "../examples/data/10w.png"
376
undertest = "../examples/data/10w.png"
379
# Fire off the compare
380
result = case3.compare(label, baseline, undertest)
382
# Print the result Should be label - Passed
459
385
# Number comparison tests