1
/* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
3
This program is free software; you can redistribute it and/or modify
4
it under the terms of the GNU General Public License as published by
5
the Free Software Foundation; version 2 of the License.
7
This program is distributed in the hope that it will be useful,
8
but WITHOUT ANY WARRANTY; without even the implied warranty of
9
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10
GNU General Public License for more details.
12
You should have received a copy of the GNU General Public License
13
along with this program; if not, write to the Free Software
14
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
16
Library for providing TAP support for testing C and C++ was written
17
by Mats Kindahl <mats@mysql.com>.
22
#include "my_global.h"
23
#include "my_stacktrace.h"
32
Visual Studio 2003 does not know vsnprintf but knows _vsnprintf.
33
We don't put this #define elsewhere because we prefer my_vsnprintf
34
everywhere instead, except when linking with libmysys is not
35
desirable - the case here.
37
#if defined(_MSC_VER) && ( _MSC_VER == 1310 )
38
#define vsnprintf _vsnprintf
42
@defgroup MyTAP_Internal MyTAP Internals
44
Internal functions and data structures for the MyTAP implementation.
50
Data structure containing all information about the test suite.
52
@ingroup MyTAP_Internal
54
static TEST_DATA g_test = { NO_PLAN, 0, 0, "" };
57
Output stream for test report message.
59
The macro is just a temporary solution.
61
@ingroup MyTAP_Internal
66
Emit the beginning of a test line, that is: "(not) ok", test number,
69
To emit the directive, use the emit_dir() function
71
@ingroup MyTAP_Internal
75
@param pass 'true' if test passed, 'false' otherwise
76
@param fmt Description of test in printf() format.
77
@param ap Vararg list for the description string above.
80
vemit_tap(int pass, char const *fmt, va_list ap)
82
fprintf(tapout, "%sok %d%s",
85
(fmt && *fmt) ? " - " : "");
87
vfprintf(tapout, fmt, ap);
95
TAP directives are comments after that have the form:
98
ok 1 # skip reason for skipping
99
not ok 2 # todo some text explaining what remains
102
@ingroup MyTAP_Internal
104
@param dir Directive as a string
105
@param why Explanation string
108
emit_dir(const char *dir, const char *why)
110
fprintf(tapout, " # %s %s", dir, why);
116
Emit a newline to the TAP output stream.
118
@ingroup MyTAP_Internal
123
fprintf(tapout, "\n");
128
handle_core_signal(int signo)
130
/* BAIL_OUT("Signal %d thrown", signo); */
131
#ifdef HAVE_STACKTRACE
132
fprintf(stderr, "Signal %d thrown, attempting backtrace.\n", signo);
133
my_print_stacktrace(NULL, 0);
135
signal(signo, SIG_DFL);
141
BAIL_OUT(char const *fmt, ...)
145
fprintf(tapout, "Bail out! ");
146
vfprintf(tapout, fmt, ap);
154
diag(char const *fmt, ...)
158
fprintf(tapout, "# ");
159
vfprintf(tapout, fmt, ap);
164
typedef struct signal_entry {
166
void (*handler)(int);
169
static signal_entry install_signal[]= {
170
{ SIGQUIT, handle_core_signal },
171
{ SIGILL, handle_core_signal },
172
{ SIGABRT, handle_core_signal },
173
{ SIGFPE, handle_core_signal },
174
{ SIGSEGV, handle_core_signal }
176
, { SIGBUS, handle_core_signal }
179
, { SIGXCPU, handle_core_signal }
182
, { SIGXFSZ, handle_core_signal }
185
, { SIGSYS, handle_core_signal }
188
, { SIGTRAP, handle_core_signal }
192
int skip_big_tests= 1;
195
plan(int const count)
197
char *config= getenv("MYTAP_CONFIG");
201
skip_big_tests= strcmp(config, "big");
204
Install signal handler
207
for (i= 0; i < sizeof(install_signal)/sizeof(*install_signal); ++i)
208
signal(install_signal[i].signo, install_signal[i].handler);
218
fprintf(tapout, "1..%d\n", count);
227
skip_all(char const *reason, ...)
230
va_start(ap, reason);
231
fprintf(tapout, "1..0 # skip ");
232
vfprintf(tapout, reason, ap);
239
ok(int const pass, char const *fmt, ...)
244
if (!pass && *g_test.todo == '\0')
247
vemit_tap(pass, fmt, ap);
249
if (*g_test.todo != '\0')
250
emit_dir("todo", g_test.todo);
259
memset(&ap, 0, sizeof(ap));
261
if (!pass && *g_test.todo == '\0')
264
vemit_tap(pass, NULL, ap);
266
if (*g_test.todo != '\0')
267
emit_dir("todo", g_test.todo);
273
skip(int how_many, char const *fmt, ...)
280
vsnprintf(reason, sizeof(reason), fmt, ap);
286
while (how_many-- > 0)
289
memset((char*) &ap, 0, sizeof(ap)); /* Keep compiler happy */
290
vemit_tap(1, NULL, ap);
291
emit_dir("skip", reason);
297
todo_start(char const *message, ...)
300
va_start(ap, message);
301
vsnprintf(g_test.todo, sizeof(g_test.todo), message, ap);
313
If there were no plan, we write one last instead.
315
if (g_test.plan == NO_PLAN)
318
if (g_test.plan != g_test.last)
320
diag("%d tests planned but%s %d executed",
321
g_test.plan, (g_test.plan > g_test.last ? " only" : ""), g_test.last);
325
if (g_test.failed > 0)
327
diag("Failed %d tests!", g_test.failed);
335
@mainpage Testing C and C++ using MyTAP
337
@section IntroSec Introduction
339
Unit tests are used to test individual components of a system. In
340
contrast, functional tests usually test the entire system. The
341
rationale is that each component should be correct if the system is
342
to be correct. Unit tests are usually small pieces of code that
343
tests an individual function, class, a module, or other unit of the
346
Observe that a correctly functioning system can be built from
347
"faulty" components. The problem with this approach is that as the
348
system evolves, the bugs surface in unexpected ways, making
351
The advantages of using unit tests to test components of the system
354
- The unit tests can make a more thorough testing than the
355
functional tests by testing correctness even for pathological use
356
(which shouldn't be present in the system). This increases the
357
overall robustness of the system and makes maintenance easier.
359
- It is easier and faster to find problems with a malfunctioning
360
component than to find problems in a malfunctioning system. This
361
shortens the compile-run-edit cycle and therefore improves the
362
overall performance of development.
364
- The component has to support at least two uses: in the system and
365
in a unit test. This leads to more generic and stable interfaces
366
and in addition promotes the development of reusable components.
368
For example, the following are typical functional tests:
369
- Do transactions work according to specifications?
370
- Can we connect a client to the server and execute statements?
372
In contrast, the following are typical unit tests:
374
- Can the 'String' class handle a specified list of character sets?
375
- Do all operations for 'my_bitmap' produce the correct result?
376
- Do all the NIST test vectors for the AES implementation encrypt
380
@section UnitTest Writing unit tests
382
The purpose of writing unit tests is to use them to drive component
383
development towards a solution that passes the tests. This means that the
384
unit tests have to be as complete as possible, testing at least:
392
@subsection NormalSubSec Normal input
394
This is to test that the component has the expected behaviour.
395
This is just plain simple: test that it works. For example, test
396
that you can unpack what you packed, adding gives the sum, pinching
397
the duck makes it quack.
399
This is what everybody does when they write tests.
402
@subsection BorderlineTests Borderline cases
404
If you have a size anywhere for your component, does it work for
405
size 1? Size 0? Sizes close to <code>UINT_MAX</code>?
407
It might not be sensible to have a size 0, so in this case it is
408
not a borderline case, but rather a faulty input (see @ref
412
@subsection FaultyInputTests Faulty input
414
Does your bitmap handle 0 bits size? Well, it might not be designed
415
for it, but it should <em>not</em> crash the application, but
416
rather produce an error. This is called defensive programming.
418
Unfortunately, adding checks for values that should just not be
419
entered at all is not always practical: the checks cost cycles and
420
might cost more than it's worth. For example, some functions are
421
designed so that you may not give it a null pointer. In those
422
cases it's not sensible to pass it <code>NULL</code> just to see it
425
Since every experienced programmer adds an <code>assert()</code> to
426
ensure that you get a proper failure for the debug builds when a
427
null pointer is passed (you add asserts too, right?), you will in this
428
case instead have a controlled (early) crash in the debug build.
431
@subsection ErrorHandlingTests Error handling
433
This is testing that the errors your component is designed to give
434
actually are produced. For example, testing that trying to open a
435
non-existing file produces a sensible error code.
438
@subsection BadEnvironmentTests Environment
440
Sometimes, modules have to behave well even when the environment
441
fails to work correctly. Typical examples are when the computer is
442
out of dynamic memory or when the disk is full. You can emulate
443
this by replacing, e.g., <code>malloc()</code> with your own
444
version that will work for a while, but then fail. Some things are
445
worth keeping in mind here:
447
- Make sure to make the function fail deterministically, so that
448
you really can repeat the test.
450
- Make sure that it doesn't just fail immediately. The unit might
451
have checks for the first case, but might actually fail some time
455
@section UnitTest How to structure a unit test
457
In this section we will give some advice on how to structure the
458
unit tests to make the development run smoothly. The basic
459
structure of a test is:
466
@subsection TestPlanning Plan the test
468
Planning the test means telling how many tests there are. In the
469
event that one of the tests causes a crash, it is then possible to
470
see that there are fewer tests than expected, and print a proper
473
To plan a test, use the @c plan() function in the following manner:
476
int main(int argc, char *argv[])
485
If you don't call the @c plan() function, the number of tests
486
executed will be printed at the end. This is intended to be used
487
while developing the unit and you are constantly adding tests. It
488
is not intended to be used after the unit has been released.
491
@subsection TestRunning Execute the test
493
To report the status of a test, the @c ok() function is used in the
497
int main(int argc, char *argv[])
500
ok(ducks == paddling_ducks,
501
"%d ducks did not paddle", ducks - paddling_ducks);
508
This will print a test result line on the standard output in TAP
509
format, which allows TAP handling frameworks (like Test::Harness)
510
to parse the status of the test.
512
@subsection TestReport Report the result of the test
514
At the end, a complete test report should be written, with some
515
statistics. If the test returns EXIT_SUCCESS, all tests were
516
successful, otherwise at least one test failed.
518
To get a TAP compliant output and exit status, report the exit
519
status in the following manner:
522
int main(int argc, char *argv[])
525
ok(ducks == paddling_ducks,
526
"%d ducks did not paddle", ducks - paddling_ducks);
530
return exit_status();
534
@section DontDoThis Ways to not do unit testing
536
In this section, we'll go through some quite common ways to write
537
tests that are <em>not</em> a good idea.
539
@subsection BreadthFirstTests Doing breadth-first testing
541
If you're writing a library with several functions, don't test all
542
functions using size 1, then all functions using size 2, etc. If a
543
test for size 42 fails, you have no easy way of tracking down why
546
It is better to concentrate on getting one function to work at a
547
time, which means that you test each function for all sizes that
548
you think are reasonable. Then you continue with the next function,
549
doing the same. This is usually also the way that a library is
550
developed (one function at a time) so stick to testing that is
551
appropriate for how the unit is developed.
553
@subsection JustToBeSafeTest Writing unnecessarily large tests
555
Don't write tests that use parameters in the range 1-1024 unless
556
you have a very good reason to believe that the component will
557
succeed for 562 but fail for 564 (the numbers picked are just
560
It is very common to write extensive tests "just to be safe."
561
Having a test suite with a lot of values might give you a warm
562
fuzzy feeling, but it doesn't really help you find the bugs. Good
563
tests fail; seriously, if you write a test that you expect to
564
succeed, you don't need to write it. If you think that it
565
<em>might</em> fail, <em>then</em> you should write it.
567
Don't take this as an excuse to avoid writing any tests at all
568
"since I make no mistakes" (when it comes to this, there are two
569
kinds of people: those who admit they make mistakes, and those who
570
don't); rather, this means that there is no reason to test that
571
using a buffer with size 100 works when you have a test for buffer
574
The drawback is that the test suite takes longer to run, for little
575
or no benefit. It is acceptable to do an exhaustive test if it
576
doesn't take too long to run and it is quite common to do an
577
exhaustive test of a function for a small set of values.
578
Use your judgment to decide what is excessive: your mileage may
585
This is a simple example of how to write a test using the
586
library. The output of this program is:
590
# Testing basic functions
594
The basic structure is: plan the number of test points using the
595
plan() function, perform the test and write out the result of each
596
test point using the ok() function, print out a diagnostics message
597
using diag(), and report the result of the test by calling the
598
exit_status() function. Observe that this test does excessive
599
testing (see @ref JustToBeSafeTest), but the test point doesn't
606
This example demonstrates how to use the <code>todo_start()</code>
607
and <code>todo_end()</code> function to mark a sequence of tests to
608
be done. Observe that the tests are assumed to fail: if any test
609
succeeds, it is considered a "bonus".
615
This is an example of how the <code>SKIP_BLOCK_IF</code> can be
616
used to skip a predetermined number of tests. Observe that the
617
macro actually skips the following statement, but it's not sensible
618
to use anything other than a block.
622
@example skip_all.t.c
624
Sometimes, you skip an entire test because it's testing a feature
625
that doesn't exist on the system that you're testing. To skip an
626
entire test, use the <code>skip_all()</code> function according to