/*
 *  linux/drivers/mmc/card/mmc_test.c
 *
 *  Copyright 2007-2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>

#include <linux/scatterlist.h>
#include <linux/swap.h>		/* For nr_free_buffer_pages() */
#include <linux/list.h>

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#define RESULT_UNSUP_HOST	2
#define RESULT_UNSUP_CARD	3

#define BUFFER_ORDER		2
#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)

/*
 * Limit the test area size to the maximum MMC HC erase group size.  Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
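/*
 * With the common 4 KiB PAGE_SIZE, BUFFER_ORDER 2 gives a 16 KiB (4-page)
 * transfer buffer; TEST_AREA_MAX_SIZE caps the performance test area at
 * 128 MiB.
 */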
/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};
/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};
/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};
/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list
 * @count: number of groups of sectors checked
 * @sectors: number of sectors in each group
 * @ts: time values of transfer
 * @rate: calculated transfer rate
 * @iops: I/O operations per second (times 100)
 */
struct mmc_test_transfer_result {
	struct list_head link;
	unsigned int count;
	unsigned int sectors;
	struct timespec ts;
	unsigned int rate;
	unsigned int iops;
};
/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card *card;
	int testcase;
	int result;
	struct list_head tr_lst;
};
/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: double-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
	struct list_head link;
	struct mmc_card *card;
	struct dentry *file;
};
/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase
 */
struct mmc_test_card {
	struct mmc_card	*card;

	u8		scratch[BUFFER_SIZE];
	u8		*buffer;
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;
#endif
	struct mmc_test_area		area;
	struct mmc_test_general_result	*gr;
};
enum mmc_test_prep_media {
	MMC_TEST_PREP_NONE = 0,
	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
	MMC_TEST_PREP_ERASE = 1 << 1,
};
struct mmc_test_multiple_rw {
	unsigned int *sg_len;
	unsigned int *bs;
	unsigned int len;
	unsigned int size;
	bool do_write;
	bool do_nonblock_req;
	enum mmc_test_prep_media prepare;
};
struct mmc_test_async_req {
	struct mmc_async_req areq;
	struct mmc_test_card *test;
};
/*******************************************************************/
/*  General helper functions                                       */
/*******************************************************************/

/*
 * Configure correct block size in card
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	return mmc_set_blocklen(test->card, size);
}
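/*
 * Note: mmc_set_blocklen() issues CMD16 (SET_BLOCKLEN).  Most tests below
 * use the standard 512-byte block size; the partial-block tests vary it.
 */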
/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_set_data_timeout(mrq->data, test->card);
}
static int mmc_test_busy(struct mmc_command *cmd)
{
	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
}

/*
 * Wait for the card to finish the busy state
 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy = 0;
	struct mmc_command cmd = {0};

	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
				pr_info("%s: Warning: Host did not "
					"wait for busy state to end.\n",
					mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}
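/*
 * mmc_test_wait_busy() above polls CMD13 (SEND_STATUS) until the card leaves
 * the programming state; the warning fires when a host that advertises
 * MMC_CAP_WAIT_WHILE_BUSY nevertheless returned control while the card was
 * still busy.
 */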
264
* Transfer a single sector of kernel addressable data
266
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
267
u8 *buffer, unsigned addr, unsigned blksz, int write)
271
struct mmc_request mrq = {0};
272
struct mmc_command cmd = {0};
273
struct mmc_command stop = {0};
274
struct mmc_data data = {0};
276
struct scatterlist sg;
282
sg_init_one(&sg, buffer, blksz);
284
mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
286
mmc_wait_for_req(test->card->host, &mrq);
293
ret = mmc_test_wait_busy(test);
300
static void mmc_test_free_mem(struct mmc_test_mem *mem)
305
__free_pages(mem->arr[mem->cnt].page,
306
mem->arr[mem->cnt].order);
/*
 * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
 * there isn't much memory do not exceed 1/16th total lowmem pages.  Also do
 * not exceed a maximum number of segments and try not to make segments much
 * bigger than maximum segment size.
 */
317
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
318
unsigned long max_sz,
319
unsigned int max_segs,
320
unsigned int max_seg_sz)
322
unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
323
unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
324
unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
325
unsigned long page_cnt = 0;
326
unsigned long limit = nr_free_buffer_pages() >> 4;
327
struct mmc_test_mem *mem;
329
if (max_page_cnt > limit)
330
max_page_cnt = limit;
331
if (min_page_cnt > max_page_cnt)
332
min_page_cnt = max_page_cnt;
334
if (max_seg_page_cnt > max_page_cnt)
335
max_seg_page_cnt = max_page_cnt;
337
if (max_segs > max_page_cnt)
338
max_segs = max_page_cnt;
340
mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
344
mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
349
while (max_page_cnt) {
352
gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
355
order = get_order(max_seg_page_cnt << PAGE_SHIFT);
357
page = alloc_pages(flags, order);
363
if (page_cnt < min_page_cnt)
367
mem->arr[mem->cnt].page = page;
368
mem->arr[mem->cnt].order = order;
370
if (max_page_cnt <= (1UL << order))
372
max_page_cnt -= 1UL << order;
373
page_cnt += 1UL << order;
374
if (mem->cnt >= max_segs) {
375
if (page_cnt < min_page_cnt)
384
mmc_test_free_mem(mem);
389
* Map memory into a scatterlist. Optionally allow the same memory to be
390
* mapped more than once.
392
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
393
struct scatterlist *sglist, int repeat,
394
unsigned int max_segs, unsigned int max_seg_sz,
395
unsigned int *sg_len, int min_sg_len)
397
struct scatterlist *sg = NULL;
399
unsigned long sz = size;
401
sg_init_table(sglist, max_segs);
402
if (min_sg_len > max_segs)
403
min_sg_len = max_segs;
407
for (i = 0; i < mem->cnt; i++) {
408
unsigned long len = PAGE_SIZE << mem->arr[i].order;
410
if (min_sg_len && (size / min_sg_len < len))
411
len = ALIGN(size / min_sg_len, 512);
414
if (len > max_seg_sz)
422
sg_set_page(sg, mem->arr[i].page, len, 0);
428
} while (sz && repeat);
440
* Map memory into a scatterlist so that no pages are contiguous. Allow the
441
* same memory to be mapped more than once.
443
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
445
struct scatterlist *sglist,
446
unsigned int max_segs,
447
unsigned int max_seg_sz,
448
unsigned int *sg_len)
450
struct scatterlist *sg = NULL;
451
unsigned int i = mem->cnt, cnt;
453
void *base, *addr, *last_addr = NULL;
455
sg_init_table(sglist, max_segs);
459
base = page_address(mem->arr[--i].page);
460
cnt = 1 << mem->arr[i].order;
462
addr = base + PAGE_SIZE * --cnt;
463
if (last_addr && last_addr + PAGE_SIZE == addr)
467
if (len > max_seg_sz)
477
sg_set_page(sg, virt_to_page(addr), len, 0);
/*
 * Calculate transfer rate in bytes per second.
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
{
	uint64_t ns = timespec_to_ns(ts);

	bytes *= 1000000000;
	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}
	if (!ns)
		return 0;
	do_div(bytes, (uint32_t)ns);
	return bytes;
}
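/*
 * The shift loop above scales bytes and ns down together until ns fits in
 * 32 bits, so the 64-by-32 bit do_div() stays valid while the ratio (and
 * hence the computed rate) is preserved.
 */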
518
* Save transfer results for future usage
520
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
521
unsigned int count, unsigned int sectors, struct timespec ts,
522
unsigned int rate, unsigned int iops)
524
struct mmc_test_transfer_result *tr;
529
tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
534
tr->sectors = sectors;
539
list_add_tail(&tr->link, &test->gr->tr_lst);
543
* Print the transfer rate.
545
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
546
struct timespec *ts1, struct timespec *ts2)
548
unsigned int rate, iops, sectors = bytes >> 9;
551
ts = timespec_sub(*ts2, *ts1);
553
rate = mmc_test_rate(bytes, &ts);
554
iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
556
pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
557
"seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
558
mmc_hostname(test->card->host), sectors, sectors >> 1,
559
(sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
560
(unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
561
iops / 100, iops % 100);
563
mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
567
* Print the average transfer rate.
569
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
570
unsigned int count, struct timespec *ts1,
571
struct timespec *ts2)
573
unsigned int rate, iops, sectors = bytes >> 9;
574
uint64_t tot = bytes * count;
577
ts = timespec_sub(*ts2, *ts1);
579
rate = mmc_test_rate(tot, &ts);
580
iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
582
pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
583
"%lu.%09lu seconds (%u kB/s, %u KiB/s, "
584
"%u.%02u IOPS, sg_len %d)\n",
585
mmc_hostname(test->card->host), count, sectors, count,
586
sectors >> 1, (sectors & 1 ? ".5" : ""),
587
(unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
588
rate / 1000, rate / 1024, iops / 100, iops % 100,
591
mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
/*
 * Return the card size in sectors.
 */
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
		return card->ext_csd.sectors;
	else
		return card->csd.capacity << (card->csd.read_blkbits - 9);
}
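/*
 * For a block-addressed (high-capacity) MMC card the EXT_CSD sector count is
 * authoritative; otherwise the capacity comes from the CSD, converted from
 * read_blkbits-sized blocks to 512-byte sectors.
 */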
/*******************************************************************/
/*  Test preparation and cleanup                                   */
/*******************************************************************/
610
* Fill the first couple of sectors of the card with known data
611
* so that bad reads/writes can be detected
613
static int __mmc_test_prepare(struct mmc_test_card *test, int write)
617
ret = mmc_test_set_blksize(test, 512);
622
memset(test->buffer, 0xDF, 512);
624
for (i = 0;i < 512;i++)
628
for (i = 0;i < BUFFER_SIZE / 512;i++) {
629
ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
637
static int mmc_test_prepare_write(struct mmc_test_card *test)
639
return __mmc_test_prepare(test, 1);
642
static int mmc_test_prepare_read(struct mmc_test_card *test)
644
return __mmc_test_prepare(test, 0);
647
static int mmc_test_cleanup(struct mmc_test_card *test)
651
ret = mmc_test_set_blksize(test, 512);
655
memset(test->buffer, 0, 512);
657
for (i = 0;i < BUFFER_SIZE / 512;i++) {
658
ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
/*******************************************************************/
/*  Test execution helpers                                         */
/*******************************************************************/
671
* Modifies the mmc_request to perform the "short transfer" tests
673
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
674
struct mmc_request *mrq, int write)
676
BUG_ON(!mrq || !mrq->cmd || !mrq->data);
678
if (mrq->data->blocks > 1) {
679
mrq->cmd->opcode = write ?
680
MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
683
mrq->cmd->opcode = MMC_SEND_STATUS;
684
mrq->cmd->arg = test->card->rca << 16;
689
* Checks that a normal transfer didn't have any errors
691
static int mmc_test_check_result(struct mmc_test_card *test,
692
struct mmc_request *mrq)
696
BUG_ON(!mrq || !mrq->cmd || !mrq->data);
700
if (!ret && mrq->cmd->error)
701
ret = mrq->cmd->error;
702
if (!ret && mrq->data->error)
703
ret = mrq->data->error;
704
if (!ret && mrq->stop && mrq->stop->error)
705
ret = mrq->stop->error;
706
if (!ret && mrq->data->bytes_xfered !=
707
mrq->data->blocks * mrq->data->blksz)
711
ret = RESULT_UNSUP_HOST;
716
static int mmc_test_check_result_async(struct mmc_card *card,
717
struct mmc_async_req *areq)
719
struct mmc_test_async_req *test_async =
720
container_of(areq, struct mmc_test_async_req, areq);
722
mmc_test_wait_busy(test_async->test);
724
return mmc_test_check_result(test_async->test, areq->mrq);
728
* Checks that a "short transfer" behaved as expected
730
static int mmc_test_check_broken_result(struct mmc_test_card *test,
731
struct mmc_request *mrq)
735
BUG_ON(!mrq || !mrq->cmd || !mrq->data);
739
if (!ret && mrq->cmd->error)
740
ret = mrq->cmd->error;
741
if (!ret && mrq->data->error == 0)
743
if (!ret && mrq->data->error != -ETIMEDOUT)
744
ret = mrq->data->error;
745
if (!ret && mrq->stop && mrq->stop->error)
746
ret = mrq->stop->error;
747
if (mrq->data->blocks > 1) {
748
if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
751
if (!ret && mrq->data->bytes_xfered > 0)
756
ret = RESULT_UNSUP_HOST;
762
* Tests nonblock transfer with certain parameters
764
static void mmc_test_nonblock_reset(struct mmc_request *mrq,
765
struct mmc_command *cmd,
766
struct mmc_command *stop,
767
struct mmc_data *data)
769
memset(mrq, 0, sizeof(struct mmc_request));
770
memset(cmd, 0, sizeof(struct mmc_command));
771
memset(data, 0, sizeof(struct mmc_data));
772
memset(stop, 0, sizeof(struct mmc_command));
778
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
779
struct scatterlist *sg, unsigned sg_len,
780
unsigned dev_addr, unsigned blocks,
781
unsigned blksz, int write, int count)
783
struct mmc_request mrq1;
784
struct mmc_command cmd1;
785
struct mmc_command stop1;
786
struct mmc_data data1;
788
struct mmc_request mrq2;
789
struct mmc_command cmd2;
790
struct mmc_command stop2;
791
struct mmc_data data2;
793
struct mmc_test_async_req test_areq[2];
794
struct mmc_async_req *done_areq;
795
struct mmc_async_req *cur_areq = &test_areq[0].areq;
796
struct mmc_async_req *other_areq = &test_areq[1].areq;
800
test_areq[0].test = test;
801
test_areq[1].test = test;
803
mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
804
mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);
806
cur_areq->mrq = &mrq1;
807
cur_areq->err_check = mmc_test_check_result_async;
808
other_areq->mrq = &mrq2;
809
other_areq->err_check = mmc_test_check_result_async;
811
for (i = 0; i < count; i++) {
812
mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
813
blocks, blksz, write);
814
done_areq = mmc_start_req(test->card->host, cur_areq, &ret);
816
if (ret || (!done_areq && i > 0))
820
if (done_areq->mrq == &mrq2)
821
mmc_test_nonblock_reset(&mrq2, &cmd2,
824
mmc_test_nonblock_reset(&mrq1, &cmd1,
827
done_areq = cur_areq;
828
cur_areq = other_areq;
829
other_areq = done_areq;
833
done_areq = mmc_start_req(test->card->host, NULL, &ret);
841
* Tests a basic transfer with certain parameters
843
static int mmc_test_simple_transfer(struct mmc_test_card *test,
844
struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
845
unsigned blocks, unsigned blksz, int write)
847
struct mmc_request mrq = {0};
848
struct mmc_command cmd = {0};
849
struct mmc_command stop = {0};
850
struct mmc_data data = {0};
856
mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
857
blocks, blksz, write);
859
mmc_wait_for_req(test->card->host, &mrq);
861
mmc_test_wait_busy(test);
863
return mmc_test_check_result(test, &mrq);
867
* Tests a transfer where the card will fail completely or partly
869
static int mmc_test_broken_transfer(struct mmc_test_card *test,
870
unsigned blocks, unsigned blksz, int write)
872
struct mmc_request mrq = {0};
873
struct mmc_command cmd = {0};
874
struct mmc_command stop = {0};
875
struct mmc_data data = {0};
877
struct scatterlist sg;
883
sg_init_one(&sg, test->buffer, blocks * blksz);
885
mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
886
mmc_test_prepare_broken_mrq(test, &mrq, write);
888
mmc_wait_for_req(test->card->host, &mrq);
890
mmc_test_wait_busy(test);
892
return mmc_test_check_broken_result(test, &mrq);
896
* Does a complete transfer test where data is also validated
898
* Note: mmc_test_prepare() must have been done before this call
900
static int mmc_test_transfer(struct mmc_test_card *test,
901
struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
902
unsigned blocks, unsigned blksz, int write)
908
for (i = 0;i < blocks * blksz;i++)
909
test->scratch[i] = i;
911
memset(test->scratch, 0, BUFFER_SIZE);
913
local_irq_save(flags);
914
sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
915
local_irq_restore(flags);
917
ret = mmc_test_set_blksize(test, blksz);
921
ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
922
blocks, blksz, write);
929
ret = mmc_test_set_blksize(test, 512);
933
sectors = (blocks * blksz + 511) / 512;
934
if ((sectors * 512) == (blocks * blksz))
937
if ((sectors * 512) > BUFFER_SIZE)
940
memset(test->buffer, 0, sectors * 512);
942
for (i = 0;i < sectors;i++) {
943
ret = mmc_test_buffer_transfer(test,
944
test->buffer + i * 512,
945
dev_addr + i, 512, 0);
950
for (i = 0;i < blocks * blksz;i++) {
951
if (test->buffer[i] != (u8)i)
955
for (;i < sectors * 512;i++) {
956
if (test->buffer[i] != 0xDF)
960
local_irq_save(flags);
961
sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
962
local_irq_restore(flags);
963
for (i = 0;i < blocks * blksz;i++) {
964
if (test->scratch[i] != (u8)i)
/*******************************************************************/
/*  Tests                                                          */
/*******************************************************************/
976
struct mmc_test_case {
979
int (*prepare)(struct mmc_test_card *);
980
int (*run)(struct mmc_test_card *);
981
int (*cleanup)(struct mmc_test_card *);
984
static int mmc_test_basic_write(struct mmc_test_card *test)
987
struct scatterlist sg;
989
ret = mmc_test_set_blksize(test, 512);
993
sg_init_one(&sg, test->buffer, 512);
995
ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
1002
static int mmc_test_basic_read(struct mmc_test_card *test)
1005
struct scatterlist sg;
1007
ret = mmc_test_set_blksize(test, 512);
1011
sg_init_one(&sg, test->buffer, 512);
1013
ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
1020
static int mmc_test_verify_write(struct mmc_test_card *test)
1023
struct scatterlist sg;
1025
sg_init_one(&sg, test->buffer, 512);
1027
ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1034
static int mmc_test_verify_read(struct mmc_test_card *test)
1037
struct scatterlist sg;
1039
sg_init_one(&sg, test->buffer, 512);
1041
ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1048
static int mmc_test_multi_write(struct mmc_test_card *test)
1052
struct scatterlist sg;
1054
if (test->card->host->max_blk_count == 1)
1055
return RESULT_UNSUP_HOST;
1057
size = PAGE_SIZE * 2;
1058
size = min(size, test->card->host->max_req_size);
1059
size = min(size, test->card->host->max_seg_size);
1060
size = min(size, test->card->host->max_blk_count * 512);
1063
return RESULT_UNSUP_HOST;
1065
sg_init_one(&sg, test->buffer, size);
1067
ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1074
static int mmc_test_multi_read(struct mmc_test_card *test)
1078
struct scatterlist sg;
1080
if (test->card->host->max_blk_count == 1)
1081
return RESULT_UNSUP_HOST;
1083
size = PAGE_SIZE * 2;
1084
size = min(size, test->card->host->max_req_size);
1085
size = min(size, test->card->host->max_seg_size);
1086
size = min(size, test->card->host->max_blk_count * 512);
1089
return RESULT_UNSUP_HOST;
1091
sg_init_one(&sg, test->buffer, size);
1093
ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1100
static int mmc_test_pow2_write(struct mmc_test_card *test)
1103
struct scatterlist sg;
1105
if (!test->card->csd.write_partial)
1106
return RESULT_UNSUP_CARD;
1108
for (i = 1; i < 512;i <<= 1) {
1109
sg_init_one(&sg, test->buffer, i);
1110
ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1118
static int mmc_test_pow2_read(struct mmc_test_card *test)
1121
struct scatterlist sg;
1123
if (!test->card->csd.read_partial)
1124
return RESULT_UNSUP_CARD;
1126
for (i = 1; i < 512;i <<= 1) {
1127
sg_init_one(&sg, test->buffer, i);
1128
ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1136
static int mmc_test_weird_write(struct mmc_test_card *test)
1139
struct scatterlist sg;
1141
if (!test->card->csd.write_partial)
1142
return RESULT_UNSUP_CARD;
1144
for (i = 3; i < 512;i += 7) {
1145
sg_init_one(&sg, test->buffer, i);
1146
ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1154
static int mmc_test_weird_read(struct mmc_test_card *test)
1157
struct scatterlist sg;
1159
if (!test->card->csd.read_partial)
1160
return RESULT_UNSUP_CARD;
1162
for (i = 3; i < 512;i += 7) {
1163
sg_init_one(&sg, test->buffer, i);
1164
ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1172
static int mmc_test_align_write(struct mmc_test_card *test)
1175
struct scatterlist sg;
1177
for (i = 1;i < 4;i++) {
1178
sg_init_one(&sg, test->buffer + i, 512);
1179
ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1187
static int mmc_test_align_read(struct mmc_test_card *test)
1190
struct scatterlist sg;
1192
for (i = 1;i < 4;i++) {
1193
sg_init_one(&sg, test->buffer + i, 512);
1194
ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1202
static int mmc_test_align_multi_write(struct mmc_test_card *test)
1206
struct scatterlist sg;
1208
if (test->card->host->max_blk_count == 1)
1209
return RESULT_UNSUP_HOST;
1211
size = PAGE_SIZE * 2;
1212
size = min(size, test->card->host->max_req_size);
1213
size = min(size, test->card->host->max_seg_size);
1214
size = min(size, test->card->host->max_blk_count * 512);
1217
return RESULT_UNSUP_HOST;
1219
for (i = 1;i < 4;i++) {
1220
sg_init_one(&sg, test->buffer + i, size);
1221
ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1229
static int mmc_test_align_multi_read(struct mmc_test_card *test)
1233
struct scatterlist sg;
1235
if (test->card->host->max_blk_count == 1)
1236
return RESULT_UNSUP_HOST;
1238
size = PAGE_SIZE * 2;
1239
size = min(size, test->card->host->max_req_size);
1240
size = min(size, test->card->host->max_seg_size);
1241
size = min(size, test->card->host->max_blk_count * 512);
1244
return RESULT_UNSUP_HOST;
1246
for (i = 1;i < 4;i++) {
1247
sg_init_one(&sg, test->buffer + i, size);
1248
ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1256
static int mmc_test_xfersize_write(struct mmc_test_card *test)
1260
ret = mmc_test_set_blksize(test, 512);
1264
ret = mmc_test_broken_transfer(test, 1, 512, 1);
1271
static int mmc_test_xfersize_read(struct mmc_test_card *test)
1275
ret = mmc_test_set_blksize(test, 512);
1279
ret = mmc_test_broken_transfer(test, 1, 512, 0);
1286
static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
1290
if (test->card->host->max_blk_count == 1)
1291
return RESULT_UNSUP_HOST;
1293
ret = mmc_test_set_blksize(test, 512);
1297
ret = mmc_test_broken_transfer(test, 2, 512, 1);
1304
static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
1308
if (test->card->host->max_blk_count == 1)
1309
return RESULT_UNSUP_HOST;
1311
ret = mmc_test_set_blksize(test, 512);
1315
ret = mmc_test_broken_transfer(test, 2, 512, 0);
1322
#ifdef CONFIG_HIGHMEM
1324
static int mmc_test_write_high(struct mmc_test_card *test)
1327
struct scatterlist sg;
1329
sg_init_table(&sg, 1);
1330
sg_set_page(&sg, test->highmem, 512, 0);
1332
ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1339
static int mmc_test_read_high(struct mmc_test_card *test)
1342
struct scatterlist sg;
1344
sg_init_table(&sg, 1);
1345
sg_set_page(&sg, test->highmem, 512, 0);
1347
ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1354
static int mmc_test_multi_write_high(struct mmc_test_card *test)
1358
struct scatterlist sg;
1360
if (test->card->host->max_blk_count == 1)
1361
return RESULT_UNSUP_HOST;
1363
size = PAGE_SIZE * 2;
1364
size = min(size, test->card->host->max_req_size);
1365
size = min(size, test->card->host->max_seg_size);
1366
size = min(size, test->card->host->max_blk_count * 512);
1369
return RESULT_UNSUP_HOST;
1371
sg_init_table(&sg, 1);
1372
sg_set_page(&sg, test->highmem, size, 0);
1374
ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1381
static int mmc_test_multi_read_high(struct mmc_test_card *test)
1385
struct scatterlist sg;
1387
if (test->card->host->max_blk_count == 1)
1388
return RESULT_UNSUP_HOST;
1390
size = PAGE_SIZE * 2;
1391
size = min(size, test->card->host->max_req_size);
1392
size = min(size, test->card->host->max_seg_size);
1393
size = min(size, test->card->host->max_blk_count * 512);
1396
return RESULT_UNSUP_HOST;
1398
sg_init_table(&sg, 1);
1399
sg_set_page(&sg, test->highmem, size, 0);
1401
ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1410
static int mmc_test_no_highmem(struct mmc_test_card *test)
1412
pr_info("%s: Highmem not configured - test skipped\n",
1413
mmc_hostname(test->card->host));
1417
#endif /* CONFIG_HIGHMEM */
1420
* Map sz bytes so that it can be transferred.
1422
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
1423
int max_scatter, int min_sg_len)
1425
struct mmc_test_area *t = &test->area;
1428
t->blocks = sz >> 9;
1431
err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1432
t->max_segs, t->max_seg_sz,
1435
err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1436
t->max_seg_sz, &t->sg_len, min_sg_len);
1439
pr_info("%s: Failed to map sg list\n",
1440
mmc_hostname(test->card->host));
1445
* Transfer bytes mapped by mmc_test_area_map().
1447
static int mmc_test_area_transfer(struct mmc_test_card *test,
1448
unsigned int dev_addr, int write)
1450
struct mmc_test_area *t = &test->area;
1452
return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
1453
t->blocks, 512, write);
1457
* Map and transfer bytes for multiple transfers.
1459
static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
1460
unsigned int dev_addr, int write,
1461
int max_scatter, int timed, int count,
1462
bool nonblock, int min_sg_len)
1464
struct timespec ts1, ts2;
1467
struct mmc_test_area *t = &test->area;
1470
* In the case of a maximally scattered transfer, the maximum transfer
1471
* size is further limited by using PAGE_SIZE segments.
1474
struct mmc_test_area *t = &test->area;
1475
unsigned long max_tfr;
1477
if (t->max_seg_sz >= PAGE_SIZE)
1478
max_tfr = t->max_segs * PAGE_SIZE;
1480
max_tfr = t->max_segs * t->max_seg_sz;
1485
ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
1490
getnstimeofday(&ts1);
1492
ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
1493
dev_addr, t->blocks, 512, write, count);
1495
for (i = 0; i < count && ret == 0; i++) {
1496
ret = mmc_test_area_transfer(test, dev_addr, write);
1497
dev_addr += sz >> 9;
1504
getnstimeofday(&ts2);
1507
mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
1512
static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1513
unsigned int dev_addr, int write, int max_scatter,
1516
return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
1517
timed, 1, false, 0);
1521
* Write the test area entirely.
1523
static int mmc_test_area_fill(struct mmc_test_card *test)
1525
struct mmc_test_area *t = &test->area;
1527
return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
1531
* Erase the test area entirely.
1533
static int mmc_test_area_erase(struct mmc_test_card *test)
1535
struct mmc_test_area *t = &test->area;
1537
if (!mmc_can_erase(test->card))
1540
return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
1545
* Cleanup struct mmc_test_area.
1547
static int mmc_test_area_cleanup(struct mmc_test_card *test)
1549
struct mmc_test_area *t = &test->area;
1552
mmc_test_free_mem(t->mem);
1558
* Initialize an area for testing large transfers. The test area is set to the
1559
* middle of the card because cards may have different charateristics at the
1560
* front (for FAT file system optimization). Optionally, the area is erased
1561
* (if the card supports it) which may improve write performance. Optionally,
1562
* the area is filled with data for subsequent read tests.
1564
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
1566
struct mmc_test_area *t = &test->area;
1567
unsigned long min_sz = 64 * 1024, sz;
1570
ret = mmc_test_set_blksize(test, 512);
1574
/* Make the test area size about 4MiB */
1575
sz = (unsigned long)test->card->pref_erase << 9;
1577
while (t->max_sz < 4 * 1024 * 1024)
1579
while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
1582
t->max_segs = test->card->host->max_segs;
1583
t->max_seg_sz = test->card->host->max_seg_size;
1585
t->max_tfr = t->max_sz;
1586
if (t->max_tfr >> 9 > test->card->host->max_blk_count)
1587
t->max_tfr = test->card->host->max_blk_count << 9;
1588
if (t->max_tfr > test->card->host->max_req_size)
1589
t->max_tfr = test->card->host->max_req_size;
1590
if (t->max_tfr / t->max_seg_sz > t->max_segs)
1591
t->max_tfr = t->max_segs * t->max_seg_sz;
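	/*
	 * At this point t->max_tfr holds the largest single transfer the host
	 * can issue: limited in turn by the test area size, the host's
	 * max_blk_count (in 512-byte blocks), max_req_size, and the number of
	 * segments times the maximum segment size.
	 */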
1594
* Try to allocate enough memory for a max. sized transfer. Less is OK
1595
* because the same memory can be mapped into the scatterlist more than
1596
* once. Also, take into account the limits imposed on scatterlist
1597
* segments by the host driver.
1599
t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
1604
t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
1610
t->dev_addr = mmc_test_capacity(test->card) / 2;
1611
t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
1614
ret = mmc_test_area_erase(test);
1620
ret = mmc_test_area_fill(test);
1628
mmc_test_area_cleanup(test);
1633
* Prepare for large transfers. Do not erase the test area.
1635
static int mmc_test_area_prepare(struct mmc_test_card *test)
1637
return mmc_test_area_init(test, 0, 0);
1641
* Prepare for large transfers. Do erase the test area.
1643
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
1645
return mmc_test_area_init(test, 1, 0);
1649
* Prepare for large transfers. Erase and fill the test area.
1651
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
1653
return mmc_test_area_init(test, 1, 1);
1657
* Test best-case performance. Best-case performance is expected from
1658
* a single large transfer.
1660
* An additional option (max_scatter) allows the measurement of the same
1661
* transfer but with no contiguous pages in the scatter list. This tests
1662
* the efficiency of DMA to handle scattered pages.
1664
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1667
struct mmc_test_area *t = &test->area;
1669
return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
1674
* Best-case read performance.
1676
static int mmc_test_best_read_performance(struct mmc_test_card *test)
1678
return mmc_test_best_performance(test, 0, 0);
1682
* Best-case write performance.
1684
static int mmc_test_best_write_performance(struct mmc_test_card *test)
1686
return mmc_test_best_performance(test, 1, 0);
1690
* Best-case read performance into scattered pages.
1692
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
1694
return mmc_test_best_performance(test, 0, 1);
1698
* Best-case write performance from scattered pages.
1700
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
1702
return mmc_test_best_performance(test, 1, 1);
1706
* Single read performance by transfer size.
1708
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1710
struct mmc_test_area *t = &test->area;
1712
unsigned int dev_addr;
1715
for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1716
dev_addr = t->dev_addr + (sz >> 9);
1717
ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1722
dev_addr = t->dev_addr;
1723
return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1727
* Single write performance by transfer size.
1729
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1731
struct mmc_test_area *t = &test->area;
1733
unsigned int dev_addr;
1736
ret = mmc_test_area_erase(test);
1739
for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1740
dev_addr = t->dev_addr + (sz >> 9);
1741
ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1745
ret = mmc_test_area_erase(test);
1749
dev_addr = t->dev_addr;
1750
return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1754
* Single trim performance by transfer size.
1756
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1758
struct mmc_test_area *t = &test->area;
1760
unsigned int dev_addr;
1761
struct timespec ts1, ts2;
1764
if (!mmc_can_trim(test->card))
1765
return RESULT_UNSUP_CARD;
1767
if (!mmc_can_erase(test->card))
1768
return RESULT_UNSUP_HOST;
1770
for (sz = 512; sz < t->max_sz; sz <<= 1) {
1771
dev_addr = t->dev_addr + (sz >> 9);
1772
getnstimeofday(&ts1);
1773
ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1776
getnstimeofday(&ts2);
1777
mmc_test_print_rate(test, sz, &ts1, &ts2);
1779
dev_addr = t->dev_addr;
1780
getnstimeofday(&ts1);
1781
ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1784
getnstimeofday(&ts2);
1785
mmc_test_print_rate(test, sz, &ts1, &ts2);
1789
static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1791
struct mmc_test_area *t = &test->area;
1792
unsigned int dev_addr, i, cnt;
1793
struct timespec ts1, ts2;
1796
cnt = t->max_sz / sz;
1797
dev_addr = t->dev_addr;
1798
getnstimeofday(&ts1);
1799
for (i = 0; i < cnt; i++) {
1800
ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1803
dev_addr += (sz >> 9);
1805
getnstimeofday(&ts2);
1806
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1811
* Consecutive read performance by transfer size.
1813
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1815
struct mmc_test_area *t = &test->area;
1819
for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1820
ret = mmc_test_seq_read_perf(test, sz);
1825
return mmc_test_seq_read_perf(test, sz);
1828
static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1830
struct mmc_test_area *t = &test->area;
1831
unsigned int dev_addr, i, cnt;
1832
struct timespec ts1, ts2;
1835
ret = mmc_test_area_erase(test);
1838
cnt = t->max_sz / sz;
1839
dev_addr = t->dev_addr;
1840
getnstimeofday(&ts1);
1841
for (i = 0; i < cnt; i++) {
1842
ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1845
dev_addr += (sz >> 9);
1847
getnstimeofday(&ts2);
1848
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1853
* Consecutive write performance by transfer size.
1855
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1857
struct mmc_test_area *t = &test->area;
1861
for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1862
ret = mmc_test_seq_write_perf(test, sz);
1867
return mmc_test_seq_write_perf(test, sz);
1871
* Consecutive trim performance by transfer size.
1873
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1875
struct mmc_test_area *t = &test->area;
1877
unsigned int dev_addr, i, cnt;
1878
struct timespec ts1, ts2;
1881
if (!mmc_can_trim(test->card))
1882
return RESULT_UNSUP_CARD;
1884
if (!mmc_can_erase(test->card))
1885
return RESULT_UNSUP_HOST;
1887
for (sz = 512; sz <= t->max_sz; sz <<= 1) {
1888
ret = mmc_test_area_erase(test);
1891
ret = mmc_test_area_fill(test);
1894
cnt = t->max_sz / sz;
1895
dev_addr = t->dev_addr;
1896
getnstimeofday(&ts1);
1897
for (i = 0; i < cnt; i++) {
1898
ret = mmc_erase(test->card, dev_addr, sz >> 9,
1902
dev_addr += (sz >> 9);
1904
getnstimeofday(&ts2);
1905
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
static unsigned int rnd_next = 1;

static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
	uint64_t r;

	rnd_next = rnd_next * 1103515245 + 12345;
	r = (rnd_next >> 16) & 0x7fff;
	return (r * rnd_cnt) >> 15;
}
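/*
 * mmc_test_rnd_num() above is a small linear congruential generator (the
 * classic C library rand() constants) returning a value in [0, rnd_cnt).
 * It is seeded with a fixed value so the "random" I/O pattern is
 * reproducible between test runs.
 */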
1921
static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
1924
unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
1926
struct timespec ts1, ts2, ts;
1931
rnd_addr = mmc_test_capacity(test->card) / 4;
1932
range1 = rnd_addr / test->card->pref_erase;
1933
range2 = range1 / ssz;
1935
getnstimeofday(&ts1);
1936
for (cnt = 0; cnt < UINT_MAX; cnt++) {
1937
getnstimeofday(&ts2);
1938
ts = timespec_sub(ts2, ts1);
1939
if (ts.tv_sec >= 10)
1941
ea = mmc_test_rnd_num(range1);
1945
dev_addr = rnd_addr + test->card->pref_erase * ea +
1946
ssz * mmc_test_rnd_num(range2);
1947
ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
1952
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1956
static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1958
struct mmc_test_area *t = &test->area;
1963
for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1965
* When writing, try to get more consistent results by running
1966
* the test twice with exactly the same I/O but outputting the
1967
* results only for the 2nd run.
1971
ret = mmc_test_rnd_perf(test, write, 0, sz);
1976
ret = mmc_test_rnd_perf(test, write, 1, sz);
1983
ret = mmc_test_rnd_perf(test, write, 0, sz);
1988
return mmc_test_rnd_perf(test, write, 1, sz);
1992
* Random read performance by transfer size.
1994
static int mmc_test_random_read_perf(struct mmc_test_card *test)
1996
return mmc_test_random_perf(test, 0);
2000
* Random write performance by transfer size.
2002
static int mmc_test_random_write_perf(struct mmc_test_card *test)
2004
return mmc_test_random_perf(test, 1);
2007
static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
2008
unsigned int tot_sz, int max_scatter)
2010
struct mmc_test_area *t = &test->area;
2011
unsigned int dev_addr, i, cnt, sz, ssz;
2012
struct timespec ts1, ts2;
2018
* In the case of a maximally scattered transfer, the maximum transfer
2019
* size is further limited by using PAGE_SIZE segments.
2022
unsigned long max_tfr;
2024
if (t->max_seg_sz >= PAGE_SIZE)
2025
max_tfr = t->max_segs * PAGE_SIZE;
2027
max_tfr = t->max_segs * t->max_seg_sz;
2033
dev_addr = mmc_test_capacity(test->card) / 4;
2034
if (tot_sz > dev_addr << 9)
2035
tot_sz = dev_addr << 9;
2037
dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
2039
getnstimeofday(&ts1);
2040
for (i = 0; i < cnt; i++) {
2041
ret = mmc_test_area_io(test, sz, dev_addr, write,
2047
getnstimeofday(&ts2);
2049
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
2054
static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
2058
for (i = 0; i < 10; i++) {
2059
ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
2063
for (i = 0; i < 5; i++) {
2064
ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
2068
for (i = 0; i < 3; i++) {
2069
ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
2078
* Large sequential read performance.
2080
static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
2082
return mmc_test_large_seq_perf(test, 0);
2086
* Large sequential write performance.
2088
static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
2090
return mmc_test_large_seq_perf(test, 1);
2093
static int mmc_test_rw_multiple(struct mmc_test_card *test,
2094
struct mmc_test_multiple_rw *tdata,
2095
unsigned int reqsize, unsigned int size,
2098
unsigned int dev_addr;
2099
struct mmc_test_area *t = &test->area;
2102
/* Set up test area */
2103
if (size > mmc_test_capacity(test->card) / 2 * 512)
2104
size = mmc_test_capacity(test->card) / 2 * 512;
2105
if (reqsize > t->max_tfr)
2106
reqsize = t->max_tfr;
2107
dev_addr = mmc_test_capacity(test->card) / 4;
2108
if ((dev_addr & 0xffff0000))
2109
dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
2111
dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
2118
/* prepare test area */
2119
if (mmc_can_erase(test->card) &&
2120
tdata->prepare & MMC_TEST_PREP_ERASE) {
2121
ret = mmc_erase(test->card, dev_addr,
2122
size / 512, MMC_SECURE_ERASE_ARG);
2124
ret = mmc_erase(test->card, dev_addr,
2125
size / 512, MMC_ERASE_ARG);
2131
ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
2132
tdata->do_write, 0, 1, size / reqsize,
2133
tdata->do_nonblock_req, min_sg_len);
2139
pr_info("[%s] error\n", __func__);
2143
static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
2144
struct mmc_test_multiple_rw *rw)
2148
void *pre_req = test->card->host->ops->pre_req;
2149
void *post_req = test->card->host->ops->post_req;
2151
if (rw->do_nonblock_req &&
2152
((!pre_req && post_req) || (pre_req && !post_req))) {
2153
pr_info("error: only one of pre/post is defined\n");
2157
for (i = 0 ; i < rw->len && ret == 0; i++) {
2158
ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
2165
static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
2166
struct mmc_test_multiple_rw *rw)
2171
for (i = 0 ; i < rw->len && ret == 0; i++) {
2172
ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
2181
* Multiple blocking write 4k to 4 MB chunks
2183
static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
2185
unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2186
1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2187
struct mmc_test_multiple_rw test_data = {
2189
.size = TEST_AREA_MAX_SIZE,
2190
.len = ARRAY_SIZE(bs),
2192
.do_nonblock_req = false,
2193
.prepare = MMC_TEST_PREP_ERASE,
2196
return mmc_test_rw_multiple_size(test, &test_data);
2200
* Multiple non-blocking write 4k to 4 MB chunks
2202
static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
2204
unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2205
1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2206
struct mmc_test_multiple_rw test_data = {
2208
.size = TEST_AREA_MAX_SIZE,
2209
.len = ARRAY_SIZE(bs),
2211
.do_nonblock_req = true,
2212
.prepare = MMC_TEST_PREP_ERASE,
2215
return mmc_test_rw_multiple_size(test, &test_data);
2219
* Multiple blocking read 4k to 4 MB chunks
2221
static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
2223
unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2224
1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2225
struct mmc_test_multiple_rw test_data = {
2227
.size = TEST_AREA_MAX_SIZE,
2228
.len = ARRAY_SIZE(bs),
2230
.do_nonblock_req = false,
2231
.prepare = MMC_TEST_PREP_NONE,
2234
return mmc_test_rw_multiple_size(test, &test_data);
2238
* Multiple non-blocking read 4k to 4 MB chunks
2240
static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
2242
unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2243
1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2244
struct mmc_test_multiple_rw test_data = {
2246
.size = TEST_AREA_MAX_SIZE,
2247
.len = ARRAY_SIZE(bs),
2249
.do_nonblock_req = true,
2250
.prepare = MMC_TEST_PREP_NONE,
2253
return mmc_test_rw_multiple_size(test, &test_data);
2257
* Multiple blocking write 1 to 512 sg elements
2259
static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
2261
unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2262
1 << 7, 1 << 8, 1 << 9};
2263
struct mmc_test_multiple_rw test_data = {
2265
.size = TEST_AREA_MAX_SIZE,
2266
.len = ARRAY_SIZE(sg_len),
2268
.do_nonblock_req = false,
2269
.prepare = MMC_TEST_PREP_ERASE,
2272
return mmc_test_rw_multiple_sg_len(test, &test_data);
2276
* Multiple non-blocking write 1 to 512 sg elements
2278
static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
2280
unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2281
1 << 7, 1 << 8, 1 << 9};
2282
struct mmc_test_multiple_rw test_data = {
2284
.size = TEST_AREA_MAX_SIZE,
2285
.len = ARRAY_SIZE(sg_len),
2287
.do_nonblock_req = true,
2288
.prepare = MMC_TEST_PREP_ERASE,
2291
return mmc_test_rw_multiple_sg_len(test, &test_data);
2295
* Multiple blocking read 1 to 512 sg elements
2297
static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
2299
unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2300
1 << 7, 1 << 8, 1 << 9};
2301
struct mmc_test_multiple_rw test_data = {
2303
.size = TEST_AREA_MAX_SIZE,
2304
.len = ARRAY_SIZE(sg_len),
2306
.do_nonblock_req = false,
2307
.prepare = MMC_TEST_PREP_NONE,
2310
return mmc_test_rw_multiple_sg_len(test, &test_data);
2314
* Multiple non-blocking read 1 to 512 sg elements
2316
static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
2318
unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2319
1 << 7, 1 << 8, 1 << 9};
2320
struct mmc_test_multiple_rw test_data = {
2322
.size = TEST_AREA_MAX_SIZE,
2323
.len = ARRAY_SIZE(sg_len),
2325
.do_nonblock_req = true,
2326
.prepare = MMC_TEST_PREP_NONE,
2329
return mmc_test_rw_multiple_sg_len(test, &test_data);
2333
* eMMC hardware reset.
2335
static int mmc_test_hw_reset(struct mmc_test_card *test)
2337
struct mmc_card *card = test->card;
2338
struct mmc_host *host = card->host;
2341
err = mmc_hw_reset_check(host);
2348
if (err != -EOPNOTSUPP)
2351
if (!mmc_can_reset(card))
2352
return RESULT_UNSUP_CARD;
2354
return RESULT_UNSUP_HOST;
2357
static const struct mmc_test_case mmc_test_cases[] = {
2359
.name = "Basic write (no data verification)",
2360
.run = mmc_test_basic_write,
2364
.name = "Basic read (no data verification)",
2365
.run = mmc_test_basic_read,
2369
.name = "Basic write (with data verification)",
2370
.prepare = mmc_test_prepare_write,
2371
.run = mmc_test_verify_write,
2372
.cleanup = mmc_test_cleanup,
2376
.name = "Basic read (with data verification)",
2377
.prepare = mmc_test_prepare_read,
2378
.run = mmc_test_verify_read,
2379
.cleanup = mmc_test_cleanup,
2383
.name = "Multi-block write",
2384
.prepare = mmc_test_prepare_write,
2385
.run = mmc_test_multi_write,
2386
.cleanup = mmc_test_cleanup,
2390
.name = "Multi-block read",
2391
.prepare = mmc_test_prepare_read,
2392
.run = mmc_test_multi_read,
2393
.cleanup = mmc_test_cleanup,
2397
.name = "Power of two block writes",
2398
.prepare = mmc_test_prepare_write,
2399
.run = mmc_test_pow2_write,
2400
.cleanup = mmc_test_cleanup,
2404
.name = "Power of two block reads",
2405
.prepare = mmc_test_prepare_read,
2406
.run = mmc_test_pow2_read,
2407
.cleanup = mmc_test_cleanup,
2411
.name = "Weird sized block writes",
2412
.prepare = mmc_test_prepare_write,
2413
.run = mmc_test_weird_write,
2414
.cleanup = mmc_test_cleanup,
2418
.name = "Weird sized block reads",
2419
.prepare = mmc_test_prepare_read,
2420
.run = mmc_test_weird_read,
2421
.cleanup = mmc_test_cleanup,
2425
.name = "Badly aligned write",
2426
.prepare = mmc_test_prepare_write,
2427
.run = mmc_test_align_write,
2428
.cleanup = mmc_test_cleanup,
2432
.name = "Badly aligned read",
2433
.prepare = mmc_test_prepare_read,
2434
.run = mmc_test_align_read,
2435
.cleanup = mmc_test_cleanup,
2439
.name = "Badly aligned multi-block write",
2440
.prepare = mmc_test_prepare_write,
2441
.run = mmc_test_align_multi_write,
2442
.cleanup = mmc_test_cleanup,
2446
.name = "Badly aligned multi-block read",
2447
.prepare = mmc_test_prepare_read,
2448
.run = mmc_test_align_multi_read,
2449
.cleanup = mmc_test_cleanup,
2453
.name = "Correct xfer_size at write (start failure)",
2454
.run = mmc_test_xfersize_write,
2458
.name = "Correct xfer_size at read (start failure)",
2459
.run = mmc_test_xfersize_read,
2463
.name = "Correct xfer_size at write (midway failure)",
2464
.run = mmc_test_multi_xfersize_write,
2468
.name = "Correct xfer_size at read (midway failure)",
2469
.run = mmc_test_multi_xfersize_read,
2472
#ifdef CONFIG_HIGHMEM
2475
.name = "Highmem write",
2476
.prepare = mmc_test_prepare_write,
2477
.run = mmc_test_write_high,
2478
.cleanup = mmc_test_cleanup,
2482
.name = "Highmem read",
2483
.prepare = mmc_test_prepare_read,
2484
.run = mmc_test_read_high,
2485
.cleanup = mmc_test_cleanup,
2489
.name = "Multi-block highmem write",
2490
.prepare = mmc_test_prepare_write,
2491
.run = mmc_test_multi_write_high,
2492
.cleanup = mmc_test_cleanup,
2496
.name = "Multi-block highmem read",
2497
.prepare = mmc_test_prepare_read,
2498
.run = mmc_test_multi_read_high,
2499
.cleanup = mmc_test_cleanup,
2505
.name = "Highmem write",
2506
.run = mmc_test_no_highmem,
2510
.name = "Highmem read",
2511
.run = mmc_test_no_highmem,
2515
.name = "Multi-block highmem write",
2516
.run = mmc_test_no_highmem,
2520
.name = "Multi-block highmem read",
2521
.run = mmc_test_no_highmem,
2524
#endif /* CONFIG_HIGHMEM */
2527
.name = "Best-case read performance",
2528
.prepare = mmc_test_area_prepare_fill,
2529
.run = mmc_test_best_read_performance,
2530
.cleanup = mmc_test_area_cleanup,
2534
.name = "Best-case write performance",
2535
.prepare = mmc_test_area_prepare_erase,
2536
.run = mmc_test_best_write_performance,
2537
.cleanup = mmc_test_area_cleanup,
2541
.name = "Best-case read performance into scattered pages",
2542
.prepare = mmc_test_area_prepare_fill,
2543
.run = mmc_test_best_read_perf_max_scatter,
2544
.cleanup = mmc_test_area_cleanup,
2548
.name = "Best-case write performance from scattered pages",
2549
.prepare = mmc_test_area_prepare_erase,
2550
.run = mmc_test_best_write_perf_max_scatter,
2551
.cleanup = mmc_test_area_cleanup,
2555
.name = "Single read performance by transfer size",
2556
.prepare = mmc_test_area_prepare_fill,
2557
.run = mmc_test_profile_read_perf,
2558
.cleanup = mmc_test_area_cleanup,
2562
.name = "Single write performance by transfer size",
2563
.prepare = mmc_test_area_prepare,
2564
.run = mmc_test_profile_write_perf,
2565
.cleanup = mmc_test_area_cleanup,
2569
.name = "Single trim performance by transfer size",
2570
.prepare = mmc_test_area_prepare_fill,
2571
.run = mmc_test_profile_trim_perf,
2572
.cleanup = mmc_test_area_cleanup,
2576
.name = "Consecutive read performance by transfer size",
2577
.prepare = mmc_test_area_prepare_fill,
2578
.run = mmc_test_profile_seq_read_perf,
2579
.cleanup = mmc_test_area_cleanup,
2583
.name = "Consecutive write performance by transfer size",
2584
.prepare = mmc_test_area_prepare,
2585
.run = mmc_test_profile_seq_write_perf,
2586
.cleanup = mmc_test_area_cleanup,
2590
.name = "Consecutive trim performance by transfer size",
2591
.prepare = mmc_test_area_prepare,
2592
.run = mmc_test_profile_seq_trim_perf,
2593
.cleanup = mmc_test_area_cleanup,
2597
.name = "Random read performance by transfer size",
2598
.prepare = mmc_test_area_prepare,
2599
.run = mmc_test_random_read_perf,
2600
.cleanup = mmc_test_area_cleanup,
2604
.name = "Random write performance by transfer size",
2605
.prepare = mmc_test_area_prepare,
2606
.run = mmc_test_random_write_perf,
2607
.cleanup = mmc_test_area_cleanup,
2611
.name = "Large sequential read into scattered pages",
2612
.prepare = mmc_test_area_prepare,
2613
.run = mmc_test_large_seq_read_perf,
2614
.cleanup = mmc_test_area_cleanup,
2618
.name = "Large sequential write from scattered pages",
2619
.prepare = mmc_test_area_prepare,
2620
.run = mmc_test_large_seq_write_perf,
2621
.cleanup = mmc_test_area_cleanup,
2625
.name = "Write performance with blocking req 4k to 4MB",
2626
.prepare = mmc_test_area_prepare,
2627
.run = mmc_test_profile_mult_write_blocking_perf,
2628
.cleanup = mmc_test_area_cleanup,
2632
.name = "Write performance with non-blocking req 4k to 4MB",
2633
.prepare = mmc_test_area_prepare,
2634
.run = mmc_test_profile_mult_write_nonblock_perf,
2635
.cleanup = mmc_test_area_cleanup,
2639
.name = "Read performance with blocking req 4k to 4MB",
2640
.prepare = mmc_test_area_prepare,
2641
.run = mmc_test_profile_mult_read_blocking_perf,
2642
.cleanup = mmc_test_area_cleanup,
2646
.name = "Read performance with non-blocking req 4k to 4MB",
2647
.prepare = mmc_test_area_prepare,
2648
.run = mmc_test_profile_mult_read_nonblock_perf,
2649
.cleanup = mmc_test_area_cleanup,
2653
.name = "Write performance blocking req 1 to 512 sg elems",
2654
.prepare = mmc_test_area_prepare,
2655
.run = mmc_test_profile_sglen_wr_blocking_perf,
2656
.cleanup = mmc_test_area_cleanup,
2660
.name = "Write performance non-blocking req 1 to 512 sg elems",
2661
.prepare = mmc_test_area_prepare,
2662
.run = mmc_test_profile_sglen_wr_nonblock_perf,
2663
.cleanup = mmc_test_area_cleanup,
2667
.name = "Read performance blocking req 1 to 512 sg elems",
2668
.prepare = mmc_test_area_prepare,
2669
.run = mmc_test_profile_sglen_r_blocking_perf,
2670
.cleanup = mmc_test_area_cleanup,
2674
.name = "Read performance non-blocking req 1 to 512 sg elems",
2675
.prepare = mmc_test_area_prepare,
2676
.run = mmc_test_profile_sglen_r_nonblock_perf,
2677
.cleanup = mmc_test_area_cleanup,
2681
.name = "eMMC hardware reset",
2682
.run = mmc_test_hw_reset,
2686
static DEFINE_MUTEX(mmc_test_lock);
2688
static LIST_HEAD(mmc_test_result);
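/*
 * mmc_test_lock serializes test runs and protects the global result list
 * (mmc_test_result) and the debugfs file list (mmc_test_file_test) below.
 */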
2690
static void mmc_test_run(struct mmc_test_card *test, int testcase)
2694
pr_info("%s: Starting tests of card %s...\n",
2695
mmc_hostname(test->card->host), mmc_card_id(test->card));
2697
mmc_claim_host(test->card->host);
2699
for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
2700
struct mmc_test_general_result *gr;
2702
if (testcase && ((i + 1) != testcase))
2705
pr_info("%s: Test case %d. %s...\n",
2706
mmc_hostname(test->card->host), i + 1,
2707
mmc_test_cases[i].name);
2709
if (mmc_test_cases[i].prepare) {
2710
ret = mmc_test_cases[i].prepare(test);
2712
pr_info("%s: Result: Prepare "
2713
"stage failed! (%d)\n",
2714
mmc_hostname(test->card->host),
2720
gr = kzalloc(sizeof(struct mmc_test_general_result),
2723
INIT_LIST_HEAD(&gr->tr_lst);
2725
/* Assign data what we know already */
2726
gr->card = test->card;
2729
/* Append container to global one */
2730
list_add_tail(&gr->link, &mmc_test_result);
2733
* Save the pointer to created container in our private
2739
ret = mmc_test_cases[i].run(test);
2742
pr_info("%s: Result: OK\n",
2743
mmc_hostname(test->card->host));
2746
pr_info("%s: Result: FAILED\n",
2747
mmc_hostname(test->card->host));
2749
case RESULT_UNSUP_HOST:
2750
pr_info("%s: Result: UNSUPPORTED "
2752
mmc_hostname(test->card->host));
2754
case RESULT_UNSUP_CARD:
2755
pr_info("%s: Result: UNSUPPORTED "
2757
mmc_hostname(test->card->host));
2760
pr_info("%s: Result: ERROR (%d)\n",
2761
mmc_hostname(test->card->host), ret);
2764
/* Save the result */
2768
if (mmc_test_cases[i].cleanup) {
2769
ret = mmc_test_cases[i].cleanup(test);
2771
pr_info("%s: Warning: Cleanup "
2772
"stage failed! (%d)\n",
2773
mmc_hostname(test->card->host),
2779
mmc_release_host(test->card->host);
2781
pr_info("%s: Tests completed.\n",
2782
mmc_hostname(test->card->host));
2785
static void mmc_test_free_result(struct mmc_card *card)
2787
struct mmc_test_general_result *gr, *grs;
2789
mutex_lock(&mmc_test_lock);
2791
list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
2792
struct mmc_test_transfer_result *tr, *trs;
2794
if (card && gr->card != card)
2797
list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
2798
list_del(&tr->link);
2802
list_del(&gr->link);
2806
mutex_unlock(&mmc_test_lock);
2809
static LIST_HEAD(mmc_test_file_test);
2811
static int mtf_test_show(struct seq_file *sf, void *data)
2813
struct mmc_card *card = (struct mmc_card *)sf->private;
2814
struct mmc_test_general_result *gr;
2816
mutex_lock(&mmc_test_lock);
2818
list_for_each_entry(gr, &mmc_test_result, link) {
2819
struct mmc_test_transfer_result *tr;
2821
if (gr->card != card)
2824
seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
2826
list_for_each_entry(tr, &gr->tr_lst, link) {
2827
seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
2828
tr->count, tr->sectors,
2829
(unsigned long)tr->ts.tv_sec,
2830
(unsigned long)tr->ts.tv_nsec,
2831
tr->rate, tr->iops / 100, tr->iops % 100);
2835
mutex_unlock(&mmc_test_lock);
2840
static int mtf_test_open(struct inode *inode, struct file *file)
2842
return single_open(file, mtf_test_show, inode->i_private);
2845
static ssize_t mtf_test_write(struct file *file, const char __user *buf,
2846
size_t count, loff_t *pos)
2848
struct seq_file *sf = (struct seq_file *)file->private_data;
2849
struct mmc_card *card = (struct mmc_card *)sf->private;
2850
struct mmc_test_card *test;
2854
if (count >= sizeof(lbuf))
2857
if (copy_from_user(lbuf, buf, count))
2861
if (strict_strtol(lbuf, 10, &testcase))
2864
test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
2869
* Remove all test cases associated with given card. Thus we have only
2870
* actual data of the last run.
2872
mmc_test_free_result(card);
2876
test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
2877
#ifdef CONFIG_HIGHMEM
2878
test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
2881
#ifdef CONFIG_HIGHMEM
2882
if (test->buffer && test->highmem) {
2886
mutex_lock(&mmc_test_lock);
2887
mmc_test_run(test, testcase);
2888
mutex_unlock(&mmc_test_lock);
2891
#ifdef CONFIG_HIGHMEM
2892
__free_pages(test->highmem, BUFFER_ORDER);
2894
kfree(test->buffer);
2900
static const struct file_operations mmc_test_fops_test = {
2901
.open = mtf_test_open,
2903
.write = mtf_test_write,
2904
.llseek = seq_lseek,
2905
.release = single_release,
2908
static int mtf_testlist_show(struct seq_file *sf, void *data)
2912
mutex_lock(&mmc_test_lock);
2914
for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
2915
seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
2917
mutex_unlock(&mmc_test_lock);
2922
static int mtf_testlist_open(struct inode *inode, struct file *file)
2924
return single_open(file, mtf_testlist_show, inode->i_private);
2927
static const struct file_operations mmc_test_fops_testlist = {
2928
.open = mtf_testlist_open,
2930
.llseek = seq_lseek,
2931
.release = single_release,
2934
static void mmc_test_free_dbgfs_file(struct mmc_card *card)
2936
struct mmc_test_dbgfs_file *df, *dfs;
2938
mutex_lock(&mmc_test_lock);
2940
list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
2941
if (card && df->card != card)
2943
debugfs_remove(df->file);
2944
list_del(&df->link);
2948
mutex_unlock(&mmc_test_lock);
2951
static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
2952
const char *name, mode_t mode, const struct file_operations *fops)
2954
struct dentry *file = NULL;
2955
struct mmc_test_dbgfs_file *df;
2957
if (card->debugfs_root)
2958
file = debugfs_create_file(name, mode, card->debugfs_root,
2961
if (IS_ERR_OR_NULL(file)) {
2963
"Can't create %s. Perhaps debugfs is disabled.\n",
2968
df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
2970
debugfs_remove(file);
2972
"Can't allocate memory for internal usage.\n");
2979
list_add(&df->link, &mmc_test_file_test);
2983
static int mmc_test_register_dbgfs_file(struct mmc_card *card)
2987
mutex_lock(&mmc_test_lock);
2989
ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
2990
&mmc_test_fops_test);
2994
ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
2995
&mmc_test_fops_testlist);
3000
mutex_unlock(&mmc_test_lock);
3005
static int mmc_test_probe(struct mmc_card *card)
3009
if (!mmc_card_mmc(card) && !mmc_card_sd(card))
3012
ret = mmc_test_register_dbgfs_file(card);
3016
dev_info(&card->dev, "Card claimed for testing.\n");
3021
static void mmc_test_remove(struct mmc_card *card)
3023
mmc_test_free_result(card);
3024
mmc_test_free_dbgfs_file(card);
3027
static struct mmc_driver mmc_driver = {
3031
.probe = mmc_test_probe,
3032
.remove = mmc_test_remove,
static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}

static void __exit mmc_test_exit(void)
{
	/* Clear stalled data if card is still plugged */
	mmc_test_free_result(NULL);
	mmc_test_free_dbgfs_file(NULL);

	mmc_unregister_driver(&mmc_driver);
}

module_init(mmc_test_init);
module_exit(mmc_test_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");