~ubuntu-branches/ubuntu/precise/linux-ti-omap4/precise-security


Viewing changes to drivers/mmc/host/dw_mmc.c

  • Committer: Package Import Robot
  • Author(s): Paolo Pisati, Paolo Pisati
  • Date: 2011-12-06 15:56:07 UTC
  • Revision ID: package-import@ubuntu.com-20111206155607-pcf44kv5fmhk564f
Tags: 3.2.0-1401.1
[ Paolo Pisati ]

* Rebased on top of Ubuntu-3.2.0-3.8
* Tilt-tracking @ ef2487af4bb15bdd0689631774b5a5e3a59f74e2
* Delete debian.ti-omap4/control, it shouldn't be tracked
* Fix architecture spelling (s/armel/armhf/)
* [Config] Update configs following 3.2 import
* [Config] Fix compilation: disable CODA and ARCH_OMAP3
* [Config] Fix compilation: disable Ethernet Faraday
* Update series to precise

=== modified file 'drivers/mmc/host/dw_mmc.c'
@@ -33,6 +33,7 @@
 #include <linux/mmc/dw_mmc.h>
 #include <linux/bitops.h>
 #include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>

 #include "dw_mmc.h"

@@ -61,7 +62,7 @@

         u32             des1;   /* Buffer sizes */
 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
-        ((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff))
+        ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

         u32             des2;   /* buffer 1 physical address */

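
The mask change above narrows the IDMAC buffer-1 size field: the size is now clamped with 0x1fff (13 bits, at most 8191 bytes) instead of 0x3fff, and the preserved-bits mask widens from 0x03ffc000 to 0x03ffe000 to match. A standalone sanity check of the new constants (illustrative only, not driver code; the 13-bit field width is inferred from the masks themselves):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        const uint32_t size_mask = 0x1fff;     /* bits [12:0]: buffer 1 size */
        const uint32_t keep_mask = 0x03ffe000; /* bits [25:13]: left untouched */

        assert((size_mask & keep_mask) == 0);          /* masks are disjoint */
        assert((size_mask | keep_mask) == 0x03ffffff); /* together they tile bits 25:0 */
        assert(size_mask == (1u << 13) - 1);           /* 13-bit size field */
        return 0;
}
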
@@ -100,6 +101,8 @@
         int                     last_detect_state;
 };

+static struct workqueue_struct *dw_mci_card_workqueue;
+
 #if defined(CONFIG_DEBUG_FS)
 static int dw_mci_req_show(struct seq_file *s, void *v)
 {
@@ -284,7 +287,7 @@
 /* DMA interface functions */
 static void dw_mci_stop_dma(struct dw_mci *host)
 {
-        if (host->use_dma) {
+        if (host->using_dma) {
                 host->dma_ops->stop(host);
                 host->dma_ops->cleanup(host);
         } else {
@@ -432,6 +435,8 @@
         unsigned int i, direction, sg_len;
         u32 temp;

+        host->using_dma = 0;
+
         /* If we don't have a channel, we can't do DMA */
         if (!host->use_dma)
                 return -ENODEV;
@@ -451,6 +456,8 @@
                         return -EINVAL;
         }

+        host->using_dma = 1;
+
         if (data->flags & MMC_DATA_READ)
                 direction = DMA_FROM_DEVICE;
         else
@@ -489,14 +496,18 @@
         host->sg = NULL;
         host->data = data;

+        if (data->flags & MMC_DATA_READ)
+                host->dir_status = DW_MCI_RECV_STATUS;
+        else
+                host->dir_status = DW_MCI_SEND_STATUS;
+
         if (dw_mci_submit_data_dma(host, data)) {
                 host->sg = data->sg;
                 host->pio_offset = 0;
-                if (data->flags & MMC_DATA_READ)
-                        host->dir_status = DW_MCI_RECV_STATUS;
-                else
-                        host->dir_status = DW_MCI_SEND_STATUS;
+                host->part_buf_start = 0;
+                host->part_buf_count = 0;

+                mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
                 temp = mci_readl(host, INTMASK);
                 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
                 mci_writel(host, INTMASK, temp);
@@ -574,7 +585,7 @@
         }

         /* Set the current slot bus width */
-        mci_writel(host, CTYPE, slot->ctype);
+        mci_writel(host, CTYPE, (slot->ctype << slot->id));
 }

 static void dw_mci_start_request(struct dw_mci *host,
@@ -624,13 +635,13 @@
                 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
 }

+/* must be called with host->lock held */
 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
                                  struct mmc_request *mrq)
 {
         dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
                  host->state);

-        spin_lock_bh(&host->lock);
         slot->mrq = mrq;

         if (host->state == STATE_IDLE) {
@@ -639,8 +650,6 @@
         } else {
                 list_add_tail(&slot->queue_node, &host->queue);
         }
-
-        spin_unlock_bh(&host->lock);
 }

 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -650,14 +659,23 @@

         WARN_ON(slot->mrq);

+        /*
+         * The check for card presence and queueing of the request must be
+         * atomic, otherwise the card could be removed in between and the
+         * request wouldn't fail until another card was inserted.
+         */
+        spin_lock_bh(&host->lock);
+
         if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
+                spin_unlock_bh(&host->lock);
                 mrq->cmd->error = -ENOMEDIUM;
                 mmc_request_done(mmc, mrq);
                 return;
         }

-        /* We don't support multiple blocks of weird lengths. */
         dw_mci_queue_request(host, slot, mrq);
+
+        spin_unlock_bh(&host->lock);
 }

 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
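
The relocated locking above makes the card-presence check and the enqueue a single critical section: dw_mci_request() now takes host->lock itself, and dw_mci_queue_request() is documented as requiring the lock to be held by the caller. A minimal userspace sketch of the same caller-holds-the-lock pattern (assumed names; a pthread mutex stands in for the kernel's spin_lock_bh):

#include <pthread.h>
#include <stdbool.h>

struct toy_host {
        pthread_mutex_t lock;
        bool card_present;
        int queued;
};

/* must be called with host->lock held */
static void toy_queue_request(struct toy_host *host)
{
        host->queued++;
}

static int toy_request(struct toy_host *host)
{
        pthread_mutex_lock(&host->lock);
        if (!host->card_present) {
                pthread_mutex_unlock(&host->lock);
                return -1;               /* -ENOMEDIUM in the driver */
        }
        toy_queue_request(host);         /* enqueue while still holding the lock */
        pthread_mutex_unlock(&host->lock);
        return 0;
}

int main(void)
{
        struct toy_host host = { .card_present = true };

        pthread_mutex_init(&host.lock, NULL);
        return toy_request(&host);
}

Without the lock spanning both steps, the card could be removed between the presence test and the enqueue, and the request would sit in the queue until another card was inserted.
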
@@ -681,7 +699,7 @@
         }

         /* DDR mode set */
-        if (ios->ddr) {
+        if (ios->timing == MMC_TIMING_UHS_DDR50) {
                 regs = mci_readl(slot->host, UHS_REG);
                 regs |= (0x1 << slot->id) << 16;
                 mci_writel(slot->host, UHS_REG, regs);
@@ -746,11 +764,29 @@
         return present;
 }

+static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
+{
+        struct dw_mci_slot *slot = mmc_priv(mmc);
+        struct dw_mci *host = slot->host;
+        u32 int_mask;
+
+        /* Enable/disable Slot Specific SDIO interrupt */
+        int_mask = mci_readl(host, INTMASK);
+        if (enb) {
+                mci_writel(host, INTMASK,
+                           (int_mask | (1 << SDMMC_INT_SDIO(slot->id))));
+        } else {
+                mci_writel(host, INTMASK,
+                           (int_mask & ~(1 << SDMMC_INT_SDIO(slot->id))));
+        }
+}
+
 static const struct mmc_host_ops dw_mci_ops = {
-        .request        = dw_mci_request,
-        .set_ios        = dw_mci_set_ios,
-        .get_ro         = dw_mci_get_ro,
-        .get_cd         = dw_mci_get_cd,
+        .request                = dw_mci_request,
+        .set_ios                = dw_mci_set_ios,
+        .get_ro                 = dw_mci_get_ro,
+        .get_cd                 = dw_mci_get_cd,
+        .enable_sdio_irq        = dw_mci_enable_sdio_irq,
 };

 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
@@ -831,7 +867,7 @@
         struct mmc_command *cmd;
         enum dw_mci_state state;
         enum dw_mci_state prev_state;
-        u32 status;
+        u32 status, ctrl;

         spin_lock(&host->lock);

@@ -891,13 +927,19 @@

                         if (status & DW_MCI_DATA_ERROR_FLAGS) {
                                 if (status & SDMMC_INT_DTO) {
-                                        dev_err(&host->pdev->dev,
-                                                "data timeout error\n");
                                         data->error = -ETIMEDOUT;
                                 } else if (status & SDMMC_INT_DCRC) {
-                                        dev_err(&host->pdev->dev,
-                                                "data CRC error\n");
                                         data->error = -EILSEQ;
+                                } else if (status & SDMMC_INT_EBE &&
+                                           host->dir_status ==
+                                                        DW_MCI_SEND_STATUS) {
+                                        /*
+                                         * No data CRC status was returned.
+                                         * The number of bytes transferred will
+                                         * be exaggerated in PIO mode.
+                                         */
+                                        data->bytes_xfered = 0;
+                                        data->error = -ETIMEDOUT;
                                 } else {
                                         dev_err(&host->pdev->dev,
                                                 "data FIFO error "
@@ -905,6 +947,16 @@
                                                 status);
                                         data->error = -EIO;
                                 }
+                                /*
+                                 * After an error, there may be data lingering
+                                 * in the FIFO, so reset it - doing so
+                                 * generates a block interrupt, hence setting
+                                 * the scatter-gather pointer to NULL.
+                                 */
+                                host->sg = NULL;
+                                ctrl = mci_readl(host, CTRL);
+                                ctrl |= SDMMC_CTRL_FIFO_RESET;
+                                mci_writel(host, CTRL, ctrl);
                         } else {
                                 data->bytes_xfered = data->blocks * data->blksz;
                                 data->error = 0;
@@ -946,84 +998,290 @@

 }

+/* push final bytes to part_buf, only use during push */
+static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
+{
+        memcpy((void *)&host->part_buf, buf, cnt);
+        host->part_buf_count = cnt;
+}
+
+/* append bytes to part_buf, only use during push */
+static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
+{
+        cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
+        memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
+        host->part_buf_count += cnt;
+        return cnt;
+}
+
+/* pull first bytes from part_buf, only use during pull */
+static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
+{
+        cnt = min(cnt, (int)host->part_buf_count);
+        if (cnt) {
+                memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
+                       cnt);
+                host->part_buf_count -= cnt;
+                host->part_buf_start += cnt;
+        }
+        return cnt;
+}
+
+/* pull final bytes from the part_buf, assuming it's just been filled */
+static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
+{
+        memcpy(buf, &host->part_buf, cnt);
+        host->part_buf_start = cnt;
+        host->part_buf_count = (1 << host->data_shift) - cnt;
+}
+
 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
 {
-        u16 *pdata = (u16 *)buf;
-
-        WARN_ON(cnt % 2 != 0);
-
-        cnt = cnt >> 1;
-        while (cnt > 0) {
-                mci_writew(host, DATA, *pdata++);
-                cnt--;
+        /* try and push anything in the part_buf */
+        if (unlikely(host->part_buf_count)) {
+                int len = dw_mci_push_part_bytes(host, buf, cnt);
+                buf += len;
+                cnt -= len;
+                if (!sg_next(host->sg) || host->part_buf_count == 2) {
+                        mci_writew(host, DATA(host->data_offset),
+                                        host->part_buf16);
+                        host->part_buf_count = 0;
+                }
+        }
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+        if (unlikely((unsigned long)buf & 0x1)) {
+                while (cnt >= 2) {
+                        u16 aligned_buf[64];
+                        int len = min(cnt & -2, (int)sizeof(aligned_buf));
+                        int items = len >> 1;
+                        int i;
+                        /* memcpy from input buffer into aligned buffer */
+                        memcpy(aligned_buf, buf, len);
+                        buf += len;
+                        cnt -= len;
+                        /* push data from aligned buffer into fifo */
+                        for (i = 0; i < items; ++i)
+                                mci_writew(host, DATA(host->data_offset),
+                                                aligned_buf[i]);
+                }
+        } else
+#endif
+        {
+                u16 *pdata = buf;
+                for (; cnt >= 2; cnt -= 2)
+                        mci_writew(host, DATA(host->data_offset), *pdata++);
+                buf = pdata;
+        }
+        /* put anything remaining in the part_buf */
+        if (cnt) {
+                dw_mci_set_part_bytes(host, buf, cnt);
+                if (!sg_next(host->sg))
+                        mci_writew(host, DATA(host->data_offset),
+                                        host->part_buf16);
         }
 }

 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
 {
-        u16 *pdata = (u16 *)buf;
-
-        WARN_ON(cnt % 2 != 0);
-
-        cnt = cnt >> 1;
-        while (cnt > 0) {
-                *pdata++ = mci_readw(host, DATA);
-                cnt--;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+        if (unlikely((unsigned long)buf & 0x1)) {
+                while (cnt >= 2) {
+                        /* pull data from fifo into aligned buffer */
+                        u16 aligned_buf[64];
+                        int len = min(cnt & -2, (int)sizeof(aligned_buf));
+                        int items = len >> 1;
+                        int i;
+                        for (i = 0; i < items; ++i)
+                                aligned_buf[i] = mci_readw(host,
+                                                DATA(host->data_offset));
+                        /* memcpy from aligned buffer into output buffer */
+                        memcpy(buf, aligned_buf, len);
+                        buf += len;
+                        cnt -= len;
+                }
+        } else
+#endif
+        {
+                u16 *pdata = buf;
+                for (; cnt >= 2; cnt -= 2)
+                        *pdata++ = mci_readw(host, DATA(host->data_offset));
+                buf = pdata;
+        }
+        if (cnt) {
+                host->part_buf16 = mci_readw(host, DATA(host->data_offset));
+                dw_mci_pull_final_bytes(host, buf, cnt);
         }
 }

 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
 {
-        u32 *pdata = (u32 *)buf;
-
-        WARN_ON(cnt % 4 != 0);
-        WARN_ON((unsigned long)pdata & 0x3);
-
-        cnt = cnt >> 2;
-        while (cnt > 0) {
-                mci_writel(host, DATA, *pdata++);
-                cnt--;
+        /* try and push anything in the part_buf */
+        if (unlikely(host->part_buf_count)) {
+                int len = dw_mci_push_part_bytes(host, buf, cnt);
+                buf += len;
+                cnt -= len;
+                if (!sg_next(host->sg) || host->part_buf_count == 4) {
+                        mci_writel(host, DATA(host->data_offset),
+                                        host->part_buf32);
+                        host->part_buf_count = 0;
+                }
+        }
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+        if (unlikely((unsigned long)buf & 0x3)) {
+                while (cnt >= 4) {
+                        u32 aligned_buf[32];
+                        int len = min(cnt & -4, (int)sizeof(aligned_buf));
+                        int items = len >> 2;
+                        int i;
+                        /* memcpy from input buffer into aligned buffer */
+                        memcpy(aligned_buf, buf, len);
+                        buf += len;
+                        cnt -= len;
+                        /* push data from aligned buffer into fifo */
+                        for (i = 0; i < items; ++i)
+                                mci_writel(host, DATA(host->data_offset),
+                                                aligned_buf[i]);
+                }
+        } else
+#endif
+        {
+                u32 *pdata = buf;
+                for (; cnt >= 4; cnt -= 4)
+                        mci_writel(host, DATA(host->data_offset), *pdata++);
+                buf = pdata;
+        }
+        /* put anything remaining in the part_buf */
+        if (cnt) {
+                dw_mci_set_part_bytes(host, buf, cnt);
+                if (!sg_next(host->sg))
+                        mci_writel(host, DATA(host->data_offset),
+                                                host->part_buf32);
         }
 }

 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
 {
-        u32 *pdata = (u32 *)buf;
-
-        WARN_ON(cnt % 4 != 0);
-        WARN_ON((unsigned long)pdata & 0x3);
-
-        cnt = cnt >> 2;
-        while (cnt > 0) {
-                *pdata++ = mci_readl(host, DATA);
-                cnt--;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+        if (unlikely((unsigned long)buf & 0x3)) {
+                while (cnt >= 4) {
+                        /* pull data from fifo into aligned buffer */
+                        u32 aligned_buf[32];
+                        int len = min(cnt & -4, (int)sizeof(aligned_buf));
+                        int items = len >> 2;
+                        int i;
+                        for (i = 0; i < items; ++i)
+                                aligned_buf[i] = mci_readl(host,
+                                                DATA(host->data_offset));
+                        /* memcpy from aligned buffer into output buffer */
+                        memcpy(buf, aligned_buf, len);
+                        buf += len;
+                        cnt -= len;
+                }
+        } else
+#endif
+        {
+                u32 *pdata = buf;
+                for (; cnt >= 4; cnt -= 4)
+                        *pdata++ = mci_readl(host, DATA(host->data_offset));
+                buf = pdata;
+        }
+        if (cnt) {
+                host->part_buf32 = mci_readl(host, DATA(host->data_offset));
+                dw_mci_pull_final_bytes(host, buf, cnt);
         }
 }

 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
 {
-        u64 *pdata = (u64 *)buf;
-
-        WARN_ON(cnt % 8 != 0);
-
-        cnt = cnt >> 3;
-        while (cnt > 0) {
-                mci_writeq(host, DATA, *pdata++);
-                cnt--;
+        /* try and push anything in the part_buf */
+        if (unlikely(host->part_buf_count)) {
+                int len = dw_mci_push_part_bytes(host, buf, cnt);
+                buf += len;
+                cnt -= len;
+                if (!sg_next(host->sg) || host->part_buf_count == 8) {
+                        mci_writew(host, DATA(host->data_offset),
+                                        host->part_buf);
+                        host->part_buf_count = 0;
+                }
+        }
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+        if (unlikely((unsigned long)buf & 0x7)) {
+                while (cnt >= 8) {
+                        u64 aligned_buf[16];
+                        int len = min(cnt & -8, (int)sizeof(aligned_buf));
+                        int items = len >> 3;
+                        int i;
+                        /* memcpy from input buffer into aligned buffer */
+                        memcpy(aligned_buf, buf, len);
+                        buf += len;
+                        cnt -= len;
+                        /* push data from aligned buffer into fifo */
+                        for (i = 0; i < items; ++i)
+                                mci_writeq(host, DATA(host->data_offset),
+                                                aligned_buf[i]);
+                }
+        } else
+#endif
+        {
+                u64 *pdata = buf;
+                for (; cnt >= 8; cnt -= 8)
+                        mci_writeq(host, DATA(host->data_offset), *pdata++);
+                buf = pdata;
+        }
+        /* put anything remaining in the part_buf */
+        if (cnt) {
+                dw_mci_set_part_bytes(host, buf, cnt);
+                if (!sg_next(host->sg))
+                        mci_writeq(host, DATA(host->data_offset),
+                                        host->part_buf);
         }
 }

 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
 {
-        u64 *pdata = (u64 *)buf;
-
-        WARN_ON(cnt % 8 != 0);
-
-        cnt = cnt >> 3;
-        while (cnt > 0) {
-                *pdata++ = mci_readq(host, DATA);
-                cnt--;
-        }
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+        if (unlikely((unsigned long)buf & 0x7)) {
+                while (cnt >= 8) {
+                        /* pull data from fifo into aligned buffer */
+                        u64 aligned_buf[16];
+                        int len = min(cnt & -8, (int)sizeof(aligned_buf));
+                        int items = len >> 3;
+                        int i;
+                        for (i = 0; i < items; ++i)
+                                aligned_buf[i] = mci_readq(host,
+                                                DATA(host->data_offset));
+                        /* memcpy from aligned buffer into output buffer */
+                        memcpy(buf, aligned_buf, len);
+                        buf += len;
+                        cnt -= len;
+                }
+        } else
+#endif
+        {
+                u64 *pdata = buf;
+                for (; cnt >= 8; cnt -= 8)
+                        *pdata++ = mci_readq(host, DATA(host->data_offset));
+                buf = pdata;
+        }
+        if (cnt) {
+                host->part_buf = mci_readq(host, DATA(host->data_offset));
+                dw_mci_pull_final_bytes(host, buf, cnt);
+        }
+}
+
+static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
+{
+        int len;
+
+        /* get remaining partial bytes */
+        len = dw_mci_pull_part_bytes(host, buf, cnt);
+        if (unlikely(len == cnt))
+                return;
+        buf += len;
+        cnt -= len;
+
+        /* get the rest of the data */
+        host->pull_data(host, buf, cnt);
 }

 static void dw_mci_read_data_pio(struct dw_mci *host)
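
The rewritten PIO helpers above no longer assume that every chunk is a whole number of FIFO words: bytes that do not fill a complete word are parked in host->part_buf and flushed either when a later chunk completes the word or when the scatter list ends, and a small aligned bounce buffer is used when the pointer is misaligned on architectures without efficient unaligned access. A toy userspace model of the part_buf idea for a 16-bit FIFO (illustrative only; the array standing in for the FIFO and all names are assumptions, not driver code):

#include <stdint.h>
#include <stdio.h>

static uint16_t fifo[64];                /* stands in for the controller FIFO */
static int fifo_len;
static uint16_t part_buf;                /* parked, not-yet-pushed byte */
static int part_buf_count;               /* 0 or 1 pending bytes */

static void push_bytes(const uint8_t *buf, int cnt, int last_chunk)
{
        if (part_buf_count) {            /* complete a previously parked byte */
                part_buf |= (uint16_t)buf[0] << 8;
                fifo[fifo_len++] = part_buf;
                part_buf_count = 0;
                buf++;
                cnt--;
        }
        for (; cnt >= 2; cnt -= 2, buf += 2)         /* whole words */
                fifo[fifo_len++] = (uint16_t)buf[0] | ((uint16_t)buf[1] << 8);
        if (cnt) {                       /* park the odd trailing byte */
                part_buf = buf[0];
                part_buf_count = 1;
                if (last_chunk) {        /* flush it if nothing else follows */
                        fifo[fifo_len++] = part_buf;
                        part_buf_count = 0;
                }
        }
}

int main(void)
{
        const uint8_t data[] = { 1, 2, 3, 4, 5 };    /* odd total length */

        push_bytes(data, 3, 0);          /* first chunk ends mid-word */
        push_bytes(data + 3, 2, 1);      /* second chunk completes it */
        printf("%d words pushed for 5 bytes\n", fifo_len);   /* prints 3 */
        return 0;
}
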
@@ -1037,9 +1295,10 @@
         unsigned int nbytes = 0, len;

         do {
-                len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
+                len = host->part_buf_count +
+                        (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
                 if (offset + len <= sg->length) {
-                        host->pull_data(host, (void *)(buf + offset), len);
+                        dw_mci_pull_data(host, (void *)(buf + offset), len);

                         offset += len;
                         nbytes += len;
@@ -1055,8 +1314,8 @@
                         }
                 } else {
                         unsigned int remaining = sg->length - offset;
-                        host->pull_data(host, (void *)(buf + offset),
-                                        remaining);
+                        dw_mci_pull_data(host, (void *)(buf + offset),
+                                         remaining);
                         nbytes += remaining;

                         flush_dcache_page(sg_page(sg));
@@ -1066,7 +1325,7 @@

                         offset = len - remaining;
                         buf = sg_virt(sg);
-                        host->pull_data(host, buf, offset);
+                        dw_mci_pull_data(host, buf, offset);
                         nbytes += offset;
                 }

@@ -1083,7 +1342,6 @@
                         return;
                 }
         } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
-        len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
         host->pio_offset = offset;
         data->bytes_xfered += nbytes;
         return;
@@ -1105,8 +1363,9 @@
         unsigned int nbytes = 0, len;

         do {
-                len = SDMMC_FIFO_SZ -
-                        (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
+                len = ((host->fifo_depth -
+                        SDMMC_GET_FCNT(mci_readl(host, STATUS))) << shift)
+                        - host->part_buf_count;
                 if (offset + len <= sg->length) {
                         host->push_data(host, (void *)(buf + offset), len);

@@ -1151,10 +1410,8 @@
                         return;
                 }
         } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
-
         host->pio_offset = offset;
         data->bytes_xfered += nbytes;
-
         return;

 done:
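
The rewritten length calculation above computes the free FIFO space from the probed depth instead of the old fixed SDMMC_FIFO_SZ constant, and subtracts any byte already parked in part_buf. A worked example with assumed values (a 32-word FIFO with 16-bit words, 20 words already queued, one byte parked):

#include <stdio.h>

int main(void)
{
        int fifo_depth = 32;       /* words; assumed, not from the patch */
        int fcnt = 20;             /* words currently in the FIFO */
        int shift = 1;             /* 16-bit FIFO word = 2 bytes */
        int part_buf_count = 1;    /* one byte already parked */

        int len = ((fifo_depth - fcnt) << shift) - part_buf_count;
        printf("%d bytes can be pushed this pass\n", len);   /* prints 23 */
        return 0;
}
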
@@ -1179,6 +1436,7 @@
         struct dw_mci *host = dev_id;
         u32 status, pending;
         unsigned int pass_count = 0;
+        int i;

         do {
                 status = mci_readl(host, RINTSTS);
@@ -1202,7 +1460,6 @@
                         host->cmd_status = status;
                         smp_wmb();
                         set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
-                        tasklet_schedule(&host->tasklet);
                 }

                 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
@@ -1211,7 +1468,9 @@
                         host->data_status = status;
                         smp_wmb();
                         set_bit(EVENT_DATA_ERROR, &host->pending_events);
-                        tasklet_schedule(&host->tasklet);
+                        if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC |
+                                         SDMMC_INT_SBE | SDMMC_INT_EBE)))
+                                tasklet_schedule(&host->tasklet);
                 }

                 if (pending & SDMMC_INT_DATA_OVER) {
1216
1475
 
1217
1476
                if (pending & SDMMC_INT_DATA_OVER) {
1229
1488
 
1230
1489
                if (pending & SDMMC_INT_RXDR) {
1231
1490
                        mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1232
 
                        if (host->sg)
 
1491
                        if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
1233
1492
                                dw_mci_read_data_pio(host);
1234
1493
                }
1235
1494
 
1236
1495
                if (pending & SDMMC_INT_TXDR) {
1237
1496
                        mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1238
 
                        if (host->sg)
 
1497
                        if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
1239
1498
                                dw_mci_write_data_pio(host);
1240
1499
                }
1241
1500
 
1246
1505
 
1247
1506
                if (pending & SDMMC_INT_CD) {
1248
1507
                        mci_writel(host, RINTSTS, SDMMC_INT_CD);
1249
 
                        tasklet_schedule(&host->card_tasklet);
 
1508
                        queue_work(dw_mci_card_workqueue, &host->card_work);
 
1509
                }
 
1510
 
 
1511
                /* Handle SDIO Interrupts */
 
1512
                for (i = 0; i < host->num_slots; i++) {
 
1513
                        struct dw_mci_slot *slot = host->slot[i];
 
1514
                        if (pending & SDMMC_INT_SDIO(i)) {
 
1515
                                mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
 
1516
                                mmc_signal_sdio_irq(slot->mmc);
 
1517
                        }
1250
1518
                }
1251
1519
 
1252
1520
        } while (pass_count++ < 5);
@@ -1265,9 +1533,9 @@
         return IRQ_HANDLED;
 }

-static void dw_mci_tasklet_card(unsigned long data)
+static void dw_mci_work_routine_card(struct work_struct *work)
 {
-        struct dw_mci *host = (struct dw_mci *)data;
+        struct dw_mci *host = container_of(work, struct dw_mci, card_work);
         int i;

         for (i = 0; i < host->num_slots; i++) {
@@ -1279,22 +1547,21 @@

                 present = dw_mci_get_cd(mmc);
                 while (present != slot->last_detect_state) {
-                        spin_lock(&host->lock);
-
                         dev_dbg(&slot->mmc->class_dev, "card %s\n",
                                 present ? "inserted" : "removed");

+                        /* Power up slot (before spin_lock, may sleep) */
+                        if (present != 0 && host->pdata->setpower)
+                                host->pdata->setpower(slot->id, mmc->ocr_avail);
+
+                        spin_lock_bh(&host->lock);
+
                         /* Card change detected */
                         slot->last_detect_state = present;

-                        /* Power up slot */
-                        if (present != 0) {
-                                if (host->pdata->setpower)
-                                        host->pdata->setpower(slot->id,
-                                                              mmc->ocr_avail);
-
+                        /* Mark card as present if applicable */
+                        if (present != 0)
                                 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
-                        }

                         /* Clean up queue if present */
                         mrq = slot->mrq;
@@ -1344,8 +1611,6 @@

                         /* Power down slot */
                         if (present == 0) {
-                                if (host->pdata->setpower)
-                                        host->pdata->setpower(slot->id, 0);
                                 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

                                 /*
@@ -1367,7 +1632,12 @@

                         }

-                        spin_unlock(&host->lock);
+                        spin_unlock_bh(&host->lock);
+
+                        /* Power down slot (after spin_unlock, may sleep) */
+                        if (present == 0 && host->pdata->setpower)
+                                host->pdata->setpower(slot->id, 0);
+
                         present = dw_mci_get_cd(mmc);
                 }

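
The conversion above replaces the card-detect tasklet with a dedicated workqueue, so the handler runs in process context where the platform's setpower callback, which may sleep, can be called outside the spinlock. A minimal, self-contained sketch of the same tasklet-to-workqueue pattern (assumed example module, not the driver itself):

#include <linux/module.h>
#include <linux/workqueue.h>

struct demo_host {
        struct work_struct card_work;
        int slot_id;
};

static struct workqueue_struct *demo_wq;
static struct demo_host demo;

static void demo_card_work(struct work_struct *work)
{
        struct demo_host *host = container_of(work, struct demo_host, card_work);

        /* process context: sleeping calls (regulator, setpower) are allowed here */
        pr_info("card event on slot %d\n", host->slot_id);
}

static int __init demo_init(void)
{
        demo_wq = alloc_workqueue("demo-card", WQ_MEM_RECLAIM, 1);
        if (!demo_wq)
                return -ENOMEM;
        INIT_WORK(&demo.card_work, demo_card_work);
        queue_work(demo_wq, &demo.card_work);   /* the driver does this from its IRQ handler */
        return 0;
}

static void __exit demo_exit(void)
{
        destroy_workqueue(demo_wq);             /* flushes pending work before freeing */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
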
@@ -1416,7 +1686,7 @@
                         mmc->caps |= MMC_CAP_4_BIT_DATA;

         if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
-                mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+                mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

 #ifdef CONFIG_MMC_DW_IDMAC
         mmc->max_segs = host->ring_size;
@@ -1443,7 +1713,7 @@

         host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
         if (IS_ERR(host->vmmc)) {
-                printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc));
+                pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
                 host->vmmc = NULL;
         } else
                 regulator_enable(host->vmmc);
@@ -1467,7 +1737,7 @@
          * Card may have been plugged in prior to boot so we
          * need to run the detect tasklet
          */
-        tasklet_schedule(&host->card_tasklet);
+        queue_work(dw_mci_card_workqueue, &host->card_work);

         return 0;
 }
@@ -1595,7 +1865,7 @@
         INIT_LIST_HEAD(&host->queue);

         ret = -ENOMEM;
-        host->regs = ioremap(regs->start, regs->end - regs->start + 1);
+        host->regs = ioremap(regs->start, resource_size(regs));
         if (!host->regs)
                 goto err_freehost;

@@ -1645,8 +1915,19 @@
          * FIFO threshold settings  RxMark  = fifo_size / 2 - 1,
          *                          Tx Mark = fifo_size / 2 DMA Size = 8
          */
-        fifo_size = mci_readl(host, FIFOTH);
-        fifo_size = (fifo_size >> 16) & 0x7ff;
+        if (!host->pdata->fifo_depth) {
+                /*
+                 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
+                 * have been overwritten by the bootloader, just like we're
+                 * about to do, so if you know the value for your hardware, you
+                 * should put it in the platform data.
+                 */
+                fifo_size = mci_readl(host, FIFOTH);
+                fifo_size = 1 + ((fifo_size >> 16) & 0x7ff);
+        } else {
+                fifo_size = host->pdata->fifo_depth;
+        }
+        host->fifo_depth = fifo_size;
         host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
                         ((fifo_size/2) << 0));
         mci_writel(host, FIFOTH, host->fifoth_val);
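
The probe changes above derive the FIFO depth from the power-on RX_WMark field of FIFOTH unless the platform data supplies fifo_depth, and then program RxMark = depth/2 - 1, TxMark = depth/2 and the DMA burst size noted in the comment (the 0x2 in bits 30:28). A worked example for an assumed 32-word FIFO (standalone, illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t fifoth = 31u << 16;   /* power-on RX_WMark = depth - 1 for a 32-word FIFO (assumed) */
        uint32_t depth  = 1 + ((fifoth >> 16) & 0x7ff);
        uint32_t val    = (0x2u << 28) | ((depth / 2 - 1) << 16) | (depth / 2);

        printf("depth=%u FIFOTH=0x%08x\n", (unsigned)depth, (unsigned)val);  /* depth=32 FIFOTH=0x200f0010 */
        return 0;
}
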
@@ -1656,12 +1937,15 @@
         mci_writel(host, CLKSRC, 0);

         tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
-        tasklet_init(&host->card_tasklet,
-                     dw_mci_tasklet_card, (unsigned long)host);
+        dw_mci_card_workqueue = alloc_workqueue("dw-mci-card",
+                        WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
+        if (!dw_mci_card_workqueue)
+                goto err_dmaunmap;
+        INIT_WORK(&host->card_work, dw_mci_work_routine_card);

         ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host);
         if (ret)
-                goto err_dmaunmap;
+                goto err_workqueue;

         platform_set_drvdata(pdev, host);

@@ -1680,6 +1964,18 @@
         }

         /*
+         * In 2.40a spec, Data offset is changed.
+         * Need to check the version-id and set data-offset for DATA register.
+         */
+        host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
+        dev_info(&pdev->dev, "Version ID is %04x\n", host->verid);
+
+        if (host->verid < DW_MMC_240A)
+                host->data_offset = DATA_OFFSET;
+        else
+                host->data_offset = DATA_240A_OFFSET;
+
+        /*
          * Enable interrupts for command done, data over, data empty, card det,
          * receive ready and error such as transmit, receive timeout, crc error
          */
@@ -1690,7 +1986,9 @@
         mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */

         dev_info(&pdev->dev, "DW MMC controller at irq %d, "
-                 "%d bit host data width\n", irq, width);
+                 "%d bit host data width, "
+                 "%u deep fifo\n",
+                 irq, width, fifo_size);
         if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
                 dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n");

@@ -1705,6 +2003,9 @@
         }
         free_irq(irq, host);

+err_workqueue:
+        destroy_workqueue(dw_mci_card_workqueue);
+
 err_dmaunmap:
         if (host->use_dma && host->dma_ops->exit)
                 host->dma_ops->exit(host);
@@ -1744,6 +2045,7 @@
         mci_writel(host, CLKSRC, 0);

         free_irq(platform_get_irq(pdev, 0), host);
+        destroy_workqueue(dw_mci_card_workqueue);
         dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);

         if (host->use_dma && host->dma_ops->exit)