51
void set_phy_tuning(struct mvs_info *mvi, int phy_id,
52
struct phy_tuning phy_tuning)
54
u32 tmp, setting_0 = 0, setting_1 = 0;
57
/* Remap information for B0 chip:
59
* R0Ch -> R118h[15:0] (Adapted DFE F3 - F5 coefficient)
60
* R0Dh -> R118h[31:16] (Generation 1 Setting 0)
61
* R0Eh -> R11Ch[15:0] (Generation 1 Setting 1)
62
* R0Fh -> R11Ch[31:16] (Generation 2 Setting 0)
63
* R10h -> R120h[15:0] (Generation 2 Setting 1)
64
* R11h -> R120h[31:16] (Generation 3 Setting 0)
65
* R12h -> R124h[15:0] (Generation 3 Setting 1)
66
* R13h -> R124h[31:16] (Generation 4 Setting 0 (Reserved))
69
/* A0 has a different set of registers */
70
if (mvi->pdev->revision == VANIR_A0_REV)
73
for (i = 0; i < 3; i++) {
74
/* loop 3 times, set Gen 1, Gen 2, Gen 3 */
77
setting_0 = GENERATION_1_SETTING;
78
setting_1 = GENERATION_1_2_SETTING;
81
setting_0 = GENERATION_1_2_SETTING;
82
setting_1 = GENERATION_2_3_SETTING;
85
setting_0 = GENERATION_2_3_SETTING;
86
setting_1 = GENERATION_3_4_SETTING;
92
* Transmitter Emphasis Enable
93
* Transmitter Emphasis Amplitude
94
* Transmitter Amplitude
96
mvs_write_port_vsr_addr(mvi, phy_id, setting_0);
97
tmp = mvs_read_port_vsr_data(mvi, phy_id);
98
tmp &= ~(0xFBE << 16);
99
tmp |= (((phy_tuning.trans_emp_en << 11) |
100
(phy_tuning.trans_emp_amp << 7) |
101
(phy_tuning.trans_amp << 1)) << 16);
102
mvs_write_port_vsr_data(mvi, phy_id, tmp);
104
/* Set Transmitter Amplitude Adjust */
105
mvs_write_port_vsr_addr(mvi, phy_id, setting_1);
106
tmp = mvs_read_port_vsr_data(mvi, phy_id);
108
tmp |= (phy_tuning.trans_amp_adj << 14);
109
mvs_write_port_vsr_data(mvi, phy_id, tmp);
113
void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id,
114
struct ffe_control ffe)
118
/* Don't run this if A0/B0 */
119
if ((mvi->pdev->revision == VANIR_A0_REV)
120
|| (mvi->pdev->revision == VANIR_B0_REV))
123
/* FFE Resistor and Capacitor */
124
/* R10Ch DFE Resolution Control/Squelch and FFE Setting
130
mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_FFE_CONTROL);
131
tmp = mvs_read_port_vsr_data(mvi, phy_id);
134
/* Read from HBA_Info_Page */
136
(ffe.ffe_rss_sel << 4) |
137
(ffe.ffe_cap_sel << 0));
139
mvs_write_port_vsr_data(mvi, phy_id, tmp);
141
/* R064h PHY Mode Register 1
145
mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
146
tmp = mvs_read_port_vsr_data(mvi, phy_id);
149
/* No defines in HBA_Info_Page */
151
mvs_write_port_vsr_data(mvi, phy_id, tmp);
153
/* R110h DFE F0-F1 Coefficient Control/DFE Update Control
155
* DFE_UPDATE_EN [11:6]
158
mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_DFE_UPDATE_CRTL);
159
tmp = mvs_read_port_vsr_data(mvi, phy_id);
162
/* No defines in HBA_Info_Page */
163
tmp |= ((0x3F << 6) | (0x0 << 0));
164
mvs_write_port_vsr_data(mvi, phy_id, tmp);
166
/* R1A0h Interface and Digital Reference Clock Control/Reserved_50h
170
mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
171
tmp = mvs_read_port_vsr_data(mvi, phy_id);
174
/* No defines in HBA_Info_Page */
176
mvs_write_port_vsr_data(mvi, phy_id, tmp);
179
/*Notice: this function must be called when phy is disabled*/
180
void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate)
182
union reg_phy_cfg phy_cfg, phy_cfg_tmp;
183
mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
184
phy_cfg_tmp.v = mvs_read_port_vsr_data(mvi, phy_id);
186
phy_cfg.u.disable_phy = phy_cfg_tmp.u.disable_phy;
187
phy_cfg.u.sas_support = 1;
188
phy_cfg.u.sata_support = 1;
189
phy_cfg.u.sata_host_mode = 1;
193
/* support 1.5 Gbps */
194
phy_cfg.u.speed_support = 1;
195
phy_cfg.u.snw_3_support = 0;
196
phy_cfg.u.tx_lnk_parity = 1;
197
phy_cfg.u.tx_spt_phs_lnk_rate = 0x30;
201
/* support 1.5, 3.0 Gbps */
202
phy_cfg.u.speed_support = 3;
203
phy_cfg.u.tx_spt_phs_lnk_rate = 0x3c;
204
phy_cfg.u.tx_lgcl_lnk_rate = 0x08;
208
/* support 1.5, 3.0, 6.0 Gbps */
209
phy_cfg.u.speed_support = 7;
210
phy_cfg.u.snw_3_support = 1;
211
phy_cfg.u.tx_lnk_parity = 1;
212
phy_cfg.u.tx_spt_phs_lnk_rate = 0x3f;
213
phy_cfg.u.tx_lgcl_lnk_rate = 0x09;
216
mvs_write_port_vsr_data(mvi, phy_id, phy_cfg.v);
219
static void __devinit
220
mvs_94xx_config_reg_from_hba(struct mvs_info *mvi, int phy_id)
223
temp = (u32)(*(u32 *)&mvi->hba_info_param.phy_tuning[phy_id]);
224
if (temp == 0xFFFFFFFFL) {
225
mvi->hba_info_param.phy_tuning[phy_id].trans_emp_amp = 0x6;
226
mvi->hba_info_param.phy_tuning[phy_id].trans_amp = 0x1A;
227
mvi->hba_info_param.phy_tuning[phy_id].trans_amp_adj = 0x3;
230
temp = (u8)(*(u8 *)&mvi->hba_info_param.ffe_ctl[phy_id]);
232
switch (mvi->pdev->revision) {
235
mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
236
mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0x7;
242
mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
243
mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0xC;
248
temp = (u8)(*(u8 *)&mvi->hba_info_param.phy_rate[phy_id]);
250
/*set default phy_rate = 6Gbps*/
251
mvi->hba_info_param.phy_rate[phy_id] = 0x2;
253
set_phy_tuning(mvi, phy_id,
254
mvi->hba_info_param.phy_tuning[phy_id]);
255
set_phy_ffe_tuning(mvi, phy_id,
256
mvi->hba_info_param.ffe_ctl[phy_id]);
257
set_phy_rate(mvi, phy_id,
258
mvi->hba_info_param.phy_rate[phy_id]);
51
261
static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
53
263
void __iomem *regs = mvi->regs;
91
311
static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
93
mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4);
94
mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
95
mvs_write_port_vsr_addr(mvi, phy_id, 0x104);
96
mvs_write_port_vsr_data(mvi, phy_id, 0x00018080);
316
revision = mvi->pdev->revision;
317
if (revision == VANIR_A0_REV) {
318
mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
319
mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
321
if (revision == VANIR_B0_REV) {
322
mvs_write_port_vsr_addr(mvi, phy_id, CMD_APP_MEM_CTL);
323
mvs_write_port_vsr_data(mvi, phy_id, 0x08001006);
324
mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
325
mvs_write_port_vsr_data(mvi, phy_id, 0x0000705f);
97
328
mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
98
mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff);
329
tmp = mvs_read_port_vsr_data(mvi, phy_id);
331
mvs_write_port_vsr_data(mvi, phy_id, tmp & 0xfd7fffff);
101
/*
 * NOTE(review): fragment of mvs_94xx_init() — original lines 334..395 with
 * large gaps; most of the function (declarations, chip reset, the loops that
 * follow) is missing from this excerpt, so the text is preserved as-is.
 * The interleaved bare numbers are damage from the paste, not code.
 */
334
static int __devinit mvs_94xx_init(struct mvs_info *mvi)
371
/* disable Multiplexing, enable phy implemented */
372
mw32(MVS_PORTS_IMP, 0xFF);
374
/* NOTE(review): `revision` is presumably mvi->pdev->revision, declared in a
 * missing line — confirm against the full source. */
if (revision == VANIR_A0_REV) {
375
mw32(MVS_PA_VSR_ADDR, CMD_CMWK_OOB_DET);
376
mw32(MVS_PA_VSR_PORT, 0x00018080);
378
mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE2);
379
if (revision == VANIR_A0_REV || revision == VANIR_B0_REV)
380
/* set 6G/3G/1.5G, multiplexing, without SSC */
381
mw32(MVS_PA_VSR_PORT, 0x0084d4fe);
383
/* set 6G/3G/1.5G, multiplexing, with and without SSC */
384
mw32(MVS_PA_VSR_PORT, 0x0084fffe);
386
if (revision == VANIR_B0_REV) {
387
mw32(MVS_PA_VSR_ADDR, CMD_APP_MEM_CTL);
388
mw32(MVS_PA_VSR_PORT, 0x08001006);
389
mw32(MVS_PA_VSR_ADDR, CMD_HOST_RD_DATA);
390
mw32(MVS_PA_VSR_PORT, 0x0000705f);
136
393
/* reset control */
137
394
mw32(MVS_PCS, 0); /* MVS_PCS */
138
395
mw32(MVS_STP_REG_SET_0, 0);
346
621
static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
349
mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32));
351
tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3));
352
} while (tmp & 1 << (slot_idx % 32));
624
tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3));
625
if (tmp && 1 << (slot_idx % 32)) {
626
mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx);
627
mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3),
628
1 << (slot_idx % 32));
631
MVS_COMMAND_ACTIVE + (slot_idx >> 3));
632
} while (tmp & 1 << (slot_idx % 32));
636
void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
638
void __iomem *regs = mvi->regs;
642
tmp = mr32(MVS_INT_STAT_SRS_0);
644
mv_dprintk("check SRS 0 %08X.\n", tmp);
645
mw32(MVS_INT_STAT_SRS_0, tmp);
647
tmp = mr32(MVS_INT_STAT_SRS_1);
649
mv_dprintk("check SRS 1 %08X.\n", tmp);
650
mw32(MVS_INT_STAT_SRS_1, tmp);
654
tmp = mr32(MVS_INT_STAT_SRS_1);
656
tmp = mr32(MVS_INT_STAT_SRS_0);
658
if (tmp & (1 << (reg_set % 32))) {
659
mv_dprintk("register set 0x%x was stopped.\n", reg_set);
661
mw32(MVS_INT_STAT_SRS_1, 1 << (reg_set % 32));
663
mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32));
355
668
static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
358
671
void __iomem *regs = mvi->regs;
673
mvs_94xx_clear_srs_irq(mvi, 0, 1);
361
if (type == PORT_TYPE_SATA) {
362
tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
363
mw32(MVS_INT_STAT_SRS_0, tmp);
365
mw32(MVS_INT_STAT, CINT_CI_STOP);
675
tmp = mr32(MVS_INT_STAT);
676
mw32(MVS_INT_STAT, tmp | CINT_CI_STOP);
366
677
tmp = mr32(MVS_PCS) | 0xFF00;
367
678
mw32(MVS_PCS, tmp);
681
static void mvs_94xx_non_spec_ncq_error(struct mvs_info *mvi)
683
void __iomem *regs = mvi->regs;
686
struct mvs_device *device;
688
err_0 = mr32(MVS_NON_NCQ_ERR_0);
689
err_1 = mr32(MVS_NON_NCQ_ERR_1);
691
mv_dprintk("non specific ncq error err_0:%x,err_1:%x.\n",
693
for (i = 0; i < 32; i++) {
694
if (err_0 & bit(i)) {
695
device = mvs_find_dev_by_reg_set(mvi, i);
697
mvs_release_task(mvi, device->sas_device);
699
if (err_1 & bit(i)) {
700
device = mvs_find_dev_by_reg_set(mvi, i+32);
702
mvs_release_task(mvi, device->sas_device);
706
mw32(MVS_NON_NCQ_ERR_0, err_0);
707
mw32(MVS_NON_NCQ_ERR_1, err_1);
370
710
static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
372
712
void __iomem *regs = mvi->regs;
374
713
u8 reg_set = *tfs;
376
715
if (*tfs == MVS_ID_NOT_MAPPED)
379
718
mvi->sata_reg_set &= ~bit(reg_set);
381
720
w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
382
tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set;
384
mw32(MVS_INT_STAT_SRS_0, tmp);
386
w_reg_set_enable(reg_set, mvi->sata_reg_set);
387
tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set;
389
mw32(MVS_INT_STAT_SRS_1, tmp);
722
w_reg_set_enable(reg_set, (u32)(mvi->sata_reg_set >> 32));
392
724
*tfs = MVS_ID_NOT_MAPPED;
606
#ifndef DISABLE_HOTPLUG_DMA_FIX
607
void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
954
void mvs_94xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
955
int buf_len, int from, void *prd)
610
958
struct mvs_prd *buf_prd = prd;
960
struct mvs_prd_imt im_len;
612
for (i = 0; i < MAX_SG_ENTRY - from; i++) {
613
buf_prd->addr = cpu_to_le64(buf_dma);
614
buf_prd->im_len.len = cpu_to_le32(buf_len);
965
#define PRD_CHAINED_ENTRY 0x01
966
if ((mvi->pdev->revision == VANIR_A0_REV) ||
967
(mvi->pdev->revision == VANIR_B0_REV))
968
buf_dma = (phy_mask <= 0x08) ?
969
mvi->bulk_buffer_dma : mvi->bulk_buffer_dma1;
973
for (i = from; i < MAX_SG_ENTRY; i++, ++buf_prd) {
974
if (i == MAX_SG_ENTRY - 1) {
975
buf_prd->addr = cpu_to_le64(virt_to_phys(buf_prd - 1));
977
im_len.misc_ctl = PRD_CHAINED_ENTRY;
979
buf_prd->addr = cpu_to_le64(buf_dma);
980
im_len.len = buf_len;
982
buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len);
621
* FIXME JEJB: temporary nop clear_srs_irq to make 94xx still work
624
static void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set,
986
static void mvs_94xx_tune_interrupt(struct mvs_info *mvi, u32 time)
988
void __iomem *regs = mvi->regs;
991
* the max count is 0x1ff, while our max slot is 0x200,
992
* it will make count 0.
995
mw32(MVS_INT_COAL, 0);
996
mw32(MVS_INT_COAL_TMOUT, 0x10000);
998
if (MVS_CHIP_SLOT_SZ > 0x1ff)
999
mw32(MVS_INT_COAL, 0x1ff|COAL_EN);
1001
mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN);
1003
tmp = 0x10000 | time;
1004
mw32(MVS_INT_COAL_TMOUT, tmp);
629
1009
const struct mvs_dispatch mvs_94xx_dispatch = {