2
* Copyright (c) 2010 Broadcom Corporation
4
* Permission to use, copy, modify, and/or distribute this software for any
5
* purpose with or without fee is hereby granted, provided that the above
6
* copyright notice and this permission notice appear in all copies.
8
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17
#include <linux/delay.h>
18
#include <linux/kernel.h>
19
#include <linux/string.h>
21
#include <linux/module.h>
22
#include <linux/pci.h>
30
/* ********** from siutils.c *********** */
32
#include <pcie_core.h>
38
#define BCM47162_DMP() ((sih->chip == BCM47162_CHIP_ID) && \
39
(sih->chiprev == 0) && \
40
(sii->coreid[sii->curidx] == MIPS74K_CORE_ID))
45
get_erom_ent(si_t *sih, u32 **eromptr, u32 mask, u32 match)
48
uint inv = 0, nom = 0;
51
ent = R_REG(*eromptr);
57
if ((ent & ER_VALID) == 0) {
62
if (ent == (ER_END | ER_VALID))
65
if ((ent & mask) == match)
71
SI_VMSG(("%s: Returning ent 0x%08x\n", __func__, ent));
73
SI_VMSG((" after %d invalid and %d non-matching entries\n",
80
get_asd(si_t *sih, u32 **eromptr, uint sp, uint ad, uint st,
81
u32 *addrl, u32 *addrh, u32 *sizel, u32 *sizeh)
85
asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
86
if (((asd & ER_TAG1) != ER_ADD) ||
87
(((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
88
((asd & AD_ST_MASK) != st)) {
89
/* This is not what we want, "push" it back */
93
*addrl = asd & AD_ADDR_MASK;
95
*addrh = get_erom_ent(sih, eromptr, 0, 0);
99
sz = asd & AD_SZ_MASK;
100
if (sz == AD_SZ_SZD) {
101
szd = get_erom_ent(sih, eromptr, 0, 0);
102
*sizel = szd & SD_SZ_MASK;
104
*sizeh = get_erom_ent(sih, eromptr, 0, 0);
106
*sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
108
SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
109
sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
114
static void ai_hwfixup(si_info_t *sii)
118
/* parse the enumeration rom to identify all cores */
119
void ai_scan(si_t *sih, void *regs, uint devid)
121
si_info_t *sii = SI_INFO(sih);
122
chipcregs_t *cc = (chipcregs_t *) regs;
123
u32 erombase, *eromptr, *eromlim;
125
erombase = R_REG(&cc->eromptr);
127
switch (sih->bustype) {
129
eromptr = (u32 *) REG_MAP(erombase, SI_CORE_SIZE);
133
/* Set wrappers address */
134
sii->curwrap = (void *)((unsigned long)regs + SI_CORE_SIZE);
136
/* Now point the window at the erom */
137
pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, erombase);
143
eromptr = (u32 *)(unsigned long)erombase;
147
SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n",
151
eromlim = eromptr + (ER_REMAPCONTROL / sizeof(u32));
153
SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n", regs, erombase, eromptr, eromlim));
154
while (eromptr < eromlim) {
155
u32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
156
u32 mpd, asd, addrl, addrh, sizel, sizeh;
163
/* Grok a component */
164
cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
165
if (cia == (ER_END | ER_VALID)) {
166
SI_VMSG(("Found END of erom after %d cores\n",
172
cib = get_erom_ent(sih, &eromptr, 0, 0);
174
if ((cib & ER_TAG) != ER_CI) {
175
SI_ERROR(("CIA not followed by CIB\n"));
179
cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
180
mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
181
crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
182
nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
183
nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
184
nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
185
nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
187
SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, " "nsw = %d, nmp = %d & nsp = %d\n", mfg, cid, crev, base, nmw, nsw, nmp, nsp));
189
if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
191
if ((nmw + nsw == 0)) {
192
/* A component which is not a core */
193
if (cid == OOB_ROUTER_CORE_ID) {
194
asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
195
&addrl, &addrh, &sizel, &sizeh);
197
sii->oob_router = addrl;
204
/* sii->eromptr[idx] = base; */
207
sii->coreid[idx] = cid;
209
for (i = 0; i < nmp; i++) {
210
mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
211
if ((mpd & ER_TAG) != ER_MP) {
212
SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
215
SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
216
(mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
217
(mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
220
/* First Slave Address Descriptor should be port 0:
221
* the main register space for the core
224
get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh,
227
/* Try again to see if it is a bridge */
229
get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl,
230
&addrh, &sizel, &sizeh);
233
else if ((addrh != 0) || (sizeh != 0)
234
|| (sizel != SI_CORE_SIZE)) {
235
SI_ERROR(("First Slave ASD for core 0x%04x malformed " "(0x%08x)\n", cid, asd));
239
sii->coresba[idx] = addrl;
240
sii->coresba_size[idx] = sizel;
241
/* Get any more ASDs in port 0 */
245
get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl,
246
&addrh, &sizel, &sizeh);
247
if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
248
sii->coresba2[idx] = addrl;
249
sii->coresba2_size[idx] = sizel;
254
/* Go through the ASDs for other slave ports */
255
for (i = 1; i < nsp; i++) {
259
get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE,
260
&addrl, &addrh, &sizel, &sizeh);
263
SI_ERROR((" SP %d has no address descriptors\n",
269
/* Now get master wrappers */
270
for (i = 0; i < nmw; i++) {
272
get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl,
273
&addrh, &sizel, &sizeh);
275
SI_ERROR(("Missing descriptor for MW %d\n", i));
278
if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
279
SI_ERROR(("Master wrapper %d is not 4KB\n", i));
283
sii->wrapba[idx] = addrl;
286
/* And finally slave wrappers */
287
for (i = 0; i < nsw; i++) {
288
uint fwp = (nsp == 1) ? 0 : 1;
290
get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP,
291
&addrl, &addrh, &sizel, &sizeh);
293
SI_ERROR(("Missing descriptor for SW %d\n", i));
296
if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
297
SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
300
if ((nmw == 0) && (i == 0))
301
sii->wrapba[idx] = addrl;
304
/* Don't record bridges */
312
SI_ERROR(("Reached end of erom without finding END"));
319
/* This function changes the logical "focus" to the indicated core.
320
* Return the current core's virtual address.
322
void *ai_setcoreidx(si_t *sih, uint coreidx)
324
si_info_t *sii = SI_INFO(sih);
325
u32 addr = sii->coresba[coreidx];
326
u32 wrap = sii->wrapba[coreidx];
329
if (coreidx >= sii->numcores)
332
switch (sih->bustype) {
335
if (!sii->regs[coreidx]) {
336
sii->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE);
338
sii->curmap = regs = sii->regs[coreidx];
339
if (!sii->wrappers[coreidx]) {
340
sii->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
342
sii->curwrap = sii->wrappers[coreidx];
346
/* point bar0 window */
347
pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, addr);
349
/* point bar0 2nd 4KB window */
350
pci_write_config_dword(sii->pbus, PCI_BAR0_WIN2, wrap);
355
sii->curmap = regs = (void *)(unsigned long)addr;
356
sii->curwrap = (void *)(unsigned long)wrap;
365
sii->curidx = coreidx;
370
/* Return the number of address spaces in current core */
371
int ai_numaddrspaces(si_t *sih)
376
/* Return the address of the nth address space in the current core */
377
u32 ai_addrspace(si_t *sih, uint asidx)
386
return sii->coresba[cidx];
388
return sii->coresba2[cidx];
390
SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", __func__, asidx));
395
/* Return the size of the nth address space in the current core */
396
u32 ai_addrspacesize(si_t *sih, uint asidx)
405
return sii->coresba_size[cidx];
407
return sii->coresba2_size[cidx];
409
SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", __func__, asidx));
414
uint ai_flag(si_t *sih)
420
if (BCM47162_DMP()) {
421
SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __func__));
426
return R_REG(&ai->oobselouta30) & 0x1f;
429
void ai_setint(si_t *sih, int siflag)
433
uint ai_corevendor(si_t *sih)
439
cia = sii->cia[sii->curidx];
440
return (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
443
uint ai_corerev(si_t *sih)
449
cib = sii->cib[sii->curidx];
450
return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
453
bool ai_iscoreup(si_t *sih)
461
return (((R_REG(&ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) ==
463
&& ((R_REG(&ai->resetctrl) & AIRC_RESET) == 0));
466
void ai_core_cflags_wo(si_t *sih, u32 mask, u32 val)
474
if (BCM47162_DMP()) {
475
SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
483
w = ((R_REG(&ai->ioctrl) & ~mask) | val);
484
W_REG(&ai->ioctrl, w);
488
u32 ai_core_cflags(si_t *sih, u32 mask, u32 val)
495
if (BCM47162_DMP()) {
496
SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
504
w = ((R_REG(&ai->ioctrl) & ~mask) | val);
505
W_REG(&ai->ioctrl, w);
508
return R_REG(&ai->ioctrl);
511
u32 ai_core_sflags(si_t *sih, u32 mask, u32 val)
518
if (BCM47162_DMP()) {
519
SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0", __func__));
526
w = ((R_REG(&ai->iostatus) & ~mask) | val);
527
W_REG(&ai->iostatus, w);
530
return R_REG(&ai->iostatus);
533
/* *************** from siutils.c ************** */
534
/* local prototypes */
535
static si_info_t *ai_doattach(si_info_t *sii, uint devid, void *regs,
536
uint bustype, void *sdh, char **vars,
538
static bool ai_buscore_prep(si_info_t *sii, uint bustype, uint devid,
540
static bool ai_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype,
541
u32 savewin, uint *origidx, void *regs);
542
static void ai_nvram_process(si_info_t *sii, char *pvars);
544
/* dev path concatenation util */
545
static char *ai_devpathvar(si_t *sih, char *var, int len, const char *name);
546
static bool _ai_clkctl_cc(si_info_t *sii, uint mode);
547
static bool ai_ispcie(si_info_t *sii);
549
/* global variable to indicate reservation/release of gpio's */
550
static u32 ai_gpioreservation;
553
* Allocate a si handle.
554
* devid - pci device id (used to determine chip#)
555
* osh - opaque OS handle
556
* regs - virtual address of initial core registers
557
* bustype - pci/sb/sdio/etc
558
* vars - pointer to a pointer area for "environment" variables
559
* varsz - pointer to int to return the size of the vars
561
si_t *ai_attach(uint devid, void *regs, uint bustype,
562
void *sdh, char **vars, uint *varsz)
566
/* alloc si_info_t */
567
sii = kmalloc(sizeof(si_info_t), GFP_ATOMIC);
569
SI_ERROR(("si_attach: malloc failed!\n"));
573
if (ai_doattach(sii, devid, regs, bustype, sdh, vars, varsz) ==
578
sii->vars = vars ? *vars : NULL;
579
sii->varsz = varsz ? *varsz : 0;
584
/* global kernel resource */
585
static si_info_t ksii;
587
static bool ai_buscore_prep(si_info_t *sii, uint bustype, uint devid,
590
/* kludge to enable the clock on the 4306 which lacks a slowclock */
591
if (bustype == PCI_BUS && !ai_ispcie(sii))
592
ai_clkctl_xtal(&sii->pub, XTAL | PLL, ON);
596
static bool ai_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype,
597
u32 savewin, uint *origidx, void *regs)
601
uint pciidx, pcieidx, pcirev, pcierev;
603
cc = ai_setcoreidx(&sii->pub, SI_CC_IDX);
605
/* get chipcommon rev */
606
sii->pub.ccrev = (int)ai_corerev(&sii->pub);
608
/* get chipcommon chipstatus */
609
if (sii->pub.ccrev >= 11)
610
sii->pub.chipst = R_REG(&cc->chipstatus);
612
/* get chipcommon capabilites */
613
sii->pub.cccaps = R_REG(&cc->capabilities);
614
/* get chipcommon extended capabilities */
616
if (sii->pub.ccrev >= 35)
617
sii->pub.cccaps_ext = R_REG(&cc->capabilities_ext);
619
/* get pmu rev and caps */
620
if (sii->pub.cccaps & CC_CAP_PMU) {
621
sii->pub.pmucaps = R_REG(&cc->pmucapabilities);
622
sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
625
/* figure out bus/orignal core idx */
626
sii->pub.buscoretype = NODEV_CORE_ID;
627
sii->pub.buscorerev = NOREV;
628
sii->pub.buscoreidx = BADIDX;
631
pcirev = pcierev = NOREV;
632
pciidx = pcieidx = BADIDX;
634
for (i = 0; i < sii->numcores; i++) {
637
ai_setcoreidx(&sii->pub, i);
638
cid = ai_coreid(&sii->pub);
639
crev = ai_corerev(&sii->pub);
641
/* Display cores found */
642
SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
643
i, cid, crev, sii->coresba[i], sii->regs[i]));
645
if (bustype == PCI_BUS) {
646
if (cid == PCI_CORE_ID) {
650
} else if (cid == PCIE_CORE_ID) {
657
/* find the core idx before entering this func. */
658
if ((savewin && (savewin == sii->coresba[i])) ||
659
(regs == sii->regs[i]))
670
sii->pub.buscoretype = PCI_CORE_ID;
671
sii->pub.buscorerev = pcirev;
672
sii->pub.buscoreidx = pciidx;
674
sii->pub.buscoretype = PCIE_CORE_ID;
675
sii->pub.buscorerev = pcierev;
676
sii->pub.buscoreidx = pcieidx;
679
SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx,
680
sii->pub.buscoretype, sii->pub.buscorerev));
682
/* fixup necessary chip/core configurations */
683
if (sii->pub.bustype == PCI_BUS) {
686
sii->pch = (void *)pcicore_init(
687
&sii->pub, sii->pbus,
688
(void *)PCIEREGS(sii));
689
if (sii->pch == NULL)
693
if (ai_pci_fixcfg(&sii->pub)) {
694
SI_ERROR(("si_doattach: si_pci_fixcfg failed\n"));
699
/* return to the original core */
700
ai_setcoreidx(&sii->pub, *origidx);
705
static __used void ai_nvram_process(si_info_t *sii, char *pvars)
709
/* get boardtype and boardrev */
710
switch (sii->pub.bustype) {
712
/* do a pci config read to get subsystem id and subvendor id */
713
pci_read_config_dword(sii->pbus, PCI_SUBSYSTEM_VENDOR_ID, &w);
714
/* Let nvram variables override subsystem Vend/ID */
715
sii->pub.boardvendor = (u16)ai_getdevpathintvar(&sii->pub,
717
if (sii->pub.boardvendor == 0)
718
sii->pub.boardvendor = w & 0xffff;
720
SI_ERROR(("Overriding boardvendor: 0x%x instead of "
721
"0x%x\n", sii->pub.boardvendor, w & 0xffff));
722
sii->pub.boardtype = (u16)ai_getdevpathintvar(&sii->pub,
724
if (sii->pub.boardtype == 0)
725
sii->pub.boardtype = (w >> 16) & 0xffff;
727
SI_ERROR(("Overriding boardtype: 0x%x instead of 0x%x\n"
728
, sii->pub.boardtype, (w >> 16) & 0xffff));
731
sii->pub.boardvendor = getintvar(pvars, "manfid");
732
sii->pub.boardtype = getintvar(pvars, "prodid");
737
sii->pub.boardvendor = PCI_VENDOR_ID_BROADCOM;
738
sii->pub.boardtype = getintvar(pvars, "prodid");
739
if (pvars == NULL || (sii->pub.boardtype == 0)) {
740
sii->pub.boardtype = getintvar(NULL, "boardtype");
741
if (sii->pub.boardtype == 0)
742
sii->pub.boardtype = 0xffff;
747
if (sii->pub.boardtype == 0) {
748
SI_ERROR(("si_doattach: unknown board type\n"));
751
sii->pub.boardflags = getintvar(pvars, "boardflags");
754
static si_info_t *ai_doattach(si_info_t *sii, uint devid,
755
void *regs, uint bustype, void *pbus,
756
char **vars, uint *varsz)
758
struct si_pub *sih = &sii->pub;
765
memset((unsigned char *) sii, 0, sizeof(si_info_t));
769
sih->buscoreidx = BADIDX;
774
/* check to see if we are a si core mimic'ing a pci core */
775
if (bustype == PCI_BUS) {
776
pci_read_config_dword(sii->pbus, PCI_SPROM_CONTROL, &w);
777
if (w == 0xffffffff) {
778
SI_ERROR(("%s: incoming bus is PCI but it's a lie, "
779
" switching to SI devid:0x%x\n",
785
/* find Chipcommon address */
786
if (bustype == PCI_BUS) {
787
pci_read_config_dword(sii->pbus, PCI_BAR0_WIN, &savewin);
788
if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
789
savewin = SI_ENUM_BASE;
790
pci_write_config_dword(sii->pbus, PCI_BAR0_WIN,
792
cc = (chipcregs_t *) regs;
794
cc = (chipcregs_t *) REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
797
sih->bustype = bustype;
799
/* bus/core/clk setup for register access */
800
if (!ai_buscore_prep(sii, bustype, devid, pbus)) {
801
SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n",
807
* ChipID recognition.
808
* We assume we can read chipid at offset 0 from the regs arg.
809
* If we add other chiptypes (or if we need to support old sdio
810
* hosts w/o chipcommon), some way of recognizing them needs to
813
w = R_REG(&cc->chipid);
814
socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
815
/* Might as wll fill in chip id rev & pkg */
816
sih->chip = w & CID_ID_MASK;
817
sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
818
sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
820
sih->issim = IS_SIM(sih->chippkg);
823
if (socitype == SOCI_AI) {
824
SI_MSG(("Found chip type AI (0x%08x)\n", w));
825
/* pass chipc address instead of original core base */
826
ai_scan(&sii->pub, (void *)cc, devid);
828
SI_ERROR(("Found chip of unknown type (0x%08x)\n", w));
831
/* no cores found, bail out */
832
if (sii->numcores == 0) {
833
SI_ERROR(("si_doattach: could not find any cores\n"));
836
/* bus/core/clk setup */
838
if (!ai_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) {
839
SI_ERROR(("si_doattach: si_buscore_setup failed\n"));
843
/* assume current core is CC */
844
if ((sii->pub.ccrev == 0x25)
846
((sih->chip == BCM43236_CHIP_ID
847
|| sih->chip == BCM43235_CHIP_ID
848
|| sih->chip == BCM43238_CHIP_ID)
849
&& (sii->pub.chiprev <= 2))) {
851
if ((cc->chipstatus & CST43236_BP_CLK) != 0) {
853
clkdiv = R_REG(&cc->clkdiv);
854
/* otp_clk_div is even number, 120/14 < 9mhz */
855
clkdiv = (clkdiv & ~CLKD_OTP) | (14 << CLKD_OTP_SHIFT);
856
W_REG(&cc->clkdiv, clkdiv);
857
SI_ERROR(("%s: set clkdiv to %x\n", __func__, clkdiv));
862
/* Init nvram from flash if it exists */
865
/* Init nvram from sprom/otp if they exist */
867
(&sii->pub, bustype, regs, vars, varsz)) {
868
SI_ERROR(("si_doattach: srom_var_init failed: bad srom\n"));
871
pvars = vars ? *vars : NULL;
872
ai_nvram_process(sii, pvars);
874
/* === NVRAM, clock is ready === */
875
cc = (chipcregs_t *) ai_setcore(sih, CC_CORE_ID, 0);
876
W_REG(&cc->gpiopullup, 0);
877
W_REG(&cc->gpiopulldown, 0);
878
ai_setcoreidx(sih, origidx);
880
/* PMU specific initializations */
881
if (PMUCTL_ENAB(sih)) {
884
si_pmu_chip_init(sih);
885
xtalfreq = getintvar(pvars, "xtalfreq");
886
/* If xtalfreq var not available, try to measure it */
888
xtalfreq = si_pmu_measure_alpclk(sih);
889
si_pmu_pll_init(sih, xtalfreq);
890
si_pmu_res_init(sih);
891
si_pmu_swreg_init(sih);
894
/* setup the GPIO based LED powersave register */
895
w = getintvar(pvars, "leddc");
897
w = DEFAULT_GPIOTIMERVAL;
898
ai_corereg(sih, SI_CC_IDX, offsetof(chipcregs_t, gpiotimerval), ~0, w);
901
pcicore_attach(sii->pch, pvars, SI_DOATTACH);
904
if ((sih->chip == BCM43224_CHIP_ID) ||
905
(sih->chip == BCM43421_CHIP_ID)) {
907
* enable 12 mA drive strenth for 43224 and
908
* set chipControl register bit 15
910
if (sih->chiprev == 0) {
911
SI_MSG(("Applying 43224A0 WARs\n"));
912
ai_corereg(sih, SI_CC_IDX,
913
offsetof(chipcregs_t, chipcontrol),
914
CCTRL43224_GPIO_TOGGLE,
915
CCTRL43224_GPIO_TOGGLE);
916
si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE,
917
CCTRL_43224A0_12MA_LED_DRIVE);
919
if (sih->chiprev >= 1) {
920
SI_MSG(("Applying 43224B0+ WARs\n"));
921
si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
922
CCTRL_43224B0_12MA_LED_DRIVE);
926
if (sih->chip == BCM4313_CHIP_ID) {
928
* enable 12 mA drive strenth for 4313 and
929
* set chipControl register bit 1
931
SI_MSG(("Applying 4313 WARs\n"));
932
si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE,
933
CCTRL_4313_12MA_LED_DRIVE);
936
if (sih->chip == BCM4331_CHIP_ID) {
937
/* Enable Ext PA lines depending on chip package option */
938
ai_chipcontrl_epa4331(sih, true);
943
if (sih->bustype == PCI_BUS) {
945
pcicore_deinit(sii->pch);
952
/* may be called with core in reset */
953
void ai_detach(si_t *sih)
958
struct si_pub *si_local = NULL;
959
bcopy(&sih, &si_local, sizeof(si_t **));
966
if (sih->bustype == SI_BUS)
967
for (idx = 0; idx < SI_MAXCORES; idx++)
968
if (sii->regs[idx]) {
969
iounmap(sii->regs[idx]);
970
sii->regs[idx] = NULL;
973
nvram_exit(); /* free up nvram buffers */
975
if (sih->bustype == PCI_BUS) {
977
pcicore_deinit(sii->pch);
985
/* register driver interrupt disabling and restoring callback functions */
987
ai_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
988
void *intrsenabled_fn, void *intr_arg)
993
sii->intr_arg = intr_arg;
994
sii->intrsoff_fn = (si_intrsoff_t) intrsoff_fn;
995
sii->intrsrestore_fn = (si_intrsrestore_t) intrsrestore_fn;
996
sii->intrsenabled_fn = (si_intrsenabled_t) intrsenabled_fn;
997
/* save current core id. when this function called, the current core
998
* must be the core which provides driver functions(il, et, wl, etc.)
1000
sii->dev_coreid = sii->coreid[sii->curidx];
1003
void ai_deregister_intr_callback(si_t *sih)
1008
sii->intrsoff_fn = NULL;
1011
uint ai_coreid(si_t *sih)
1016
return sii->coreid[sii->curidx];
1019
uint ai_coreidx(si_t *sih)
1027
bool ai_backplane64(si_t *sih)
1029
return (sih->cccaps & CC_CAP_BKPLN64) != 0;
1032
/* return index of coreid or BADIDX if not found */
1033
uint ai_findcoreidx(si_t *sih, uint coreid, uint coreunit)
1043
for (i = 0; i < sii->numcores; i++)
1044
if (sii->coreid[i] == coreid) {
1045
if (found == coreunit)
1054
* This function changes logical "focus" to the indicated core;
1055
* must be called with interrupts off.
1056
* Moreover, callers should keep interrupts off during switching
1057
* out of and back to d11 core.
1059
void *ai_setcore(si_t *sih, uint coreid, uint coreunit)
1063
idx = ai_findcoreidx(sih, coreid, coreunit);
1067
return ai_setcoreidx(sih, idx);
1070
/* Turn off interrupt as required by ai_setcore, before switch core */
1071
void *ai_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val)
1079
/* Overloading the origidx variable to remember the coreid,
1080
* this works because the core ids cannot be confused with
1084
if (coreid == CC_CORE_ID)
1085
return (void *)CCREGS_FAST(sii);
1086
else if (coreid == sih->buscoretype)
1087
return (void *)PCIEREGS(sii);
1089
INTR_OFF(sii, *intr_val);
1090
*origidx = sii->curidx;
1091
cc = ai_setcore(sih, coreid, 0);
1095
/* restore coreidx and restore interrupt */
1096
void ai_restore_core(si_t *sih, uint coreid, uint intr_val)
1102
&& ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
1105
ai_setcoreidx(sih, coreid);
1106
INTR_RESTORE(sii, intr_val);
1109
void ai_write_wrapperreg(si_t *sih, u32 offset, u32 val)
1111
si_info_t *sii = SI_INFO(sih);
1112
u32 *w = (u32 *) sii->curwrap;
1113
W_REG(w + (offset / 4), val);
1118
* Switch to 'coreidx', issue a single arbitrary 32bit register mask&set
1119
* operation, switch back to the original core, and return the new value.
1121
* When using the silicon backplane, no fiddling with interrupts or core
1122
* switches is needed.
1124
* Also, when using pci/pcie, we can optimize away the core switching for pci
1125
* registers and (on newer pci cores) chipcommon registers.
1127
uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
1138
if (coreidx >= SI_MAXCORES)
1141
if (sih->bustype == SI_BUS) {
1142
/* If internal bus, we can always get at everything */
1144
/* map if does not exist */
1145
if (!sii->regs[coreidx]) {
1146
sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
1149
r = (u32 *) ((unsigned char *) sii->regs[coreidx] + regoff);
1150
} else if (sih->bustype == PCI_BUS) {
1152
* If pci/pcie, we can get at pci/pcie regs
1153
* and on newer cores to chipc
1155
if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
1156
/* Chipc registers are mapped at 12KB */
1159
r = (u32 *) ((char *)sii->curmap +
1160
PCI_16KB0_CCREGS_OFFSET + regoff);
1161
} else if (sii->pub.buscoreidx == coreidx) {
1163
* pci registers are at either in the last 2KB of
1164
* an 8KB window or, in pcie and pci rev 13 at 8KB
1168
r = (u32 *) ((char *)sii->curmap +
1169
PCI_16KB0_PCIREGS_OFFSET +
1172
r = (u32 *) ((char *)sii->curmap +
1173
((regoff >= SBCONFIGOFF) ?
1174
PCI_BAR0_PCISBR_OFFSET :
1175
PCI_BAR0_PCIREGS_OFFSET) +
1181
INTR_OFF(sii, intr_val);
1183
/* save current core index */
1184
origidx = ai_coreidx(&sii->pub);
1187
r = (u32 *) ((unsigned char *) ai_setcoreidx(&sii->pub, coreidx)
1193
w = (R_REG(r) & ~mask) | val;
1201
/* restore core index */
1202
if (origidx != coreidx)
1203
ai_setcoreidx(&sii->pub, origidx);
1205
INTR_RESTORE(sii, intr_val);
1211
void ai_core_disable(si_t *sih, u32 bits)
1221
/* if core is already in reset, just return */
1222
if (R_REG(&ai->resetctrl) & AIRC_RESET)
1225
W_REG(&ai->ioctrl, bits);
1226
dummy = R_REG(&ai->ioctrl);
1229
W_REG(&ai->resetctrl, AIRC_RESET);
1233
/* reset and re-enable a core
1235
* bits - core specific bits that are set during and after reset sequence
1236
* resetbits - core specific bits that are set only during reset sequence
1238
void ai_core_reset(si_t *sih, u32 bits, u32 resetbits)
1248
* Must do the disable sequence first to work
1249
* for arbitrary current core state.
1251
ai_core_disable(sih, (bits | resetbits));
1254
* Now do the initialization sequence.
1256
W_REG(&ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
1257
dummy = R_REG(&ai->ioctrl);
1258
W_REG(&ai->resetctrl, 0);
1261
W_REG(&ai->ioctrl, (bits | SICF_CLOCK_EN));
1262
dummy = R_REG(&ai->ioctrl);
1266
/* return the slow clock source - LPO, XTAL, or PCI */
1267
static uint ai_slowclk_src(si_info_t *sii)
1272
if (sii->pub.ccrev < 6) {
1273
if (sii->pub.bustype == PCI_BUS) {
1274
pci_read_config_dword(sii->pbus, PCI_GPIO_OUT,
1276
if (val & PCI_CFG_GPIO_SCS)
1280
} else if (sii->pub.ccrev < 10) {
1281
cc = (chipcregs_t *) ai_setcoreidx(&sii->pub, sii->curidx);
1282
return R_REG(&cc->slow_clk_ctl) & SCC_SS_MASK;
1283
} else /* Insta-clock */
1288
* return the ILP (slowclock) min or max frequency
1289
* precondition: we've established the chip has dynamic clk control
1291
static uint ai_slowclk_freq(si_info_t *sii, bool max_freq, chipcregs_t *cc)
1296
slowclk = ai_slowclk_src(sii);
1297
if (sii->pub.ccrev < 6) {
1298
if (slowclk == SCC_SS_PCI)
1299
return max_freq ? (PCIMAXFREQ / 64)
1300
: (PCIMINFREQ / 64);
1302
return max_freq ? (XTALMAXFREQ / 32)
1303
: (XTALMINFREQ / 32);
1304
} else if (sii->pub.ccrev < 10) {
1306
(((R_REG(&cc->slow_clk_ctl) & SCC_CD_MASK) >>
1308
if (slowclk == SCC_SS_LPO)
1309
return max_freq ? LPOMAXFREQ : LPOMINFREQ;
1310
else if (slowclk == SCC_SS_XTAL)
1311
return max_freq ? (XTALMAXFREQ / div)
1312
: (XTALMINFREQ / div);
1313
else if (slowclk == SCC_SS_PCI)
1314
return max_freq ? (PCIMAXFREQ / div)
1315
: (PCIMINFREQ / div);
1317
/* Chipc rev 10 is InstaClock */
1318
div = R_REG(&cc->system_clk_ctl) >> SYCC_CD_SHIFT;
1319
div = 4 * (div + 1);
1320
return max_freq ? XTALMAXFREQ : (XTALMINFREQ / div);
1325
static void ai_clkctl_setdelay(si_info_t *sii, void *chipcregs)
1327
chipcregs_t *cc = (chipcregs_t *) chipcregs;
1328
uint slowmaxfreq, pll_delay, slowclk;
1329
uint pll_on_delay, fref_sel_delay;
1331
pll_delay = PLL_DELAY;
1334
* If the slow clock is not sourced by the xtal then
1335
* add the xtal_on_delay since the xtal will also be
1336
* powered down by dynamic clk control logic.
1339
slowclk = ai_slowclk_src(sii);
1340
if (slowclk != SCC_SS_XTAL)
1341
pll_delay += XTAL_ON_DELAY;
1343
/* Starting with 4318 it is ILP that is used for the delays */
1345
ai_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? false : true, cc);
1347
pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
1348
fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
1350
W_REG(&cc->pll_on_delay, pll_on_delay);
1351
W_REG(&cc->fref_sel_delay, fref_sel_delay);
1354
/* initialize power control delay registers */
1355
void ai_clkctl_init(si_t *sih)
1362
if (!CCCTL_ENAB(sih))
1366
fast = SI_FAST(sii);
1368
origidx = sii->curidx;
1369
cc = (chipcregs_t *) ai_setcore(sih, CC_CORE_ID, 0);
1373
cc = (chipcregs_t *) CCREGS_FAST(sii);
1378
/* set all Instaclk chip ILP to 1 MHz */
1379
if (sih->ccrev >= 10)
1380
SET_REG(&cc->system_clk_ctl, SYCC_CD_MASK,
1381
(ILP_DIV_1MHZ << SYCC_CD_SHIFT));
1383
ai_clkctl_setdelay(sii, (void *)cc);
1386
ai_setcoreidx(sih, origidx);
1390
* return the value suitable for writing to the
1391
* dot11 core FAST_PWRUP_DELAY register
1393
u16 ai_clkctl_fast_pwrup_delay(si_t *sih)
1404
if (PMUCTL_ENAB(sih)) {
1405
INTR_OFF(sii, intr_val);
1406
fpdelay = si_pmu_fast_pwrup_delay(sih);
1407
INTR_RESTORE(sii, intr_val);
1411
if (!CCCTL_ENAB(sih))
1414
fast = SI_FAST(sii);
1417
origidx = sii->curidx;
1418
INTR_OFF(sii, intr_val);
1419
cc = (chipcregs_t *) ai_setcore(sih, CC_CORE_ID, 0);
1423
cc = (chipcregs_t *) CCREGS_FAST(sii);
1428
slowminfreq = ai_slowclk_freq(sii, false, cc);
1429
fpdelay = (((R_REG(&cc->pll_on_delay) + 2) * 1000000) +
1430
(slowminfreq - 1)) / slowminfreq;
1434
ai_setcoreidx(sih, origidx);
1435
INTR_RESTORE(sii, intr_val);
1440
/* turn primary xtal and/or pll off/on */
1441
int ai_clkctl_xtal(si_t *sih, uint what, bool on)
1448
switch (sih->bustype) {
1451
/* pcie core doesn't have any mapping to control the xtal pu */
1455
pci_read_config_dword(sii->pbus, PCI_GPIO_IN, &in);
1456
pci_read_config_dword(sii->pbus, PCI_GPIO_OUT, &out);
1457
pci_read_config_dword(sii->pbus, PCI_GPIO_OUTEN, &outen);
1460
* Avoid glitching the clock if GPRS is already using it.
1461
* We can't actually read the state of the PLLPD so we infer it
1462
* by the value of XTAL_PU which *is* readable via gpioin.
1464
if (on && (in & PCI_CFG_GPIO_XTAL))
1468
outen |= PCI_CFG_GPIO_XTAL;
1470
outen |= PCI_CFG_GPIO_PLL;
1473
/* turn primary xtal on */
1475
out |= PCI_CFG_GPIO_XTAL;
1477
out |= PCI_CFG_GPIO_PLL;
1478
pci_write_config_dword(sii->pbus,
1480
pci_write_config_dword(sii->pbus,
1481
PCI_GPIO_OUTEN, outen);
1482
udelay(XTAL_ON_DELAY);
1487
out &= ~PCI_CFG_GPIO_PLL;
1488
pci_write_config_dword(sii->pbus,
1494
out &= ~PCI_CFG_GPIO_XTAL;
1496
out |= PCI_CFG_GPIO_PLL;
1497
pci_write_config_dword(sii->pbus,
1499
pci_write_config_dword(sii->pbus,
1500
PCI_GPIO_OUTEN, outen);
1511
* clock control policy function throught chipcommon
1513
* set dynamic clk control mode (forceslow, forcefast, dynamic)
1514
* returns true if we are forcing fast clock
1515
* this is a wrapper over the next internal function
1516
* to allow flexible policy settings for outside caller
1518
bool ai_clkctl_cc(si_t *sih, uint mode)
1524
/* chipcommon cores prior to rev6 don't support dynamic clock control */
1528
if (PCI_FORCEHT(sii))
1529
return mode == CLK_FAST;
1531
return _ai_clkctl_cc(sii, mode);
1534
/* clk control mechanism through chipcommon, no policy checking */
1535
static bool _ai_clkctl_cc(si_info_t *sii, uint mode)
1541
bool fast = SI_FAST(sii);
1543
/* chipcommon cores prior to rev6 don't support dynamic clock control */
1544
if (sii->pub.ccrev < 6)
1548
INTR_OFF(sii, intr_val);
1549
origidx = sii->curidx;
1551
if ((sii->pub.bustype == SI_BUS) &&
1552
ai_setcore(&sii->pub, MIPS33_CORE_ID, 0) &&
1553
(ai_corerev(&sii->pub) <= 7) && (sii->pub.ccrev >= 10))
1556
cc = (chipcregs_t *) ai_setcore(&sii->pub, CC_CORE_ID, 0);
1558
cc = (chipcregs_t *) CCREGS_FAST(sii);
1563
if (!CCCTL_ENAB(&sii->pub) && (sii->pub.ccrev < 20))
1567
case CLK_FAST: /* FORCEHT, fast (pll) clock */
1568
if (sii->pub.ccrev < 10) {
1570
* don't forget to force xtal back
1571
* on before we clear SCC_DYN_XTAL..
1573
ai_clkctl_xtal(&sii->pub, XTAL, ON);
1574
SET_REG(&cc->slow_clk_ctl,
1575
(SCC_XC | SCC_FS | SCC_IP), SCC_IP);
1576
} else if (sii->pub.ccrev < 20) {
1577
OR_REG(&cc->system_clk_ctl, SYCC_HR);
1579
OR_REG(&cc->clk_ctl_st, CCS_FORCEHT);
1582
/* wait for the PLL */
1583
if (PMUCTL_ENAB(&sii->pub)) {
1584
u32 htavail = CCS_HTAVAIL;
1585
SPINWAIT(((R_REG(&cc->clk_ctl_st) & htavail)
1586
== 0), PMU_MAX_TRANSITION_DLY);
1592
case CLK_DYNAMIC: /* enable dynamic clock control */
1593
if (sii->pub.ccrev < 10) {
1594
scc = R_REG(&cc->slow_clk_ctl);
1595
scc &= ~(SCC_FS | SCC_IP | SCC_XC);
1596
if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
1598
W_REG(&cc->slow_clk_ctl, scc);
1601
* for dynamic control, we have to
1602
* release our xtal_pu "force on"
1605
ai_clkctl_xtal(&sii->pub, XTAL, OFF);
1606
} else if (sii->pub.ccrev < 20) {
1608
AND_REG(&cc->system_clk_ctl, ~SYCC_HR);
1610
AND_REG(&cc->clk_ctl_st, ~CCS_FORCEHT);
1620
ai_setcoreidx(&sii->pub, origidx);
1621
INTR_RESTORE(sii, intr_val);
1623
return mode == CLK_FAST;
1626
/* Build device path. Support SI, PCI, and JTAG for now. */
1627
int ai_devpath(si_t *sih, char *path, int size)
1631
if (!path || size <= 0)
1634
switch (sih->bustype) {
1637
slen = snprintf(path, (size_t) size, "sb/%u/", ai_coreidx(sih));
1640
slen = snprintf(path, (size_t) size, "pci/%u/%u/",
1641
((struct pci_dev *)((SI_INFO(sih))->pbus))->bus->number,
1643
((struct pci_dev *)((SI_INFO(sih))->pbus))->devfn));
1651
if (slen < 0 || slen >= size) {
1659
/* Get a variable, but only if it has a devpath prefix */
1660
char *ai_getdevpathvar(si_t *sih, const char *name)
1662
char varname[SI_DEVPATH_BUFSZ + 32];
1664
ai_devpathvar(sih, varname, sizeof(varname), name);
1666
return getvar(NULL, varname);
1669
/* Get a variable, but only if it has a devpath prefix */
1670
int ai_getdevpathintvar(si_t *sih, const char *name)
1672
#if defined(BCMBUSTYPE) && (BCMBUSTYPE == SI_BUS)
1673
return getintvar(NULL, name);
1675
char varname[SI_DEVPATH_BUFSZ + 32];
1677
ai_devpathvar(sih, varname, sizeof(varname), name);
1679
return getintvar(NULL, varname);
1683
/* Get an NVRAM flash variable by bare name (no devpath prefix) */
char *ai_getnvramflvar(si_t *sih, const char *name)
{
	return getvar(NULL, name);
}
1688
/* Concatenate the dev path with a varname into the given 'var' buffer
1689
* and return the 'var' pointer. Nothing is done to the arguments if
1690
* len == 0 or var is NULL, var is still returned. On overflow, the
1691
* first char will be set to '\0'.
1693
static char *ai_devpathvar(si_t *sih, char *var, int len, const char *name)
1697
if (!var || len <= 0)
1700
if (ai_devpath(sih, var, len) == 0) {
1701
path_len = strlen(var);
1703
if (strlen(name) + 1 > (uint) (len - path_len))
1706
strncpy(var + path_len, name, len - path_len - 1);
1712
/* return true if PCIE capability exists in the pci config space */
1713
static __used bool ai_ispcie(si_info_t *sii)
1717
if (sii->pub.bustype != PCI_BUS)
1721
pcicore_find_pci_capability(sii->pbus, PCI_CAP_ID_EXP, NULL,
1729
/* WAR for hw bug 16165: applies to PCI (not PCIe) cores up to rev 10 */
bool ai_pci_war16165(si_t *sih)
{
	si_info_t *sii;

	sii = SI_INFO(sih);

	return PCI(sii) && (sih->buscorerev <= 10);
}
1738
/* Apply PCI WARs / clock settings when the chip comes up */
void ai_pci_up(si_t *sih)
{
	si_info_t *sii;

	sii = SI_INFO(sih);

	/* if not pci bus, we're done */
	if (sih->bustype != PCI_BUS)
		return;

	if (PCI_FORCEHT(sii))
		_ai_clkctl_cc(sii, CLK_FAST);

	if (PCIE(sii))
		pcicore_up(sii->pch, SI_PCIUP);
}
1756
/* Unconfigure and/or apply various WARs when system is going to sleep mode */
1757
void ai_pci_sleep(si_t *sih)
1763
pcicore_sleep(sii->pch);
1766
/* Unconfigure and/or apply various WARs when going down */
1767
void ai_pci_down(si_t *sih)
1773
/* if not pci bus, we're done */
1774
if (sih->bustype != PCI_BUS)
1777
/* release FORCEHT since chip is going to "down" state */
1778
if (PCI_FORCEHT(sii))
1779
_ai_clkctl_cc(sii, CLK_DYNAMIC);
1781
pcicore_down(sii->pch, SI_PCIDOWN);
1785
* Configure the pci core for pci client (NIC) action
1786
* coremask is the bitvec of cores by index to be enabled.
1788
void ai_pci_setup(si_t *sih, uint coremask)
1791
struct sbpciregs *pciregs = NULL;
1797
if (sii->pub.bustype != PCI_BUS)
1801
/* get current core index */
1804
/* we interrupt on this backplane flag number */
1805
siflag = ai_flag(sih);
1807
/* switch over to pci core */
1808
pciregs = ai_setcoreidx(sih, sii->pub.buscoreidx);
1812
* Enable sb->pci interrupts. Assume
1813
* PCI rev 2.3 support was added in pci core rev 6 and things changed..
1815
if (PCIE(sii) || (PCI(sii) && ((sii->pub.buscorerev) >= 6))) {
1816
/* pci config write to set this core bit in PCIIntMask */
1817
pci_read_config_dword(sii->pbus, PCI_INT_MASK, &w);
1818
w |= (coremask << PCI_SBIM_SHIFT);
1819
pci_write_config_dword(sii->pbus, PCI_INT_MASK, w);
1821
/* set sbintvec bit for our flag number */
1822
ai_setint(sih, siflag);
1826
OR_REG(&pciregs->sbtopci2,
1827
(SBTOPCI_PREF | SBTOPCI_BURST));
1828
if (sii->pub.buscorerev >= 11) {
1829
OR_REG(&pciregs->sbtopci2,
1830
SBTOPCI_RC_READMULTI);
1831
w = R_REG(&pciregs->clkrun);
1832
W_REG(&pciregs->clkrun,
1833
(w | PCI_CLKRUN_DSBL));
1834
w = R_REG(&pciregs->clkrun);
1837
/* switch back to previous core */
1838
ai_setcoreidx(sih, idx);
1843
* Fixup SROMless PCI device's configuration.
1844
* The current core may be changed upon return.
1846
int ai_pci_fixcfg(si_t *sih)
1848
uint origidx, pciidx;
1849
struct sbpciregs *pciregs = NULL;
1850
sbpcieregs_t *pcieregs = NULL;
1852
u16 val16, *reg16 = NULL;
1854
si_info_t *sii = SI_INFO(sih);
1856
/* Fixup PI in SROM shadow area to enable the correct PCI core access */
1857
/* save the current index */
1858
origidx = ai_coreidx(&sii->pub);
1860
/* check 'pi' is correct and fix it if not */
1861
if (sii->pub.buscoretype == PCIE_CORE_ID) {
1862
pcieregs = ai_setcore(&sii->pub, PCIE_CORE_ID, 0);
1864
reg16 = &pcieregs->sprom[SRSH_PI_OFFSET];
1865
} else if (sii->pub.buscoretype == PCI_CORE_ID) {
1866
pciregs = ai_setcore(&sii->pub, PCI_CORE_ID, 0);
1868
reg16 = &pciregs->sprom[SRSH_PI_OFFSET];
1870
pciidx = ai_coreidx(&sii->pub);
1871
val16 = R_REG(reg16);
1872
if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (u16) pciidx) {
1874
(u16) (pciidx << SRSH_PI_SHIFT) | (val16 &
1876
W_REG(reg16, val16);
1879
/* restore the original index */
1880
ai_setcoreidx(&sii->pub, origidx);
1882
pcicore_hwup(sii->pch);
1886
/* mask&set gpiocontrol bits */
1887
u32 ai_gpiocontrol(si_t *sih, u32 mask, u32 val, u8 priority)
1893
/* gpios could be shared on router platforms
1894
* ignore reservation if it's high priority (e.g., test apps)
1896
if ((priority != GPIO_HI_PRIORITY) &&
1897
(sih->bustype == SI_BUS) && (val || mask)) {
1898
mask = priority ? (ai_gpioreservation & mask) :
1899
((ai_gpioreservation | mask) & ~(ai_gpioreservation));
1903
regoff = offsetof(chipcregs_t, gpiocontrol);
1904
return ai_corereg(sih, SI_CC_IDX, regoff, mask, val);
1907
/* Enable/disable external PA control for the 4331 via chipcontrol */
void ai_chipcontrl_epa4331(si_t *sih, bool on)
{
	chipcregs_t *cc;
	uint origidx;
	u32 val;

	origidx = ai_coreidx(sih);

	cc = (chipcregs_t *) ai_setcore(sih, CC_CORE_ID, 0);

	val = R_REG(&cc->chipcontrol);

	if (on) {
		if (sih->chippkg == 9 || sih->chippkg == 0xb) {
			/* Ext PA Controls for 4331 12x9 Package */
			W_REG(&cc->chipcontrol, val |
			      (CCTRL4331_EXTPA_EN |
			       CCTRL4331_EXTPA_ON_GPIO2_5));
		} else {
			/* Ext PA Controls for 4331 12x12 Package */
			W_REG(&cc->chipcontrol,
			      val | (CCTRL4331_EXTPA_EN));
		}
	} else {
		val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
		W_REG(&cc->chipcontrol, val);
	}

	ai_setcoreidx(sih, origidx);
}
1940
/* Enable BT-COEX & Ex-PA for 4313 */
1941
void ai_epa_4313war(si_t *sih)
1948
origidx = ai_coreidx(sih);
1950
cc = (chipcregs_t *) ai_setcore(sih, CC_CORE_ID, 0);
1953
W_REG(&cc->gpiocontrol,
1954
R_REG(&cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);
1956
ai_setcoreidx(sih, origidx);
1959
/* check if the device is removed */
1960
bool ai_deviceremoved(si_t *sih)
1967
switch (sih->bustype) {
1969
pci_read_config_dword(sii->pbus, PCI_VENDOR_ID, &w);
1970
if ((w & 0xFFFF) != PCI_VENDOR_ID_BROADCOM)
1977
/* Report whether an SPROM is attached, per chipcommon rev / chip status */
bool ai_is_sprom_available(si_t *sih)
{
	if (sih->ccrev >= 31) {
		si_info_t *sii;
		uint origidx;
		chipcregs_t *cc;
		u32 sromctrl;

		if ((sih->cccaps & CC_CAP_SROM) == 0)
			return false;

		sii = SI_INFO(sih);
		origidx = sii->curidx;
		cc = ai_setcoreidx(sih, SI_CC_IDX);
		sromctrl = R_REG(&cc->sromcontrol);
		ai_setcoreidx(sih, origidx);
		return sromctrl & SRC_PRESENT;
	}

	switch (sih->chip) {
	case BCM4329_CHIP_ID:
		return (sih->chipst & CST4329_SPROM_SEL) != 0;
	case BCM4319_CHIP_ID:
		return (sih->chipst & CST4319_SPROM_SEL) != 0;
	case BCM4336_CHIP_ID:
		return (sih->chipst & CST4336_SPROM_PRESENT) != 0;
	case BCM4330_CHIP_ID:
		return (sih->chipst & CST4330_SPROM_PRESENT) != 0;
	case BCM4313_CHIP_ID:
		return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
	case BCM4331_CHIP_ID:
		return (sih->chipst & CST4331_SPROM_PRESENT) != 0;
	default:
		return true;
	}
}
2014
/* Report whether OTP is disabled (strapped off / absent) for this chip */
bool ai_is_otp_disabled(si_t *sih)
{
	switch (sih->chip) {
	case BCM4329_CHIP_ID:
		return (sih->chipst & CST4329_SPROM_OTP_SEL_MASK) ==
		    CST4329_OTP_PWRDN;
	case BCM4319_CHIP_ID:
		return (sih->chipst & CST4319_SPROM_OTP_SEL_MASK) ==
		    CST4319_OTP_PWRDN;
	case BCM4336_CHIP_ID:
		return (sih->chipst & CST4336_OTP_PRESENT) == 0;
	case BCM4330_CHIP_ID:
		return (sih->chipst & CST4330_OTP_PRESENT) == 0;
	case BCM4313_CHIP_ID:
		return (sih->chipst & CST4313_OTP_PRESENT) == 0;
	/* These chips always have their OTP on */
	case BCM43224_CHIP_ID:
	case BCM43225_CHIP_ID:
	case BCM43421_CHIP_ID:
	case BCM43235_CHIP_ID:
	case BCM43236_CHIP_ID:
	case BCM43238_CHIP_ID:
	case BCM4331_CHIP_ID:
	default:
		return false;
	}
}
2042
/* Report whether OTP is powered; without PMU control assume it is */
bool ai_is_otp_powered(si_t *sih)
{
	if (PMUCTL_ENAB(sih))
		return si_pmu_is_otp_powered(sih);
	return true;
}
2049
void ai_otp_power(si_t *sih, bool on)
2051
if (PMUCTL_ENAB(sih))
2052
si_pmu_otp_power(sih, on);