2
* TE410P Quad-T1/E1 PCI Driver version 0.1, 12/16/02
4
* Written by Mark Spencer <markster@digium.com>
5
* Based on previous works, designs, and architectures conceived and
6
* written by Jim Dixon <jim@lambdatel.com>.
8
* Copyright (C) 2001 Jim Dixon / Zapata Telephony.
9
* Copyright (C) 2001-2005, Digium, Inc.
11
* All rights reserved.
13
* This program is free software; you can redistribute it and/or modify
14
* it under the terms of the GNU General Public License as published by
15
* the Free Software Foundation; either version 2 of the License, or
16
* (at your option) any later version.
18
* This program is distributed in the hope that it will be useful,
19
* but WITHOUT ANY WARRANTY; without even the implied warranty of
20
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21
* GNU General Public License for more details.
23
* You should have received a copy of the GNU General Public License
24
* along with this program; if not, write to the Free Software
25
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
29
#include <linux/kernel.h>
30
#include <linux/errno.h>
31
#include <linux/module.h>
32
#include <linux/pci.h>
33
#include <linux/init.h>
34
#include <linux/sched.h>
35
#include <linux/interrupt.h>
36
#include <linux/spinlock.h>
38
#include <linux/version.h>
39
#include <linux/delay.h>
42
#include <linux/moduleparam.h>
48
/* Work queues are a way to better distribute load on SMP systems */
49
#if defined(LINUX26) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
51
* Work queues can significantly improve performance and scalability
52
* on multi-processor machines, but requires bypassing some kernel
53
* API's, so it's not guaranteed to be compatible with all kernels.
55
/* #define ENABLE_WORKQUEUES */
58
/* Enable prefetching may help performance */
59
#define ENABLE_PREFETCH
61
/* Support first generation cards? */
64
/* Define to get more attention-grabbing but slightly more I/O using
68
/* Define to support Digium Voice Processing Module expansion card */
71
#define DEBUG_MAIN (1 << 0)
72
#define DEBUG_DTMF (1 << 1)
73
#define DEBUG_REGS (1 << 2)
74
#define DEBUG_TSI (1 << 3)
75
#define DEBUG_ECHOCAN (1 << 4)
76
#define DEBUG_RBS (1 << 5)
77
#define DEBUG_FRAMER (1 << 6)
79
#ifdef ENABLE_WORKQUEUES
80
#include <linux/cpu.h>
82
/* XXX UGLY!!!! XXX We have to access the direct structures of the workqueue which
83
are only defined within workqueue.c because they don't give us a routine to allow us
84
to nail a work to a particular thread of the CPU. Nailing to threads gives us substantially
85
higher scalability in multi-CPU environments though! */
88
* The per-CPU workqueue (if single thread, we always use cpu 0's).
90
* The sequence counters are for flush_scheduled_work(). It wants to wait
91
* until all currently-scheduled works are completed, but it doesn't
92
* want to be livelocked by new, incoming ones. So it waits until
93
* remove_sequence is >= the insert_sequence which pertained when
94
* flush_scheduled_work() was called.
97
struct cpu_workqueue_struct {
101
long remove_sequence; /* Least-recently added (next to run) */
102
long insert_sequence; /* Next to add */
104
struct list_head worklist;
105
wait_queue_head_t more_work;
106
wait_queue_head_t work_done;
108
struct workqueue_struct *wq;
111
int run_depth; /* Detect run_workqueue() recursion depth */
112
} ____cacheline_aligned;
115
* The externally visible workqueue abstraction is an array of
116
* per-CPU workqueues:
118
struct workqueue_struct {
119
/* TODO: Find out exactly where the API changed */
120
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,15)
121
struct cpu_workqueue_struct *cpu_wq;
123
struct cpu_workqueue_struct cpu_wq[NR_CPUS];
126
struct list_head list; /* Empty if single thread */
129
/* Preempt must be disabled. */
130
static void __t4_queue_work(struct cpu_workqueue_struct *cwq,
131
struct work_struct *work)
135
spin_lock_irqsave(&cwq->lock, flags);
137
list_add_tail(&work->entry, &cwq->worklist);
138
cwq->insert_sequence++;
139
wake_up(&cwq->more_work);
140
spin_unlock_irqrestore(&cwq->lock, flags);
144
* Queue work on a workqueue. Return non-zero if it was successfully
147
* We queue the work to the CPU it was submitted, but there is no
148
* guarantee that it will be processed by that CPU.
150
static inline int t4_queue_work(struct workqueue_struct *wq, struct work_struct *work, int cpu)
154
if (!test_and_set_bit(0, &work->pending)) {
155
BUG_ON(!list_empty(&work->entry));
156
__t4_queue_work(wq->cpu_wq + cpu, work);
165
static int pedanticpci = 1;
167
static int timingcable = 0;
168
static int highestorder;
169
static int t1e1override = -1; //0xFF; // -1 = jumper; 0xFF = E1
170
static int j1mode = 0;
171
static int sigmode = FRMR_MODE_NO_ADDR_CMP;
172
static int loopback = 0;
173
static int alarmdebounce = 0;
175
static int vpmsupport = 1;
176
/* If set to auto, vpmdtmfsupport is enabled for VPM400M and disabled for VPM450M */
177
static int vpmdtmfsupport = -1; /* -1=auto, 0=disabled, 1=enabled*/
178
static int vpmspans = 4;
179
#define VPM_DEFAULT_DTMFTHRESHOLD 1000
180
static int dtmfthreshold = VPM_DEFAULT_DTMFTHRESHOLD;
181
static int lastdtmfthreshold = VPM_DEFAULT_DTMFTHRESHOLD;
183
/* Enabling bursting can more efficiently utilize PCI bus bandwidth, but
184
can also cause PCI bus starvation, especially in combination with other
185
aggressive cards. Please note that burst mode has no effect on CPU
186
utilization / max number of calls / etc. */
187
static int noburst = 1;
188
/* For 56kbps links, set this module parameter to 0x7f */
189
static int hardhdlcmode = 0xff;
192
static int altab[] = {
193
0, 0, 0, 1, 2, 3, 4, 6, 8, 9, 11, 13, 16, 18, 20, 22, 24, 25, 27, 28, 29, 30, 31, 31, 32, 31, 31, 30, 29, 28, 27, 25, 23, 22, 20, 18, 16, 13, 11, 9, 8, 6, 4, 3, 2, 1, 0, 0,
199
#define FLAG_STARTED (1 << 0)
200
#define FLAG_NMF (1 << 1)
201
#define FLAG_SENDINGYELLOW (1 << 2)
204
#define TYPE_T1 1 /* is a T1 card */
205
#define TYPE_E1 2 /* is an E1 card */
206
#define TYPE_J1 3 /* is a running J1 */
208
#define FLAG_2NDGEN (1 << 3)
209
#define FLAG_2PORT (1 << 4)
210
#define FLAG_VPM2GEN (1 << 5)
211
#define FLAG_OCTOPT (1 << 6)
212
#define FLAG_3RDGEN (1 << 7)
213
#define FLAG_BURST (1 << 8)
214
#define FLAG_EXPRESS (1 << 9)
216
#define CANARY 0xc0de
223
static struct devtype wct4xxp = { "Wildcard TE410P/TE405P (1st Gen)", 0 };
224
static struct devtype wct420p4 = { "Wildcard TE420 (4th Gen)", FLAG_BURST | FLAG_2NDGEN | FLAG_3RDGEN | FLAG_EXPRESS };
225
static struct devtype wct410p4 = { "Wildcard TE410P (4th Gen)", FLAG_BURST | FLAG_2NDGEN | FLAG_3RDGEN };
226
static struct devtype wct410p3 = { "Wildcard TE410P (3rd Gen)", FLAG_2NDGEN | FLAG_3RDGEN };
227
static struct devtype wct405p4 = { "Wildcard TE405P (4th Gen)", FLAG_BURST | FLAG_2NDGEN | FLAG_3RDGEN };
228
static struct devtype wct405p3 = { "Wildcard TE405P (3rd Gen)", FLAG_2NDGEN | FLAG_3RDGEN };
229
static struct devtype wct410p2 = { "Wildcard TE410P (2nd Gen)", FLAG_2NDGEN };
230
static struct devtype wct405p2 = { "Wildcard TE405P (2nd Gen)", FLAG_2NDGEN };
231
static struct devtype wct220p4 = { "Wildcard TE220 (4th Gen)", FLAG_BURST | FLAG_2NDGEN | FLAG_3RDGEN | FLAG_2PORT | FLAG_EXPRESS };
232
static struct devtype wct205p4 = { "Wildcard TE205P (4th Gen)", FLAG_BURST | FLAG_2NDGEN | FLAG_3RDGEN | FLAG_2PORT };
233
static struct devtype wct205p3 = { "Wildcard TE205P (3rd Gen)", FLAG_2NDGEN | FLAG_3RDGEN | FLAG_2PORT };
234
static struct devtype wct210p4 = { "Wildcard TE210P (4th Gen)", FLAG_BURST | FLAG_2NDGEN | FLAG_3RDGEN | FLAG_2PORT };
235
static struct devtype wct210p3 = { "Wildcard TE210P (3rd Gen)", FLAG_2NDGEN | FLAG_3RDGEN | FLAG_2PORT };
236
static struct devtype wct205 = { "Wildcard TE205P ", FLAG_2NDGEN | FLAG_2PORT };
237
static struct devtype wct210 = { "Wildcard TE210P ", FLAG_2NDGEN | FLAG_2PORT };
244
unsigned int *writechunk; /* Double-word aligned write memory */
245
unsigned int *readchunk; /* Double-word aligned read memory */
246
int spantype; /* card type, T1 or E1 or J1 */
256
int e1check; /* E1 check */
259
unsigned char txsigs[16]; /* Transmit sigs */
263
unsigned char ec_chunk1[31][ZT_CHUNKSIZE]; /* first EC chunk buffer */
264
unsigned char ec_chunk2[31][ZT_CHUNKSIZE]; /* second EC chunk buffer */
268
/* HDLC controller fields */
269
struct zt_chan *sigchan;
270
unsigned char sigmode;
276
unsigned long dtmfactive;
277
unsigned long dtmfmask;
278
unsigned long dtmfmutemask;
279
short dtmfenergy[31];
282
#ifdef ENABLE_WORKQUEUES
283
struct work_struct swork;
285
struct zt_chan chans[0]; /* Individual channels */
289
/* This structure exists one per card */
290
struct pci_dev *dev; /* Pointer to PCI device */
291
unsigned int intcount;
292
int num; /* Which card we are */
293
int t1e1; /* T1/E1 select pins */
294
int globalconfig; /* Whether global setup has been done */
295
int syncsrc; /* active sync source */
296
struct t4_span *tspans[4]; /* Individual spans */
297
int numspans; /* Number of spans on the card */
302
int irq; /* IRQ used by device */
303
int order; /* Order */
304
int flags; /* Device flags */
305
int master; /* Are we master */
306
int ledreg; /* LED Register */
308
unsigned int gpioctl;
309
int e1recover; /* E1 recovery timer */
310
spinlock_t reglock; /* lock register access */
311
int spansstarted; /* number of spans started */
312
volatile unsigned int *writechunk; /* Double-word aligned write memory */
313
volatile unsigned int *readchunk; /* Double-word aligned read memory */
314
unsigned short canary;
315
#ifdef ENABLE_WORKQUEUES
317
struct workqueue_struct *workq;
319
unsigned int passno; /* number of interrupt passes */
321
int last0; /* for detecting double-missed IRQ */
323
/* DMA related fields */
324
unsigned int dmactrl;
327
unsigned long memaddr; /* Base address of card */
328
unsigned long memlen;
329
volatile unsigned int *membase; /* Base address of card */
331
/* Add this for our softlockup protector */
332
unsigned int oct_rw_count;
334
/* Flags for our bottom half */
335
unsigned long checkflag;
336
struct tasklet_struct t4_tlet;
337
unsigned int vpm400checkstatus;
340
struct vpm450m *vpm450m;
346
#define T4_VPM_PRESENT (1 << 28)
350
static void t4_vpm400_init(struct t4 *wc);
351
static void t4_vpm450_init(struct t4 *wc);
352
static void t4_vpm_set_dtmf_threshold(struct t4 *wc, unsigned int threshold);
354
static void __set_clear(struct t4 *wc, int span);
355
static int t4_startup(struct zt_span *span);
356
static int t4_shutdown(struct zt_span *span);
357
static int t4_rbsbits(struct zt_chan *chan, int bits);
358
static int t4_maint(struct zt_span *span, int cmd);
360
static int t4_reset_dma(struct t4 *wc);
362
static void t4_hdlc_hard_xmit(struct zt_chan *chan);
363
static int t4_ioctl(struct zt_chan *chan, unsigned int cmd, unsigned long data);
364
static void t4_tsi_assign(struct t4 *wc, int fromspan, int fromchan, int tospan, int tochan);
365
static void t4_tsi_unassign(struct t4 *wc, int tospan, int tochan);
366
static void __t4_set_timing_source(struct t4 *wc, int unit, int master, int slave);
367
static void t4_check_alarms(struct t4 *wc, int span);
368
static void t4_check_sigbits(struct t4 *wc, int span);
375
/* #define WC_GPIO 5 */
382
#define WC_LCS (1 << 11)
383
#define WC_LCS2 (1 << 12)
384
#define WC_LALE (1 << 13)
385
#define WC_LFRMR_CS (1 << 10) /* Framer's ChipSelect signal */
386
#define WC_ACTIVATE (1 << 12)
387
#define WC_LREAD (1 << 15)
388
#define WC_LWRITE (1 << 16)
393
#define WC_YELLOW (3)
395
#define MAX_T4_CARDS 64
397
static void t4_isr_bh(unsigned long data);
399
static struct t4 *cards[MAX_T4_CARDS];
402
#define MAX_TDM_CHAN 32
403
#define MAX_DTMF_DET 16
405
#define HDLC_IMR0_MASK (FRMR_IMR0_RME | FRMR_IMR0_RPF)
407
#define HDLC_IMR1_MASK (FRMR_IMR1_ALLS | FRMR_IMR1_XDU | FRMR_IMR1_XPR)
409
#define HDLC_IMR1_MASK (FRMR_IMR1_XDU | FRMR_IMR1_XPR)
412
static inline unsigned int __t4_pci_in(struct t4 *wc, const unsigned int addr)
414
unsigned int res = readl(&wc->membase[addr]);
416
/* Even though we do not support fast back-to-back
417
* transactions, some host bridges appear to generate them.
418
* This delay prevents this.
420
if (!test_bit(T4_LOADING_FW, &wc->checkflag))
426
static inline void __t4_pci_out(struct t4 *wc, const unsigned int addr, const unsigned int value)
429
writel(value, &wc->membase[addr]);
431
/* Even though we do not support fast back-to-back
432
* transactions, some host bridges appear to generate them.
433
* This delay prevents this.
435
if (!test_bit(T4_LOADING_FW, &wc->checkflag))
437
tmp = __t4_pci_in(wc, WC_VERSION);
438
if ((tmp & 0xffff0000) != 0xc01a0000)
439
printk("TE4XXP: Version Synchronization Error!\n");
442
tmp = __t4_pci_in(wc, addr);
443
if ((value != tmp) && (addr != WC_LEDS) && (addr != WC_LDATA) &&
444
(addr != WC_GPIO) && (addr != WC_INTR))
445
printk("Tried to load %08x into %08x, but got %08x instead\n", value, addr, tmp);
449
static inline void __t4_gpio_set(struct t4 *wc, unsigned bits, unsigned int val)
451
unsigned int newgpio;
452
newgpio = wc->gpio & (~bits);
454
if (newgpio != wc->gpio) {
456
__t4_pci_out(wc, WC_GPIO, wc->gpio);
460
static inline void __t4_gpio_setdir(struct t4 *wc, unsigned int bits, unsigned int val)
462
unsigned int newgpioctl;
463
newgpioctl = wc->gpioctl & (~bits);
465
if (newgpioctl != wc->gpioctl) {
466
wc->gpioctl = newgpioctl;
467
__t4_pci_out(wc, WC_GPIOCTL, wc->gpioctl);
471
static inline void t4_gpio_setdir(struct t4 *wc, unsigned int bits, unsigned int val)
474
spin_lock_irqsave(&wc->reglock, flags);
475
__t4_gpio_setdir(wc, bits, val);
476
spin_unlock_irqrestore(&wc->reglock, flags);
479
static inline void t4_gpio_set(struct t4 *wc, unsigned int bits, unsigned int val)
482
spin_lock_irqsave(&wc->reglock, flags);
483
__t4_gpio_set(wc, bits, val);
484
spin_unlock_irqrestore(&wc->reglock, flags);
487
static inline void t4_pci_out(struct t4 *wc, const unsigned int addr, const unsigned int value)
490
spin_lock_irqsave(&wc->reglock, flags);
491
__t4_pci_out(wc, addr, value);
492
spin_unlock_irqrestore(&wc->reglock, flags);
495
static inline void __t4_set_led(struct t4 *wc, int span, int color)
497
int oldreg = wc->ledreg;
498
wc->ledreg &= ~(0x3 << (span << 1));
499
wc->ledreg |= (color << (span << 1));
500
if (oldreg != wc->ledreg)
501
__t4_pci_out(wc, WC_LEDS, wc->ledreg);
504
static inline void t4_activate(struct t4 *wc)
506
wc->ledreg |= WC_ACTIVATE;
507
t4_pci_out(wc, WC_LEDS, wc->ledreg);
510
static inline unsigned int t4_pci_in(struct t4 *wc, const unsigned int addr)
515
spin_lock_irqsave(&wc->reglock, flags);
516
ret = __t4_pci_in(wc, addr);
517
spin_unlock_irqrestore(&wc->reglock, flags);
521
static inline unsigned int __t4_framer_in(struct t4 *wc, int unit, const unsigned int addr)
525
__t4_pci_out(wc, WC_LADDR, (unit << 8) | (addr & 0xff));
526
__t4_pci_out(wc, WC_LADDR, (unit << 8) | (addr & 0xff) | WC_LFRMR_CS | WC_LREAD);
528
__t4_pci_out(wc, WC_VERSION, 0);
530
ret = __t4_pci_in(wc, WC_LDATA);
531
__t4_pci_out(wc, WC_LADDR, (unit << 8) | (addr & 0xff));
535
static inline unsigned int t4_framer_in(struct t4 *wc, int unit, const unsigned int addr)
539
spin_lock_irqsave(&wc->reglock, flags);
540
ret = __t4_framer_in(wc, unit, addr);
541
spin_unlock_irqrestore(&wc->reglock, flags);
546
static inline void __t4_framer_out(struct t4 *wc, int unit, const unsigned int addr, const unsigned int value)
549
if (unlikely(debug & DEBUG_REGS))
550
printk("Writing %02x to address %02x of unit %d\n", value, addr, unit);
551
__t4_pci_out(wc, WC_LADDR, (unit << 8) | (addr & 0xff));
552
__t4_pci_out(wc, WC_LDATA, value);
553
__t4_pci_out(wc, WC_LADDR, (unit << 8) | (addr & 0xff) | WC_LFRMR_CS | WC_LWRITE);
554
__t4_pci_out(wc, WC_LADDR, (unit << 8) | (addr & 0xff));
555
if (unlikely(debug & DEBUG_REGS)) printk("Write complete\n");
557
if ((addr != FRMR_TXFIFO) && (addr != FRMR_CMDR) && (addr != 0xbc))
559
tmp = __t4_framer_in(wc, unit, addr);
561
printk("Expected %d from unit %d register %d but got %d instead\n", value, unit, addr, tmp);
566
static inline void t4_framer_out(struct t4 *wc, int unit, const unsigned int addr, const unsigned int value)
569
spin_lock_irqsave(&wc->reglock, flags);
570
__t4_framer_out(wc, unit, addr, value);
571
spin_unlock_irqrestore(&wc->reglock, flags);
576
static inline void wait_a_little(void)
578
unsigned long newjiffies=jiffies+2;
579
while(jiffies < newjiffies);
582
static inline unsigned int __t4_vpm_in(struct t4 *wc, int unit, const unsigned int addr)
586
__t4_pci_out(wc, WC_LADDR, (addr & 0x1ff) | ( unit << 12));
587
__t4_pci_out(wc, WC_LADDR, (addr & 0x1ff) | ( unit << 12) | (1 << 11) | WC_LREAD);
588
ret = __t4_pci_in(wc, WC_LDATA);
589
__t4_pci_out(wc, WC_LADDR, 0);
593
static inline void __t4_raw_oct_out(struct t4 *wc, const unsigned int addr, const unsigned int value)
595
int octopt = wc->tspans[0]->spanflags & FLAG_OCTOPT;
597
__t4_gpio_set(wc, 0xff, (addr >> 8));
598
__t4_pci_out(wc, WC_LDATA, 0x10000 | (addr & 0xffff));
600
__t4_pci_out(wc, WC_LADDR, (WC_LWRITE));
601
__t4_pci_out(wc, WC_LADDR, (WC_LWRITE | WC_LALE));
603
__t4_gpio_set(wc, 0xff, (value >> 8));
604
__t4_pci_out(wc, WC_LDATA, (value & 0xffff));
605
__t4_pci_out(wc, WC_LADDR, (WC_LWRITE | WC_LALE | WC_LCS));
606
__t4_pci_out(wc, WC_LADDR, (0));
609
static inline unsigned int __t4_raw_oct_in(struct t4 *wc, const unsigned int addr)
612
int octopt = wc->tspans[0]->spanflags & FLAG_OCTOPT;
614
__t4_gpio_set(wc, 0xff, (addr >> 8));
615
__t4_pci_out(wc, WC_LDATA, 0x10000 | (addr & 0xffff));
617
__t4_pci_out(wc, WC_LADDR, (WC_LWRITE));
618
__t4_pci_out(wc, WC_LADDR, (WC_LWRITE | WC_LALE));
619
#ifdef PEDANTIC_OCTASIC_CHECKING
620
__t4_pci_out(wc, WC_LADDR, (WC_LALE));
623
__t4_gpio_setdir(wc, 0xff, 0x00);
624
__t4_gpio_set(wc, 0xff, 0x00);
626
__t4_pci_out(wc, WC_LADDR, (WC_LREAD | WC_LALE | WC_LCS));
628
ret = __t4_pci_in(wc, WC_LDATA) & 0xffff;
630
ret = __t4_pci_in(wc, WC_LDATA) & 0xff;
631
ret |= (__t4_pci_in(wc, WC_GPIO) & 0xff) << 8;
633
__t4_pci_out(wc, WC_LADDR, (0));
635
__t4_gpio_setdir(wc, 0xff, 0xff);
639
static inline unsigned int __t4_oct_in(struct t4 *wc, unsigned int addr)
641
#ifdef PEDANTIC_OCTASIC_CHECKING
644
__t4_raw_oct_out(wc, 0x0008, (addr >> 20));
645
__t4_raw_oct_out(wc, 0x000a, (addr >> 4) & ((1 << 16) - 1));
646
__t4_raw_oct_out(wc, 0x0000, (((addr >> 1) & 0x7) << 9) | (1 << 8) | (1));
647
#ifdef PEDANTIC_OCTASIC_CHECKING
648
while((__t4_raw_oct_in(wc, 0x0000) & (1 << 8)) && --count);
650
printk("Yah, read can be slow...\n");
652
printk("Read timed out!\n");
654
return __t4_raw_oct_in(wc, 0x0004);
657
static inline unsigned int t4_oct_in(struct t4 *wc, const unsigned int addr)
662
spin_lock_irqsave(&wc->reglock, flags);
663
ret = __t4_oct_in(wc, addr);
664
spin_unlock_irqrestore(&wc->reglock, flags);
668
static inline unsigned int t4_vpm_in(struct t4 *wc, int unit, const unsigned int addr)
672
spin_lock_irqsave(&wc->reglock, flags);
673
ret = __t4_vpm_in(wc, unit, addr);
674
spin_unlock_irqrestore(&wc->reglock, flags);
678
static inline void __t4_vpm_out(struct t4 *wc, int unit, const unsigned int addr, const unsigned int value)
681
if (debug & DEBUG_REGS)
682
printk("Writing %02x to address %02x of ec unit %d\n", value, addr, unit);
683
__t4_pci_out(wc, WC_LADDR, (addr & 0xff));
684
__t4_pci_out(wc, WC_LDATA, value);
685
__t4_pci_out(wc, WC_LADDR, (unit << 12) | (addr & 0x1ff) | (1 << 11));
686
__t4_pci_out(wc, WC_LADDR, (unit << 12) | (addr & 0x1ff) | (1 << 11) | WC_LWRITE);
687
__t4_pci_out(wc, WC_LADDR, (unit << 12) | (addr & 0x1ff) | (1 << 11));
688
__t4_pci_out(wc, WC_LADDR, (unit << 12) | (addr & 0x1ff));
689
__t4_pci_out(wc, WC_LADDR, 0);
690
if (debug & DEBUG_REGS) printk("Write complete\n");
695
tmp = t4_vpm_in(wc, unit, addr);
697
printk("Expected %d from unit %d echo register %d but got %d instead\n", value, unit, addr, tmp);
702
static inline void __t4_oct_out(struct t4 *wc, unsigned int addr, unsigned int value)
704
#ifdef PEDANTIC_OCTASIC_CHECKING
707
__t4_raw_oct_out(wc, 0x0008, (addr >> 20));
708
__t4_raw_oct_out(wc, 0x000a, (addr >> 4) & ((1 << 16) - 1));
709
__t4_raw_oct_out(wc, 0x0004, value);
710
__t4_raw_oct_out(wc, 0x0000, (((addr >> 1) & 0x7) << 9) | (1 << 8) | (3 << 12) | 1);
711
#ifdef PEDANTIC_OCTASIC_CHECKING
712
while((__t4_raw_oct_in(wc, 0x0000) & (1 << 8)) && --count);
714
printk("Yah, write can be slow\n");
716
printk("Write timed out!\n");
720
static inline void t4_oct_out(struct t4 *wc, const unsigned int addr, const unsigned int value)
724
spin_lock_irqsave(&wc->reglock, flags);
725
__t4_oct_out(wc, addr, value);
726
spin_unlock_irqrestore(&wc->reglock, flags);
729
static inline void t4_vpm_out(struct t4 *wc, int unit, const unsigned int addr, const unsigned int value)
732
spin_lock_irqsave(&wc->reglock, flags);
733
__t4_vpm_out(wc, unit, addr, value);
734
spin_unlock_irqrestore(&wc->reglock, flags);
737
static const char vpm_digits[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', '*', '#'};
739
static void t4_check_vpm450(struct t4 *wc)
741
int channel, tone, start, span;
743
if (vpm450m_checkirq(wc->vpm450m)) {
744
while(vpm450m_getdtmf(wc->vpm450m, &channel, &tone, &start)) {
745
span = channel & 0x3;
752
printk("Got tone %s of '%c' on channel %d of span %d\n",
753
(start ? "START" : "STOP"), tone, channel, span + 1);
754
if (test_bit(channel, &wc->tspans[span]->dtmfmask) && (tone != 'u')) {
756
/* The octasic is supposed to mute us, but... Yah, you
758
if (test_bit(channel, &wc->tspans[span]->dtmfmutemask)) {
760
struct zt_chan *chan = &wc->tspans[span]->span.chans[channel];
762
spin_lock_irqsave(&chan->lock, flags);
763
for (y=0;y<chan->numbufs;y++) {
764
if ((chan->inreadbuf > -1) && (chan->readidx[y]))
765
memset(chan->readbuf[chan->inreadbuf], ZT_XLAW(0, chan), chan->readidx[y]);
767
spin_unlock_irqrestore(&chan->lock, flags);
769
set_bit(channel, &wc->tspans[span]->dtmfactive);
770
zt_qevent_lock(&wc->tspans[span]->span.chans[channel], (ZT_EVENT_DTMFDOWN | tone));
772
clear_bit(channel, &wc->tspans[span]->dtmfactive);
773
zt_qevent_lock(&wc->tspans[span]->span.chans[channel], (ZT_EVENT_DTMFUP | tone));
780
static void t4_check_vpm400(struct t4 *wc, unsigned int newio)
782
unsigned int digit, regval = 0;
783
unsigned int regbyte;
786
static unsigned int lastio = 0;
789
if (debug && (newio != lastio))
790
printk("Last was %08x, new is %08x\n", lastio, newio);
794
for(x = 0; x < 8; x++) {
795
if (newio & (1 << (7 - x)))
797
ts = wc->tspans[x%4];
798
/* Start of DTMF detection process */
799
regbyte = t4_vpm_in(wc, x, 0xb8);
800
t4_vpm_out(wc, x, 0xb8, regbyte); /* Write 1 to clear */
801
regval = regbyte << 8;
802
regbyte = t4_vpm_in(wc, x, 0xb9);
803
t4_vpm_out(wc, x, 0xb9, regbyte);
806
for(i = 0; (i < MAX_DTMF_DET) && regval; i++) {
807
if(regval & 0x0001) {
808
int channel = (i << 1) + (x >> 2);
809
int base = channel - 1;
813
regbyte = t4_vpm_in(wc, x, 0xa8 + i);
814
digit = vpm_digits[regbyte];
815
if (!(wc->tspans[0]->spanflags & FLAG_VPM2GEN)) {
816
energy = t4_vpm_in(wc, x, 0x58 + channel);
817
energy = ZT_XLAW(energy, ts->chans);
818
ts->dtmfenergy[base] = energy;
820
set_bit(base, &ts->dtmfactive);
821
if (ts->dtmfdigit[base]) {
822
if (ts->dtmfmask & (1 << base))
823
zt_qevent_lock(&ts->span.chans[base], (ZT_EVENT_DTMFUP | ts->dtmfdigit[base]));
825
ts->dtmfdigit[base] = digit;
826
if (test_bit(base, &ts->dtmfmask))
827
zt_qevent_lock(&ts->span.chans[base], (ZT_EVENT_DTMFDOWN | digit));
828
if (test_bit(base, &ts->dtmfmutemask)) {
829
/* Mute active receive buffer*/
831
struct zt_chan *chan = &ts->span.chans[base];
833
spin_lock_irqsave(&chan->lock, flags);
834
for (y=0;y<chan->numbufs;y++) {
835
if ((chan->inreadbuf > -1) && (chan->readidx[y]))
836
memset(chan->readbuf[chan->inreadbuf], ZT_XLAW(0, chan), chan->readidx[y]);
838
spin_unlock_irqrestore(&chan->lock, flags);
841
printk("Digit Seen: %d, Span: %d, channel: %d, energy: %02x, 'channel %d' chip %d\n", digit, x % 4, base + 1, energy, channel, x);
844
regval = regval >> 1;
846
if (!(wc->tspans[0]->spanflags & FLAG_VPM2GEN))
849
/* Start of DTMF off detection process */
850
regbyte = t4_vpm_in(wc, x, 0xbc);
851
t4_vpm_out(wc, x, 0xbc, regbyte); /* Write 1 to clear */
852
regval = regbyte << 8;
853
regbyte = t4_vpm_in(wc, x, 0xbd);
854
t4_vpm_out(wc, x, 0xbd, regbyte);
857
for(i = 0; (i < MAX_DTMF_DET) && regval; i++) {
858
if(regval & 0x0001) {
859
int channel = (i << 1) + (x >> 2);
860
int base = channel - 1;
864
clear_bit(base, &ts->dtmfactive);
865
if (ts->dtmfdigit[base]) {
866
if (test_bit(base, &ts->dtmfmask))
867
zt_qevent_lock(&ts->span.chans[base], (ZT_EVENT_DTMFUP | ts->dtmfdigit[base]));
869
digit = ts->dtmfdigit[base];
870
ts->dtmfdigit[base] = 0;
872
printk("Digit Gone: %d, Span: %d, channel: %d, energy: %02x, 'channel %d' chip %d\n", digit, x % 4, base + 1, energy, channel, x);
875
regval = regval >> 1;
882
static void hdlc_stop(struct t4 *wc, unsigned int span)
884
struct t4_span *t = wc->tspans[span];
885
unsigned char imr0, imr1, mode;
888
if (debug & DEBUG_FRAMER) printk("Stopping HDLC controller on span %d\n", span+1);
890
/* Clear receive and transmit timeslots */
891
for (i = 0; i < 4; i++) {
892
t4_framer_out(wc, span, FRMR_RTR_BASE + i, 0x00);
893
t4_framer_out(wc, span, FRMR_TTR_BASE + i, 0x00);
896
imr0 = t4_framer_in(wc, span, FRMR_IMR0);
897
imr1 = t4_framer_in(wc, span, FRMR_IMR1);
899
/* Disable HDLC interrupts */
900
imr0 |= HDLC_IMR0_MASK;
901
t4_framer_out(wc, span, FRMR_IMR0, imr0);
903
imr1 |= HDLC_IMR1_MASK;
904
t4_framer_out(wc, span, FRMR_IMR1, imr1);
906
mode = t4_framer_in(wc, span, FRMR_MODE);
907
mode &= ~FRMR_MODE_HRAC;
908
t4_framer_out(wc, span, FRMR_MODE, mode);
913
static inline void __t4_framer_cmd(struct t4 *wc, unsigned int span, int cmd)
915
__t4_framer_out(wc, span, FRMR_CMDR, cmd);
918
static inline void t4_framer_cmd_wait(struct t4 *wc, unsigned int span, int cmd)
923
/* XXX could be time consuming XXX */
925
sis = t4_framer_in(wc, span, FRMR_SIS);
928
if (!loops++ && (debug & DEBUG_FRAMER)) {
929
printk("!!!SIS Waiting before cmd %02x\n", cmd);
932
if (loops && (debug & DEBUG_FRAMER))
933
printk("!!!SIS waited %d loops\n", loops);
935
t4_framer_out(wc, span, FRMR_CMDR, cmd);
938
static int hdlc_start(struct t4 *wc, unsigned int span, struct zt_chan *chan, unsigned char mode)
940
struct t4_span *t = wc->tspans[span];
941
unsigned char imr0, imr1;
942
int offset = chan->chanpos;
945
if (debug & DEBUG_FRAMER) printk("Starting HDLC controller for channel %d span %d\n", offset, span+1);
947
if (mode != FRMR_MODE_NO_ADDR_CMP)
950
mode |= FRMR_MODE_HRAC;
952
/* Make sure we're in the right mode */
953
t4_framer_out(wc, span, FRMR_MODE, mode);
954
t4_framer_out(wc, span, FRMR_TSEO, 0x00);
955
t4_framer_out(wc, span, FRMR_TSBS1, hardhdlcmode);
957
/* Set the interframe gaps, etc */
958
t4_framer_out(wc, span, FRMR_CCR1, FRMR_CCR1_ITF|FRMR_CCR1_EITS);
960
t4_framer_out(wc, span, FRMR_CCR2, FRMR_CCR2_RCRC);
962
/* Set up the time slot that we want to tx/rx on */
963
t4_framer_out(wc, span, FRMR_TTR_BASE + (offset / 8), (0x80 >> (offset % 8)));
964
t4_framer_out(wc, span, FRMR_RTR_BASE + (offset / 8), (0x80 >> (offset % 8)));
966
imr0 = t4_framer_in(wc, span, FRMR_IMR0);
967
imr1 = t4_framer_in(wc, span, FRMR_IMR1);
969
/* Enable our interrupts again */
970
imr0 &= ~HDLC_IMR0_MASK;
971
t4_framer_out(wc, span, FRMR_IMR0, imr0);
973
imr1 &= ~HDLC_IMR1_MASK;
974
t4_framer_out(wc, span, FRMR_IMR1, imr1);
976
/* Reset the signaling controller */
977
t4_framer_cmd_wait(wc, span, FRMR_CMDR_SRES);
979
spin_lock_irqsave(&wc->reglock, flags);
981
spin_unlock_irqrestore(&wc->reglock, flags);
988
static void __set_clear(struct t4 *wc, int span)
992
unsigned short val=0;
993
struct t4_span *ts = wc->tspans[span];
995
oldnotclear = ts->notclear;
996
if ((ts->spantype == TYPE_T1) || (ts->spantype == TYPE_J1)) {
999
if (ts->span.chans[i].flags & ZT_FLAG_CLEAR) {
1000
val |= 1 << (7 - (i % 8));
1001
ts->notclear &= ~(1 << i);
1003
ts->notclear |= (1 << i);
1006
printk("Putting %d in register %02x on span %d\n",
1007
val, 0x2f + j, span + 1);
1008
__t4_framer_out(wc, span, 0x2f + j, val);
1013
for (i=0;i<31;i++) {
1014
if (ts->span.chans[i].flags & ZT_FLAG_CLEAR)
1015
ts->notclear &= ~(1 << i);
1017
ts->notclear |= (1 << i);
1020
if (ts->notclear != oldnotclear) {
1022
reg = __t4_framer_in(wc, span, FRMR_IMR0);
1027
__t4_framer_out(wc, span, FRMR_IMR0, reg);
1032
static void set_clear(struct t4 *wc, int span)
1034
unsigned long flags;
1035
spin_lock_irqsave(&wc->reglock, flags);
1036
__set_clear(wc, span);
1037
spin_unlock_irqrestore(&wc->reglock, flags);
1041
static int t4_dacs(struct zt_chan *dst, struct zt_chan *src)
1046
ts = wc->tspans[dst->span->offset];
1047
if (src && (src->pvt != dst->pvt)) {
1048
if (ts->spanflags & FLAG_2NDGEN)
1049
t4_tsi_unassign(wc, dst->span->offset, dst->chanpos);
1051
if (ts->spanflags & FLAG_2NDGEN)
1052
t4_tsi_unassign(wc, src->span->offset, src->chanpos);
1054
printk("Unassigning %d/%d by default and...\n", src->span->offset, src->chanpos);
1056
printk("Unassigning %d/%d by default\n", dst->span->offset, dst->chanpos);
1060
t4_tsi_assign(wc, src->span->offset, src->chanpos, dst->span->offset, dst->chanpos);
1062
printk("Assigning channel %d/%d -> %d/%d!\n", src->span->offset, src->chanpos, dst->span->offset, dst->chanpos);
1064
t4_tsi_unassign(wc, dst->span->offset, dst->chanpos);
1066
printk("Unassigning channel %d/%d!\n", dst->span->offset, dst->chanpos);
1073
/* Octasic API callback: write one register.  'data' is the opaque
 * context pointer registered with the Octasic library — it is our
 * struct t4 card pointer. */
void oct_set_reg(void *data, unsigned int reg, unsigned int val)
{
	struct t4 *wc = data;
	t4_oct_out(wc, reg, val);
}
1079
/* Octasic API callback: read one register.  'data' is the opaque
 * context pointer registered with the Octasic library — it is our
 * struct t4 card pointer. */
unsigned int oct_get_reg(void *data, unsigned int reg)
{
	struct t4 *wc = data;
	unsigned int ret;
	ret = t4_oct_in(wc, reg);
	return ret;
}
1087
static int t4_vpm_unit(int span, int channel)
1093
unit += (channel & 1) << 2;
1097
unit += (channel & 0x3) << 1;
1101
unit += (channel & 0x7);
1106
static int t4_echocan(struct zt_chan *chan, int eclen)
1108
struct t4 *wc = chan->pvt;
1115
if (chan->span->offset >= vpmspans)
1119
channel = chan->chanpos;
1121
channel = chan->chanpos + 4;
1123
channel = channel << 2;
1124
channel |= chan->span->offset;
1125
if(debug & DEBUG_ECHOCAN)
1126
printk("echocan: Card is %d, Channel is %d, Span is %d, offset is %d length %d\n",
1127
wc->num, chan->chanpos, chan->span->offset, channel, eclen);
1128
vpm450m_setec(wc->vpm450m, channel, eclen);
1130
// msleep(100); // longer test
1132
unit = t4_vpm_unit(chan->span->offset, channel);
1133
if(debug & DEBUG_ECHOCAN)
1134
printk("echocan: Card is %d, Channel is %d, Span is %d, unit is %d, unit offset is %d length %d\n",
1135
wc->num, chan->chanpos, chan->span->offset, unit, channel, eclen);
1137
t4_vpm_out(wc,unit,channel,0x3e);
1139
t4_vpm_out(wc,unit,channel,0x01);
1145
static int t4_ioctl(struct zt_chan *chan, unsigned int cmd, unsigned long data)
1147
struct t4_regs regs;
1149
struct t4 *wc = chan->pvt;
1153
struct t4_span *ts = wc->tspans[chan->span->offset];
1157
if (dtmfthreshold == 0)
1158
dtmfthreshold = VPM_DEFAULT_DTMFTHRESHOLD;
1159
if (lastdtmfthreshold != dtmfthreshold) {
1160
lastdtmfthreshold = dtmfthreshold;
1161
t4_vpm_set_dtmf_threshold(wc, dtmfthreshold);
1167
for (x=0;x<NUM_PCI;x++)
1168
regs.pci[x] = t4_pci_in(wc, x);
1169
for (x=0;x<NUM_REGS;x++)
1170
regs.regs[x] = t4_framer_in(wc, chan->span->offset, x);
1171
if (copy_to_user((struct t4_regs *)data, ®s, sizeof(regs)))
1176
if (get_user(j, (int *)data))
1180
if (j && (vpmdtmfsupport == 0))
1182
if (j & ZT_TONEDETECT_ON)
1183
set_bit(chan->chanpos - 1, &ts->dtmfmask);
1185
clear_bit(chan->chanpos - 1, &ts->dtmfmask);
1186
if (j & ZT_TONEDETECT_MUTE)
1187
set_bit(chan->chanpos - 1, &ts->dtmfmutemask);
1189
clear_bit(chan->chanpos - 1, &ts->dtmfmutemask);
1191
channel = (chan->chanpos) << 2;
1193
channel += (4 << 2);
1194
channel |= chan->span->offset;
1195
vpm450m_setdtmf(wc->vpm450m, channel, j & ZT_TONEDETECT_ON, j & ZT_TONEDETECT_MUTE);
1205
static void inline t4_hdlc_xmit_fifo(struct t4 *wc, unsigned int span, struct t4_span *ts)
1207
int res, i, size = 32;
1208
unsigned char buf[32];
1210
res = zt_hdlc_getbuf(ts->sigchan, buf, &size);
1211
if (debug & DEBUG_FRAMER) printk("Got buffer sized %d and res %d for %d\n", size, res, span);
1215
if (debug & DEBUG_FRAMER) {
1217
for (i = 0; i < size; i++)
1218
printk((i ? " %02x" : "%02x"), buf[i]);
1222
for (i = 0; i < size; i++)
1223
t4_framer_out(wc, span, FRMR_TXFIFO, buf[i]);
1225
if (res) /* End of message */ {
1226
if (debug & DEBUG_FRAMER) printk("transmiting XHF|XME\n");
1227
t4_framer_cmd_wait(wc, span, FRMR_CMDR_XHF | FRMR_CMDR_XME);
1229
ts->sigactive = (__t4_framer_in(wc, span, FRMR_SIS) & FRMR_SIS_XFW) ? 0 : 1;
1232
if ((debug & DEBUG_FRAMER) && !(ts->frames_out & 0x0f))
1233
printk("Transmitted %d frames on span %d\n", ts->frames_out, span);
1234
} else { /* Still more to transmit */
1235
if (debug & DEBUG_FRAMER) printk("transmiting XHF\n");
1236
t4_framer_cmd_wait(wc, span, FRMR_CMDR_XHF);
1243
static void t4_hdlc_hard_xmit(struct zt_chan *chan)
1245
struct t4 *wc = chan->pvt;
1246
int span = chan->span->offset;
1247
struct t4_span *ts = wc->tspans[span];
1248
unsigned long flags;
1250
spin_lock_irqsave(&wc->reglock, flags);
1252
printk("t4_hdlc_hard_xmit: Invalid (NULL) signalling channel\n");
1253
spin_unlock_irqrestore(&wc->reglock, flags);
1256
spin_unlock_irqrestore(&wc->reglock, flags);
1258
if (debug & DEBUG_FRAMER) printk("t4_hdlc_hard_xmit on channel %s (sigchan %s), sigactive=%d\n", chan->name, ts->sigchan->name, ts->sigactive);
1260
if ((ts->sigchan == chan) && !ts->sigactive)
1261
t4_hdlc_xmit_fifo(wc, span, ts);
1264
static int t4_maint(struct zt_span *span, int cmd)
1266
struct t4_span *ts = span->pvt;
1267
struct t4 *wc = ts->owner;
1269
if (ts->spantype == TYPE_E1) {
1272
printk("XXX Turn off local and remote loops E1 XXX\n");
1274
case ZT_MAINT_LOCALLOOP:
1275
printk("XXX Turn on local loopback E1 XXX\n");
1277
case ZT_MAINT_REMOTELOOP:
1278
printk("XXX Turn on remote loopback E1 XXX\n");
1280
case ZT_MAINT_LOOPUP:
1281
printk("XXX Send loopup code E1 XXX\n");
1283
case ZT_MAINT_LOOPDOWN:
1284
printk("XXX Send loopdown code E1 XXX\n");
1286
case ZT_MAINT_LOOPSTOP:
1287
printk("XXX Stop sending loop codes E1 XXX\n");
1290
printk("TE%dXXP: Unknown E1 maint command: %d\n", wc->numspans, cmd);
1296
printk("XXX Turn off local and remote loops T1 XXX\n");
1298
case ZT_MAINT_LOCALLOOP:
1299
printk("XXX Turn on local loop and no remote loop XXX\n");
1301
case ZT_MAINT_REMOTELOOP:
1302
printk("XXX Turn on remote loopup XXX\n");
1304
case ZT_MAINT_LOOPUP:
1305
t4_framer_out(wc, span->offset, 0x21, 0x50); /* FMR5: Nothing but RBS mode */
1307
case ZT_MAINT_LOOPDOWN:
1308
t4_framer_out(wc, span->offset, 0x21, 0x60); /* FMR5: Nothing but RBS mode */
1310
case ZT_MAINT_LOOPSTOP:
1311
t4_framer_out(wc, span->offset, 0x21, 0x40); /* FMR5: Nothing but RBS mode */
1314
printk("TE%dXXP: Unknown T1 maint command: %d\n", wc->numspans, cmd);
1321
static int t4_rbsbits(struct zt_chan *chan, int bits)
1325
struct t4 *wc = chan->pvt;
1326
struct t4_span *ts = wc->tspans[chan->span->offset];
1327
unsigned long flags;
1329
if(debug & DEBUG_RBS) printk("Setting bits to %d on channel %s\n", bits, chan->name);
1330
spin_lock_irqsave(&wc->reglock, flags);
1331
k = chan->span->offset;
1332
if (ts->spantype == TYPE_E1) { /* do it E1 way */
1333
if (chan->chanpos == 16) {
1334
spin_unlock_irqrestore(&wc->reglock, flags);
1337
n = chan->chanpos - 1;
1338
if (chan->chanpos > 15) n--;
1341
m = (n / 15) << 2; /* nibble selector */
1342
c &= (0xf << m); /* keep the other nibble */
1343
c |= (bits & 0xf) << (4 - m); /* put our new nibble here */
1345
/* output them to the chip */
1346
__t4_framer_out(wc,k,0x71 + b,c);
1347
} else if (ts->span.lineconfig & ZT_CONFIG_D4) {
1348
n = chan->chanpos - 1;
1351
m = ((3 - (n % 4)) << 1); /* nibble selector */
1352
c &= ~(0x3 << m); /* keep the other nibble */
1353
c |= ((bits >> 2) & 0x3) << m; /* put our new nibble here */
1355
/* output them to the chip */
1356
__t4_framer_out(wc,k,0x70 + b,c);
1357
__t4_framer_out(wc,k,0x70 + b + 6,c);
1358
} else if (ts->span.lineconfig & ZT_CONFIG_ESF) {
1359
n = chan->chanpos - 1;
1362
m = ((n % 2) << 2); /* nibble selector */
1363
c &= (0xf << m); /* keep the other nibble */
1364
c |= (bits & 0xf) << (4 - m); /* put our new nibble here */
1366
/* output them to the chip */
1367
__t4_framer_out(wc,k,0x70 + b,c);
1369
spin_unlock_irqrestore(&wc->reglock, flags);
1370
if (debug & DEBUG_RBS)
1371
printk("Finished setting RBS bits\n");
1375
static int t4_shutdown(struct zt_span *span)
1379
unsigned long flags;
1380
struct t4_span *ts = span->pvt;
1381
struct t4 *wc = ts->owner;
1383
tspan = span->offset + 1;
1385
printk("T%dXXP: Span '%d' isn't us?\n", wc->numspans, span->spanno);
1389
if (debug & DEBUG_MAIN) printk("Shutting down span %d (%s)\n", span->spanno, span->name);
1391
/* Stop HDLC controller if runned */
1393
hdlc_stop(wc, span->offset);
1395
spin_lock_irqsave(&wc->reglock, flags);
1396
wasrunning = span->flags & ZT_FLAG_RUNNING;
1398
span->flags &= ~ZT_FLAG_RUNNING;
1399
__t4_set_led(wc, span->offset, WC_OFF);
1400
if (((wc->numspans == 4) &&
1401
(!(wc->tspans[0]->span.flags & ZT_FLAG_RUNNING)) &&
1402
(!(wc->tspans[1]->span.flags & ZT_FLAG_RUNNING)) &&
1403
(!(wc->tspans[2]->span.flags & ZT_FLAG_RUNNING)) &&
1404
(!(wc->tspans[3]->span.flags & ZT_FLAG_RUNNING)))
1406
((wc->numspans == 2) &&
1407
(!(wc->tspans[0]->span.flags & ZT_FLAG_RUNNING)) &&
1408
(!(wc->tspans[1]->span.flags & ZT_FLAG_RUNNING)))) {
1409
/* No longer in use, disable interrupts */
1410
printk("TE%dXXP: Disabling interrupts since there are no active spans\n", wc->numspans);
1411
set_bit(T4_STOP_DMA, &wc->checkflag);
1413
set_bit(T4_CHECK_TIMING, &wc->checkflag);
1415
spin_unlock_irqrestore(&wc->reglock, flags);
1417
/* Wait for interrupt routine to shut itself down */
1422
if (debug & DEBUG_MAIN)
1423
printk("Span %d (%s) shutdown\n", span->spanno, span->name);
1427
static int t4_spanconfig(struct zt_span *span, struct zt_lineconfig *lc)
1430
struct t4_span *ts = span->pvt;
1431
struct t4 *wc = ts->owner;
1433
printk("About to enter spanconfig!\n");
1434
if (debug & DEBUG_MAIN)
1435
printk("TE%dXXP: Configuring span %d\n", wc->numspans, span->spanno);
1442
/* remove this span number from the current sync sources, if there */
1443
for(i = 0; i < wc->numspans; i++) {
1444
if (wc->tspans[i]->sync == span->spanno) {
1445
wc->tspans[i]->sync = 0;
1446
wc->tspans[i]->psync = 0;
1449
wc->tspans[span->offset]->syncpos = lc->sync;
1450
/* if a sync src, put it in proper place */
1452
wc->tspans[lc->sync - 1]->sync = span->spanno;
1453
wc->tspans[lc->sync - 1]->psync = span->offset + 1;
1455
set_bit(T4_CHECK_TIMING, &wc->checkflag);
1457
/* Make sure this is clear in case of multiple startup and shutdown
1459
clear_bit(T4_STOP_DMA, &wc->checkflag);
1461
/* If we're already running, then go ahead and apply the changes */
1462
if (span->flags & ZT_FLAG_RUNNING)
1463
return t4_startup(span);
1464
printk("Done with spanconfig!\n");
1468
static int t4_chanconfig(struct zt_chan *chan, int sigtype)
1471
unsigned long flags;
1472
struct t4 *wc = chan->pvt;
1473
struct t4_span *ts = wc->tspans[chan->span->offset];
1475
alreadyrunning = ts->span.flags & ZT_FLAG_RUNNING;
1476
if (debug & DEBUG_MAIN) {
1478
printk("TE%dXXP: Reconfigured channel %d (%s) sigtype %d\n", wc->numspans, chan->channo, chan->name, sigtype);
1480
printk("TE%dXXP: Configured channel %d (%s) sigtype %d\n", wc->numspans, chan->channo, chan->name, sigtype);
1483
spin_lock_irqsave(&wc->reglock, flags);
1486
__set_clear(wc, chan->span->offset);
1488
spin_unlock_irqrestore(&wc->reglock, flags);
1490
/* (re)configure signalling channel */
1491
if ((sigtype == ZT_SIG_HARDHDLC) || (ts->sigchan == chan)) {
1492
if (debug & DEBUG_FRAMER)
1493
printk("%sonfiguring hardware HDLC on %s\n", ((sigtype == ZT_SIG_HARDHDLC) ? "C" : "Unc"), chan->name);
1494
if (alreadyrunning) {
1496
hdlc_stop(wc, ts->sigchan->span->offset);
1497
if (sigtype == ZT_SIG_HARDHDLC) {
1498
if (hdlc_start(wc, chan->span->offset, chan, ts->sigmode)) {
1499
printk("Error initializing signalling controller\n");
1503
spin_lock_irqsave(&wc->reglock, flags);
1505
spin_unlock_irqrestore(&wc->reglock, flags);
1510
spin_lock_irqsave(&wc->reglock, flags);
1511
ts->sigchan = (sigtype == ZT_SIG_HARDHDLC) ? chan : NULL;
1512
spin_unlock_irqrestore(&wc->reglock, flags);
1519
static int t4_open(struct zt_chan *chan)
1524
try_module_get(THIS_MODULE);
1530
static int t4_close(struct zt_chan *chan)
1535
module_put(THIS_MODULE);
1540
/* The number of cards we have seen with each
1541
possible 'order' switch setting.
1543
static unsigned int order_index[16];
1545
static void init_spans(struct t4 *wc)
1552
gen2 = (wc->tspans[0]->spanflags & FLAG_2NDGEN);
1555
for (x = 0; x < wc->numspans; x++) {
1557
sprintf(ts->span.name, "TE%d/%d/%d", wc->numspans, wc->num, x + 1);
1558
snprintf(ts->span.desc, sizeof(ts->span.desc) - 1,
1559
"T%dXXP (PCI) Card %d Span %d", wc->numspans, wc->num, x+1);
1560
ts->span.manufacturer = "Digium";
1561
zap_copy_string(ts->span.devicetype, wc->variety, sizeof(ts->span.devicetype));
1562
if (wc->vpm == T4_VPM_PRESENT) {
1564
strncat(ts->span.devicetype, " with VPM400M", sizeof(ts->span.devicetype) - 1);
1566
strncat(ts->span.devicetype, (wc->numspans > 2) ? " with VPMOCT128" : " with VPMOCT064",
1567
sizeof(ts->span.devicetype) - 1);
1569
if (order_index[wc->order] == 1)
1570
snprintf(ts->span.location, sizeof(ts->span.location) - 1, "Board ID Switch %d", wc->order);
1572
snprintf(ts->span.location, sizeof(ts->span.location) - 1,
1573
"PCI%s Bus %02d Slot %02d", (ts->spanflags & FLAG_EXPRESS) ? " Express" : " ",
1574
wc->dev->bus->number, PCI_SLOT(wc->dev->devfn) + 1);
1575
switch (ts->spantype) {
1577
ts->span.spantype = "T1";
1580
ts->span.spantype = "E1";
1583
ts->span.spantype = "J1";
1586
ts->span.spanconfig = t4_spanconfig;
1587
ts->span.chanconfig = t4_chanconfig;
1588
ts->span.irq = wc->dev->irq;
1589
ts->span.startup = t4_startup;
1590
ts->span.shutdown = t4_shutdown;
1591
ts->span.rbsbits = t4_rbsbits;
1592
ts->span.maint = t4_maint;
1593
ts->span.open = t4_open;
1594
ts->span.close = t4_close;
1596
/* HDLC Specific init */
1598
ts->sigmode = sigmode;
1601
if (ts->spantype == TYPE_T1 || ts->spantype == TYPE_J1) {
1602
ts->span.channels = 24;
1603
ts->span.deflaw = ZT_LAW_MULAW;
1604
ts->span.linecompat = ZT_CONFIG_AMI | ZT_CONFIG_B8ZS | ZT_CONFIG_D4 | ZT_CONFIG_ESF;
1606
ts->span.channels = 31;
1607
ts->span.deflaw = ZT_LAW_ALAW;
1608
ts->span.linecompat = ZT_CONFIG_HDB3 | ZT_CONFIG_CCS | ZT_CONFIG_CRC4;
1610
ts->span.chans = ts->chans;
1611
ts->span.flags = ZT_FLAG_RBS;
1612
ts->span.ioctl = t4_ioctl;
1613
ts->span.hdlc_hard_xmit = t4_hdlc_hard_xmit;
1616
ts->span.echocan = t4_echocan;
1618
ts->span.dacs = t4_dacs;
1622
ts->span.offset = x;
1623
ts->writechunk = (void *)(wc->writechunk + x * 32 * 2);
1624
ts->readchunk = (void *)(wc->readchunk + x * 32 * 2);
1625
init_waitqueue_head(&ts->span.maintq);
1626
for (y=0;y<wc->tspans[x]->span.channels;y++) {
1627
struct zt_chan *mychans = ts->chans + y;
1628
sprintf(mychans->name, "TE%d/%d/%d/%d", wc->numspans, wc->num, x + 1, y + 1);
1629
mychans->sigcap = ZT_SIG_EM | ZT_SIG_CLEAR | ZT_SIG_FXSLS | ZT_SIG_FXSGS | ZT_SIG_FXSKS | ZT_SIG_HARDHDLC | ZT_SIG_MTP2 |
1630
ZT_SIG_FXOLS | ZT_SIG_FXOGS | ZT_SIG_FXOKS | ZT_SIG_CAS | ZT_SIG_EM_E1 | ZT_SIG_DACS_RBS;
1632
mychans->chanpos = y + 1;
1634
mychans->writechunk = (void *)(wc->writechunk + (x * 32 + y + offset) * 2);
1635
mychans->readchunk = (void *)(wc->readchunk + (x * 32 + y + offset) * 2);
1641
static void t4_serial_setup(struct t4 *wc, int unit)
1643
if (!wc->globalconfig) {
1644
wc->globalconfig = 1;
1645
printk("TE%dXXP: Setting up global serial parameters\n", wc->numspans);
1646
t4_framer_out(wc, 0, 0x85, 0xe0); /* GPC1: Multiplex mode enabled, FSC is output, active low, RCLK from channel 0 */
1647
t4_framer_out(wc, 0, 0x08, 0x01); /* IPC: Interrupt push/pull active low */
1649
/* Global clocks (8.192 Mhz CLK) */
1650
t4_framer_out(wc, 0, 0x92, 0x00);
1651
t4_framer_out(wc, 0, 0x93, 0x18);
1652
t4_framer_out(wc, 0, 0x94, 0xfb);
1653
t4_framer_out(wc, 0, 0x95, 0x0b);
1654
t4_framer_out(wc, 0, 0x96, 0x00);
1655
t4_framer_out(wc, 0, 0x97, 0x0b);
1656
t4_framer_out(wc, 0, 0x98, 0xdb);
1657
t4_framer_out(wc, 0, 0x99, 0xdf);
1660
/* Configure interrupts */
1661
t4_framer_out(wc, unit, FRMR_GCR, 0x00); /* GCR: Interrupt on Activation/Deactivation of each */
1663
/* Configure system interface */
1664
t4_framer_out(wc, unit, FRMR_SIC1, 0xc2); /* SIC1: 8.192 Mhz clock/bus, double buffer receive / transmit, byte interleaved */
1665
t4_framer_out(wc, unit, FRMR_SIC2, 0x20 | (unit << 1)); /* SIC2: No FFS, no center receive eliastic buffer, phase */
1666
t4_framer_out(wc, unit, FRMR_SIC3, 0x04); /* SIC3: Edges for capture */
1667
t4_framer_out(wc, unit, FRMR_CMR2, 0x00); /* CMR2: We provide sync and clock for tx and rx. */
1668
if (!wc->t1e1) { /* T1 mode */
1669
t4_framer_out(wc, unit, FRMR_XC0, 0x03); /* XC0: Normal operation of Sa-bits */
1670
t4_framer_out(wc, unit, FRMR_XC1, 0x84); /* XC1: 0 offset */
1671
if (wc->tspans[unit]->spantype == TYPE_J1)
1672
t4_framer_out(wc, unit, FRMR_RC0, 0x83); /* RC0: Just shy of 1023 */
1674
t4_framer_out(wc, unit, FRMR_RC0, 0x03); /* RC0: Just shy of 1023 */
1675
t4_framer_out(wc, unit, FRMR_RC1, 0x84); /* RC1: The rest of RC0 */
1676
} else { /* E1 mode */
1677
t4_framer_out(wc, unit, FRMR_XC0, 0x00); /* XC0: Normal operation of Sa-bits */
1678
t4_framer_out(wc, unit, FRMR_XC1, 0x04); /* XC1: 0 offset */
1679
t4_framer_out(wc, unit, FRMR_RC0, 0x04); /* RC0: Just shy of 1023 */
1680
t4_framer_out(wc, unit, FRMR_RC1, 0x04); /* RC1: The rest of RC0 */
1683
/* Configure ports */
1684
t4_framer_out(wc, unit, 0x80, 0x00); /* PC1: SPYR/SPYX input on RPA/XPA */
1685
t4_framer_out(wc, unit, 0x81, 0x22); /* PC2: RMFB/XSIG output/input on RPB/XPB */
1686
t4_framer_out(wc, unit, 0x82, 0x65); /* PC3: Some unused stuff */
1687
t4_framer_out(wc, unit, 0x83, 0x35); /* PC4: Some more unused stuff */
1688
t4_framer_out(wc, unit, 0x84, 0x01); /* PC5: XMFS active low, SCLKR is input, RCLK is output */
1689
if (debug & DEBUG_MAIN)
1690
printk("Successfully initialized serial bus for unit %d\n", unit);
1693
static int syncsrc = 0;
1694
static int syncnum = 0 /* -1 */;
1695
static int syncspan = 0;
1696
#ifdef DEFINE_SPINLOCK
1697
static DEFINE_SPINLOCK(synclock);
1699
static spinlock_t synclock = SPIN_LOCK_UNLOCKED;
1702
static void __t4_set_timing_source(struct t4 *wc, int unit, int master, int slave)
1704
unsigned int timing;
1706
if (unit != wc->syncsrc) {
1707
timing = 0x34; /* CMR1: RCLK unit, 8.192 Mhz TCLK, RCLK is 8.192 Mhz */
1708
if ((unit > -1) && (unit < 4)) {
1709
timing |= (unit << 6);
1710
for (x=0;x<wc->numspans;x++) /* set all 4 receive reference clocks to unit */
1711
__t4_framer_out(wc, x, 0x44, timing);
1712
wc->dmactrl |= (1 << 29);
1714
for (x=0;x<wc->numspans;x++) /* set each receive reference clock to itself */
1715
__t4_framer_out(wc, x, 0x44, timing | (x << 6));
1716
wc->dmactrl &= ~(1 << 29);
1719
wc->dmactrl |= (1 << 25);
1721
wc->dmactrl &= ~(1 << 25);
1723
wc->dmactrl |= (1 << 24);
1725
wc->dmactrl &= ~(1 << 24);
1726
__t4_pci_out(wc, WC_DMACTRL, wc->dmactrl);
1727
if (!master && !slave)
1729
if ((unit < 0) || (unit > 3))
1733
if (!master && !slave) {
1734
for (x=0;x<wc->numspans;x++)
1735
wc->tspans[x]->span.syncsrc = unit;
1738
if (debug & DEBUG_MAIN)
1739
printk("TE%dXXP: Timing source already set to %d\n", wc->numspans, unit);
1742
printk("wct4xxp: Timing source set to %d\n",unit);
1746
static inline void __t4_update_timing(struct t4 *wc)
1749
/* update sync src info */
1750
if (wc->syncsrc != syncsrc) {
1751
printk("Swapping card %d from %d to %d\n", wc->num, wc->syncsrc, syncsrc);
1752
wc->syncsrc = syncsrc;
1753
/* Update sync sources */
1754
for (i = 0; i < wc->numspans; i++) {
1755
wc->tspans[i]->span.syncsrc = wc->syncsrc;
1757
if (syncnum == wc->num) {
1758
__t4_set_timing_source(wc, syncspan-1, 1, 0);
1759
if (debug) printk("Card %d, using sync span %d, master\n", wc->num, syncspan);
1761
__t4_set_timing_source(wc, syncspan-1, 0, 1);
1762
if (debug) printk("Card %d, using Timing Bus, NOT master\n", wc->num);
1767
static int __t4_findsync(struct t4 *wc)
1771
unsigned long flags;
1774
int newsyncsrc = 0; /* Zaptel span number */
1775
int newsyncnum = 0; /* wct4xxp card number */
1776
int newsyncspan = 0; /* span on given wct4xxp card */
1777
spin_lock_irqsave(&synclock, flags);
1780
/* If we're the first card, go through all the motions, up to 8 levels
1785
for (x=0;cards[x];x++) {
1786
for (i = 0; i < wc->numspans; i++) {
1787
if (cards[x]->tspans[i]->syncpos) {
1789
if ((cards[x]->tspans[i]->syncpos == p) &&
1790
!(cards[x]->tspans[i]->span.alarms & (ZT_ALARM_RED | ZT_ALARM_BLUE | ZT_ALARM_LOOPBACK)) &&
1791
(cards[x]->tspans[i]->span.flags & ZT_FLAG_RUNNING)) {
1792
/* This makes a good sync source */
1793
newsyncsrc = cards[x]->tspans[i]->span.spanno;
1795
newsyncspan = i + 1;
1808
if ((syncnum != newsyncnum) || (syncsrc != newsyncsrc) || (newsyncspan != syncspan)) {
1809
if (debug) printk("New syncnum: %d (was %d), syncsrc: %d (was %d), syncspan: %d (was %d)\n", newsyncnum, syncnum, newsyncsrc, syncsrc, newsyncspan, syncspan);
1810
syncnum = newsyncnum;
1811
syncsrc = newsyncsrc;
1812
syncspan = newsyncspan;
1813
for (x=0;cards[x];x++) {
1814
__t4_update_timing(cards[x]);
1818
set_bit(T4_CHECK_TIMING, &cards[0]->checkflag);
1820
spin_unlock_irqrestore(&synclock, flags);
1824
static void __t4_set_timing_source_auto(struct t4 *wc)
1827
printk("timing source auto card %d!\n", wc->num);
1828
clear_bit(T4_CHECK_TIMING, &wc->checkflag);
1832
for (x=0;x<wc->numspans;x++) {
1833
if (wc->tspans[x]->sync) {
1834
if ((wc->tspans[wc->tspans[x]->psync - 1]->span.flags & ZT_FLAG_RUNNING) &&
1835
!(wc->tspans[wc->tspans[x]->psync - 1]->span.alarms & (ZT_ALARM_RED | ZT_ALARM_BLUE) )) {
1836
/* Valid timing source */
1837
__t4_set_timing_source(wc, wc->tspans[x]->psync - 1, 0, 0);
1842
__t4_set_timing_source(wc, 4, 0, 0);
1846
static void __t4_configure_t1(struct t4 *wc, int unit, int lineconfig, int txlevel)
1848
unsigned int fmr4, fmr2, fmr1, fmr0, lim2;
1849
char *framing, *line;
1851
if ((txlevel > 7) || (txlevel < 4))
1854
mytxlevel = txlevel - 4;
1855
fmr1 = 0x9c; /* FMR1: Mode 1, T1 mode, CRC on for ESF, 8.192 Mhz system data rate, no XAIS */
1856
fmr2 = 0x22; /* FMR2: no payload loopback, auto send yellow alarm */
1859
fmr4 = 0x0c; /* FMR4: Lose sync on 2 out of 5 framing bits, auto resync */
1860
lim2 = 0x21; /* LIM2: 50% peak is a "1", Advanced Loss recovery */
1861
lim2 |= (mytxlevel << 6); /* LIM2: Add line buildout */
1862
__t4_framer_out(wc, unit, 0x1d, fmr1);
1863
__t4_framer_out(wc, unit, 0x1e, fmr2);
1865
/* Configure line interface */
1866
if (lineconfig & ZT_CONFIG_AMI) {
1873
if (lineconfig & ZT_CONFIG_D4) {
1880
__t4_framer_out(wc, unit, 0x1c, fmr0);
1881
__t4_framer_out(wc, unit, 0x20, fmr4);
1882
__t4_framer_out(wc, unit, 0x21, 0x40); /* FMR5: Enable RBS mode */
1884
__t4_framer_out(wc, unit, 0x37, 0xf0 ); /* LIM1: Clear data in case of LOS, Set receiver threshold (0.5V), No remote loop, no DRS */
1885
__t4_framer_out(wc, unit, 0x36, 0x08); /* LIM0: Enable auto long haul mode, no local loop (must be after LIM1) */
1887
__t4_framer_out(wc, unit, 0x02, 0x50); /* CMDR: Reset the receiver and transmitter line interface */
1888
__t4_framer_out(wc, unit, 0x02, 0x00); /* CMDR: Reset the receiver and transmitter line interface */
1890
__t4_framer_out(wc, unit, 0x3a, lim2); /* LIM2: 50% peak amplitude is a "1" */
1891
__t4_framer_out(wc, unit, 0x38, 0x0a); /* PCD: LOS after 176 consecutive "zeros" */
1892
__t4_framer_out(wc, unit, 0x39, 0x15); /* PCR: 22 "ones" clear LOS */
1894
/* Generate pulse mask for T1 */
1897
__t4_framer_out(wc, unit, 0x26, 0x07); /* XPM0 */
1898
__t4_framer_out(wc, unit, 0x27, 0x01); /* XPM1 */
1899
__t4_framer_out(wc, unit, 0x28, 0x00); /* XPM2 */
1902
__t4_framer_out(wc, unit, 0x26, 0x8c); /* XPM0 */
1903
__t4_framer_out(wc, unit, 0x27, 0x11); /* XPM1 */
1904
__t4_framer_out(wc, unit, 0x28, 0x01); /* XPM2 */
1907
__t4_framer_out(wc, unit, 0x26, 0x8c); /* XPM0 */
1908
__t4_framer_out(wc, unit, 0x27, 0x01); /* XPM1 */
1909
__t4_framer_out(wc, unit, 0x28, 0x00); /* XPM2 */
1913
__t4_framer_out(wc, unit, 0x26, 0xd7); /* XPM0 */
1914
__t4_framer_out(wc, unit, 0x27, 0x22); /* XPM1 */
1915
__t4_framer_out(wc, unit, 0x28, 0x01); /* XPM2 */
1919
/* Don't mask framer interrupts if hardware HDLC is in use */
1920
__t4_framer_out(wc, unit, FRMR_IMR0, 0xff & ~((wc->tspans[unit]->sigchan) ? HDLC_IMR0_MASK : 0)); /* IMR0: We care about CAS changes, etc */
1921
__t4_framer_out(wc, unit, FRMR_IMR1, 0xff & ~((wc->tspans[unit]->sigchan) ? HDLC_IMR1_MASK : 0)); /* IMR1: We care about nothing */
1922
__t4_framer_out(wc, unit, 0x16, 0x00); /* IMR2: We care about all the alarm stuff! */
1923
__t4_framer_out(wc, unit, 0x17, 0xf4); /* IMR3: We care about AIS and friends */
1924
__t4_framer_out(wc, unit, 0x18, 0x3f); /* IMR4: We care about slips on transmit */
1926
printk("TE%dXXP: Span %d configured for %s/%s\n", wc->numspans, unit + 1, framing, line);
1929
static void __t4_configure_e1(struct t4 *wc, int unit, int lineconfig)
1931
unsigned int fmr2, fmr1, fmr0;
1932
unsigned int cas = 0;
1933
unsigned int imr3extra=0;
1935
char *framing, *line;
1936
fmr1 = 0x44; /* FMR1: E1 mode, Automatic force resync, PCM30 mode, 8.192 Mhz backplane, no XAIS */
1937
fmr2 = 0x03; /* FMR2: Auto transmit remote alarm, auto loss of multiframe recovery, no payload loopback */
1940
if (lineconfig & ZT_CONFIG_CRC4) {
1941
fmr1 |= 0x08; /* CRC4 transmit */
1942
fmr2 |= 0xc0; /* CRC4 receive */
1945
__t4_framer_out(wc, unit, 0x1d, fmr1);
1946
__t4_framer_out(wc, unit, 0x1e, fmr2);
1948
/* Configure line interface */
1949
if (lineconfig & ZT_CONFIG_AMI) {
1956
if (lineconfig & ZT_CONFIG_CCS) {
1963
__t4_framer_out(wc, unit, 0x1c, fmr0);
1965
__t4_framer_out(wc, unit, 0x37, 0xf0 /*| 0x6 */ ); /* LIM1: Clear data in case of LOS, Set receiver threshold (0.5V), No remote loop, no DRS */
1966
__t4_framer_out(wc, unit, 0x36, 0x08); /* LIM0: Enable auto long haul mode, no local loop (must be after LIM1) */
1968
__t4_framer_out(wc, unit, 0x02, 0x50); /* CMDR: Reset the receiver and transmitter line interface */
1969
__t4_framer_out(wc, unit, 0x02, 0x00); /* CMDR: Reset the receiver and transmitter line interface */
1971
/* Condition receive line interface for E1 after reset */
1972
__t4_framer_out(wc, unit, 0xbb, 0x17);
1973
__t4_framer_out(wc, unit, 0xbc, 0x55);
1974
__t4_framer_out(wc, unit, 0xbb, 0x97);
1975
__t4_framer_out(wc, unit, 0xbb, 0x11);
1976
__t4_framer_out(wc, unit, 0xbc, 0xaa);
1977
__t4_framer_out(wc, unit, 0xbb, 0x91);
1978
__t4_framer_out(wc, unit, 0xbb, 0x12);
1979
__t4_framer_out(wc, unit, 0xbc, 0x55);
1980
__t4_framer_out(wc, unit, 0xbb, 0x92);
1981
__t4_framer_out(wc, unit, 0xbb, 0x0c);
1982
__t4_framer_out(wc, unit, 0xbb, 0x00);
1983
__t4_framer_out(wc, unit, 0xbb, 0x8c);
1985
__t4_framer_out(wc, unit, 0x3a, 0x20); /* LIM2: 50% peak amplitude is a "1" */
1986
__t4_framer_out(wc, unit, 0x38, 0x0a); /* PCD: LOS after 176 consecutive "zeros" */
1987
__t4_framer_out(wc, unit, 0x39, 0x15); /* PCR: 22 "ones" clear LOS */
1989
__t4_framer_out(wc, unit, 0x20, 0x9f); /* XSW: Spare bits all to 1 */
1990
__t4_framer_out(wc, unit, 0x21, 0x1c|cas); /* XSP: E-bit set when async. AXS auto, XSIF to 1 */
1993
/* Generate pulse mask for E1 */
1994
__t4_framer_out(wc, unit, 0x26, 0x54); /* XPM0 */
1995
__t4_framer_out(wc, unit, 0x27, 0x02); /* XPM1 */
1996
__t4_framer_out(wc, unit, 0x28, 0x00); /* XPM2 */
1998
/* Don't mask framer interrupts if hardware HDLC is in use */
1999
__t4_framer_out(wc, unit, FRMR_IMR0, 0xff & ~((wc->tspans[unit]->sigchan) ? HDLC_IMR0_MASK : 0)); /* IMR0: We care about CRC errors, CAS changes, etc */
2000
__t4_framer_out(wc, unit, FRMR_IMR1, 0x3f & ~((wc->tspans[unit]->sigchan) ? HDLC_IMR1_MASK : 0)); /* IMR1: We care about loopup / loopdown */
2001
__t4_framer_out(wc, unit, 0x16, 0x00); /* IMR2: We care about all the alarm stuff! */
2002
__t4_framer_out(wc, unit, 0x17, 0xc4 | imr3extra); /* IMR3: We care about AIS and friends */
2003
__t4_framer_out(wc, unit, 0x18, 0x3f); /* IMR4: We care about slips on transmit */
2005
printk("TE%dXXP: Span %d configured for %s/%s%s\n", wc->numspans, unit + 1, framing, line, crc4);
2008
static int t4_startup(struct zt_span *span)
2014
unsigned long flags;
2016
struct t4_span *ts = span->pvt;
2017
struct t4 *wc = ts->owner;
2019
printk("About to enter startup!\n");
2020
tspan = span->offset + 1;
2022
printk("TE%dXXP: Span '%d' isn't us?\n", wc->numspans, span->spanno);
2026
spin_lock_irqsave(&wc->reglock, flags);
2028
alreadyrunning = span->flags & ZT_FLAG_RUNNING;
2031
/* initialize the start value for the entire chunk of last ec buffer */
2032
for(i = 0; i < span->channels; i++)
2034
memset(ts->ec_chunk1[i],
2035
ZT_LIN2X(0,&span->chans[i]),ZT_CHUNKSIZE);
2036
memset(ts->ec_chunk2[i],
2037
ZT_LIN2X(0,&span->chans[i]),ZT_CHUNKSIZE);
2040
/* Force re-evaluation fo timing source */
2044
if (ts->spantype == TYPE_E1) { /* if this is an E1 card */
2045
__t4_configure_e1(wc, span->offset, span->lineconfig);
2046
} else { /* is a T1 card */
2047
__t4_configure_t1(wc, span->offset, span->lineconfig, span->txlevel);
2050
/* Note clear channel status */
2051
wc->tspans[span->offset]->notclear = 0;
2052
__set_clear(wc, span->offset);
2054
if (!alreadyrunning) {
2055
span->flags |= ZT_FLAG_RUNNING;
2057
/* enable interrupts */
2058
/* Start DMA, enabling DMA interrupts on read only */
2059
wc->dmactrl = 1 << 29;
2061
/* Enable framer only interrupts */
2062
wc->dmactrl |= 1 << 27;
2064
wc->dmactrl |= (ts->spanflags & FLAG_2NDGEN) ? 0xc0000000 : 0xc0000003;
2066
wc->dmactrl |= wc->vpm;
2068
/* Seed interrupt register */
2069
__t4_pci_out(wc, WC_INTR, 0x0c);
2070
if (noburst && !(ts->spanflags & FLAG_BURST))
2071
wc->dmactrl |= (1 << 26);
2072
__t4_pci_out(wc, WC_DMACTRL, wc->dmactrl);
2074
/* Startup HDLC controller too */
2078
struct zt_chan *sigchan = ts->sigchan;
2080
spin_unlock_irqrestore(&wc->reglock, flags);
2081
if (hdlc_start(wc, span->offset, sigchan, ts->sigmode)) {
2082
printk("Error initializing signalling controller\n");
2085
spin_lock_irqsave(&wc->reglock, flags);
2088
spin_unlock_irqrestore(&wc->reglock, flags);
2090
t4_check_alarms(wc, span->offset);
2091
t4_check_sigbits(wc, span->offset);
2093
if (wc->tspans[0]->sync == span->spanno) printk("SPAN %d: Primary Sync Source\n",span->spanno);
2094
if (wc->tspans[1]->sync == span->spanno) printk("SPAN %d: Secondary Sync Source\n",span->spanno);
2095
if (wc->numspans == 4) {
2096
if (wc->tspans[2]->sync == span->spanno) printk("SPAN %d: Tertiary Sync Source\n",span->spanno);
2097
if (wc->tspans[3]->sync == span->spanno) printk("SPAN %d: Quaternary Sync Source\n",span->spanno);
2100
if (!alreadyrunning && !wc->vpm) {
2105
wc->dmactrl |= wc->vpm;
2106
t4_pci_out(wc, WC_DMACTRL, wc->dmactrl);
2109
printk("Completed startup!\n");
2114
static inline void e1_check(struct t4 *wc, int span, int val)
2116
struct t4_span *ts = wc->tspans[span];
2117
if ((ts->span.channels > 24) &&
2118
(ts->span.flags & ZT_FLAG_RUNNING) &&
2119
!(ts->span.alarms) &&
2125
if (ts->e1check > 100) {
2127
wc->e1recover = 1000 * 8;
2128
wc->tspans[0]->e1check = wc->tspans[1]->e1check = 0;
2129
if (wc->numspans == 4)
2130
wc->tspans[2]->e1check = wc->tspans[3]->e1check = 0;
2131
if (debug & DEBUG_MAIN)
2132
printk("Detected loss of E1 alignment on span %d!\n", span);
2138
static void t4_receiveprep(struct t4 *wc, int irq)
2140
volatile unsigned int *readchunk;
2149
readchunk = wc->readchunk;
2154
readchunk = wc->readchunk + ZT_CHUNKSIZE * 32;
2160
for (x=0;x<wc->numspans;x++)
2161
wc->tspans[x]->irqmisses++;
2162
if (debug & DEBUG_MAIN)
2163
printk("TE%dXXP: Double/missed interrupt detected\n", wc->numspans);
2165
for (x=0;x<ZT_CHUNKSIZE;x++) {
2166
for (z=0;z<24;z++) {
2167
/* All T1/E1 channels */
2168
tmp = readchunk[z+1+offset];
2169
if (wc->numspans == 4) {
2170
wc->tspans[3]->span.chans[z].readchunk[x] = tmp & 0xff;
2171
wc->tspans[2]->span.chans[z].readchunk[x] = (tmp & 0xff00) >> 8;
2173
wc->tspans[1]->span.chans[z].readchunk[x] = (tmp & 0xff0000) >> 16;
2174
wc->tspans[0]->span.chans[z].readchunk[x] = tmp >> 24;
2177
if (wc->e1recover > 0)
2180
if (wc->numspans == 4) {
2181
e1_check(wc, 3, (tmp & 0x7f));
2182
e1_check(wc, 2, (tmp & 0x7f00) >> 8);
2184
e1_check(wc, 1, (tmp & 0x7f0000) >> 16);
2185
e1_check(wc, 0, (tmp & 0x7f000000) >> 24);
2186
for (z=24;z<31;z++) {
2187
/* Only E1 channels now */
2188
tmp = readchunk[z+1];
2189
if (wc->numspans == 4) {
2190
if (wc->tspans[3]->span.channels > 24)
2191
wc->tspans[3]->span.chans[z].readchunk[x] = tmp & 0xff;
2192
if (wc->tspans[2]->span.channels > 24)
2193
wc->tspans[2]->span.chans[z].readchunk[x] = (tmp & 0xff00) >> 8;
2195
if (wc->tspans[1]->span.channels > 24)
2196
wc->tspans[1]->span.chans[z].readchunk[x] = (tmp & 0xff0000) >> 16;
2197
if (wc->tspans[0]->span.channels > 24)
2198
wc->tspans[0]->span.chans[z].readchunk[x] = tmp >> 24;
2201
/* Advance pointer by 4 TDM frame lengths */
2204
for (x=0;x<wc->numspans;x++) {
2205
if (wc->tspans[x]->span.flags & ZT_FLAG_RUNNING) {
2206
for (y=0;y<wc->tspans[x]->span.channels;y++) {
2207
/* Echo cancel double buffered data */
2208
zt_ec_chunk(&wc->tspans[x]->span.chans[y],
2209
wc->tspans[x]->span.chans[y].readchunk,
2210
wc->tspans[x]->ec_chunk2[y]);
2211
memcpy(wc->tspans[x]->ec_chunk2[y],wc->tspans[x]->ec_chunk1[y],
2213
memcpy(wc->tspans[x]->ec_chunk1[y],
2214
wc->tspans[x]->span.chans[y].writechunk,
2217
zt_receive(&wc->tspans[x]->span);
2223
#if (ZT_CHUNKSIZE != 8)
2224
#error Sorry, nextgen does not support chunksize != 8
2227
static inline void __receive_span(struct t4_span *ts)
2231
unsigned long merged;
2232
merged = ts->dtmfactive & ts->dtmfmutemask;
2234
for (y=0;y<ts->span.channels;y++) {
2235
/* Mute any DTMFs which are supposed to be muted */
2236
if (test_bit(y, &merged)) {
2237
memset(ts->span.chans[y].readchunk, ZT_XLAW(0, (ts->span.chans + y)), ZT_CHUNKSIZE);
2243
#ifdef ENABLE_PREFETCH
2244
prefetch((void *)(ts->readchunk));
2245
prefetch((void *)(ts->writechunk));
2246
prefetch((void *)(ts->readchunk + 8));
2247
prefetch((void *)(ts->writechunk + 8));
2248
prefetch((void *)(ts->readchunk + 16));
2249
prefetch((void *)(ts->writechunk + 16));
2250
prefetch((void *)(ts->readchunk + 24));
2251
prefetch((void *)(ts->writechunk + 24));
2252
prefetch((void *)(ts->readchunk + 32));
2253
prefetch((void *)(ts->writechunk + 32));
2254
prefetch((void *)(ts->readchunk + 40));
2255
prefetch((void *)(ts->writechunk + 40));
2256
prefetch((void *)(ts->readchunk + 48));
2257
prefetch((void *)(ts->writechunk + 48));
2258
prefetch((void *)(ts->readchunk + 56));
2259
prefetch((void *)(ts->writechunk + 56));
2262
zt_ec_span(&ts->span);
2263
zt_receive(&ts->span);
2266
static inline void __transmit_span(struct t4_span *ts)
2268
zt_transmit(&ts->span);
2271
#ifdef ENABLE_WORKQUEUES
2272
static void workq_handlespan(void *data)
2274
struct t4_span *ts = data;
2275
struct t4 *wc = ts->owner;
2278
__transmit_span(ts);
2279
atomic_dec(&wc->worklist);
2280
if (!atomic_read(&wc->worklist))
2281
t4_pci_out(wc, WC_INTR, 0);
2284
static void t4_prep_gen2(struct t4 *wc)
2287
for (x=0;x<wc->numspans;x++) {
2288
if (wc->tspans[x]->span.flags & ZT_FLAG_RUNNING) {
2289
__receive_span(wc->tspans[x]);
2290
__transmit_span(wc->tspans[x]);
2297
static void t4_transmitprep(struct t4 *wc, int irq)
2299
volatile unsigned int *writechunk;
2307
writechunk = wc->writechunk + 1;
2309
writechunk = wc->writechunk + ZT_CHUNKSIZE * 32 + 1;
2311
for (y=0;y<wc->numspans;y++) {
2312
if (wc->tspans[y]->span.flags & ZT_FLAG_RUNNING)
2313
zt_transmit(&wc->tspans[y]->span);
2316
for (x=0;x<ZT_CHUNKSIZE;x++) {
2317
/* Once per chunk */
2318
for (z=0;z<24;z++) {
2319
/* All T1/E1 channels */
2320
tmp = (wc->tspans[3]->span.chans[z].writechunk[x]) |
2321
(wc->tspans[2]->span.chans[z].writechunk[x] << 8) |
2322
(wc->tspans[1]->span.chans[z].writechunk[x] << 16) |
2323
(wc->tspans[0]->span.chans[z].writechunk[x] << 24);
2324
writechunk[z+offset] = tmp;
2327
for (z=24;z<31;z++) {
2328
/* Only E1 channels now */
2330
if (wc->numspans == 4) {
2331
if (wc->tspans[3]->span.channels > 24)
2332
tmp |= wc->tspans[3]->span.chans[z].writechunk[x];
2333
if (wc->tspans[2]->span.channels > 24)
2334
tmp |= (wc->tspans[2]->span.chans[z].writechunk[x] << 8);
2336
if (wc->tspans[1]->span.channels > 24)
2337
tmp |= (wc->tspans[1]->span.chans[z].writechunk[x] << 16);
2338
if (wc->tspans[0]->span.channels > 24)
2339
tmp |= (wc->tspans[0]->span.chans[z].writechunk[x] << 24);
2340
writechunk[z] = tmp;
2343
/* Advance pointer by 4 TDM frame lengths */
2350
static void t4_check_sigbits(struct t4 *wc, int span)
2353
struct t4_span *ts = wc->tspans[span];
2355
if (debug & DEBUG_RBS)
2356
printk("Checking sigbits on span %d\n", span + 1);
2358
if (!(ts->span.flags & ZT_FLAG_RUNNING))
2360
if (ts->spantype == TYPE_E1) {
2361
for (i = 0; i < 15; i++) {
2362
a = t4_framer_in(wc, span, 0x71 + i);
2363
/* Get high channel in low bits */
2365
if (!(ts->span.chans[i+16].sig & ZT_SIG_CLEAR)) {
2366
if (ts->span.chans[i+16].rxsig != rxs)
2367
zt_rbsbits(&ts->span.chans[i+16], rxs);
2369
rxs = (a >> 4) & 0xf;
2370
if (!(ts->span.chans[i].sig & ZT_SIG_CLEAR)) {
2371
if (ts->span.chans[i].rxsig != rxs)
2372
zt_rbsbits(&ts->span.chans[i], rxs);
2375
} else if (ts->span.lineconfig & ZT_CONFIG_D4) {
2376
for (i = 0; i < 24; i+=4) {
2377
a = t4_framer_in(wc, span, 0x70 + (i>>2));
2378
/* Get high channel in low bits */
2379
rxs = (a & 0x3) << 2;
2380
if (!(ts->span.chans[i+3].sig & ZT_SIG_CLEAR)) {
2381
if (ts->span.chans[i+3].rxsig != rxs)
2382
zt_rbsbits(&ts->span.chans[i+3], rxs);
2385
if (!(ts->span.chans[i+2].sig & ZT_SIG_CLEAR)) {
2386
if (ts->span.chans[i+2].rxsig != rxs)
2387
zt_rbsbits(&ts->span.chans[i+2], rxs);
2389
rxs = (a >> 2) & 0xc;
2390
if (!(ts->span.chans[i+1].sig & ZT_SIG_CLEAR)) {
2391
if (ts->span.chans[i+1].rxsig != rxs)
2392
zt_rbsbits(&ts->span.chans[i+1], rxs);
2394
rxs = (a >> 4) & 0xc;
2395
if (!(ts->span.chans[i].sig & ZT_SIG_CLEAR)) {
2396
if (ts->span.chans[i].rxsig != rxs)
2397
zt_rbsbits(&ts->span.chans[i], rxs);
2401
for (i = 0; i < 24; i+=2) {
2402
a = t4_framer_in(wc, span, 0x70 + (i>>1));
2403
/* Get high channel in low bits */
2405
if (!(ts->span.chans[i+1].sig & ZT_SIG_CLEAR)) {
2406
/* XXX Not really reset on every trans! XXX */
2407
if (ts->span.chans[i+1].rxsig != rxs) {
2408
zt_rbsbits(&ts->span.chans[i+1], rxs);
2411
rxs = (a >> 4) & 0xf;
2412
if (!(ts->span.chans[i].sig & ZT_SIG_CLEAR)) {
2413
/* XXX Not really reset on every trans! XXX */
2414
if (ts->span.chans[i].rxsig != rxs) {
2415
zt_rbsbits(&ts->span.chans[i], rxs);
2422
static void t4_check_alarms(struct t4 *wc, int span)
2427
struct t4_span *ts = wc->tspans[span];
2428
unsigned long flags;
2430
if (!(ts->span.flags & ZT_FLAG_RUNNING))
2433
spin_lock_irqsave(&wc->reglock, flags);
2435
c = __t4_framer_in(wc, span, 0x4c);
2436
d = __t4_framer_in(wc, span, 0x4d);
2438
/* Assume no alarms */
2441
/* And consider only carrier alarms */
2442
ts->span.alarms &= (ZT_ALARM_RED | ZT_ALARM_BLUE | ZT_ALARM_NOTOPEN);
2444
if (ts->spantype == TYPE_E1) {
2446
/* No multiframe found, force RAI high after 400ms only if
2447
we haven't found a multiframe since last loss
2449
if (!(ts->spanflags & FLAG_NMF)) {
2450
__t4_framer_out(wc, span, 0x20, 0x9f | 0x20); /* LIM0: Force RAI High */
2451
ts->spanflags |= FLAG_NMF;
2452
printk("NMF workaround on!\n");
2454
__t4_framer_out(wc, span, 0x1e, 0xc3); /* Reset to CRC4 mode */
2455
__t4_framer_out(wc, span, 0x1c, 0xf2); /* Force Resync */
2456
__t4_framer_out(wc, span, 0x1c, 0xf0); /* Force Resync */
2457
} else if (!(c & 0x02)) {
2458
if ((ts->spanflags & FLAG_NMF)) {
2459
__t4_framer_out(wc, span, 0x20, 0x9f); /* LIM0: Clear forced RAI */
2460
ts->spanflags &= ~FLAG_NMF;
2461
printk("NMF workaround off!\n");
2465
/* Detect loopup code if we're not sending one */
2466
if ((!ts->span.mainttimer) && (d & 0x08)) {
2467
/* Loop-up code detected */
2468
if ((ts->loopupcnt++ > 80) && (ts->span.maintstat != ZT_MAINT_REMOTELOOP)) {
2469
__t4_framer_out(wc, span, 0x36, 0x08); /* LIM0: Disable any local loop */
2470
__t4_framer_out(wc, span, 0x37, 0xf6 ); /* LIM1: Enable remote loop */
2471
ts->span.maintstat = ZT_MAINT_REMOTELOOP;
2475
/* Same for loopdown code */
2476
if ((!ts->span.mainttimer) && (d & 0x10)) {
2477
/* Loop-down code detected */
2478
if ((ts->loopdowncnt++ > 80) && (ts->span.maintstat == ZT_MAINT_REMOTELOOP)) {
2479
__t4_framer_out(wc, span, 0x36, 0x08); /* LIM0: Disable any local loop */
2480
__t4_framer_out(wc, span, 0x37, 0xf0 ); /* LIM1: Disable remote loop */
2481
ts->span.maintstat = ZT_MAINT_NONE;
2484
ts->loopdowncnt = 0;
2487
if (ts->span.lineconfig & ZT_CONFIG_NOTOPEN) {
2488
for (x=0,j=0;x < ts->span.channels;x++)
2489
if ((ts->span.chans[x].flags & ZT_FLAG_OPEN) ||
2490
(ts->span.chans[x].flags & ZT_FLAG_NETDEV))
2493
alarms |= ZT_ALARM_NOTOPEN;
2497
if (ts->alarmcount >= alarmdebounce)
2498
alarms |= ZT_ALARM_RED;
2504
alarms |= ZT_ALARM_BLUE;
2506
if (((!ts->span.alarms) && alarms) ||
2507
(ts->span.alarms && (!alarms)))
2508
set_bit(T4_CHECK_TIMING, &wc->checkflag);
2510
/* Keep track of recovering */
2511
if ((!alarms) && ts->span.alarms)
2512
ts->alarmtimer = ZT_ALARMSETTLE_TIME;
2514
alarms |= ZT_ALARM_RECOVER;
2516
/* If receiving alarms, go into Yellow alarm state */
2517
if (alarms && !(ts->spanflags & FLAG_SENDINGYELLOW)) {
2520
printk("wct%dxxp: Setting yellow alarm on span %d\n", wc->numspans, span + 1);
2522
/* We manually do yellow alarm to handle RECOVER and NOTOPEN, otherwise it's auto anyway */
2523
fmr4 = __t4_framer_in(wc, span, 0x20);
2524
__t4_framer_out(wc, span, 0x20, fmr4 | 0x20);
2525
ts->spanflags |= FLAG_SENDINGYELLOW;
2526
} else if ((!alarms) && (ts->spanflags & FLAG_SENDINGYELLOW)) {
2529
printk("wct%dxxp: Clearing yellow alarm on span %d\n", wc->numspans, span + 1);
2531
/* We manually do yellow alarm to handle RECOVER */
2532
fmr4 = __t4_framer_in(wc, span, 0x20);
2533
__t4_framer_out(wc, span, 0x20, fmr4 & ~0x20);
2534
ts->spanflags &= ~FLAG_SENDINGYELLOW;
2537
/* Re-check the timing source when we enter/leave alarm, not withstanding
2540
alarms |= ZT_ALARM_YELLOW;
2541
if (ts->span.mainttimer || ts->span.maintstat)
2542
alarms |= ZT_ALARM_LOOPBACK;
2543
ts->span.alarms = alarms;
2544
spin_unlock_irqrestore(&wc->reglock, flags);
2545
zt_alarm_notify(&ts->span);
2548
static void t4_do_counters(struct t4 *wc)
2551
for (span=0;span<wc->numspans;span++) {
2552
struct t4_span *ts = wc->tspans[span];
2555
spin_lock(&wc->reglock);
2556
if (ts->loopupcnt || ts->loopdowncnt)
2558
if (ts->alarmtimer) {
2559
if (!--ts->alarmtimer) {
2561
ts->span.alarms &= ~(ZT_ALARM_RECOVER);
2564
spin_unlock(&wc->reglock);
2566
t4_check_alarms(wc, span);
2567
zt_alarm_notify(&ts->span);
2572
static inline void __handle_leds(struct t4 *wc)
2577
for (x=0;x<wc->numspans;x++) {
2578
struct t4_span *ts = wc->tspans[x];
2579
if (ts->span.flags & ZT_FLAG_RUNNING) {
2580
if (ts->span.alarms & (ZT_ALARM_RED | ZT_ALARM_BLUE)) {
2582
if (wc->blinktimer == (altab[wc->alarmpos] >> 1)) {
2583
__t4_set_led(wc, x, WC_RED);
2585
if (wc->blinktimer == 0xf) {
2586
__t4_set_led(wc, x, WC_OFF);
2589
if (wc->blinktimer == 160) {
2590
__t4_set_led(wc, x, WC_RED);
2591
} else if (wc->blinktimer == 480) {
2592
__t4_set_led(wc, x, WC_OFF);
2595
} else if (ts->span.alarms & ZT_ALARM_YELLOW) {
2597
__t4_set_led(wc, x, WC_YELLOW);
2598
} else if (ts->span.mainttimer || ts->span.maintstat) {
2600
if (wc->blinktimer == (altab[wc->alarmpos] >> 1)) {
2601
__t4_set_led(wc, x, WC_GREEN);
2603
if (wc->blinktimer == 0xf) {
2604
__t4_set_led(wc, x, WC_OFF);
2607
if (wc->blinktimer == 160) {
2608
__t4_set_led(wc, x, WC_GREEN);
2609
} else if (wc->blinktimer == 480) {
2610
__t4_set_led(wc, x, WC_OFF);
2615
__t4_set_led(wc, x, WC_GREEN);
2618
__t4_set_led(wc, x, WC_OFF);
2622
if (wc->blinktimer == 0xf) {
2623
wc->blinktimer = -1;
2625
if (wc->alarmpos >= (sizeof(altab) / sizeof(altab[0])))
2629
if (wc->blinktimer == 480)
2634
static inline void t4_framer_interrupt(struct t4 *wc, int span)
2636
/* Check interrupts for a given span */
2637
unsigned char gis, isr0, isr1, isr2, isr3, isr4;
2639
struct t4_span *ts = wc->tspans[span];
2640
struct zt_chan *sigchan;
2641
unsigned long flags;
2643
if (debug & DEBUG_FRAMER)
2644
printk("framer interrupt span %d:%d!\n", wc->num, span + 1);
2646
/* 1st gen cards isn't used interrupts */
2647
gis = t4_framer_in(wc, span, FRMR_GIS);
2648
isr0 = (gis & FRMR_GIS_ISR0) ? t4_framer_in(wc, span, FRMR_ISR0) : 0;
2649
isr1 = (gis & FRMR_GIS_ISR1) ? t4_framer_in(wc, span, FRMR_ISR1) : 0;
2650
isr2 = (gis & FRMR_GIS_ISR2) ? t4_framer_in(wc, span, FRMR_ISR2) : 0;
2651
isr3 = (gis & FRMR_GIS_ISR3) ? t4_framer_in(wc, span, FRMR_ISR3) : 0;
2652
isr4 = (gis & FRMR_GIS_ISR4) ? t4_framer_in(wc, span, FRMR_ISR4) : 0;
2654
if (debug & DEBUG_FRAMER)
2655
printk("gis: %02x, isr0: %02x, isr1: %02x, isr2: %02x, isr3: %02x, isr4: %02x\n", gis, isr0, isr1, isr2, isr3, isr4);
2658
t4_check_sigbits(wc, span);
2660
if (ts->spantype == TYPE_E1) {
2662
if ((isr3 & 0x38) || isr2 || isr1)
2663
t4_check_alarms(wc, span);
2666
if (isr2 || (isr3 & 0x08))
2667
t4_check_alarms(wc, span);
2669
if (!ts->span.alarms) {
2670
if ((isr3 & 0x3) || (isr4 & 0xc0))
2671
ts->span.timingslips++;
2673
if (debug & DEBUG_MAIN) {
2675
printk("TE%d10P: RECEIVE slip NEGATIVE on span %d\n", wc->numspans, span + 1);
2677
printk("TE%d10P: RECEIVE slip POSITIVE on span %d\n", wc->numspans, span + 1);
2679
printk("TE%dXXP: TRANSMIT slip POSITIVE on span %d\n", wc->numspans, span + 1);
2681
printk("TE%d10P: TRANSMIT slip NEGATIVE on span %d\n", wc->numspans, span + 1);
2684
ts->span.timingslips = 0;
2686
spin_lock_irqsave(&wc->reglock, flags);
2687
/* HDLC controller checks - receive side */
2689
spin_unlock_irqrestore(&wc->reglock, flags);
2693
sigchan = ts->sigchan;
2694
spin_unlock_irqrestore(&wc->reglock, flags);
2696
if (isr0 & FRMR_ISR0_RME) {
2697
readsize = (t4_framer_in(wc, span, FRMR_RBCH) << 8) | t4_framer_in(wc, span, FRMR_RBCL);
2698
if (debug & DEBUG_FRAMER) printk("Received data length is %d (%d)\n", readsize, readsize & FRMR_RBCL_MAX_SIZE);
2699
/* RPF isn't set on last part of frame */
2700
if ((readsize > 0) && ((readsize &= FRMR_RBCL_MAX_SIZE) == 0))
2702
} else if (isr0 & FRMR_ISR0_RPF)
2707
unsigned char readbuf[readsize];
2709
if (debug & DEBUG_FRAMER) printk("Framer %d: Got RPF/RME! readsize is %d\n", sigchan->span->offset, readsize);
2711
for (i = 0; i < readsize; i++)
2712
readbuf[i] = t4_framer_in(wc, span, FRMR_RXFIFO);
2714
/* Tell the framer to clear the RFIFO */
2715
t4_framer_cmd_wait(wc, span, FRMR_CMDR_RMC);
2717
if (debug & DEBUG_FRAMER) {
2719
for (i = 0; i < readsize; i++)
2720
printk((i ? " %02x" : "%02x"), readbuf[i]);
2724
if (isr0 & FRMR_ISR0_RME) {
2725
/* Do checks for HDLC problems */
2726
unsigned char rsis = readbuf[readsize-1];
2728
unsigned int olddebug = debug;
2730
unsigned char rsis_reg = t4_framer_in(wc, span, FRMR_RSIS);
2733
if ((rsis != 0xA2) || (rsis != rsis_reg))
2734
debug |= DEBUG_FRAMER;
2738
if ((debug & DEBUG_FRAMER) && !(ts->frames_in & 0x0f))
2739
printk("Received %d frames on span %d\n", ts->frames_in, span);
2740
if (debug & DEBUG_FRAMER) printk("Received HDLC frame %d. RSIS = 0x%x (%x)\n", ts->frames_in, rsis, rsis_reg);
2741
if (!(rsis & FRMR_RSIS_CRC16)) {
2742
if (debug & DEBUG_FRAMER) printk("CRC check failed %d\n", span);
2743
zt_hdlc_abort(sigchan, ZT_EVENT_BADFCS);
2744
} else if (rsis & FRMR_RSIS_RAB) {
2745
if (debug & DEBUG_FRAMER) printk("ABORT of current frame due to overflow %d\n", span);
2746
zt_hdlc_abort(sigchan, ZT_EVENT_ABORT);
2747
} else if (rsis & FRMR_RSIS_RDO) {
2748
if (debug & DEBUG_FRAMER) printk("HDLC overflow occured %d\n", span);
2749
zt_hdlc_abort(sigchan, ZT_EVENT_OVERRUN);
2750
} else if (!(rsis & FRMR_RSIS_VFR)) {
2751
if (debug & DEBUG_FRAMER) printk("Valid Frame check failed on span %d\n", span);
2752
zt_hdlc_abort(sigchan, ZT_EVENT_ABORT);
2754
zt_hdlc_putbuf(sigchan, readbuf, readsize - 1);
2755
zt_hdlc_finish(sigchan);
2756
if (debug & DEBUG_FRAMER) printk("Received valid HDLC frame on span %d\n", span);
2761
} else if (isr0 & FRMR_ISR0_RPF)
2762
zt_hdlc_putbuf(sigchan, readbuf, readsize);
2766
if (isr1 & FRMR_ISR1_XDU) {
2767
if (debug & DEBUG_FRAMER) printk("XDU: Resetting signal controler!\n");
2768
t4_framer_cmd_wait(wc, span, FRMR_CMDR_SRES);
2769
} else if (isr1 & FRMR_ISR1_XPR) {
2770
if (debug & DEBUG_FRAMER)
2771
printk("Sigchan %d is %p\n", sigchan->chanpos, sigchan);
2773
if (debug & DEBUG_FRAMER) printk("Framer %d: Got XPR!\n", sigchan->span->offset);
2774
t4_hdlc_xmit_fifo(wc, span, ts);
2777
if (isr1 & FRMR_ISR1_ALLS) {
2778
if (debug & DEBUG_FRAMER) printk("ALLS received\n");
2784
ZAP_IRQ_HANDLER(t4_interrupt)
2786
struct t4 *wc = dev_id;
2787
unsigned long flags;
2790
unsigned int status;
2791
unsigned int status2;
2794
if (wc->intcount < 20)
2795
printk("Pre-interrupt\n");
2798
/* Make sure it's really for us */
2799
status = __t4_pci_in(wc, WC_INTR);
2801
/* Process framer interrupts */
2802
status2 = t4_framer_in(wc, 0, FRMR_CIS);
2803
if (status2 & 0x0f) {
2804
for (x = 0; x < wc->numspans; ++x) {
2805
if (status2 & (1 << x))
2806
t4_framer_interrupt(wc, x);
2810
/* Ignore if it's not for us */
2818
__t4_pci_out(wc, WC_INTR, 0);
2820
if (!wc->spansstarted) {
2821
printk("Not prepped yet!\n");
2831
if (wc->intcount < 20)
2832
printk("Got interrupt, status = %08x\n", status);
2836
t4_receiveprep(wc, status);
2837
t4_transmitprep(wc, status);
2841
if ((wc->intcount < 10) || !(wc->intcount % 1000)) {
2842
status2 = t4_framer_in(wc, 0, FRMR_CIS);
2843
printk("Status2: %04x\n", status2);
2844
for (x = 0;x<4;x++) {
2845
status2 = t4_framer_in(wc, x, FRMR_FRS0);
2846
printk("FRS0/%d: %04x\n", x, status2);
2852
x = wc->intcount & 15 /* 63 */;
2858
t4_check_sigbits(wc, x);
2864
t4_check_alarms(wc, x - 4);
2868
spin_lock_irqsave(&wc->reglock, flags);
2872
if (test_bit(T4_CHECK_TIMING, &wc->checkflag))
2873
__t4_set_timing_source_auto(wc);
2875
spin_unlock_irqrestore(&wc->reglock, flags);
2877
return IRQ_RETVAL(1);
2882
static void t4_isr_bh(unsigned long data)
2884
struct t4 *wc = (struct t4 *)data;
2888
if (test_and_clear_bit(T4_CHECK_VPM, &wc->checkflag)) {
2890
/* How stupid is it that the octasic can't generate an
2891
interrupt when there's a tone, in spite of what their
2892
documentation says? */
2893
t4_check_vpm450(wc);
2895
t4_check_vpm400(wc, wc->vpm400checkstatus);
2901
ZAP_IRQ_HANDLER(t4_interrupt_gen2)
2903
struct t4 *wc = dev_id;
2904
unsigned int status;
2906
/* Check this first in case we get a spurious interrupt */
2907
if (unlikely(test_bit(T4_STOP_DMA, &wc->checkflag))) {
2908
/* Stop DMA cleanly if requested */
2910
t4_pci_out(wc, WC_DMACTRL, 0x00000000);
2911
/* Acknowledge any pending interrupts */
2912
t4_pci_out(wc, WC_INTR, 0x00000000);
2913
spin_lock(&wc->reglock);
2914
__t4_set_timing_source(wc, 4, 0, 0);
2915
spin_unlock(&wc->reglock);
2916
return IRQ_RETVAL(1);
2919
/* Make sure it's really for us */
2920
status = __t4_pci_in(wc, WC_INTR);
2922
/* Ignore if it's not for us */
2923
if (!(status & 0x7)) {
2931
#ifdef ENABLE_WORKQUEUES
2932
__t4_pci_out(wc, WC_INTR, status & 0x00000008);
2935
if (unlikely(!wc->spansstarted)) {
2936
printk("Not prepped yet!\n");
2946
if (unlikely((wc->intcount < 20) && debug))
2948
printk("2G: Got interrupt, status = %08x, CIS = %04x\n", status, t4_framer_in(wc, 0, FRMR_CIS));
2950
if (likely(status & 0x2)) {
2951
#ifdef ENABLE_WORKQUEUES
2952
int cpus = num_online_cpus();
2953
atomic_set(&wc->worklist, wc->numspans);
2954
if (wc->tspans[0]->span.flags & ZT_FLAG_RUNNING)
2955
t4_queue_work(wc->workq, &wc->tspans[0]->swork, 0);
2957
atomic_dec(&wc->worklist);
2958
if (wc->tspans[1]->span.flags & ZT_FLAG_RUNNING)
2959
t4_queue_work(wc->workq, &wc->tspans[1]->swork, 1 % cpus);
2961
atomic_dec(&wc->worklist);
2962
if (wc->numspans == 4) {
2963
if (wc->tspans[2]->span.flags & ZT_FLAG_RUNNING)
2964
t4_queue_work(wc->workq, &wc->tspans[2]->swork, 2 % cpus);
2966
atomic_dec(&wc->worklist);
2967
if (wc->tspans[3]->span.flags & ZT_FLAG_RUNNING)
2968
t4_queue_work(wc->workq, &wc->tspans[3]->swork, 3 % cpus);
2970
atomic_dec(&wc->worklist);
2976
spin_lock(&wc->reglock);
2978
spin_unlock(&wc->reglock);
2982
if (unlikely(status & 0x1)) {
2985
cis = t4_framer_in(wc, 0, FRMR_CIS);
2986
if (cis & FRMR_CIS_GIS1)
2987
t4_framer_interrupt(wc, 0);
2988
if (cis & FRMR_CIS_GIS2)
2989
t4_framer_interrupt(wc, 1);
2990
if (cis & FRMR_CIS_GIS3)
2991
t4_framer_interrupt(wc, 2);
2992
if (cis & FRMR_CIS_GIS4)
2993
t4_framer_interrupt(wc, 3);
2996
if (wc->vpm && vpmdtmfsupport) {
2998
/* How stupid is it that the octasic can't generate an
2999
interrupt when there's a tone, in spite of what their
3000
documentation says? */
3001
if (!(wc->intcount & 0xf)) {
3002
set_bit(T4_CHECK_VPM, &wc->checkflag);
3004
} else if ((status & 0xff00) != 0xff00) {
3005
wc->vpm400checkstatus = (status & 0xff00) >> 8;
3006
set_bit(T4_CHECK_VPM, &wc->checkflag);
3010
spin_lock(&wc->reglock);
3012
if (unlikely(test_bit(T4_CHECK_TIMING, &wc->checkflag))) {
3013
__t4_set_timing_source_auto(wc);
3016
spin_unlock(&wc->reglock);
3018
if (unlikely(test_bit(T4_CHECK_VPM, &wc->checkflag)))
3019
tasklet_schedule(&wc->t4_tlet);
3021
#ifndef ENABLE_WORKQUEUES
3022
__t4_pci_out(wc, WC_INTR, 0);
3025
return IRQ_RETVAL(1);
3030
static int t4_reset_dma(struct t4 *wc)
3032
/* Turn off DMA and such */
3034
t4_pci_out(wc, WC_DMACTRL, wc->dmactrl);
3035
t4_pci_out(wc, WC_COUNT, 0);
3036
t4_pci_out(wc, WC_RDADDR, 0);
3037
t4_pci_out(wc, WC_WRADDR, 0);
3038
t4_pci_out(wc, WC_INTR, 0);
3039
/* Turn it all back on */
3040
t4_pci_out(wc, WC_RDADDR, wc->readdma);
3041
t4_pci_out(wc, WC_WRADDR, wc->writedma);
3042
t4_pci_out(wc, WC_COUNT, ((ZT_MAX_CHUNKSIZE * 2 * 32 - 1) << 18) | ((ZT_MAX_CHUNKSIZE * 2 * 32 - 1) << 2));
3043
t4_pci_out(wc, WC_INTR, 0);
3045
wc->dmactrl = 0xc0000000 | (1 << 29) | wc->vpm;
3047
wc->dmactrl = 0xc0000000 | (1 << 29);
3050
wc->dmactrl |= (1 << 26);
3051
t4_pci_out(wc, WC_DMACTRL, wc->dmactrl);
3057
static void t4_vpm_set_dtmf_threshold(struct t4 *wc, unsigned int threshold)
3061
for (x = 0; x < 8; x++) {
3062
t4_vpm_out(wc, x, 0xC4, (threshold >> 8) & 0xFF);
3063
t4_vpm_out(wc, x, 0xC5, (threshold & 0xFF));
3065
printk("VPM: DTMF threshold set to %d\n", threshold);
3068
static unsigned int t4_vpm_mask(int chip)
3070
unsigned int mask=0;
3073
mask = 0x55555555 << (chip >> 2);
3076
mask = 0x11111111 << (chip >> 1);
3079
mask = 0x01010101 << chip;
3085
static int t4_vpm_spanno(int chip)
3090
spanno = chip & 0x3;
3093
spanno = chip & 0x1;
3095
/* Case 1 is implicit */
3100
static int t4_vpm_echotail(void)
3102
int echotail = 0x01ff;
3110
/* Case 1 is implicit */
3115
static void t4_vpm450_init(struct t4 *wc)
3117
unsigned int check1, check2;
3118
int laws[4] = { 0, };
3120
unsigned int vpm_capacity;
3121
struct firmware embedded_firmware;
3122
const struct firmware *firmware = &embedded_firmware;
3123
#if !defined(HOTPLUG_FIRMWARE)
3124
extern void _binary_zaptel_fw_oct6114_064_bin_size;
3125
extern void _binary_zaptel_fw_oct6114_128_bin_size;
3126
extern u8 _binary_zaptel_fw_oct6114_064_bin_start[];
3127
extern u8 _binary_zaptel_fw_oct6114_128_bin_start[];
3129
static const char oct064_firmware[] = "zaptel-fw-oct6114-064.bin";
3130
static const char oct128_firmware[] = "zaptel-fw-oct6114-128.bin";
3134
printk("VPM450: Support Disabled\n");
3138
/* Turn on GPIO/DATA mux if supported */
3139
t4_gpio_setdir(wc, (1 << 24), (1 << 24));
3140
__t4_raw_oct_out(wc, 0x000a, 0x5678);
3141
__t4_raw_oct_out(wc, 0x0004, 0x1234);
3142
check1 = __t4_raw_oct_in(wc, 0x0004);
3143
check2 = __t4_raw_oct_in(wc, 0x000a);
3145
printk("OCT Result: %04x/%04x\n", __t4_raw_oct_in(wc, 0x0004), __t4_raw_oct_in(wc, 0x000a));
3146
if (__t4_raw_oct_in(wc, 0x0004) != 0x1234) {
3147
printk("VPM450: Not Present\n");
3151
/* Setup alaw vs ulaw rules */
3152
for (x = 0;x < wc->numspans; x++) {
3153
if (wc->tspans[x]->span.channels > 24)
3157
switch ((vpm_capacity = get_vpm450m_capacity(wc))) {
3159
#if defined(HOTPLUG_FIRMWARE)
3160
if ((request_firmware(&firmware, oct064_firmware, &wc->dev->dev) != 0) ||
3162
printk("VPM450: firmware %s not available from userspace\n", oct064_firmware);
3166
embedded_firmware.data = _binary_zaptel_fw_oct6114_064_bin_start;
3167
/* Yes... this is weird. objcopy gives us a symbol containing
3168
the size of the firmware, not a pointer a variable containing
3169
the size. The only way we can get the value of the symbol
3170
is to take its address, so we define it as a pointer and
3171
then cast that value to the proper type.
3173
embedded_firmware.size = (size_t) &_binary_zaptel_fw_oct6114_064_bin_size;
3177
#if defined(HOTPLUG_FIRMWARE)
3178
if ((request_firmware(&firmware, oct128_firmware, &wc->dev->dev) != 0) ||
3180
printk("VPM450: firmware %s not available from userspace\n", oct128_firmware);
3184
embedded_firmware.data = _binary_zaptel_fw_oct6114_128_bin_start;
3185
/* Yes... this is weird. objcopy gives us a symbol containing
3186
the size of the firmware, not a pointer a variable containing
3187
the size. The only way we can get the value of the symbol
3188
is to take its address, so we define it as a pointer and
3189
then cast that value to the proper type.
3191
embedded_firmware.size = (size_t) &_binary_zaptel_fw_oct6114_128_bin_size;
3195
printk("Unsupported channel capacity found on VPM module (%d).\n", vpm_capacity);
3199
set_bit(T4_LOADING_FW, &wc->checkflag);
3200
if (!(wc->vpm450m = init_vpm450m(wc, laws, wc->numspans, firmware))) {
3201
printk("VPM450: Failed to initialize\n");
3202
if (firmware != &embedded_firmware)
3203
release_firmware(firmware);
3204
clear_bit(T4_LOADING_FW, &wc->checkflag);
3207
clear_bit(T4_LOADING_FW, &wc->checkflag);
3209
if (firmware != &embedded_firmware)
3210
release_firmware(firmware);
3212
if (vpmdtmfsupport == -1) {
3213
printk("VPM450: hardware DTMF disabled.\n");
3217
wc->vpm = T4_VPM_PRESENT;
3218
printk("VPM450: Present and operational servicing %d span(s)\n", wc->numspans);
3222
static void t4_vpm400_init(struct t4 *wc)
3227
unsigned int i, x, y, gen2vpm=0;
3230
printk("VPM400: Support Disabled\n");
3240
printk("VPM400: %d is not a valid vpmspans value, using 4\n", vpmspans);
3245
int spanno = t4_vpm_spanno(x);
3246
struct t4_span *ts = wc->tspans[spanno];
3247
int echotail = t4_vpm_echotail();
3249
ver = t4_vpm_in(wc, x, 0x1a0); /* revision */
3250
if ((ver != 0x26) && (ver != 0x33)) {
3251
printk("VPM400: %s\n", x ? "Inoperable" : "Not Present");
3255
if (x && !gen2vpm) {
3256
printk("VPM400: Inconsistent\n");
3259
ts->spanflags |= FLAG_VPM2GEN;
3261
} else if (gen2vpm) {
3262
printk("VPM400: Inconsistent\n");
3269
t4_vpm_out(wc, x, 0x1a8 + y, 0x00); /* GPIO out */
3270
t4_vpm_out(wc, x, 0x1ac + y, 0x00); /* GPIO dir */
3271
t4_vpm_out(wc, x, 0x1b0 + y, 0x00); /* GPIO sel */
3274
/* Setup TDM path - sets fsync and tdm_clk as inputs */
3275
reg = t4_vpm_in(wc, x, 0x1a3); /* misc_con */
3276
t4_vpm_out(wc, x, 0x1a3, reg & ~2);
3278
/* Setup timeslots */
3279
t4_vpm_out(wc, x, 0x02f, 0x20 | (spanno << 3));
3281
/* Setup Echo length (128 taps) */
3282
t4_vpm_out(wc, x, 0x022, (echotail >> 8));
3283
t4_vpm_out(wc, x, 0x023, (echotail & 0xff));
3285
/* Setup the tdm channel masks for all chips*/
3286
mask = t4_vpm_mask(x);
3287
for (i = 0; i < 4; i++)
3288
t4_vpm_out(wc, x, 0x30 + i, (mask >> (i << 3)) & 0xff);
3290
/* Setup convergence rate */
3291
reg = t4_vpm_in(wc,x,0x20);
3293
if (ts->spantype == TYPE_E1) {
3295
printk("VPM400: Span %d A-law mode\n", spanno);
3299
printk("VPM400: Span %d U-law mode\n", spanno);
3302
t4_vpm_out(wc,x,0x20,(reg | 0x20));
3304
/* Initialize echo cans */
3305
for (i = 0 ; i < MAX_TDM_CHAN; i++) {
3306
if (mask & (0x00000001 << i))
3307
t4_vpm_out(wc,x,i,0x00);
3312
/* Put in bypass mode */
3313
for (i = 0 ; i < MAX_TDM_CHAN ; i++) {
3314
if (mask & (0x00000001 << i)) {
3315
t4_vpm_out(wc,x,i,0x01);
3320
for (i = 0 ; i < MAX_TDM_CHAN ; i++) {
3321
if (mask & (0x00000001 << i))
3322
t4_vpm_out(wc,x,0x78 + i,0x01);
3325
/* set DTMF detection threshold */
3326
t4_vpm_set_dtmf_threshold(wc, dtmfthreshold);
3328
/* Enable DTMF detectors (always DTMF detect all spans) */
3329
for (i = 0; i < MAX_DTMF_DET; i++) {
3330
t4_vpm_out(wc, x, 0x98 + i, 0x40 | (i * 2) | ((x < 4) ? 0 : 1));
3332
for (i = 0x34; i < 0x38; i++)
3333
t4_vpm_out(wc, x, i, 0x00);
3334
for (i = 0x3C; i < 0x40; i++)
3335
t4_vpm_out(wc, x, i, 0x00);
3337
for (i = 0x48; i < 0x4B; i++)
3338
t4_vpm_out(wc, x, i, 0x00);
3339
for (i = 0x50; i < 0x53; i++)
3340
t4_vpm_out(wc, x, i, 0x00);
3341
for (i = 0xB8; i < 0xBE; i++)
3342
t4_vpm_out(wc, x, i, 0xFF);
3344
for (i = 0xBE; i < 0xC0; i++)
3345
t4_vpm_out(wc, x, i, 0xFF);
3347
for (i = 0xBE; i < 0xC0; i++)
3348
t4_vpm_out(wc, x, i, 0x00);
3350
for (i = 0xC0; i < 0xC4; i++)
3351
t4_vpm_out(wc, x, i, (x < 4) ? 0x55 : 0xAA);
3354
if (vpmdtmfsupport == -1) {
3355
printk("VPM400: hardware DTMF enabled.\n");
3358
printk("VPM400%s: Present and operational servicing %d span(s)\n", (gen2vpm ? " (2nd Gen)" : ""), wc->numspans);
3359
wc->vpm = T4_VPM_PRESENT;
3364
static void t4_tsi_reset(struct t4 *wc)
3367
for (x=0;x<128;x++) {
3368
wc->dmactrl &= ~0x00007fff;
3369
wc->dmactrl |= (0x00004000 | (x << 7));
3370
t4_pci_out(wc, WC_DMACTRL, wc->dmactrl);
3372
wc->dmactrl &= ~0x00007fff;
3373
t4_pci_out(wc, WC_DMACTRL, wc->dmactrl);
3376
/* Note that channels here start from 1 */
3377
static void t4_tsi_assign(struct t4 *wc, int fromspan, int fromchan, int tospan, int tochan)
3379
unsigned long flags;
3382
fromts = (fromspan << 5) |(fromchan);
3383
tots = (tospan << 5) | (tochan);
3389
spin_lock_irqsave(&wc->reglock, flags);
3390
wc->dmactrl &= ~0x00007fff;
3391
wc->dmactrl |= (0x00004000 | (tots << 7) | (fromts));
3392
__t4_pci_out(wc, WC_DMACTRL, wc->dmactrl);
3393
wc->dmactrl &= ~0x00007fff;
3394
__t4_pci_out(wc, WC_DMACTRL, wc->dmactrl);
3395
spin_unlock_irqrestore(&wc->reglock, flags);
3398
static void t4_tsi_unassign(struct t4 *wc, int tospan, int tochan)
3400
unsigned long flags;
3403
tots = (tospan << 5) | (tochan);
3407
spin_lock_irqsave(&wc->reglock, flags);
3408
wc->dmactrl &= ~0x00007fff;
3409
wc->dmactrl |= (0x00004000 | (tots << 7));
3410
__t4_pci_out(wc, WC_DMACTRL, wc->dmactrl);
3411
if (debug & DEBUG_TSI)
3412
printk("Sending '%08x\n", wc->dmactrl);
3413
wc->dmactrl &= ~0x00007fff;
3414
__t4_pci_out(wc, WC_DMACTRL, wc->dmactrl);
3415
spin_unlock_irqrestore(&wc->reglock, flags);
3418
static int t4_hardware_init_1(struct t4 *wc, unsigned int cardflags)
3420
unsigned int version;
3422
version = t4_pci_in(wc, WC_VERSION);
3423
printk("TE%dXXP version %08x, burst %s\n", wc->numspans, version, (!(cardflags & FLAG_BURST) && noburst) ? "OFF" : "ON");
3424
#ifdef ENABLE_WORKQUEUES
3425
printk("TE%dXXP running with work queues.\n", wc->numspans);
3428
/* Make sure DMA engine is not running and interrupts are acknowledged */
3430
t4_pci_out(wc, WC_DMACTRL, wc->dmactrl);
3431
/* Reset Framer and friends */
3432
t4_pci_out(wc, WC_LEDS, 0x00000000);
3434
/* Set DMA addresses */
3435
t4_pci_out(wc, WC_RDADDR, wc->readdma);
3436
t4_pci_out(wc, WC_WRADDR, wc->writedma);
3438
/* Setup counters, interrupt flags (ignored in Gen2) */
3439
if (cardflags & FLAG_2NDGEN) {
3442
t4_pci_out(wc, WC_COUNT, ((ZT_MAX_CHUNKSIZE * 2 * 32 - 1) << 18) | ((ZT_MAX_CHUNKSIZE * 2 * 32 - 1) << 2));
3445
/* Reset pending interrupts */
3446
t4_pci_out(wc, WC_INTR, 0x00000000);
3448
/* Read T1/E1 status */
3449
if (t1e1override > -1)
3450
wc->t1e1 = t1e1override;
3452
wc->t1e1 = ((t4_pci_in(wc, WC_LEDS)) & 0x0f00) >> 8;
3453
wc->order = ((t4_pci_in(wc, WC_LEDS)) & 0xf0000000) >> 28;
3454
order_index[wc->order]++;
3458
static int t4_hardware_init_2(struct t4 *wc)
3461
unsigned int falcver;
3463
if (t4_pci_in(wc, WC_VERSION) >= 0xc01a0165) {
3464
wc->tspans[0]->spanflags |= FLAG_OCTOPT;
3465
printk("Octasic optimized!\n");
3467
/* Setup LEDS, take out of reset */
3468
t4_pci_out(wc, WC_LEDS, 0x000000ff);
3471
t4_framer_out(wc, 0, 0x4a, 0xaa);
3472
falcver = t4_framer_in(wc, 0 ,0x4a);
3473
printk("FALC version: %08x, Board ID: %02x\n", falcver, wc->order);
3476
printk("Reg %d: 0x%08x\n", x, t4_pci_in(wc, x));
3480
static int __devinit t4_launch(struct t4 *wc)
3483
unsigned long flags;
3484
if (wc->tspans[0]->span.flags & ZT_FLAG_REGISTERED)
3486
printk("TE%dXXP: Launching card: %d\n", wc->numspans, wc->order);
3488
/* Setup serial parameters and system interface */
3490
t4_serial_setup(wc, x);
3492
if (zt_register(&wc->tspans[0]->span, 0)) {
3493
printk(KERN_ERR "Unable to register span %s\n", wc->tspans[0]->span.name);
3496
if (zt_register(&wc->tspans[1]->span, 0)) {
3497
printk(KERN_ERR "Unable to register span %s\n", wc->tspans[1]->span.name);
3498
zt_unregister(&wc->tspans[0]->span);
3502
if (wc->numspans == 4) {
3503
if (zt_register(&wc->tspans[2]->span, 0)) {
3504
printk(KERN_ERR "Unable to register span %s\n", wc->tspans[2]->span.name);
3505
zt_unregister(&wc->tspans[0]->span);
3506
zt_unregister(&wc->tspans[1]->span);
3509
if (zt_register(&wc->tspans[3]->span, 0)) {
3510
printk(KERN_ERR "Unable to register span %s\n", wc->tspans[3]->span.name);
3511
zt_unregister(&wc->tspans[0]->span);
3512
zt_unregister(&wc->tspans[1]->span);
3513
zt_unregister(&wc->tspans[2]->span);
3517
set_bit(T4_CHECK_TIMING, &wc->checkflag);
3518
spin_lock_irqsave(&wc->reglock, flags);
3519
__t4_set_timing_source(wc,4, 0, 0);
3520
spin_unlock_irqrestore(&wc->reglock, flags);
3521
tasklet_init(&wc->t4_tlet, t4_isr_bh, (unsigned long)wc);
3525
static int __devinit t4_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3534
unsigned int *canary;
3538
if (pci_enable_device(pdev)) {
3541
wc = kmalloc(sizeof(struct t4), GFP_KERNEL);
3543
memset(wc, 0x0, sizeof(struct t4));
3544
spin_lock_init(&wc->reglock);
3545
dt = (struct devtype *)(ent->driver_data);
3546
if (dt->flags & FLAG_2NDGEN)
3547
basesize = ZT_MAX_CHUNKSIZE * 32 * 4;
3549
basesize = ZT_MAX_CHUNKSIZE * 32 * 2 * 4;
3551
if (dt->flags & FLAG_2PORT)
3556
wc->variety = dt->desc;
3558
wc->memaddr = pci_resource_start(pdev, 0);
3559
wc->memlen = pci_resource_len(pdev, 0);
3560
wc->membase = ioremap(wc->memaddr, wc->memlen);
3561
/* This rids of the Double missed interrupt message after loading */
3564
if (!request_mem_region(wc->memaddr, wc->memlen, wc->variety))
3565
printk("wct4: Unable to request memory region :(, using anyway...\n");
3567
if (pci_request_regions(pdev, wc->variety))
3568
printk("wct%dxxp: Unable to request regions\n", wc->numspans);
3570
printk("Found TE%dXXP at base address %08lx, remapped to %p\n", wc->numspans, wc->memaddr, wc->membase);
3575
/* 32 channels, Double-buffer, Read/Write, 4 spans */
3576
(unsigned int *)pci_alloc_consistent(pdev, basesize * 2, &wc->writedma);
3577
if (!wc->writechunk) {
3578
printk("wct%dxxp: Unable to allocate DMA-able memory\n", wc->numspans);
3582
/* Read is after the whole write piece (in words) */
3583
wc->readchunk = wc->writechunk + basesize / 4;
3585
/* Same thing but in bytes... */
3586
wc->readdma = wc->writedma + basesize;
3588
/* Initialize Write/Buffers to all blank data */
3589
memset((void *)wc->writechunk,0x00, basesize);
3590
memset((void *)wc->readchunk,0xff, basesize);
3592
memset((void *)wc->readchunk,0xff,ZT_MAX_CHUNKSIZE * 2 * 32 * 4);
3593
/* Initialize canary */
3594
canary = (unsigned int *)(wc->readchunk + ZT_CHUNKSIZE * 64 * 4 - 4);
3595
*canary = (CANARY << 16) | (0xffff);
3598
/* Enable bus mastering */
3599
pci_set_master(pdev);
3601
/* Keep track of which device we are */
3602
pci_set_drvdata(pdev, wc);
3604
/* Initialize hardware */
3605
t4_hardware_init_1(wc, dt->flags);
3607
for(x = 0; x < MAX_T4_CARDS; x++) {
3608
if (!cards[x]) break;
3611
if (x >= MAX_T4_CARDS) {
3612
printk("No cards[] slot available!!\n");
3620
#ifdef ENABLE_WORKQUEUES
3621
if (dt->flags & FLAG_2NDGEN) {
3623
sprintf(tmp, "te%dxxp[%d]", wc->numspans, wc->num);
3624
wc->workq = create_workqueue(tmp);
3628
/* Allocate pieces we need here */
3630
if (wc->t1e1 & (1 << x)) {
3631
wc->tspans[x] = kmalloc(sizeof(struct t4_span) + sizeof(struct zt_chan) * 31, GFP_KERNEL);
3632
if (wc->tspans[x]) {
3633
memset(wc->tspans[x], 0, sizeof(struct t4_span) + sizeof(struct zt_chan) * 31);
3634
wc->tspans[x]->spantype = TYPE_E1;
3637
wc->tspans[x] = kmalloc(sizeof(struct t4_span) + sizeof(struct zt_chan) * 24, GFP_KERNEL);
3638
if (wc->tspans[x]) {
3639
memset(wc->tspans[x], 0, sizeof(struct t4_span) + sizeof(struct zt_chan) * 24);
3641
wc->tspans[x]->spantype = TYPE_J1;
3643
wc->tspans[x]->spantype = TYPE_T1;
3648
#ifdef ENABLE_WORKQUEUES
3649
INIT_WORK(&wc->tspans[x]->swork, workq_handlespan, wc->tspans[x]);
3651
wc->tspans[x]->spanflags |= dt->flags;
3655
/* Continue hardware intiialization */
3656
t4_hardware_init_2(wc);
3660
if (request_irq(pdev->irq, (dt->flags & FLAG_2NDGEN) ? t4_interrupt_gen2 :t4_interrupt, ZAP_IRQ_SHARED_DISABLED, (wc->numspans == 2) ? "wct2xxp" : "wct4xxp", wc))
3662
if (!(wc->tspans[0]->spanflags & FLAG_2NDGEN)) {
3663
printk("This driver does not support 1st gen modules\n");
3667
if (request_irq(pdev->irq, t4_interrupt_gen2, ZAP_IRQ_SHARED_DISABLED, "t4xxp", wc))
3670
printk("t4xxp: Unable to request IRQ %d\n", pdev->irq);
3677
/* Launch cards as appropriate */
3679
/* Find a card to activate */
3681
for (x=0;cards[x];x++) {
3682
if (cards[x]->order <= highestorder) {
3683
t4_launch(cards[x]);
3684
if (cards[x]->order == highestorder)
3688
/* If we found at least one, increment the highest order and search again, otherwise stop */
3695
printk("Found a Wildcard: %s\n", wc->variety);
3696
wc->gpio = 0x00000000;
3697
t4_pci_out(wc, WC_GPIO, wc->gpio);
3698
t4_gpio_setdir(wc, (1 << 17), (1 << 17));
3699
t4_gpio_setdir(wc, (0xff), (0xff));
3702
for (x=0;x<0x10000;x++) {
3703
__t4_raw_oct_out(wc, 0x0004, x);
3704
__t4_raw_oct_out(wc, 0x000a, x ^ 0xffff);
3705
if (__t4_raw_oct_in(wc, 0x0004) != x)
3706
printk("Register 4 failed %04x\n", x);
3707
if (__t4_raw_oct_in(wc, 0x000a) != (x ^ 0xffff))
3708
printk("Register 10 failed %04x\n", x);
3718
static int t4_hardware_stop(struct t4 *wc)
3721
/* Turn off DMA, leave interrupts enabled */
3722
set_bit(T4_STOP_DMA, &wc->checkflag);
3724
/* Wait for interrupts to stop */
3727
/* Turn off counter, address, etc */
3728
if (wc->tspans[0]->spanflags & FLAG_2NDGEN) {
3731
t4_pci_out(wc, WC_COUNT, 0x000000);
3733
t4_pci_out(wc, WC_RDADDR, 0x0000000);
3734
t4_pci_out(wc, WC_WRADDR, 0x0000000);
3735
wc->gpio = 0x00000000;
3736
t4_pci_out(wc, WC_GPIO, wc->gpio);
3737
t4_pci_out(wc, WC_LEDS, 0x00000000);
3739
printk("\nStopped TE%dXXP, Turned off DMA\n", wc->numspans);
3743
/*
 * PCI detach: reverse of the probe.  Stops the hardware, unregisters all
 * Zaptel spans, releases the VPM module, IRQ, MMIO mapping, PCI regions
 * and DMA buffer, then frees the per-card state and clears the cards[]
 * slot.  Safe to call even if drvdata was never set.
 */
static void __devexit t4_remove_one(struct pci_dev *pdev)
{
	struct t4 *wc = pci_get_drvdata(pdev);
	int x;

	if (!wc)
		return;

	/* Stop hardware: DMA off, counter/address registers cleared */
	t4_hardware_stop(wc);

#ifdef VPM_SUPPORT
	/* Release vpm450m echo-can module, if one was detected */
	if (wc->vpm450m)
		release_vpm450m(wc->vpm450m);
#endif

	/* Unregister spans (only those that actually registered) */
	if (wc->tspans[0]->span.flags & ZT_FLAG_REGISTERED)
		zt_unregister(&wc->tspans[0]->span);
	if (wc->tspans[1]->span.flags & ZT_FLAG_REGISTERED)
		zt_unregister(&wc->tspans[1]->span);
	if (wc->numspans == 4) {
		if (wc->tspans[2]->span.flags & ZT_FLAG_REGISTERED)
			zt_unregister(&wc->tspans[2]->span);
		if (wc->tspans[3]->span.flags & ZT_FLAG_REGISTERED)
			zt_unregister(&wc->tspans[3]->span);
	}

#ifdef ENABLE_WORKQUEUES
	if (wc->workq) {
		flush_workqueue(wc->workq);
		destroy_workqueue(wc->workq);
	}
#endif

	free_irq(pdev->irq, wc);

	if (wc->membase)
		iounmap((void *)wc->membase);

	pci_release_regions(pdev);
	/* NOTE(review): probe also calls request_mem_region() on BAR0 (and
	 * tolerates its failure); there is no matching release_mem_region()
	 * here — confirm against the original whether one is needed. */

	/* Immediately free resources.  Size must match the probe's
	 * pci_alloc_consistent(): basesize * 2 with the 4-span worst case. */
	pci_free_consistent(pdev, ZT_MAX_CHUNKSIZE * 2 * 2 * 32 * 4,
			    (void *)wc->writechunk, wc->writedma);

	order_index[wc->order]--;

	cards[wc->num] = NULL;
	pci_set_drvdata(pdev, NULL);
	for (x = 0; x < wc->numspans; x++)
		kfree(wc->tspans[x]);	/* kfree(NULL) is a no-op */
	kfree(wc);
}
/*
 * PCI device-ID match table.  Ordering matters: entries with a specific
 * subsystem vendor ID (0x0004 = P4 hardware rev, 0x0003 = P3) must come
 * before the PCI_ANY_ID wildcard fallbacks for the same device ID so the
 * most specific devtype wins.  The table MUST end with the all-zero
 * sentinel, which terminates the kernel's match loop.
 */
static struct pci_device_id t4_pci_tbl[] __devinitdata =
{
	/* Quad-span (TE4xx) cards */
	{ 0x10ee, 0x0314, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long)&wct4xxp },
	{ 0xd161, 0x0420, 0x0004, PCI_ANY_ID, 0, 0, (unsigned long)&wct420p4 },
	{ 0xd161, 0x0410, 0x0004, PCI_ANY_ID, 0, 0, (unsigned long)&wct410p4 },
	{ 0xd161, 0x0405, 0x0004, PCI_ANY_ID, 0, 0, (unsigned long)&wct405p4 },
	{ 0xd161, 0x0410, 0x0003, PCI_ANY_ID, 0, 0, (unsigned long)&wct410p3 },
	{ 0xd161, 0x0405, 0x0003, PCI_ANY_ID, 0, 0, (unsigned long)&wct405p3 },
	{ 0xd161, 0x0410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long)&wct410p2 },
	{ 0xd161, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long)&wct405p2 },
	/* Dual-span (TE2xx) cards */
	{ 0xd161, 0x0220, 0x0004, PCI_ANY_ID, 0, 0, (unsigned long)&wct220p4 },
	{ 0xd161, 0x0205, 0x0004, PCI_ANY_ID, 0, 0, (unsigned long)&wct205p4 },
	{ 0xd161, 0x0210, 0x0004, PCI_ANY_ID, 0, 0, (unsigned long)&wct210p4 },
	{ 0xd161, 0x0205, 0x0003, PCI_ANY_ID, 0, 0, (unsigned long)&wct205p3 },
	{ 0xd161, 0x0210, 0x0003, PCI_ANY_ID, 0, 0, (unsigned long)&wct210p3 },
	{ 0xd161, 0x0205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long)&wct205 },
	{ 0xd161, 0x0210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long)&wct210 },
	{ 0, }
};
static struct pci_driver t4_driver = {
3822
remove: __devexit_p(t4_remove_one),
3824
remove: t4_remove_one,
3828
id_table: t4_pci_tbl,
3831
/*
 * Module entry point: register the PCI driver with the kernel (via the
 * Zaptel compatibility wrapper).  Returns 0 on success, -ENODEV if
 * registration failed.
 */
static int __init t4_init(void)
{
	int res;

	res = zap_pci_module(&t4_driver);
	if (res)
		return -ENODEV;

	return 0;
}
/*
 * Module exit point: unregister the PCI driver, which triggers
 * t4_remove_one() for every bound card.
 */
static void __exit t4_cleanup(void)
{
	pci_unregister_driver(&t4_driver);
}
MODULE_AUTHOR("Mark Spencer");
3847
MODULE_DESCRIPTION("Unified TE4XXP-TE2XXP PCI Driver");
3848
#if defined(MODULE_ALIAS)
3849
MODULE_ALIAS("wct2xxp");
3851
#ifdef MODULE_LICENSE
3852
MODULE_LICENSE("GPL");
3855
module_param(pedanticpci, int, 0600);
3856
module_param(debug, int, 0600);
3857
module_param(loopback, int, 0600);
3858
module_param(noburst, int, 0600);
3859
module_param(timingcable, int, 0600);
3860
module_param(t1e1override, int, 0600);
3861
module_param(alarmdebounce, int, 0600);
3862
module_param(j1mode, int, 0600);
3863
module_param(sigmode, int, 0600);
3865
module_param(vpmsupport, int, 0600);
3866
module_param(vpmdtmfsupport, int, 0600);
3867
module_param(vpmspans, int, 0600);
3868
module_param(dtmfthreshold, int, 0600);
3871
MODULE_PARM(pedanticpci, "i");
3872
MODULE_PARM(debug, "i");
3873
MODULE_PARM(loopback, "i");
3874
MODULE_PARM(noburst, "i");
3875
MODULE_PARM(hardhdlcmode, "i");
3876
MODULE_PARM(timingcable, "i");
3877
MODULE_PARM(t1e1override, "i");
3878
MODULE_PARM(alarmdebounce, "i");
3879
MODULE_PARM(j1mode, "i");
3880
MODULE_PARM(sigmode, "i");
3882
MODULE_PARM(vpmsupport, "i");
3883
MODULE_PARM(vpmdtmfsupport, "i");
3884
MODULE_PARM(vpmspans, "i");
3885
MODULE_PARM(dtmfthreshold, "i");
3889
MODULE_DEVICE_TABLE(pci, t4_pci_tbl);
3891
module_init(t4_init);
3892
module_exit(t4_cleanup);