/*
 * Driver for high-speed SCC boards (those with DMA support)
 * Copyright (C) 1997-2000 Klaus Kudielka
 *
 * S5SCC/DMA support by Janko Koleznik S52HI
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
23
#include <linux/module.h>
24
#include <linux/bitops.h>
25
#include <linux/delay.h>
26
#include <linux/errno.h>
27
#include <linux/if_arp.h>
29
#include <linux/init.h>
30
#include <linux/interrupt.h>
31
#include <linux/ioport.h>
32
#include <linux/kernel.h>
34
#include <linux/netdevice.h>
35
#include <linux/rtnetlink.h>
36
#include <linux/sockios.h>
37
#include <linux/workqueue.h>
38
#include <asm/atomic.h>
42
#include <asm/uaccess.h>
47
/* Number of buffers per channel */
49
#define NUM_TX_BUF 2 /* NUM_TX_BUF >= 1 (min. 2 recommended) */
50
#define NUM_RX_BUF 6 /* NUM_RX_BUF >= 1 (min. 2 recommended) */
51
#define BUF_SIZE 1576 /* BUF_SIZE >= mtu + hard_header_len */
56
#define HW_PI { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
57
0, 8, 1843200, 3686400 }
58
#define HW_PI2 { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
59
0, 8, 3686400, 7372800 }
60
#define HW_TWIN { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
61
0, 4, 6144000, 6144000 }
62
#define HW_S5 { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
63
0, 8, 4915200, 9830400 }
65
#define HARDWARE { HW_PI, HW_PI2, HW_TWIN, HW_S5 }
67
#define TMR_0_HZ 25600 /* Frequency of timer 0 */
75
#define MAX_NUM_DEVS 32
78
/* SCC chips supported */
84
#define CHIPNAMES { "Z8530", "Z85C30", "Z85230" }
89
/* 8530 registers relative to card base */
91
#define SCCB_DATA 0x01
93
#define SCCA_DATA 0x03
95
/* 8253/8254 registers relative to card base */
101
/* Additional PI/PI2 registers relative to card base */
102
#define PI_DREQ_MASK 0x04
104
/* Additional PackeTwin registers relative to card base */
105
#define TWIN_INT_REG 0x08
106
#define TWIN_CLR_TMR1 0x09
107
#define TWIN_CLR_TMR2 0x0a
108
#define TWIN_SPARE_1 0x0b
109
#define TWIN_DMA_CFG 0x08
110
#define TWIN_SERIAL_CFG 0x09
111
#define TWIN_DMA_CLR_FF 0x0a
112
#define TWIN_SPARE_2 0x0b
115
/* PackeTwin I/O register values */
118
#define TWIN_SCC_MSK 0x01
119
#define TWIN_TMR1_MSK 0x02
120
#define TWIN_TMR2_MSK 0x04
121
#define TWIN_INT_MSK 0x07
124
#define TWIN_DTRA_ON 0x01
125
#define TWIN_DTRB_ON 0x02
126
#define TWIN_EXTCLKA 0x04
127
#define TWIN_EXTCLKB 0x08
128
#define TWIN_LOOPA_ON 0x10
129
#define TWIN_LOOPB_ON 0x20
133
#define TWIN_DMA_HDX_T1 0x08
134
#define TWIN_DMA_HDX_R1 0x0a
135
#define TWIN_DMA_HDX_T3 0x14
136
#define TWIN_DMA_HDX_R3 0x16
137
#define TWIN_DMA_FDX_T3R1 0x1b
138
#define TWIN_DMA_FDX_T1R3 0x1d
157
#define SIOCGSCCPARAM SIOCDEVPRIVATE
158
#define SIOCSSCCPARAM (SIOCDEVPRIVATE+1)
164
int pclk_hz; /* frequency of BRG input (don't change) */
165
int brg_tc; /* BRG terminal count; BRG disabled if < 0 */
166
int nrzi; /* 0 (nrz), 1 (nrzi) */
167
int clocks; /* see dmascc_cfg documentation */
168
int txdelay; /* [1/TMR_0_HZ] */
169
int txtimeout; /* [1/HZ] */
170
int txtail; /* [1/TMR_0_HZ] */
171
int waittime; /* [1/TMR_0_HZ] */
172
int slottime; /* [1/TMR_0_HZ] */
173
int persist; /* 1 ... 256 */
174
int dma; /* -1 (disable), 0, 1, 3 */
175
int txpause; /* [1/TMR_0_HZ] */
176
int rtsoff; /* [1/TMR_0_HZ] */
177
int dcdon; /* [1/TMR_0_HZ] */
178
int dcdoff; /* [1/TMR_0_HZ] */
181
struct scc_hardware {
196
struct net_device *dev;
197
struct scc_info *info;
200
int card_base, scc_cmd, scc_data;
201
int tmr_cnt, tmr_ctrl, tmr_mode;
202
struct scc_param param;
203
char rx_buf[NUM_RX_BUF][BUF_SIZE];
204
int rx_len[NUM_RX_BUF];
206
struct work_struct rx_work;
207
int rx_head, rx_tail, rx_count;
209
char tx_buf[NUM_TX_BUF][BUF_SIZE];
210
int tx_len[NUM_TX_BUF];
212
int tx_head, tx_tail, tx_count;
214
unsigned long tx_start;
216
spinlock_t *register_lock; /* Per scc_info */
217
spinlock_t ring_lock;
223
struct net_device *dev[2];
224
struct scc_priv priv[2];
225
struct scc_info *next;
226
spinlock_t register_lock; /* Per device register lock */
230
/* Function declarations */
231
static int setup_adapter(int card_base, int type, int n) __init;
233
static void write_scc(struct scc_priv *priv, int reg, int val);
234
static void write_scc_data(struct scc_priv *priv, int val, int fast);
235
static int read_scc(struct scc_priv *priv, int reg);
236
static int read_scc_data(struct scc_priv *priv);
238
static int scc_open(struct net_device *dev);
239
static int scc_close(struct net_device *dev);
240
static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
241
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
242
static int scc_set_mac_address(struct net_device *dev, void *sa);
244
static inline void tx_on(struct scc_priv *priv);
245
static inline void rx_on(struct scc_priv *priv);
246
static inline void rx_off(struct scc_priv *priv);
247
static void start_timer(struct scc_priv *priv, int t, int r15);
248
static inline unsigned char random(void);
250
static inline void z8530_isr(struct scc_info *info);
251
static irqreturn_t scc_isr(int irq, void *dev_id);
252
static void rx_isr(struct scc_priv *priv);
253
static void special_condition(struct scc_priv *priv, int rc);
254
static void rx_bh(struct work_struct *);
255
static void tx_isr(struct scc_priv *priv);
256
static void es_isr(struct scc_priv *priv);
257
static void tm_isr(struct scc_priv *priv);
260
/* Initialization variables */
262
static int io[MAX_NUM_DEVS] __initdata = { 0, };
264
/* Beware! hw[] is also used in dmascc_exit(). */
265
static struct scc_hardware hw[NUM_TYPES] = HARDWARE;
268
/* Global variables */
270
static struct scc_info *first;
271
static unsigned long rand;
274
MODULE_AUTHOR("Klaus Kudielka");
275
MODULE_DESCRIPTION("Driver for high-speed SCC boards");
276
module_param_array(io, int, NULL, 0);
277
MODULE_LICENSE("GPL");
279
static void __exit dmascc_exit(void)
282
struct scc_info *info;
287
/* Unregister devices */
288
for (i = 0; i < 2; i++)
289
unregister_netdev(info->dev[i]);
292
if (info->priv[0].type == TYPE_TWIN)
293
outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
294
write_scc(&info->priv[0], R9, FHWRES);
295
release_region(info->dev[0]->base_addr,
296
hw[info->priv[0].type].io_size);
298
for (i = 0; i < 2; i++)
299
free_netdev(info->dev[i]);
307
static int __init dmascc_init(void)
310
int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
313
unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
314
counting[MAX_NUM_DEVS];
316
/* Initialize random number generator */
318
/* Cards found = 0 */
320
/* Warning message */
322
printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");
324
/* Run autodetection for each card type */
325
for (h = 0; h < NUM_TYPES; h++) {
328
/* User-specified I/O address regions */
329
for (i = 0; i < hw[h].num_devs; i++)
331
for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
333
hw[h].io_region) / hw[h].io_delta;
334
if (j >= 0 && j < hw[h].num_devs &&
336
j * hw[h].io_delta == io[i]) {
341
/* Default I/O address regions */
342
for (i = 0; i < hw[h].num_devs; i++) {
344
hw[h].io_region + i * hw[h].io_delta;
348
/* Check valid I/O address regions */
349
for (i = 0; i < hw[h].num_devs; i++)
352
(base[i], hw[h].io_size, "dmascc"))
356
base[i] + hw[h].tmr_offset +
359
base[i] + hw[h].tmr_offset +
362
base[i] + hw[h].tmr_offset +
368
for (i = 0; i < hw[h].num_devs; i++)
370
/* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
372
outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
374
outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
376
/* Timer 1: LSB+MSB, Mode 0, HZ/10 */
378
outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
379
outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
383
/* Timer 2: LSB+MSB, Mode 0 */
387
/* Wait until counter registers are loaded */
388
udelay(2000000 / TMR_0_HZ);
391
while (jiffies - time < 13) {
392
for (i = 0; i < hw[h].num_devs; i++)
393
if (base[i] && counting[i]) {
394
/* Read back Timer 1: latch; read LSB; read MSB */
397
inb(t1[i]) + (inb(t1[i]) << 8);
398
/* Also check whether counter did wrap */
400
t_val > TMR_0_HZ / HZ * 10)
402
delay[i] = jiffies - start[i];
406
/* Evaluate measurements */
407
for (i = 0; i < hw[h].num_devs; i++)
409
if ((delay[i] >= 9 && delay[i] <= 11) &&
410
/* Ok, we have found an adapter */
411
(setup_adapter(base[i], h, n) == 0))
414
release_region(base[i],
420
/* If any adapter was successfully initialized, return ok */
424
/* If no adapter found, return error */
425
printk(KERN_INFO "dmascc: no adapters found\n");
429
/* Register module entry/exit points: probe all supported card types on
   load, release devices/regions on unload. */
module_init(dmascc_init);
module_exit(dmascc_exit);
432
static void __init dev_setup(struct net_device *dev)
434
dev->type = ARPHRD_AX25;
435
dev->hard_header_len = AX25_MAX_HEADER_LEN;
437
dev->addr_len = AX25_ADDR_LEN;
438
dev->tx_queue_len = 64;
439
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
440
memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
443
static const struct net_device_ops scc_netdev_ops = {
444
.ndo_open = scc_open,
445
.ndo_stop = scc_close,
446
.ndo_start_xmit = scc_send_packet,
447
.ndo_do_ioctl = scc_ioctl,
448
.ndo_set_mac_address = scc_set_mac_address,
451
static int __init setup_adapter(int card_base, int type, int n)
454
struct scc_info *info;
455
struct net_device *dev;
456
struct scc_priv *priv;
459
int tmr_base = card_base + hw[type].tmr_offset;
460
int scc_base = card_base + hw[type].scc_offset;
461
char *chipnames[] = CHIPNAMES;
463
/* Initialize what is necessary for write_scc and write_scc_data */
464
info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
466
printk(KERN_ERR "dmascc: "
467
"could not allocate memory for %s at %#3x\n",
468
hw[type].name, card_base);
473
info->dev[0] = alloc_netdev(0, "", dev_setup);
475
printk(KERN_ERR "dmascc: "
476
"could not allocate memory for %s at %#3x\n",
477
hw[type].name, card_base);
481
info->dev[1] = alloc_netdev(0, "", dev_setup);
483
printk(KERN_ERR "dmascc: "
484
"could not allocate memory for %s at %#3x\n",
485
hw[type].name, card_base);
488
spin_lock_init(&info->register_lock);
490
priv = &info->priv[0];
492
priv->card_base = card_base;
493
priv->scc_cmd = scc_base + SCCA_CMD;
494
priv->scc_data = scc_base + SCCA_DATA;
495
priv->register_lock = &info->register_lock;
498
write_scc(priv, R9, FHWRES | MIE | NV);
500
/* Determine type of chip by enabling SDLC/HDLC enhancements */
501
write_scc(priv, R15, SHDLCE);
502
if (!read_scc(priv, R15)) {
503
/* WR7' not present. This is an ordinary Z8530 SCC. */
506
/* Put one character in TX FIFO */
507
write_scc_data(priv, 0, 0);
508
if (read_scc(priv, R0) & Tx_BUF_EMP) {
509
/* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
512
/* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
516
write_scc(priv, R15, 0);
518
/* Start IRQ auto-detection */
519
irqs = probe_irq_on();
521
/* Enable interrupts */
522
if (type == TYPE_TWIN) {
523
outb(0, card_base + TWIN_DMA_CFG);
524
inb(card_base + TWIN_CLR_TMR1);
525
inb(card_base + TWIN_CLR_TMR2);
526
info->twin_serial_cfg = TWIN_EI;
527
outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
529
write_scc(priv, R15, CTSIE);
530
write_scc(priv, R0, RES_EXT_INT);
531
write_scc(priv, R1, EXT_INT_ENAB);
535
outb(1, tmr_base + TMR_CNT1);
536
outb(0, tmr_base + TMR_CNT1);
538
/* Wait and detect IRQ */
540
while (jiffies - time < 2 + HZ / TMR_0_HZ);
541
irq = probe_irq_off(irqs);
543
/* Clear pending interrupt, disable interrupts */
544
if (type == TYPE_TWIN) {
545
inb(card_base + TWIN_CLR_TMR1);
547
write_scc(priv, R1, 0);
548
write_scc(priv, R15, 0);
549
write_scc(priv, R0, RES_EXT_INT);
554
"dmascc: could not find irq of %s at %#3x (irq=%d)\n",
555
hw[type].name, card_base, irq);
559
/* Set up data structures */
560
for (i = 0; i < 2; i++) {
562
priv = &info->priv[i];
568
spin_lock_init(&priv->ring_lock);
569
priv->register_lock = &info->register_lock;
570
priv->card_base = card_base;
571
priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
572
priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
573
priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
574
priv->tmr_ctrl = tmr_base + TMR_CTRL;
575
priv->tmr_mode = i ? 0xb0 : 0x70;
576
priv->param.pclk_hz = hw[type].pclk_hz;
577
priv->param.brg_tc = -1;
578
priv->param.clocks = TCTRxCP | RCRTxCP;
579
priv->param.persist = 256;
580
priv->param.dma = -1;
581
INIT_WORK(&priv->rx_work, rx_bh);
583
sprintf(dev->name, "dmascc%i", 2 * n + i);
584
dev->base_addr = card_base;
586
dev->netdev_ops = &scc_netdev_ops;
587
dev->header_ops = &ax25_header_ops;
589
if (register_netdev(info->dev[0])) {
590
printk(KERN_ERR "dmascc: could not register %s\n",
594
if (register_netdev(info->dev[1])) {
595
printk(KERN_ERR "dmascc: could not register %s\n",
603
printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
604
hw[type].name, chipnames[chip], card_base, irq);
608
unregister_netdev(info->dev[0]);
610
if (info->priv[0].type == TYPE_TWIN)
611
outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
612
write_scc(&info->priv[0], R9, FHWRES);
613
free_netdev(info->dev[1]);
615
free_netdev(info->dev[0]);
623
/* Driver functions */
625
static void write_scc(struct scc_priv *priv, int reg, int val)
628
switch (priv->type) {
631
outb(reg, priv->scc_cmd);
632
outb(val, priv->scc_cmd);
636
outb_p(reg, priv->scc_cmd);
637
outb_p(val, priv->scc_cmd);
640
spin_lock_irqsave(priv->register_lock, flags);
641
outb_p(0, priv->card_base + PI_DREQ_MASK);
643
outb_p(reg, priv->scc_cmd);
644
outb_p(val, priv->scc_cmd);
645
outb(1, priv->card_base + PI_DREQ_MASK);
646
spin_unlock_irqrestore(priv->register_lock, flags);
652
static void write_scc_data(struct scc_priv *priv, int val, int fast)
655
switch (priv->type) {
657
outb(val, priv->scc_data);
660
outb_p(val, priv->scc_data);
664
outb_p(val, priv->scc_data);
666
spin_lock_irqsave(priv->register_lock, flags);
667
outb_p(0, priv->card_base + PI_DREQ_MASK);
668
outb_p(val, priv->scc_data);
669
outb(1, priv->card_base + PI_DREQ_MASK);
670
spin_unlock_irqrestore(priv->register_lock, flags);
677
static int read_scc(struct scc_priv *priv, int reg)
681
switch (priv->type) {
684
outb(reg, priv->scc_cmd);
685
return inb(priv->scc_cmd);
688
outb_p(reg, priv->scc_cmd);
689
return inb_p(priv->scc_cmd);
691
spin_lock_irqsave(priv->register_lock, flags);
692
outb_p(0, priv->card_base + PI_DREQ_MASK);
694
outb_p(reg, priv->scc_cmd);
695
rc = inb_p(priv->scc_cmd);
696
outb(1, priv->card_base + PI_DREQ_MASK);
697
spin_unlock_irqrestore(priv->register_lock, flags);
703
static int read_scc_data(struct scc_priv *priv)
707
switch (priv->type) {
709
return inb(priv->scc_data);
711
return inb_p(priv->scc_data);
713
spin_lock_irqsave(priv->register_lock, flags);
714
outb_p(0, priv->card_base + PI_DREQ_MASK);
715
rc = inb_p(priv->scc_data);
716
outb(1, priv->card_base + PI_DREQ_MASK);
717
spin_unlock_irqrestore(priv->register_lock, flags);
723
static int scc_open(struct net_device *dev)
725
struct scc_priv *priv = dev->ml_priv;
726
struct scc_info *info = priv->info;
727
int card_base = priv->card_base;
729
/* Request IRQ if not already used by other channel */
730
if (!info->irq_used) {
731
if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
737
/* Request DMA if required */
738
if (priv->param.dma >= 0) {
739
if (request_dma(priv->param.dma, "dmascc")) {
740
if (--info->irq_used == 0)
741
free_irq(dev->irq, info);
744
unsigned long flags = claim_dma_lock();
745
clear_dma_ff(priv->param.dma);
746
release_dma_lock(flags);
750
/* Initialize local variables */
753
priv->rx_head = priv->rx_tail = priv->rx_count = 0;
755
priv->tx_head = priv->tx_tail = priv->tx_count = 0;
759
write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
760
/* X1 clock, SDLC mode */
761
write_scc(priv, R4, SDLC | X1CLK);
763
write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
764
/* 8 bit RX char, RX disable */
765
write_scc(priv, R3, Rx8);
766
/* 8 bit TX char, TX disable */
767
write_scc(priv, R5, Tx8);
768
/* SDLC address field */
769
write_scc(priv, R6, 0);
771
write_scc(priv, R7, FLAG);
772
switch (priv->chip) {
775
write_scc(priv, R15, SHDLCE);
777
write_scc(priv, R7, AUTOEOM);
778
write_scc(priv, R15, 0);
782
write_scc(priv, R15, SHDLCE);
783
/* The following bits are set (see 2.5.2.1):
784
- Automatic EOM reset
785
- Interrupt request if RX FIFO is half full
786
This bit should be ignored in DMA mode (according to the
787
documentation), but actually isn't. The receiver doesn't work if
788
it is set. Thus, we have to clear it in DMA mode.
789
- Interrupt/DMA request if TX FIFO is completely empty
790
a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
792
b) If cleared, DMA requests may follow each other very quickly,
793
filling up the TX FIFO.
794
Advantage: TX works even in case of high bus latency.
795
Disadvantage: Edge-triggered DMA request circuitry may miss
796
a request. No more data is delivered, resulting
797
in a TX FIFO underrun.
798
Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
799
The PackeTwin doesn't. I don't know about the PI, but let's
800
assume it behaves like the PI2.
802
if (priv->param.dma >= 0) {
803
if (priv->type == TYPE_TWIN)
804
write_scc(priv, R7, AUTOEOM | TXFIFOE);
806
write_scc(priv, R7, AUTOEOM);
808
write_scc(priv, R7, AUTOEOM | RXFIFOH);
810
write_scc(priv, R15, 0);
813
/* Preset CRC, NRZ(I) encoding */
814
write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));
816
/* Configure baud rate generator */
817
if (priv->param.brg_tc >= 0) {
818
/* Program BR generator */
819
write_scc(priv, R12, priv->param.brg_tc & 0xFF);
820
write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
821
/* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
822
PackeTwin, not connected on the PI2); set DPLL source to BRG */
823
write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
825
write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
827
/* Disable BR generator */
828
write_scc(priv, R14, DTRREQ | BRSRC);
831
/* Configure clocks */
832
if (priv->type == TYPE_TWIN) {
833
/* Disable external TX clock receiver */
834
outb((info->twin_serial_cfg &=
835
~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
836
card_base + TWIN_SERIAL_CFG);
838
write_scc(priv, R11, priv->param.clocks);
839
if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
840
/* Enable external TX clock receiver */
841
outb((info->twin_serial_cfg |=
842
(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
843
card_base + TWIN_SERIAL_CFG);
846
/* Configure PackeTwin */
847
if (priv->type == TYPE_TWIN) {
848
/* Assert DTR, enable interrupts */
849
outb((info->twin_serial_cfg |= TWIN_EI |
850
(priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
851
card_base + TWIN_SERIAL_CFG);
854
/* Read current status */
855
priv->rr0 = read_scc(priv, R0);
856
/* Enable DCD interrupt */
857
write_scc(priv, R15, DCDIE);
859
netif_start_queue(dev);
865
static int scc_close(struct net_device *dev)
867
struct scc_priv *priv = dev->ml_priv;
868
struct scc_info *info = priv->info;
869
int card_base = priv->card_base;
871
netif_stop_queue(dev);
873
if (priv->type == TYPE_TWIN) {
875
outb((info->twin_serial_cfg &=
876
(priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
877
card_base + TWIN_SERIAL_CFG);
880
/* Reset channel, free DMA and IRQ */
881
write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
882
if (priv->param.dma >= 0) {
883
if (priv->type == TYPE_TWIN)
884
outb(0, card_base + TWIN_DMA_CFG);
885
free_dma(priv->param.dma);
887
if (--info->irq_used == 0)
888
free_irq(dev->irq, info);
894
static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
896
struct scc_priv *priv = dev->ml_priv;
901
(ifr->ifr_data, &priv->param,
902
sizeof(struct scc_param)))
906
if (!capable(CAP_NET_ADMIN))
908
if (netif_running(dev))
911
(&priv->param, ifr->ifr_data,
912
sizeof(struct scc_param)))
921
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
923
struct scc_priv *priv = dev->ml_priv;
927
/* Temporarily stop the scheduler feeding us packets */
928
netif_stop_queue(dev);
930
/* Transfer data to DMA buffer */
932
skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1);
933
priv->tx_len[i] = skb->len - 1;
935
/* Clear interrupts while we touch our circular buffers */
937
spin_lock_irqsave(&priv->ring_lock, flags);
938
/* Move the ring buffer's head */
939
priv->tx_head = (i + 1) % NUM_TX_BUF;
942
/* If we just filled up the last buffer, leave queue stopped.
943
The higher layers must wait until we have a DMA buffer
944
to accept the data. */
945
if (priv->tx_count < NUM_TX_BUF)
946
netif_wake_queue(dev);
948
/* Set new TX state */
949
if (priv->state == IDLE) {
950
/* Assert RTS, start timer */
951
priv->state = TX_HEAD;
952
priv->tx_start = jiffies;
953
write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
954
write_scc(priv, R15, 0);
955
start_timer(priv, priv->param.txdelay, 0);
958
/* Turn interrupts back on and free buffer */
959
spin_unlock_irqrestore(&priv->ring_lock, flags);
966
static int scc_set_mac_address(struct net_device *dev, void *sa)
968
memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
974
static inline void tx_on(struct scc_priv *priv)
979
if (priv->param.dma >= 0) {
980
n = (priv->chip == Z85230) ? 3 : 1;
981
/* Program DMA controller */
982
flags = claim_dma_lock();
983
set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
984
set_dma_addr(priv->param.dma,
985
(int) priv->tx_buf[priv->tx_tail] + n);
986
set_dma_count(priv->param.dma,
987
priv->tx_len[priv->tx_tail] - n);
988
release_dma_lock(flags);
989
/* Enable TX underrun interrupt */
990
write_scc(priv, R15, TxUIE);
992
if (priv->type == TYPE_TWIN)
993
outb((priv->param.dma ==
994
1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
995
priv->card_base + TWIN_DMA_CFG);
998
EXT_INT_ENAB | WT_FN_RDYFN |
1000
/* Write first byte(s) */
1001
spin_lock_irqsave(priv->register_lock, flags);
1002
for (i = 0; i < n; i++)
1003
write_scc_data(priv,
1004
priv->tx_buf[priv->tx_tail][i], 1);
1005
enable_dma(priv->param.dma);
1006
spin_unlock_irqrestore(priv->register_lock, flags);
1008
write_scc(priv, R15, TxUIE);
1010
EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
1013
/* Reset EOM latch if we do not have the AUTOEOM feature */
1014
if (priv->chip == Z8530)
1015
write_scc(priv, R0, RES_EOM_L);
1019
static inline void rx_on(struct scc_priv *priv)
1021
unsigned long flags;
1024
while (read_scc(priv, R0) & Rx_CH_AV)
1025
read_scc_data(priv);
1027
if (priv->param.dma >= 0) {
1028
/* Program DMA controller */
1029
flags = claim_dma_lock();
1030
set_dma_mode(priv->param.dma, DMA_MODE_READ);
1031
set_dma_addr(priv->param.dma,
1032
(int) priv->rx_buf[priv->rx_head]);
1033
set_dma_count(priv->param.dma, BUF_SIZE);
1034
release_dma_lock(flags);
1035
enable_dma(priv->param.dma);
1036
/* Configure PackeTwin DMA */
1037
if (priv->type == TYPE_TWIN) {
1038
outb((priv->param.dma ==
1039
1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
1040
priv->card_base + TWIN_DMA_CFG);
1042
/* Sp. cond. intr. only, ext int enable, RX DMA enable */
1043
write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
1044
WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
1046
/* Reset current frame */
1048
/* Intr. on all Rx characters and Sp. cond., ext int enable */
1049
write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
1052
write_scc(priv, R0, ERR_RES);
1053
write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
1057
static inline void rx_off(struct scc_priv *priv)
1059
/* Disable receiver */
1060
write_scc(priv, R3, Rx8);
1061
/* Disable DREQ / RX interrupt */
1062
if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
1063
outb(0, priv->card_base + TWIN_DMA_CFG);
1065
write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
1067
if (priv->param.dma >= 0)
1068
disable_dma(priv->param.dma);
1072
static void start_timer(struct scc_priv *priv, int t, int r15)
1074
outb(priv->tmr_mode, priv->tmr_ctrl);
1078
outb(t & 0xFF, priv->tmr_cnt);
1079
outb((t >> 8) & 0xFF, priv->tmr_cnt);
1080
if (priv->type != TYPE_TWIN) {
1081
write_scc(priv, R15, r15 | CTSIE);
1088
static inline unsigned char random(void)
1090
/* See "Numerical Recipes in C", second edition, p. 284 */
1091
rand = rand * 1664525L + 1013904223L;
1092
return (unsigned char) (rand >> 24);
1095
static inline void z8530_isr(struct scc_info *info)
1099
while ((is = read_scc(&info->priv[0], R3)) && i--) {
1101
rx_isr(&info->priv[0]);
1102
} else if (is & CHATxIP) {
1103
tx_isr(&info->priv[0]);
1104
} else if (is & CHAEXT) {
1105
es_isr(&info->priv[0]);
1106
} else if (is & CHBRxIP) {
1107
rx_isr(&info->priv[1]);
1108
} else if (is & CHBTxIP) {
1109
tx_isr(&info->priv[1]);
1111
es_isr(&info->priv[1]);
1113
write_scc(&info->priv[0], R0, RES_H_IUS);
1117
printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n",
1120
/* Ok, no interrupts pending from this 8530. The INT line should
1125
static irqreturn_t scc_isr(int irq, void *dev_id)
1127
struct scc_info *info = dev_id;
1129
spin_lock(info->priv[0].register_lock);
1130
/* At this point interrupts are enabled, and the interrupt under service
1131
is already acknowledged, but masked off.
1133
Interrupt processing: We loop until we know that the IRQ line is
1134
low. If another positive edge occurs afterwards during the ISR,
1135
another interrupt will be triggered by the interrupt controller
1136
as soon as the IRQ level is enabled again (see asm/irq.h).
1138
Bottom-half handlers will be processed after scc_isr(). This is
1139
important, since we only have small ringbuffers and want new data
1140
to be fetched/delivered immediately. */
1142
if (info->priv[0].type == TYPE_TWIN) {
1143
int is, card_base = info->priv[0].card_base;
1144
while ((is = ~inb(card_base + TWIN_INT_REG)) &
1146
if (is & TWIN_SCC_MSK) {
1148
} else if (is & TWIN_TMR1_MSK) {
1149
inb(card_base + TWIN_CLR_TMR1);
1150
tm_isr(&info->priv[0]);
1152
inb(card_base + TWIN_CLR_TMR2);
1153
tm_isr(&info->priv[1]);
1158
spin_unlock(info->priv[0].register_lock);
1163
static void rx_isr(struct scc_priv *priv)
1165
if (priv->param.dma >= 0) {
1166
/* Check special condition and perform error reset. See 2.4.7.5. */
1167
special_condition(priv, read_scc(priv, R1));
1168
write_scc(priv, R0, ERR_RES);
1170
/* Check special condition for each character. Error reset not necessary.
1171
Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
1173
while (read_scc(priv, R0) & Rx_CH_AV) {
1174
rc = read_scc(priv, R1);
1175
if (priv->rx_ptr < BUF_SIZE)
1176
priv->rx_buf[priv->rx_head][priv->
1178
read_scc_data(priv);
1181
read_scc_data(priv);
1183
special_condition(priv, rc);
1189
static void special_condition(struct scc_priv *priv, int rc)
1192
unsigned long flags;
1194
/* See Figure 2-15. Only overrun and EOF need to be checked. */
1197
/* Receiver overrun */
1199
if (priv->param.dma < 0)
1200
write_scc(priv, R0, ERR_RES);
1201
} else if (rc & END_FR) {
1202
/* End of frame. Get byte count */
1203
if (priv->param.dma >= 0) {
1204
flags = claim_dma_lock();
1205
cb = BUF_SIZE - get_dma_residue(priv->param.dma) -
1207
release_dma_lock(flags);
1209
cb = priv->rx_ptr - 2;
1211
if (priv->rx_over) {
1212
/* We had an overrun */
1213
priv->dev->stats.rx_errors++;
1214
if (priv->rx_over == 2)
1215
priv->dev->stats.rx_length_errors++;
1217
priv->dev->stats.rx_fifo_errors++;
1219
} else if (rc & CRC_ERR) {
1220
/* Count invalid CRC only if packet length >= minimum */
1222
priv->dev->stats.rx_errors++;
1223
priv->dev->stats.rx_crc_errors++;
1227
if (priv->rx_count < NUM_RX_BUF - 1) {
1228
/* Put good frame in FIFO */
1229
priv->rx_len[priv->rx_head] = cb;
1234
schedule_work(&priv->rx_work);
1236
priv->dev->stats.rx_errors++;
1237
priv->dev->stats.rx_over_errors++;
1241
/* Get ready for new frame */
1242
if (priv->param.dma >= 0) {
1243
flags = claim_dma_lock();
1244
set_dma_addr(priv->param.dma,
1245
(int) priv->rx_buf[priv->rx_head]);
1246
set_dma_count(priv->param.dma, BUF_SIZE);
1247
release_dma_lock(flags);
1255
static void rx_bh(struct work_struct *ugli_api)
1257
struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work);
1258
int i = priv->rx_tail;
1260
unsigned long flags;
1261
struct sk_buff *skb;
1262
unsigned char *data;
1264
spin_lock_irqsave(&priv->ring_lock, flags);
1265
while (priv->rx_count) {
1266
spin_unlock_irqrestore(&priv->ring_lock, flags);
1267
cb = priv->rx_len[i];
1268
/* Allocate buffer */
1269
skb = dev_alloc_skb(cb + 1);
1272
priv->dev->stats.rx_dropped++;
1275
data = skb_put(skb, cb + 1);
1277
memcpy(&data[1], priv->rx_buf[i], cb);
1278
skb->protocol = ax25_type_trans(skb, priv->dev);
1280
priv->dev->stats.rx_packets++;
1281
priv->dev->stats.rx_bytes += cb;
1283
spin_lock_irqsave(&priv->ring_lock, flags);
1285
priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
1288
spin_unlock_irqrestore(&priv->ring_lock, flags);
1292
static void tx_isr(struct scc_priv *priv)
1294
int i = priv->tx_tail, p = priv->tx_ptr;
1296
/* Suspend TX interrupts if we don't want to send anything.
1298
if (p == priv->tx_len[i]) {
1299
write_scc(priv, R0, RES_Tx_P);
1303
/* Write characters */
1304
while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
1305
write_scc_data(priv, priv->tx_buf[i][p++], 0);
1308
/* Reset EOM latch of Z8530 */
1309
if (!priv->tx_ptr && p && priv->chip == Z8530)
1310
write_scc(priv, R0, RES_EOM_L);
1316
static void es_isr(struct scc_priv *priv)
1318
int i, rr0, drr0, res;
1319
unsigned long flags;
1321
/* Read status, reset interrupt bit (open latches) */
1322
rr0 = read_scc(priv, R0);
1323
write_scc(priv, R0, RES_EXT_INT);
1324
drr0 = priv->rr0 ^ rr0;
1327
/* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
1328
it might have already been cleared again by AUTOEOM. */
1329
if (priv->state == TX_DATA) {
1330
/* Get remaining bytes */
1332
if (priv->param.dma >= 0) {
1333
disable_dma(priv->param.dma);
1334
flags = claim_dma_lock();
1335
res = get_dma_residue(priv->param.dma);
1336
release_dma_lock(flags);
1338
res = priv->tx_len[i] - priv->tx_ptr;
1341
/* Disable DREQ / TX interrupt */
1342
if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
1343
outb(0, priv->card_base + TWIN_DMA_CFG);
1345
write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
1347
/* Update packet statistics */
1348
priv->dev->stats.tx_errors++;
1349
priv->dev->stats.tx_fifo_errors++;
1350
/* Other underrun interrupts may already be waiting */
1351
write_scc(priv, R0, RES_EXT_INT);
1352
write_scc(priv, R0, RES_EXT_INT);
1354
/* Update packet statistics */
1355
priv->dev->stats.tx_packets++;
1356
priv->dev->stats.tx_bytes += priv->tx_len[i];
1357
/* Remove frame from FIFO */
1358
priv->tx_tail = (i + 1) % NUM_TX_BUF;
1360
/* Inform upper layers */
1361
netif_wake_queue(priv->dev);
1364
write_scc(priv, R15, 0);
1365
if (priv->tx_count &&
1366
(jiffies - priv->tx_start) < priv->param.txtimeout) {
1367
priv->state = TX_PAUSE;
1368
start_timer(priv, priv->param.txpause, 0);
1370
priv->state = TX_TAIL;
1371
start_timer(priv, priv->param.txtail, 0);
1375
/* DCD transition */
1378
switch (priv->state) {
1381
priv->state = DCD_ON;
1382
write_scc(priv, R15, 0);
1383
start_timer(priv, priv->param.dcdon, 0);
1386
switch (priv->state) {
1389
priv->state = DCD_OFF;
1390
write_scc(priv, R15, 0);
1391
start_timer(priv, priv->param.dcdoff, 0);
1396
/* CTS transition */
1397
if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
1403
static void tm_isr(struct scc_priv *priv)
1405
switch (priv->state) {
1409
priv->state = TX_DATA;
1412
write_scc(priv, R5, TxCRC_ENAB | Tx8);
1413
priv->state = RTS_OFF;
1414
if (priv->type != TYPE_TWIN)
1415
write_scc(priv, R15, 0);
1416
start_timer(priv, priv->param.rtsoff, 0);
1419
write_scc(priv, R15, DCDIE);
1420
priv->rr0 = read_scc(priv, R0);
1421
if (priv->rr0 & DCD) {
1422
priv->dev->stats.collisions++;
1424
priv->state = RX_ON;
1427
start_timer(priv, priv->param.waittime, DCDIE);
1431
if (priv->tx_count) {
1432
priv->state = TX_HEAD;
1433
priv->tx_start = jiffies;
1435
TxCRC_ENAB | RTS | TxENAB | Tx8);
1436
write_scc(priv, R15, 0);
1437
start_timer(priv, priv->param.txdelay, 0);
1440
if (priv->type != TYPE_TWIN)
1441
write_scc(priv, R15, DCDIE);
1446
write_scc(priv, R15, DCDIE);
1447
priv->rr0 = read_scc(priv, R0);
1448
if (priv->rr0 & DCD) {
1450
priv->state = RX_ON;
1454
random() / priv->param.persist *
1455
priv->param.slottime, DCDIE);