/*******************************************************************************
  This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
  DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
  developing this code.

  Copyright (C) 2007-2009 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/netdevice.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>

#include "stmmac.h"
#include "gmac.h"

#undef GMAC_DEBUG
/*#define GMAC_DEBUG*/
#undef FRAME_FILTER_DEBUG
/*#define FRAME_FILTER_DEBUG*/
#ifdef GMAC_DEBUG
#define DBG(fmt, args...)  printk(fmt, ## args)
#else
#define DBG(fmt, args...)  do { } while (0)
#endif
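
/*
 * Illustrative note (not in the original source): with GMAC_DEBUG left
 * undefined, every DBG() in this file compiles away to an empty statement,
 * so a call such as
 *
 *	DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n");
 *
 * costs nothing at runtime; uncommenting the define above turns the same
 * call into a plain printk().
 */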

static void gmac_dump_regs(unsigned long ioaddr)
{
	int i;

	pr_info("\t----------------------------------------------\n"
		"\t  GMAC registers (base addr = 0x%8x)\n"
		"\t----------------------------------------------\n",
		(unsigned int)ioaddr);

	for (i = 0; i < 55; i++) {
		int offset = i * 4;
		pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
			offset, readl(ioaddr + offset));
	}
	return;
}

static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
{
	u32 value = readl(ioaddr + DMA_BUS_MODE);

	/* DMA SW reset */
	value |= DMA_BUS_MODE_SFT_RESET;
	writel(value, ioaddr + DMA_BUS_MODE);
	do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));

	value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL |
	    ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
	     (pbl << DMA_BUS_MODE_RPBL_SHIFT));

#ifdef CONFIG_STMMAC_DA
	value |= DMA_BUS_MODE_DA;	/* Rx has priority over tx */
#endif
	writel(value, ioaddr + DMA_BUS_MODE);

	/* Mask interrupts by writing to CSR7 */
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);

	/* The base address of the RX/TX descriptor lists must be written into
	 * DMA CSR3 and CSR4, respectively. */
	writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
	writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);

	return 0;
}
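
/*
 * Usage sketch (illustrative, not from the original file): the core driver
 * is expected to call this once, at open time, with the bus addresses of
 * its coherent descriptor rings, e.g.
 *
 *	gmac_dma_init(ioaddr, 8, tx_ring_dma, rx_ring_dma);
 *
 * where pbl = 8 programs 8-beat DMA bursts into both PBL and RPBL, and
 * tx_ring_dma/rx_ring_dma are hypothetical handles obtained from
 * dma_alloc_coherent().
 */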

/* Transmit FIFO flush operation */
static void gmac_flush_tx_fifo(unsigned long ioaddr)
{
	u32 csr6 = readl(ioaddr + DMA_CONTROL);
	writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);

	do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
}

static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
				    int rxmode)
{
	u32 csr6 = readl(ioaddr + DMA_CONTROL);

	if (txmode == SF_DMA_MODE) {
		DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n");
		/* Transmit COE type 2 cannot be done in cut-through mode. */
		csr6 |= DMA_CONTROL_TSF;
		/* Operating on second frame increase the performance
		 * especially when transmit store-and-forward is used.*/
		csr6 |= DMA_CONTROL_OSF;
	} else {
		DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
			      " (threshold = %d)\n", txmode);
		csr6 &= ~DMA_CONTROL_TSF;
		csr6 &= DMA_CONTROL_TC_TX_MASK;
		/* Set the transmit threshold */
		if (txmode <= 32)
			csr6 |= DMA_CONTROL_TTC_32;
		else if (txmode <= 64)
			csr6 |= DMA_CONTROL_TTC_64;
		else if (txmode <= 128)
			csr6 |= DMA_CONTROL_TTC_128;
		else if (txmode <= 192)
			csr6 |= DMA_CONTROL_TTC_192;
		else
			csr6 |= DMA_CONTROL_TTC_256;
	}

	if (rxmode == SF_DMA_MODE) {
		DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n");
		csr6 |= DMA_CONTROL_RSF;
	} else {
		DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
			      " (threshold = %d)\n", rxmode);
		csr6 &= ~DMA_CONTROL_RSF;
		csr6 &= DMA_CONTROL_TC_RX_MASK;
		if (rxmode <= 32)
			csr6 |= DMA_CONTROL_RTC_32;
		else if (rxmode <= 64)
			csr6 |= DMA_CONTROL_RTC_64;
		else if (rxmode <= 96)
			csr6 |= DMA_CONTROL_RTC_96;
		else
			csr6 |= DMA_CONTROL_RTC_128;
	}

	writel(csr6, ioaddr + DMA_CONTROL);
	return;
}
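
/*
 * Illustrative calls (not part of the original driver):
 *
 *	gmac_dma_operation_mode(ioaddr, SF_DMA_MODE, SF_DMA_MODE);
 *
 * enables store-and-forward on both paths (required for TX COE type 2),
 * whereas
 *
 *	gmac_dma_operation_mode(ioaddr, 64, 32);
 *
 * keeps cut-through operation with the DMA_CONTROL_TTC_64 transmit and
 * DMA_CONTROL_RTC_32 receive thresholds.
 */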

/* Not yet implemented --- no RMON module */
static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
				   unsigned long ioaddr)
{
	return;
}

static void gmac_dump_dma_regs(unsigned long ioaddr)
{
	int i;

	pr_info(" DMA registers\n");
	for (i = 0; i < 22; i++) {
		if ((i < 9) || (i > 17)) {
			int offset = i * 4;
			pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i,
			       (DMA_BUS_MODE + offset),
			       readl(ioaddr + DMA_BUS_MODE + offset));
		}
	}
	return;
}

static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
				    struct dma_desc *p, unsigned long ioaddr)
{
	int ret = 0;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(p->des01.etx.error_summary)) {
		DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
		if (unlikely(p->des01.etx.jabber_timeout)) {
			DBG(KERN_ERR "\tjabber_timeout error\n");
			x->tx_jabber++;
		}

		if (unlikely(p->des01.etx.frame_flushed)) {
			DBG(KERN_ERR "\tframe_flushed error\n");
			x->tx_frame_flushed++;
			gmac_flush_tx_fifo(ioaddr);
		}

		if (unlikely(p->des01.etx.loss_carrier)) {
			DBG(KERN_ERR "\tloss_carrier error\n");
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(p->des01.etx.no_carrier)) {
			DBG(KERN_ERR "\tno_carrier error\n");
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(p->des01.etx.late_collision)) {
			DBG(KERN_ERR "\tlate_collision error\n");
			stats->collisions += p->des01.etx.collision_count;
		}
		if (unlikely(p->des01.etx.excessive_collisions)) {
			DBG(KERN_ERR "\texcessive_collisions\n");
			stats->collisions += p->des01.etx.collision_count;
		}
		if (unlikely(p->des01.etx.excessive_deferral)) {
			DBG(KERN_INFO "\texcessive tx_deferral\n");
			x->tx_deferred++;
		}

		if (unlikely(p->des01.etx.underflow_error)) {
			DBG(KERN_ERR "\tunderflow error\n");
			gmac_flush_tx_fifo(ioaddr);
			x->tx_underflow++;
		}

		if (unlikely(p->des01.etx.ip_header_error)) {
			DBG(KERN_ERR "\tTX IP header csum error\n");
			x->tx_ip_header_error++;
		}

		if (unlikely(p->des01.etx.payload_error)) {
			DBG(KERN_ERR "\tAddr/Payload csum error\n");
			x->tx_payload_error++;
			gmac_flush_tx_fifo(ioaddr);
		}

		ret = -1;
	}

	if (unlikely(p->des01.etx.deferred)) {
		DBG(KERN_INFO "GMAC TX status: tx deferred\n");
		x->tx_deferred++;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (p->des01.etx.vlan_frame) {
		DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
		x->tx_vlan++;
	}
#endif

	return ret;
}

static int gmac_get_tx_len(struct dma_desc *p)
{
	return p->des01.etx.buffer1_size;
}

static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
{
	int ret = good_frame;
	u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;

	/* bits 5 7 0 | Frame status
	 * ----------------------------------------------------------
	 * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
	 * 1 0 0 | IPv4/6 No CSUM errorS.
	 * 1 0 1 | IPv4/6 CSUM PAYLOAD error
	 * 1 1 0 | IPv4/6 CSUM IP HR error
	 * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
	 * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
	 * 0 1 1 | COE bypassed.. no IPv4/6 frame
	 * 0 1 0 | Reserved.
	 */
	if (status == 0x0) {
		DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
		ret = good_frame;
	} else if (status == 0x4) {
		DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
		ret = good_frame;
	} else if (status == 0x5) {
		DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
		ret = csum_none;
	} else if (status == 0x6) {
		DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
		ret = csum_none;
	} else if (status == 0x7) {
		DBG(KERN_ERR
		    "RX Des0 status: IPv4/6 Header and Payload Error.\n");
		ret = csum_none;
	} else if (status == 0x1) {
		DBG(KERN_ERR
		    "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
		ret = discard_frame;
	} else if (status == 0x3) {
		DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
		ret = discard_frame;
	}
	return ret;
}

static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
				    struct dma_desc *p)
{
	int ret = good_frame;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(p->des01.erx.error_summary)) {
		DBG(KERN_ERR "GMAC RX Error Summary... 0x%08x\n", p->des01.erx);
		if (unlikely(p->des01.erx.descriptor_error)) {
			DBG(KERN_ERR "\tdescriptor error\n");
			x->rx_desc++;
			stats->rx_length_errors++;
		}
		if (unlikely(p->des01.erx.overflow_error)) {
			DBG(KERN_ERR "\toverflow error\n");
			x->rx_gmac_overflow++;
		}

		if (unlikely(p->des01.erx.ipc_csum_error))
			DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");

		if (unlikely(p->des01.erx.late_collision)) {
			DBG(KERN_ERR "\tlate_collision error\n");
			stats->collisions++;
		}
		if (unlikely(p->des01.erx.receive_watchdog)) {
			DBG(KERN_ERR "\treceive_watchdog error\n");
			x->rx_watchdog++;
		}
		if (unlikely(p->des01.erx.error_gmii)) {
			DBG(KERN_ERR "\tReceive Error\n");
			x->rx_mii++;
		}
		if (unlikely(p->des01.erx.crc_error)) {
			DBG(KERN_ERR "\tCRC error\n");
			x->rx_crc++;
			stats->rx_crc_errors++;
		}
		ret = discard_frame;
	}

	/* After a payload csum error, the ES bit is set.
	 * It doesn't match with the information reported into the databook.
	 * At any rate, we need to understand if the CSUM hw computation is ok
	 * and report this info to the upper layers. */
	ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error,
		p->des01.erx.frame_type, p->des01.erx.payload_csum_error);

	if (unlikely(p->des01.erx.dribbling)) {
		DBG(KERN_ERR "GMAC RX: dribbling error\n");
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.sa_filter_fail)) {
		DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.da_filter_fail)) {
		DBG(KERN_ERR "GMAC RX : Destination Address filter fail\n");
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.length_error)) {
		DBG(KERN_ERR "GMAC RX: length_error error\n");
		x->rx_length++;
		ret = discard_frame;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (p->des01.erx.vlan_tag) {
		DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
		x->rx_vlan++;
	}
#endif
	return ret;
}

static void gmac_irq_status(unsigned long ioaddr)
{
	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);

	/* Not used events (e.g. MMC interrupts) are not handled. */
	if ((intr_status & mmc_tx_irq))
		DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
		    readl(ioaddr + GMAC_MMC_TX_INTR));
	if (unlikely(intr_status & mmc_rx_irq))
		DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
		    readl(ioaddr + GMAC_MMC_RX_INTR));
	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
		DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
		    readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
	if (unlikely(intr_status & pmt_irq)) {
		DBG(KERN_DEBUG "GMAC: received Magic frame\n");
		/* clear the PMT bits 5 and 6 by reading the PMT
		 * status register. */
		readl(ioaddr + GMAC_PMT);
	}

	return;
}

static void gmac_core_init(unsigned long ioaddr)
{
	u32 value = readl(ioaddr + GMAC_CONTROL);
	value |= GMAC_CORE_INIT;
	writel(value, ioaddr + GMAC_CONTROL);

	/* STBus Bridge Configuration */
	/*writel(0xc5608, ioaddr + 0x00007000);*/

	/* Freeze MMC counters */
	writel(0x8, ioaddr + GMAC_MMC_CTRL);
	/* Mask GMAC interrupts */
	writel(0x207, ioaddr + GMAC_INT_MASK);

#ifdef STMMAC_VLAN_TAG_USED
	/* Tag detection without filtering */
	writel(0x0, ioaddr + GMAC_VLAN_TAG);
#endif
	return;
}

static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
			       unsigned int reg_n)
{
	stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
			    GMAC_ADDR_LOW(reg_n));
}

static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
			       unsigned int reg_n)
{
	stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
			    GMAC_ADDR_LOW(reg_n));
}

static void gmac_set_filter(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	unsigned int value = 0;

	DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
	    __func__, dev->mc_count, dev->uc_count);

	if (dev->flags & IFF_PROMISC)
		value = GMAC_FRAME_FILTER_PR;
	else if ((dev->mc_count > HASH_TABLE_SIZE)
		   || (dev->flags & IFF_ALLMULTI)) {
		value = GMAC_FRAME_FILTER_PM;	/* pass all multi */
		writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
		writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
	} else if (dev->mc_count > 0) {
		int i;
		u32 mc_filter[2];
		struct dev_mc_list *mclist;

		/* Hash filter for multicast */
		value = GMAC_FRAME_FILTER_HMC;

		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list;
		     mclist && i < dev->mc_count; i++, mclist = mclist->next) {
			/* The upper 6 bits of the calculated CRC are used to
			   index the contents of the hash table */
			int bit_nr =
			    bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register. */
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
		writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
	}

	/* Handle multiple unicast addresses (perfect filtering)*/
	if (dev->uc_count > GMAC_MAX_UNICAST_ADDRESSES)
		/* Switch to promiscuous mode if more than 16 addrs
		   are required */
		value |= GMAC_FRAME_FILTER_PR;
	else {
		int i;
		struct dev_addr_list *uc_ptr = dev->uc_list;

		for (i = 0; i < dev->uc_count; i++) {
			gmac_set_umac_addr(ioaddr, uc_ptr->da_addr,
					   i + 1);

			DBG(KERN_INFO "\t%d "
			    "- Unicast addr %02x:%02x:%02x:%02x:%02x:"
			    "%02x\n", i + 1,
			    uc_ptr->da_addr[0], uc_ptr->da_addr[1],
			    uc_ptr->da_addr[2], uc_ptr->da_addr[3],
			    uc_ptr->da_addr[4], uc_ptr->da_addr[5]);
			uc_ptr = uc_ptr->next;
		}
	}

#ifdef FRAME_FILTER_DEBUG
	/* Enable Receive all mode (to debug filtering_fail errors) */
	value |= GMAC_FRAME_FILTER_RA;
#endif
	writel(value, ioaddr + GMAC_FRAME_FILTER);

	DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
	    "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
	    readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));

	return;
}
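
/*
 * Illustrative example (not in the original source): the hash index above
 * is the top 6 bits of the bit-reversed CRC-32 of the 6 address octets.
 * Assuming a computed bit_nr of 35 (0b100011), bit 5 selects the HIGH
 * register and the low 5 bits select the bit within it:
 *
 *	mc_filter[35 >> 5] |= 1 << (35 & 31);	// mc_filter[1] |= 1 << 3
 *
 * i.e. bit 3 of GMAC_HASH_HIGH is set for that address.
 */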

static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
			   unsigned int fc, unsigned int pause_time)
{
	unsigned int flow = 0;

	DBG(KERN_DEBUG "GMAC Flow-Control:\n");
	if (fc & FLOW_RX) {
		DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
		flow |= GMAC_FLOW_CTRL_RFE;
	}
	if (fc & FLOW_TX) {
		DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
		flow |= GMAC_FLOW_CTRL_TFE;
	}

	if (duplex) {
		DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
		flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
	}

	writel(flow, ioaddr + GMAC_FLOW_CTRL);
	return;
}
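
/*
 * Illustrative call (not from the original file): a full-duplex link with
 * flow control negotiated in both directions and the maximum pause time
 * would be programmed as
 *
 *	gmac_flow_ctrl(ioaddr, 1, FLOW_RX | FLOW_TX, 0xffff);
 *
 * setting RFE, TFE and the 16-bit pause quanta in a single register write.
 */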

static void gmac_pmt(unsigned long ioaddr, unsigned long mode)
{
	unsigned int pmt = 0;

	if (mode == WAKE_MAGIC) {
		DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
		pmt |= power_down | magic_pkt_en;
	} else if (mode == WAKE_UCAST) {
		DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
		pmt |= global_unicast;
	}

	writel(pmt, ioaddr + GMAC_PMT);
	return;
}
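
/*
 * Illustrative call (not in the original driver): Wake-on-LAN via magic
 * packet, typically reached from ethtool's WAKE_MAGIC flag, would be
 *
 *	gmac_pmt(ioaddr, WAKE_MAGIC);
 *
 * which sets power_down and magic_pkt_en; gmac_pmt(ioaddr, 0) writes the
 * register back to zero and disables wake-up again.
 */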

static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
			      int disable_rx_ic)
{
	int i;

	for (i = 0; i < ring_size; i++) {
		p->des01.erx.own = 1;
		p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
		/* To support jumbo frames */
		p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
		if (i == ring_size - 1)
			p->des01.erx.end_ring = 1;
		if (disable_rx_ic)
			p->des01.erx.disable_ic = 1;
		p++;
	}
	return;
}
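
/*
 * Usage sketch (illustrative): the core driver hands a freshly allocated
 * ring to the hardware in one pass, e.g.
 *
 *	gmac_init_rx_desc(rx_ring, rx_ring_size, 0);
 *
 * where rx_ring and rx_ring_size are hypothetical; passing a non-zero
 * disable_rx_ic instead suppresses the per-descriptor RX completion
 * interrupt on every descriptor.
 */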

static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
{
	int i;

	for (i = 0; i < ring_size; i++) {
		p->des01.etx.own = 0;
		if (i == ring_size - 1)
			p->des01.etx.end_ring = 1;
		p++;
	}
	return;
}

static int gmac_get_tx_owner(struct dma_desc *p)
{
	return p->des01.etx.own;
}

static int gmac_get_rx_owner(struct dma_desc *p)
{
	return p->des01.erx.own;
}

static void gmac_set_tx_owner(struct dma_desc *p)
{
	p->des01.etx.own = 1;
}

static void gmac_set_rx_owner(struct dma_desc *p)
{
	p->des01.erx.own = 1;
}

static int gmac_get_tx_ls(struct dma_desc *p)
{
	return p->des01.etx.last_segment;
}

static void gmac_release_tx_desc(struct dma_desc *p)
{
	int ter = p->des01.etx.end_ring;

	memset(p, 0, sizeof(struct dma_desc));
	/* preserve the ring-termination bit across the wipe */
	p->des01.etx.end_ring = ter;
}

static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				 int csum_flag)
{
	p->des01.etx.first_segment = is_fs;
	if (unlikely(len > BUF_SIZE_4KiB)) {
		p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
		p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
	} else {
		p->des01.etx.buffer1_size = len;
	}
	if (likely(csum_flag))
		p->des01.etx.checksum_insertion = cic_full;
}
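
/*
 * Worked example (illustrative): preparing the first segment of a 6 KiB
 * frame with hardware checksumming,
 *
 *	gmac_prepare_tx_desc(p, 1, 6144, 1);
 *
 * splits the length into buffer1_size = BUF_SIZE_4KiB and buffer2_size =
 * 6144 - BUF_SIZE_4KiB = 2048, and requests full checksum insertion
 * (cic_full) in the descriptor.
 */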

static void gmac_clear_tx_ic(struct dma_desc *p)
{
	p->des01.etx.interrupt = 0;
}

static void gmac_close_tx_desc(struct dma_desc *p)
{
	p->des01.etx.last_segment = 1;
	p->des01.etx.interrupt = 1;
}

static int gmac_get_rx_frame_len(struct dma_desc *p)
{
	return p->des01.erx.frame_length;
}

struct stmmac_ops gmac_driver = {
	.core_init = gmac_core_init,
	.dump_mac_regs = gmac_dump_regs,
	.dma_init = gmac_dma_init,
	.dump_dma_regs = gmac_dump_dma_regs,
	.dma_mode = gmac_dma_operation_mode,
	.dma_diagnostic_fr = gmac_dma_diagnostic_fr,
	.tx_status = gmac_get_tx_frame_status,
	.rx_status = gmac_get_rx_frame_status,
	.get_tx_len = gmac_get_tx_len,
	.set_filter = gmac_set_filter,
	.flow_ctrl = gmac_flow_ctrl,
	.pmt = gmac_pmt,
	.init_rx_desc = gmac_init_rx_desc,
	.init_tx_desc = gmac_init_tx_desc,
	.get_tx_owner = gmac_get_tx_owner,
	.get_rx_owner = gmac_get_rx_owner,
	.release_tx_desc = gmac_release_tx_desc,
	.prepare_tx_desc = gmac_prepare_tx_desc,
	.clear_tx_ic = gmac_clear_tx_ic,
	.close_tx_desc = gmac_close_tx_desc,
	.get_tx_ls = gmac_get_tx_ls,
	.set_tx_owner = gmac_set_tx_owner,
	.set_rx_owner = gmac_set_rx_owner,
	.get_rx_frame_len = gmac_get_rx_frame_len,
	.host_irq_status = gmac_irq_status,
	.set_umac_addr = gmac_set_umac_addr,
	.get_umac_addr = gmac_get_umac_addr,
};

struct mac_device_info *gmac_setup(unsigned long ioaddr)
{
	struct mac_device_info *mac;
	u32 uid = readl(ioaddr + GMAC_VERSION);

	pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n",
		((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));

	mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
	if (!mac)
		return NULL;

	mac->ops = &gmac_driver;
	mac->hw.pmt = PMT_SUPPORTED;
	mac->hw.link.port = GMAC_CONTROL_PS;
	mac->hw.link.duplex = GMAC_CONTROL_DM;
	mac->hw.link.speed = GMAC_CONTROL_FES;
	mac->hw.mii.addr = GMAC_MII_ADDR;
	mac->hw.mii.data = GMAC_MII_DATA;

	return mac;
}
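
/*
 * Usage sketch (illustrative, not from the original file): platform probe
 * code would typically do
 *
 *	struct mac_device_info *mac = gmac_setup(ioaddr);
 *	if (!mac)
 *		return -ENOMEM;
 *	mac->ops->core_init(ioaddr);
 *
 * and from then on drive the hardware exclusively through the gmac_driver
 * ops table filled in above.
 */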