1
Running: /home/sarnold/bin/audit-code.sh -c C ./
3
== subprocess_spawned() ==
4
./examples/performance-thread/common/lthread.c:158:static void _lthread_exec(void *arg)
5
./examples/ip_pipeline/config_parse.c:2691: status = system(buffer);
6
./app/test/process.h:94: if (execv("/proc/" self "/" exe, argv_cpy) < 0)
7
./app/test/test_kni.c:169: if (system(IFCONFIG TEST_KNI_PORT" up") == -1)
8
./app/test/test_kni.c:171: if (system(IFCONFIG TEST_KNI_PORT" mtu 1400") == -1)
9
./app/test/test_kni.c:173: if (system(IFCONFIG TEST_KNI_PORT" down") == -1)
10
./app/test/test_kni.c:341: if (system(IFCONFIG TEST_KNI_PORT " mtu" TEST_KNI_MTU_STR)
11
./app/test/test_kni.c:346: if (system(IFCONFIG TEST_KNI_PORT " mtu" TEST_KNI_MTU_STR)
12
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:1792: * active by the system (IFF_UP). At this point all resources needed
13
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:3214: * active by the system (IFF_UP). At this point all resources needed
14
./lib/librte_eal/common/eal_private.h:41: * Initialize the memzone subsystem (private to eal).
16
== memory_management() ==
17
./examples/quota_watermark/qw/main.c:88: mbuf = rte_pktmbuf_alloc(mbuf_pool);
18
./examples/cmdline/commands.c:197: o = malloc(sizeof(*o));
19
./examples/kni/main.c:463: rte_zmalloc("KNI_port_params",
20
./examples/kni/main.c:713: memcpy(&conf, &port_conf, sizeof(conf));
21
./examples/kni/main.c:766:kni_alloc(uint8_t port_id)
22
./examples/kni/main.c:811: kni = rte_kni_alloc(pktmbuf_pool, &conf, &ops);
23
./examples/kni/main.c:813: kni = rte_kni_alloc(pktmbuf_pool, &conf, NULL);
24
./examples/kni/main.c:901: kni_alloc(port);
25
./examples/l3fwd-acl/main.c:1051: acl_rules = calloc(acl_num, rule_size);
26
./examples/l3fwd-acl/main.c:1057: route_rules = calloc(route_num, rule_size);
27
./examples/l3fwd-acl/main.c:1186: memcpy(&acl_build_param.defs, ipv6 ? ipv6_defs : ipv4_defs,
28
./examples/bond/main.c:440: created_pkt = rte_pktmbuf_alloc(mbuf_pool);
29
./examples/tep_termination/vxlan.c:215: pneth = rte_memcpy(pneth, &app_l2_hdr[vport_id],
30
./examples/tep_termination/vxlan.c:219: ip = rte_memcpy(ip, &app_ip_hdr[vport_id],
31
./examples/tep_termination/main.c:856: ll_new = malloc(size * sizeof(struct virtio_net_data_ll));
32
./examples/tep_termination/main.c:883: malloc(sizeof(struct lcore_ll_info));
33
./examples/tep_termination/main.c:1014: vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
34
./examples/l2fwd-crypto/main.c:531: (uint8_t *)rte_malloc(NULL, 16, 8);
35
./examples/l2fwd-crypto/main.c:606: ol = rte_pktmbuf_offload_alloc(
36
./examples/multi_process/client_server_mp/mp_server/init.c:179: clients = rte_malloc("client details",
37
./examples/multi_process/client_server_mp/mp_server/main.c:309: cl_rx_buf = calloc(num_clients, sizeof(cl_rx_buf[0]));
38
./examples/multi_process/l2fwd_fork/flib.c:180: lcore_cfg = rte_zmalloc("LCORE_ID_MAP",
39
./examples/multi_process/l2fwd_fork/flib.c:300: if ((core_cfg = rte_zmalloc("core_cfg",
40
./examples/multi_process/l2fwd_fork/main.c:297: port_statistics = rte_zmalloc("port_stat",
41
./examples/multi_process/l2fwd_fork/main.c:306: mapping_id = rte_malloc("mapping_id", sizeof(unsigned) * RTE_MAX_LCORE,
42
./examples/multi_process/l2fwd_fork/main.c:524: memcpy(&lcore_resource[map_id], &lcore_resource[slaveid],
43
./examples/multi_process/l2fwd_fork/main.c:528: memcpy(&lcore_queue_conf[map_id], &lcore_queue_conf[slaveid],
44
./examples/vm_power_manager/channel_manager.c:264: memcpy(&sock_addr.sun_path, info->channel_path,
45
./examples/vm_power_manager/channel_manager.c:410: chan_info = rte_malloc(NULL, sizeof(*chan_info),
46
./examples/vm_power_manager/channel_manager.c:478: chan_info = rte_malloc(NULL, sizeof(*chan_info),
47
./examples/vm_power_manager/channel_manager.c:599: memcpy(info->channels[channel_num].channel_path,
48
./examples/vm_power_manager/channel_manager.c:610: memcpy(info->name, vm_info->name, sizeof(vm_info->name));
49
./examples/vm_power_manager/channel_manager.c:641: new_domain = rte_malloc("virtual_machine_info", sizeof(*new_domain),
50
./examples/vm_power_manager/channel_manager.c:747: global_vircpuinfo = rte_zmalloc(NULL, sizeof(*global_vircpuinfo) *
51
./examples/vm_power_manager/channel_manager.c:753: global_cpumaps = rte_zmalloc(NULL, CHANNEL_CMDS_MAX_CPUS * global_maplen,
52
./examples/vm_power_manager/channel_monitor.c:177: global_events_list = rte_malloc("epoll_events", sizeof(*global_events_list)
53
./examples/vhost/main.c:349: (void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
54
./examples/vhost/main.c:350: (void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_rx_conf, &conf,
55
./examples/vhost/main.c:1595: mbuf = __rte_mbuf_raw_alloc(vpool->pool);
56
./examples/vhost/main.c:1657: mbuf = __rte_mbuf_raw_alloc(vpool->pool);
57
./examples/vhost/main.c:1753: rte_memcpy((void *)(uintptr_t)buff_hdr_addr,
58
./examples/vhost/main.c:1842: rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
59
./examples/vhost/main.c:2272: ll_new = malloc(size * sizeof(struct virtio_net_data_ll));
60
./examples/vhost/main.c:2297: lcore_info[lcore].lcore_ll = malloc(sizeof(struct lcore_ll_info));
61
./examples/vhost/main.c:2603: vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
62
./examples/vhost/main.c:2623: vdev->regions_hpa = rte_calloc("vhost hpa region",
63
./examples/vhost/main.c:3061: = __rte_mbuf_raw_alloc(
64
./examples/vmdq/main.c:182: (void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
65
./examples/vmdq/main.c:183: (void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_rx_conf, &conf,
66
./examples/l3fwd-power/main.c:597: memcpy(key.ip_dst, ipv6_hdr->dst_addr, IPV6_ADDR_LEN);
67
./examples/l3fwd-power/main.c:598: memcpy(key.ip_src, ipv6_hdr->src_addr, IPV6_ADDR_LEN);
68
./examples/netmap_compat/lib/compat_netmap.c:165: rte_memcpy(NETMAP_BUF(r, r->slot[index].buf_idx), data, length);
69
./examples/netmap_compat/lib/compat_netmap.c:186: rte_memcpy(data, NETMAP_BUF(r, r->slot[index].buf_idx), length);
70
./examples/netmap_compat/lib/compat_netmap.c:561: tx_mbufs[i] = rte_pktmbuf_alloc(pool);
71
./examples/vhost_xen/vhost_monitor.c:141: guest = calloc(1, sizeof(struct xen_guest));
72
./examples/vhost_xen/vhost_monitor.c:257: new_ll_dev = calloc(1, sizeof(struct virtio_net_config_ll));
73
./examples/vhost_xen/vhost_monitor.c:258: virtqueue_rx = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), RTE_CACHE_LINE_SIZE);
74
./examples/vhost_xen/vhost_monitor.c:259: virtqueue_tx = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), RTE_CACHE_LINE_SIZE);
75
./examples/vhost_xen/vhost_monitor.c:282: memcpy(&new_ll_dev->dev.mac_address, &guest->vring[virtio_idx].addr, sizeof(struct ether_addr));
76
./examples/vhost_xen/vhost_monitor.c:285: new_ll_dev->dev.mem = malloc(sizeof(struct virtio_memory) + sizeof(struct virtio_memory_regions) * MAX_XENVIRT_MEMPOOL);
77
./examples/vhost_xen/xenstore_parse.c:239: gref_list = malloc(MAX_GREF_PER_NODE * sizeof(char *));
78
./examples/vhost_xen/xenstore_parse.c:250: gntnode = calloc(1, sizeof(struct xen_gntnode));
79
./examples/vhost_xen/xenstore_parse.c:251: gnt = calloc(gref_num, sizeof(struct xen_gnt));
80
./examples/vhost_xen/xenstore_parse.c:269: memcpy(gnt[i].gref_pfn, addr, pg_sz);
81
./examples/vhost_xen/xenstore_parse.c:340: pfn = calloc(total_pages, (size_t)sizeof(uint32_t));
82
./examples/vhost_xen/xenstore_parse.c:341: pindex = calloc(total_pages, (size_t)sizeof(uint64_t));
83
./examples/vhost_xen/main.c:249: (void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
84
./examples/vhost_xen/main.c:250: (void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_rx_conf, &conf,
85
./examples/vhost_xen/main.c:661: rte_memcpy((void *)(uintptr_t)buff_addr, userdata, rte_pktmbuf_data_len(buff));
86
./examples/vhost_xen/main.c:667: rte_memcpy((void *)(uintptr_t)buff_hdr_addr, (const void *)&virtio_hdr, vq->vhost_hlen);
87
./examples/vhost_xen/main.c:859: mbuf = rte_pktmbuf_alloc(mbuf_pool);
88
./examples/vhost_xen/main.c:867: rte_memcpy(rte_pktmbuf_mtod(mbuf, void*),
89
./examples/vhost_xen/main.c:878: rte_memcpy(rte_pktmbuf_mtod_offset(mbuf, void *, VLAN_ETH_HLEN),
90
./examples/vhost_xen/main.c:1175: ll_new = malloc(size * sizeof(struct virtio_net_data_ll));
91
./examples/vhost_xen/main.c:1200: lcore_info[lcore].lcore_ll = malloc(sizeof(struct lcore_ll_info));
92
./examples/ipv4_multicast/main.c:280: if (unlikely ((hdr = rte_pktmbuf_alloc(header_pool)) == NULL))
93
./examples/performance-thread/common/lthread.h:88:struct lthread_stack *_stack_alloc(void);
94
./examples/performance-thread/common/lthread_tls.c:238:void _lthread_tls_alloc(struct lthread *lt)
95
./examples/performance-thread/common/lthread_tls.c:242: tls = _lthread_objcache_alloc((THIS_SCHED)->tls_cache);
96
./examples/performance-thread/common/lthread_tls.c:252: _lthread_objcache_alloc((THIS_SCHED)->per_lthread_cache);
97
./examples/performance-thread/common/lthread.c:142:struct lthread_stack *_stack_alloc(void)
98
./examples/performance-thread/common/lthread.c:146: s = _lthread_objcache_alloc((THIS_SCHED)->stack_cache);
99
./examples/performance-thread/common/lthread.c:236: lt = _lthread_objcache_alloc((THIS_SCHED)->lthread_cache);
100
./examples/performance-thread/common/lthread_tls.h:54:void _lthread_tls_alloc(struct lthread *lt);
101
./examples/performance-thread/common/lthread_pool.h:259:_qnode_alloc(void)
102
./examples/performance-thread/common/lthread_sched.c:401: s = _stack_alloc();
103
./examples/performance-thread/common/lthread_sched.c:407: _lthread_tls_alloc(lt);
104
./examples/performance-thread/common/lthread_objcache.h:114:_lthread_objcache_alloc(struct lthread_objcache *c)
105
./examples/performance-thread/common/lthread_cond.c:104: c = _lthread_objcache_alloc((THIS_SCHED)->cond_cache);
106
./examples/performance-thread/common/lthread_mutex.c:74: m = _lthread_objcache_alloc((THIS_SCHED)->mutex_cache);
107
./examples/performance-thread/common/lthread_api.h:173: * @see _lthread_alloc()
108
./examples/performance-thread/common/lthread_api.h:174: * @see _cond_alloc()
109
./examples/performance-thread/common/lthread_api.h:175: * @see _mutex_alloc()
110
./examples/performance-thread/common/lthread_queue.h:131: stub = _qnode_alloc();
111
./examples/performance-thread/common/lthread_queue.h:189: struct qnode *n = _qnode_alloc();
112
./examples/performance-thread/common/lthread_queue.h:224: struct qnode *n = _qnode_alloc();
113
./examples/distributor/main.c:578: rte_malloc(NULL, sizeof(*p), 0);
114
./examples/dpdk_qat/crypto.c:340: pLocalInstanceHandles = rte_malloc("pLocalInstanceHandles",
115
./examples/dpdk_qat/crypto.c:692: memcpy(qaCoreConf[lcore_id].pPacketIV, &g_crypto_hash_keys.iv,
116
./examples/qos_sched/main.c:137: wt_confs[i]->m_table = rte_malloc("table_wt", sizeof(struct rte_mbuf *)
117
./examples/qos_sched/main.c:152: tx_confs[i]->m_table = rte_malloc("table_tx", sizeof(struct rte_mbuf *)
118
./examples/qos_sched/main.c:195: memcpy(&rx_stats[i], &stats, sizeof(stats));
119
./examples/qos_sched/main.c:202: memcpy(&tx_stats[i], &stats, sizeof(stats));
120
./examples/ip_pipeline/app.h:509: sprintf(name, prefix "%" PRIu32, id); \
121
./examples/ip_pipeline/app.h:862: sprintf(link_name, "LINK%" PRIu32, rxq_link_id);
122
./examples/ip_pipeline/app.h:879: sprintf(link_name, "LINK%" PRIu32, txq_link_id);
123
./examples/ip_pipeline/app.h:895: sprintf(link_name, "LINK%" PRIu32, link_id);
124
./examples/ip_pipeline/thread.c:112: memcpy(&t->regular[i],
125
./examples/ip_pipeline/thread.c:128: memcpy(&t->custom[i],
126
./examples/ip_pipeline/init.c:1473: memcpy(&app->cmds[app->n_cmds],
127
./examples/ip_pipeline/init.c:1512: memcpy(&app->pipeline_type[app->n_pipeline_types++],
128
./examples/ip_pipeline/config_check.c:110: sprintf(name, "RXQ%" PRIu32 ".%" PRIu32,
129
./examples/ip_pipeline/config_check.c:126: sprintf(name, "TXQ%" PRIu32 ".%" PRIu32,
130
./examples/ip_pipeline/cpu_core_map.c:91: map = (struct cpu_core_map *) malloc(map_mem_size);
131
./examples/ip_pipeline/thread_fe.c:91: req = app_msg_alloc(app);
132
./examples/ip_pipeline/thread_fe.c:150: req = app_msg_alloc(app);
133
./examples/ip_pipeline/thread_fe.c:338: memcpy(&app->cmds[app->n_cmds], thread_cmds,
134
./examples/ip_pipeline/config_parse.c:581: entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
135
./examples/ip_pipeline/config_parse.c:1133: entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
136
./examples/ip_pipeline/config_parse.c:1240: entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
137
./examples/ip_pipeline/config_parse.c:1295: entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
138
./examples/ip_pipeline/config_parse.c:1362: entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
139
./examples/ip_pipeline/config_parse.c:1424: entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
140
./examples/ip_pipeline/config_parse.c:1479: entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
141
./examples/ip_pipeline/config_parse.c:1615: entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
142
./examples/ip_pipeline/config_parse.c:1675: entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
143
./examples/ip_pipeline/config_parse.c:1733: entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
144
./examples/ip_pipeline/config_parse.c:1778: entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
145
./examples/ip_pipeline/config_parse.c:1823: entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
146
./examples/ip_pipeline/config_parse.c:1935: section_names = malloc(sect_count * sizeof(char *));
147
./examples/ip_pipeline/config_parse.c:1939: section_names[i] = malloc(CFG_NAME_LEN);
148
./examples/ip_pipeline/config_parse.c:2468: memcpy(app, &app_params_default, sizeof(struct app_params));
149
./examples/ip_pipeline/config_parse.c:2471: memcpy(&app->mempool_params[i],
150
./examples/ip_pipeline/config_parse.c:2476: memcpy(&app->link_params[i],
151
./examples/ip_pipeline/config_parse.c:2481: memcpy(&app->hwq_in_params[i],
152
./examples/ip_pipeline/config_parse.c:2486: memcpy(&app->hwq_out_params[i],
153
./examples/ip_pipeline/config_parse.c:2491: memcpy(&app->swq_params[i],
154
./examples/ip_pipeline/config_parse.c:2496: memcpy(&app->tm_params[i],
155
./examples/ip_pipeline/config_parse.c:2501: memcpy(&app->source_params[i],
156
./examples/ip_pipeline/config_parse.c:2506: memcpy(&app->sink_params[i],
157
./examples/ip_pipeline/config_parse.c:2511: memcpy(&app->msgq_params[i],
158
./examples/ip_pipeline/config_parse.c:2516: memcpy(&app->pipeline_params[i],
159
./examples/ip_pipeline/config_parse.c:2526: char *s = malloc(strlen(filename) + strlen(suffix) + 1);
160
./examples/ip_pipeline/config_parse.c:2531: sprintf(s, "%s%s", filename, suffix);
161
./examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c:170: memcpy(p0, p1, sizeof(*p0));
162
./examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c:524: p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
163
./examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c:529: strcpy(p->name, params->name);
164
./examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c:701: memcpy(p->handlers, handlers, sizeof(p->handlers));
165
./examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c:702: memcpy(p_fa->custom_handlers,
166
./examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c:957: memcpy(&rsp->stats,
167
./examples/ip_pipeline/pipeline/pipeline_firewall.c:197: p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
168
./examples/ip_pipeline/pipeline/pipeline_firewall.c:298: params->keys = rte_malloc(NULL,
169
./examples/ip_pipeline/pipeline/pipeline_firewall.c:306: params->priorities = rte_malloc(NULL,
170
./examples/ip_pipeline/pipeline/pipeline_firewall.c:314: params->port_ids = rte_malloc(NULL,
171
./examples/ip_pipeline/pipeline/pipeline_firewall.c:485: params->keys = rte_malloc(NULL,
172
./examples/ip_pipeline/pipeline/pipeline_firewall.c:660: rule = rte_malloc(NULL, sizeof(*rule), RTE_CACHE_LINE_SIZE);
173
./examples/ip_pipeline/pipeline/pipeline_firewall.c:667: req = app_msg_alloc(app);
174
./examples/ip_pipeline/pipeline/pipeline_firewall.c:676: memcpy(&req->key, key, sizeof(*key));
175
./examples/ip_pipeline/pipeline/pipeline_firewall.c:699: memcpy(&rule->key, key, sizeof(*key));
176
./examples/ip_pipeline/pipeline/pipeline_firewall.c:747: req = app_msg_alloc(app);
177
./examples/ip_pipeline/pipeline/pipeline_firewall.c:753: memcpy(&req->key, key, sizeof(*key));
178
./examples/ip_pipeline/pipeline/pipeline_firewall.c:806: rules = rte_malloc(NULL,
179
./examples/ip_pipeline/pipeline/pipeline_firewall.c:812: new_rules = rte_malloc(NULL,
180
./examples/ip_pipeline/pipeline/pipeline_firewall.c:837: rules[i] = rte_malloc(NULL, sizeof(rules[i]),
181
./examples/ip_pipeline/pipeline/pipeline_firewall.c:854: keys_found = rte_malloc(NULL,
182
./examples/ip_pipeline/pipeline/pipeline_firewall.c:869: entries_ptr = rte_malloc(NULL,
183
./examples/ip_pipeline/pipeline/pipeline_firewall.c:885: entries_ptr[i] = rte_malloc(NULL,
184
./examples/ip_pipeline/pipeline/pipeline_firewall.c:908: req = app_msg_alloc(app);
185
./examples/ip_pipeline/pipeline/pipeline_firewall.c:984: memcpy(&rules[i]->key, &keys[i], sizeof(keys[i]));
186
./examples/ip_pipeline/pipeline/pipeline_firewall.c:1032: rules = rte_malloc(NULL,
187
./examples/ip_pipeline/pipeline/pipeline_firewall.c:1046: keys_found = rte_malloc(NULL,
188
./examples/ip_pipeline/pipeline/pipeline_firewall.c:1055: req = app_msg_alloc(app);
189
./examples/ip_pipeline/pipeline/pipeline_firewall.c:1124: req = app_msg_alloc(app);
190
./examples/ip_pipeline/pipeline/pipeline_firewall.c:1172: req = app_msg_alloc(app);
191
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:97: p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
192
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:112: p->flows = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
193
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:226: req = app_msg_alloc(app);
194
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:237: memcpy(&req->params, params, sizeof(*params));
195
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:256: memcpy(&flow->params.m[i], ¶ms->m[i], sizeof(params->m[i]));
196
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:263: memcpy(&flow->params.p[i], ¶ms->p[i], sizeof(params->p[i]));
197
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:323: req_entry_ptr = (void **) rte_malloc(NULL,
198
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:329: req_flow_id = (uint32_t *) rte_malloc(NULL,
199
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:345: req = app_msg_alloc(app);
200
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:385: memcpy(&flow->params.m[j],
201
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:395: memcpy(&flow->params.p[j],
202
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:442: req = app_msg_alloc(app);
203
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:504: req = app_msg_alloc(app);
204
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:525: memcpy(stats, &rsp->stats, sizeof(*stats));
205
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:843: flow_id = (uint32_t *) rte_malloc(NULL,
206
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:851: flow_params = (struct pipeline_fa_flow_params *) rte_malloc(NULL,
207
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:870: memcpy(&flow_params[pos],
208
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:1143: flow_id = (uint32_t *) rte_malloc(NULL,
209
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:1151: flow_params = (struct pipeline_fa_flow_params *) rte_malloc(NULL,
210
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:1181: memcpy(&flow_params[pos], &flow_template,
211
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:1390: flow_id = (uint32_t *) rte_malloc(NULL,
212
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:1398: flow_params = (struct pipeline_fa_flow_params *) rte_malloc(NULL,
213
./examples/ip_pipeline/pipeline/pipeline_master_be.c:67: p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
214
./examples/ip_pipeline/pipeline/pipeline_routing.c:97: p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
215
./examples/ip_pipeline/pipeline/pipeline_routing.c:339: entry = rte_malloc(NULL, sizeof(*entry), RTE_CACHE_LINE_SIZE);
216
./examples/ip_pipeline/pipeline/pipeline_routing.c:346: req = app_msg_alloc(app);
217
./examples/ip_pipeline/pipeline/pipeline_routing.c:355: memcpy(&req->key, key, sizeof(*key));
218
./examples/ip_pipeline/pipeline/pipeline_routing.c:356: memcpy(&req->data, data, sizeof(*data));
219
./examples/ip_pipeline/pipeline/pipeline_routing.c:376: memcpy(&entry->key, key, sizeof(*key));
220
./examples/ip_pipeline/pipeline/pipeline_routing.c:377: memcpy(&entry->data, data, sizeof(*data));
221
./examples/ip_pipeline/pipeline/pipeline_routing.c:439: req = app_msg_alloc(app);
222
./examples/ip_pipeline/pipeline/pipeline_routing.c:445: memcpy(&req->key, key, sizeof(*key));
223
./examples/ip_pipeline/pipeline/pipeline_routing.c:490: req = app_msg_alloc(app);
224
./examples/ip_pipeline/pipeline/pipeline_routing.c:539: req = app_msg_alloc(app);
225
./examples/ip_pipeline/pipeline/pipeline_routing.c:632: entry = rte_malloc(NULL, sizeof(*entry), RTE_CACHE_LINE_SIZE);
226
./examples/ip_pipeline/pipeline/pipeline_routing.c:639: req = app_msg_alloc(app);
227
./examples/ip_pipeline/pipeline/pipeline_routing.c:648: memcpy(&req->key, key, sizeof(*key));
228
./examples/ip_pipeline/pipeline/pipeline_routing.c:670: memcpy(&entry->key, key, sizeof(*key));
229
./examples/ip_pipeline/pipeline/pipeline_routing.c:729: req = app_msg_alloc(app);
230
./examples/ip_pipeline/pipeline/pipeline_routing.c:735: memcpy(&req->key, key, sizeof(*key));
231
./examples/ip_pipeline/pipeline/pipeline_routing.c:780: req = app_msg_alloc(app);
232
./examples/ip_pipeline/pipeline/pipeline_routing.c:829: req = app_msg_alloc(app);
233
./examples/ip_pipeline/pipeline/pipeline_common_fe.c:70: req = app_msg_alloc(app);
234
./examples/ip_pipeline/pipeline/pipeline_common_fe.c:113: req = app_msg_alloc(app);
235
./examples/ip_pipeline/pipeline/pipeline_common_fe.c:130: memcpy(stats, &rsp->stats, sizeof(rsp->stats));
236
./examples/ip_pipeline/pipeline/pipeline_common_fe.c:161: req = app_msg_alloc(app);
237
./examples/ip_pipeline/pipeline/pipeline_common_fe.c:177: memcpy(stats, &rsp->stats, sizeof(rsp->stats));
238
./examples/ip_pipeline/pipeline/pipeline_common_fe.c:206: req = app_msg_alloc(app);
239
./examples/ip_pipeline/pipeline/pipeline_common_fe.c:222: memcpy(stats, &rsp->stats, sizeof(rsp->stats));
240
./examples/ip_pipeline/pipeline/pipeline_common_fe.c:250: req = app_msg_alloc(app);
241
./examples/ip_pipeline/pipeline/pipeline_common_fe.c:292: req = app_msg_alloc(app);
242
./examples/ip_pipeline/pipeline/pipeline_common_fe.c:1293: memcpy(&app->cmds[app->n_cmds],
243
./examples/ip_pipeline/pipeline/pipeline_common_fe.h:113:app_msg_alloc(__rte_unused struct app_params *app)
244
./examples/ip_pipeline/pipeline/pipeline_common_fe.h:115: return rte_malloc(NULL, 2048, RTE_CACHE_LINE_SIZE);
245
./examples/ip_pipeline/pipeline/pipeline_passthrough_be.c:391: p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
246
./examples/ip_pipeline/pipeline/pipeline_passthrough_be.c:396: strcpy(p->name, params->name);
247
./examples/ip_pipeline/pipeline/pipeline_passthrough_be.c:554: memcpy(p->handlers, handlers, sizeof(p->handlers));
248
./examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c:321: p->key_mask = rte_malloc(NULL, p->key_size, 0);
249
./examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c:357: p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
250
./examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c:362: strcpy(p->name, params->name);
251
./examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c:562: memcpy(p->handlers, handlers, sizeof(p->handlers));
252
./examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c:563: memcpy(p_fc->custom_handlers,
253
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:133: memcpy(&ipv6->ip_src, &key_in->key.ipv6_5tuple.ip_src, 16);
254
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:134: memcpy(&ipv6->ip_dst, &key_in->key.ipv6_5tuple.ip_dst, 16);
255
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:214: p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
256
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:315: flow = rte_malloc(NULL, sizeof(*flow), RTE_CACHE_LINE_SIZE);
257
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:322: req = app_msg_alloc(app);
258
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:352: memcpy(&flow->key, key, sizeof(flow->key));
259
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:414: flow = rte_malloc(NULL,
260
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:420: signature = rte_malloc(NULL,
261
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:428: new_flow = rte_malloc(
262
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:438: flow_req = rte_malloc(NULL,
263
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:448: flow_rsp = rte_malloc(NULL,
264
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:464: flow[i] = rte_zmalloc(NULL,
265
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:486: req = app_msg_alloc(app);
266
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:543: memcpy(&flow[i]->key, &key[i], sizeof(flow[i]->key));
267
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:604: req = app_msg_alloc(app);
268
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:657: req = app_msg_alloc(app);
269
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:706: req = app_msg_alloc(app);
270
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:1046: key = rte_zmalloc(NULL,
271
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:1054: port_id = rte_malloc(NULL,
272
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:1063: flow_id = rte_malloc(NULL,
273
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:1326: key = rte_zmalloc(NULL,
274
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:1334: port_id = rte_malloc(NULL,
275
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:1343: flow_id = rte_malloc(NULL,
276
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:1475: memcpy(key.key.ipv6_5tuple.ip_src,
277
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:1478: memcpy(key.key.ipv6_5tuple.ip_dst,
278
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:1611: key = rte_zmalloc(NULL,
279
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:1619: port_id = rte_malloc(NULL,
280
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:1628: flow_id = rte_malloc(NULL,
281
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:1931: memcpy(key.key.ipv6_5tuple.ip_src,
282
./examples/ip_pipeline/pipeline/pipeline_flow_classification.c:1934: memcpy(key.key.ipv6_5tuple.ip_dst,
283
./examples/ip_pipeline/pipeline/pipeline_routing_be.c:1133: p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
284
./examples/ip_pipeline/pipeline/pipeline_routing_be.c:1138: strcpy(p->name, params->name);
285
./examples/ip_pipeline/pipeline/pipeline_routing_be.c:1321: memcpy(p->handlers, handlers, sizeof(p->handlers));
286
./examples/ip_pipeline/pipeline/pipeline_routing_be.c:1322: memcpy(p_rt->custom_handlers,
287
./examples/ip_pipeline/pipeline/pipeline_firewall_be.c:380: p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
288
./examples/ip_pipeline/pipeline/pipeline_firewall_be.c:385: strcpy(p->name, params->name);
289
./examples/ip_pipeline/pipeline/pipeline_firewall_be.c:479: memcpy(table_acl_params.field_format,
290
./examples/ip_pipeline/pipeline/pipeline_firewall_be.c:534: memcpy(p->handlers, handlers, sizeof(p->handlers));
291
./examples/ip_pipeline/pipeline/pipeline_firewall_be.c:535: memcpy(p_fw->custom_handlers,
292
./examples/ip_pipeline/pipeline/pipeline_firewall_be.c:725: entries[i] = rte_malloc(NULL,
293
./examples/ip_pipeline/pipeline/pipeline_firewall_be.c:733: params[i] = rte_malloc(NULL,
294
./examples/ip_pipeline/pipeline/pipeline_firewall_be.c:807: params[i] = rte_malloc(NULL,
295
./examples/ethtool/ethtool-app/ethapp.c:239: buf_data = malloc(len_regs);
296
./examples/ptpclient/ptpclient.c:357: rte_memcpy(&ptp_data->master_clock_id,
297
./examples/ptpclient/ptpclient.c:411: created_pkt = rte_pktmbuf_alloc(mbuf_pool);
298
./examples/ptpclient/ptpclient.c:446: rte_memcpy(&ptp_data->client_clock_id,
299
./examples/vmdq_dcb/main.c:149: (void)(rte_memcpy(eth_conf, &vmdq_dcb_conf_default, sizeof(*eth_conf)));
300
./examples/vmdq_dcb/main.c:150: (void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_dcb_conf, &conf,
301
./examples/exception_path/main.c:274: struct rte_mbuf *m = rte_pktmbuf_alloc(pktmbuf_pool);
302
./drivers/crypto/qat/qat_crypto.c:429: rte_memcpy(cipher_param->u.cipher_IV_array,
303
./drivers/crypto/qat/qat_adf/qat_algs_build_desc.c:145: rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
304
./drivers/crypto/qat/qat_adf/qat_algs_build_desc.c:156: rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
305
./drivers/crypto/qat/qat_adf/qat_algs_build_desc.c:167: rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
306
./drivers/crypto/qat/qat_adf/qat_algs_build_desc.c:251: in = rte_zmalloc("working mem for key",
307
./drivers/crypto/qat/qat_adf/qat_algs_build_desc.c:253: rte_memcpy(in, qat_aes_xcbc_key_seed,
308
./drivers/crypto/qat/qat_adf/qat_algs_build_desc.c:281: in = rte_zmalloc("working mem for key",
309
./drivers/crypto/qat/qat_adf/qat_algs_build_desc.c:307: rte_memcpy(ipad, auth_key, auth_keylen);
310
./drivers/crypto/qat/qat_adf/qat_algs_build_desc.c:308: rte_memcpy(opad, auth_key, auth_keylen);
311
./drivers/crypto/qat/qat_adf/qat_algs_build_desc.c:406: memcpy(cipher->aes.key, cipherkey, cipherkeylen);
312
./drivers/crypto/qat/qat_adf/qat_algs_build_desc.c:544: rte_memcpy(cd->aes.key, key, keylen);
313
./drivers/crypto/qat/qat_qp.c:171: qp = rte_zmalloc("qat PMD qp metadata",
314
./drivers/net/ixgbe/ixgbe_fdir.c:84: rte_memcpy(ipv6_addr, (ipaddr), sizeof(ipv6_addr));\
315
./drivers/net/ixgbe/ixgbe_fdir.c:105: rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
316
./drivers/net/ixgbe/ixgbe_fdir.c:692: rte_memcpy(input->formatted.src_ip,
317
./drivers/net/ixgbe/ixgbe_fdir.c:695: rte_memcpy(input->formatted.dst_ip,
318
./drivers/net/ixgbe/ixgbe_fdir.c:704: rte_memcpy(
319
./drivers/net/ixgbe/ixgbe_fdir.c:709: rte_memcpy(
320
./drivers/net/ixgbe/ixgbe_rxtx.c:91:rte_rxmbuf_alloc(struct rte_mempool *mp)
321
./drivers/net/ixgbe/ixgbe_rxtx.c:95: m = __rte_mbuf_raw_alloc(mp);
322
./drivers/net/ixgbe/ixgbe_rxtx.c:1251:ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
323
./drivers/net/ixgbe/ixgbe_rxtx.c:1351: nmb = rte_rxmbuf_alloc(rxq->mb_pool);
324
./drivers/net/ixgbe/ixgbe_rxtx.c:1620: nmb = rte_rxmbuf_alloc(rxq->mb_pool);
325
./drivers/net/ixgbe/ixgbe_rxtx.c:1818:ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
326
./drivers/net/ixgbe/ixgbe_rxtx.c:1825:ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
327
./drivers/net/ixgbe/ixgbe_rxtx.c:3583: struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
328
./drivers/net/ixgbe/base/ixgbe_vf.c:231: memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
329
./drivers/net/ixgbe/base/ixgbe_vf.c:353: memcpy(msg_addr, addr, 6);
330
./drivers/net/ixgbe/base/ixgbe_vf.c:501: memcpy(msg_addr, addr, 6);
331
./drivers/net/ixgbe/base/ixgbe_common.c:787: memcpy(pba->pba_block,
332
./drivers/net/ixgbe/base/ixgbe_common.c:845: memcpy(&eeprom_buf[pba->word[1]],
333
./drivers/net/ixgbe/ixgbe_ethdev.c:1102: eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
334
./drivers/net/ixgbe/ixgbe_ethdev.c:1116: eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
335
./drivers/net/ixgbe/ixgbe_ethdev.c:1251: memcpy(&mac_addr->addr_bytes[3], &random, 3);
336
./drivers/net/ixgbe/ixgbe_ethdev.c:1336: eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
337
./drivers/net/ixgbe/ixgbe_ethdev.c:2023: rte_zmalloc("intr_vec",
338
./drivers/net/ixgbe/ixgbe_ethdev.c:3861: rte_zmalloc("intr_vec",
339
./drivers/net/ixgbe/ixgbe_ethdev.c:5280: filter = rte_zmalloc("ixgbe_5tuple_filter",
340
./drivers/net/ixgbe/ixgbe_ethdev.c:5284: (void)rte_memcpy(&filter->filter_info,
341
./drivers/net/ixgbe/ixgbe_pf.c:77: memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
342
./drivers/net/ixgbe/ixgbe_pf.c:114: *vfinfo = rte_zmalloc("vf_info", sizeof(struct ixgbe_vf_info) * vf_num, 0);
343
./drivers/net/ixgbe/ixgbe_pf.c:428: rte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN);
344
./drivers/net/ixgbe/ixgbe_pf.c:449: rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
345
./drivers/net/ixgbe/ixgbe_ethdev.h:382:uint16_t ixgbe_recv_pkts_lro_single_alloc(void *rx_queue,
346
./drivers/net/ixgbe/ixgbe_ethdev.h:384:uint16_t ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue,
347
./drivers/net/ixgbe/ixgbe_rxtx_vec.c:488: memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
348
./drivers/net/szedata2/rte_eth_szedata2.c:160: mbuf = rte_pktmbuf_alloc(sze_q->mb_pool);
349
./drivers/net/szedata2/rte_eth_szedata2.c:205: rte_memcpy(sze->ct_rx_buffer, sze->ct_rx_cur_ptr,
350
./drivers/net/szedata2/rte_eth_szedata2.c:211: rte_memcpy(sze->ct_rx_buffer + sze->ct_rx_rem_bytes,
351
./drivers/net/szedata2/rte_eth_szedata2.c:328: rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
352
./drivers/net/szedata2/rte_eth_szedata2.c:331: rte_memcpy((void *)(rte_pktmbuf_mtod(mbuf,
353
./drivers/net/szedata2/rte_eth_szedata2.c:447: rte_memcpy(sze->ct_rx_buffer, sze->ct_rx_cur_ptr,
354
./drivers/net/szedata2/rte_eth_szedata2.c:453: rte_memcpy(sze->ct_rx_buffer + sze->ct_rx_rem_bytes,
355
./drivers/net/szedata2/rte_eth_szedata2.c:560: mbuf = rte_pktmbuf_alloc(sze_q->mb_pool);
356
./drivers/net/szedata2/rte_eth_szedata2.c:580: rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
357
./drivers/net/szedata2/rte_eth_szedata2.c:583: rte_memcpy((void *)
358
./drivers/net/szedata2/rte_eth_szedata2.c:598: rte_memcpy(rte_pktmbuf_append(mbuf, len), packet_ptr1,
359
./drivers/net/szedata2/rte_eth_szedata2.c:605: m->next = rte_pktmbuf_alloc(sze_q->mb_pool);
360
./drivers/net/szedata2/rte_eth_szedata2.c:626: rte_memcpy(rte_pktmbuf_append(mbuf, len),
361
./drivers/net/szedata2/rte_eth_szedata2.c:638: rte_memcpy(rte_pktmbuf_append(mbuf, len),
362
./drivers/net/szedata2/rte_eth_szedata2.c:645: m->next = rte_pktmbuf_alloc(
363
./drivers/net/szedata2/rte_eth_szedata2.c:669: rte_memcpy(
364
./drivers/net/szedata2/rte_eth_szedata2.c:764: rte_memcpy(tmp_dst,
365
./drivers/net/szedata2/rte_eth_szedata2.c:771: rte_memcpy(tmp_dst,
366
./drivers/net/szedata2/rte_eth_szedata2.c:825: rte_memcpy(tmp_dst,
367
./drivers/net/szedata2/rte_eth_szedata2.c:833: rte_memcpy(dst,
368
./drivers/net/szedata2/rte_eth_szedata2.c:849: rte_memcpy(tmp_dst,
369
./drivers/net/szedata2/rte_eth_szedata2.c:875: rte_memcpy(tmp_dst, (const void *)
370
./drivers/net/mpipe/mpipe_tilegx.c:239: memcpy(&context->channels[channel], config, sizeof(*config));
371
./drivers/net/mpipe/mpipe_tilegx.c:503: mbuf = __rte_mbuf_raw_alloc(priv->rx_mpool);
372
./drivers/net/mpipe/mpipe_tilegx.c:625: stack_mem = rte_zmalloc(NULL, stack_size, 65536);
373
./drivers/net/mpipe/mpipe_tilegx.c:674: priv->tx_comps = rte_zmalloc(NULL, ring_size, RTE_CACHE_LINE_SIZE);
374
./drivers/net/mpipe/mpipe_tilegx.c:683: ring_mem = rte_zmalloc(NULL, ring_size, ring_size);
375
./drivers/net/mpipe/mpipe_tilegx.c:791: ring_mem = rte_malloc(NULL, ring_size, ring_size);
376
./drivers/net/mpipe/mpipe_tilegx.c:1446: mbuf = __rte_mbuf_raw_alloc(priv->rx_mpool);
377
./drivers/net/mpipe/mpipe_tilegx.c:1559: priv = rte_zmalloc(NULL, sizeof(*priv), 0);
378
./drivers/net/mlx4/mlx4.c:884: tx_queues = rte_zmalloc("secondary ethdev->tx_queues",
379
./drivers/net/mlx4/mlx4.c:887: rx_queues = rte_zmalloc("secondary ethdev->rx_queues",
380
./drivers/net/mlx4/mlx4.c:942: memcpy(sd->data.rx_queue_state, sd->shared_dev_data->rx_queue_state,
381
./drivers/net/mlx4/mlx4.c:944: memcpy(sd->data.tx_queue_state, sd->shared_dev_data->tx_queue_state,
382
./drivers/net/mlx4/mlx4.c:1370: memcpy(&(*linear)[offset],
383
./drivers/net/mlx4/mlx4.c:2016: * with rte_pktmbuf_alloc().
384
./drivers/net/mlx4/mlx4.c:2060: buf = rte_pktmbuf_alloc(rxq->mp);
385
./drivers/net/mlx4/mlx4.c:2068: /* Headroom is reserved by rte_pktmbuf_alloc(). */
386
./drivers/net/mlx4/mlx4.c:2163: * with rte_pktmbuf_alloc().
387
./drivers/net/mlx4/mlx4.c:2194: buf = rte_pktmbuf_alloc(rxq->mp);
388
./drivers/net/mlx4/mlx4.c:2211: /* Headroom is reserved by rte_pktmbuf_alloc(). */
389
./drivers/net/mlx4/mlx4.c:3018: rep = __rte_mbuf_raw_alloc(rxq->mp);
390
./drivers/net/mlx4/mlx4.c:3217: rep = __rte_mbuf_raw_alloc(rxq->mp);
391
./drivers/net/mlx4/mlx4.c:3511: memcpy(rxq->mac_configured, tmpl.mac_configured,
392
./drivers/net/mlx4/mlx4.c:3513: memcpy(rxq->mac_flow, tmpl.mac_flow, sizeof(rxq->mac_flow));
393
./drivers/net/mlx4/mlx4.c:3558: memcpy(rxq->mac_configured, tmpl.mac_configured,
394
./drivers/net/mlx4/mlx4.c:3560: memcpy(rxq->mac_flow, tmpl.mac_flow, sizeof(rxq->mac_flow));
395
./drivers/net/mlx4/mlx4.c:3563: pool = rte_malloc(__func__, (mbuf_n * sizeof(*pool)), 0);
396
./drivers/net/mlx4/mlx4.c:3701: buf = rte_pktmbuf_alloc(mp);
397
./drivers/net/mlx4/mlx4.c:5080: memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
398
./drivers/net/mlx4/mlx4.c:5431: priv = rte_zmalloc("ethdev private structure",
399
./drivers/net/mlx4/mlx4.c:5583: memcpy(sd->data.name, sd->shared_dev_data->name,
400
./drivers/net/i40e/i40e_pf.c:298: vf_res = rte_zmalloc("i40e_vf_res", len, 0);
401
./drivers/net/i40e/i40e_pf.c:680: (void)rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
402
./drivers/net/i40e/i40e_pf.c:1028: pf->vfs = rte_zmalloc("i40e_pf_vf",sizeof(*pf->vfs) * pf->vf_num, 0);
403
./drivers/net/i40e/i40e_fdir.c:751: rte_memcpy(&(ip6->src_addr),
404
./drivers/net/i40e/i40e_fdir.c:754: rte_memcpy(&(ip6->dst_addr),
405
./drivers/net/i40e/i40e_fdir.c:912: (void)rte_memcpy(ptr,
406
./drivers/net/i40e/base/i40e_hmc.c:99: i40e_memcpy(&sd_entry->u.pd_table.pd_page_addr,
407
./drivers/net/i40e/base/i40e_hmc.c:103: i40e_memcpy(&sd_entry->u.bp.addr,
408
./drivers/net/i40e/base/i40e_hmc.c:185: i40e_memcpy(&pd_entry->bp.addr, page,
409
./drivers/net/i40e/base/i40e_hmc.c:196: i40e_memcpy(pd_addr, &page_desc, sizeof(u64),
410
./drivers/net/i40e/base/i40e_nvm.c:1280: memcpy(buff, &bytes[aq_desc_len], aq_data_len);
411
./drivers/net/i40e/base/i40e_nvm.c:1347: memcpy(bytes, buff, len);
412
./drivers/net/i40e/base/i40e_nvm.c:1361: memcpy(bytes, buff, remainder);
413
./drivers/net/i40e/base/i40e_lan_hmc.c:784: i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
414
./drivers/net/i40e/base/i40e_lan_hmc.c:790: i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
415
./drivers/net/i40e/base/i40e_lan_hmc.c:828: i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);
416
./drivers/net/i40e/base/i40e_lan_hmc.c:834: i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
417
./drivers/net/i40e/base/i40e_lan_hmc.c:880: i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);
418
./drivers/net/i40e/base/i40e_lan_hmc.c:886: i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
419
./drivers/net/i40e/base/i40e_lan_hmc.c:932: i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);
420
./drivers/net/i40e/base/i40e_lan_hmc.c:938: i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
421
./drivers/net/i40e/base/i40e_lan_hmc.c:965: i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
422
./drivers/net/i40e/base/i40e_lan_hmc.c:975: i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
423
./drivers/net/i40e/base/i40e_lan_hmc.c:1003: i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);
424
./drivers/net/i40e/base/i40e_lan_hmc.c:1019: i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
425
./drivers/net/i40e/base/i40e_lan_hmc.c:1055: i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);
426
./drivers/net/i40e/base/i40e_lan_hmc.c:1071: i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
427
./drivers/net/i40e/base/i40e_lan_hmc.c:1108: i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);
428
./drivers/net/i40e/base/i40e_lan_hmc.c:1124: i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
429
./drivers/net/i40e/base/i40e_prototype.h:280:enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
430
./drivers/net/i40e/base/i40e_adminq.c:758: i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
431
./drivers/net/i40e/base/i40e_adminq.c:838: i40e_memcpy(details,
432
./drivers/net/i40e/base/i40e_adminq.c:899: i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
433
./drivers/net/i40e/base/i40e_adminq.c:906: i40e_memcpy(dma_buff->va, buff, buff_size,
434
./drivers/net/i40e/base/i40e_adminq.c:949: i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
435
./drivers/net/i40e/base/i40e_adminq.c:952: i40e_memcpy(buff, dma_buff->va, buff_size,
436
./drivers/net/i40e/base/i40e_adminq.c:978: i40e_memcpy(details->wb_desc, desc_on_ring,
437
./drivers/net/i40e/base/i40e_adminq.c:1076: i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
438
./drivers/net/i40e/base/i40e_adminq.c:1081: i40e_memcpy(e->msg_buf,
439
./drivers/net/i40e/base/i40e_common.c:1112: memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
440
./drivers/net/i40e/base/i40e_common.c:1135: memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac));
441
./drivers/net/i40e/base/i40e_common.c:1898: i40e_memcpy(&hw->phy.link_info_old, hw_link_info,
442
./drivers/net/i40e/base/i40e_common.c:1940: i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info),
443
./drivers/net/i40e/base/i40e_common.c:2602: memcpy(hw->phy.link_info.module_type, &abilities.module_type,
444
./drivers/net/i40e/base/i40e_common.c:4123: * i40e_aq_get_switch_resource_alloc (0x0204)
445
./drivers/net/i40e/base/i40e_common.c:4134:enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
446
./drivers/net/i40e/base/i40e_common.c:5146: i40e_memcpy(cmd->mac, mac_addr, I40E_ETH_LENGTH_OF_ADDRESS,
447
./drivers/net/i40e/base/i40e_common.c:5766: i40e_memcpy(hw->mac.perm_addr,
448
./drivers/net/i40e/base/i40e_common.c:5770: i40e_memcpy(hw->mac.addr, vsi_res->default_mac_addr,
449
./drivers/net/i40e/base/i40e_osdep.h:187:#define i40e_memcpy(a, b, c, d) rte_memcpy((a), (b), (c))
450
./drivers/net/i40e/i40e_rxtx_vec.c:502: memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
451
./drivers/net/i40e/i40e_ethdev_vf.c:501: (void)rte_memcpy(vf->vf_res, args.out_buffer,
452
./drivers/net/i40e/i40e_ethdev_vf.c:583: (void)rte_memcpy(&tpid_info.info, info, sizeof(*info));
453
./drivers/net/i40e/i40e_ethdev_vf.c:899: (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
454
./drivers/net/i40e/i40e_ethdev_vf.c:936: (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
455
./drivers/net/i40e/i40e_ethdev_vf.c:1114: (void)rte_memcpy(link, new_link, sizeof(*link));
456
./drivers/net/i40e/i40e_ethdev_vf.c:1223: vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
457
./drivers/net/i40e/i40e_ethdev_vf.c:1326: eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
458
./drivers/net/i40e/i40e_ethdev_vf.c:1827: rte_zmalloc("intr_vec",
459
./drivers/net/i40e/i40e_ethdev_vf.c:1853: (void)rte_memcpy(mac_addr.addr_bytes, hw->mac.addr,
460
./drivers/net/i40e/i40e_ethdev_vf.c:1898: (void)rte_memcpy(mac_addr.addr_bytes, hw->mac.addr,
461
./drivers/net/i40e/i40e_ethdev_vf.c:2142: lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
462
./drivers/net/i40e/i40e_ethdev_vf.c:2181: lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
463
./drivers/net/i40e/i40e_ethdev.c:351:static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
464
./drivers/net/i40e/i40e_ethdev.c:893: dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
465
./drivers/net/i40e/i40e_ethdev.c:1417: rte_zmalloc("intr_vec",
466
./drivers/net/i40e/i40e_ethdev.c:2585: (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
467
./drivers/net/i40e/i40e_ethdev.c:2686: (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
468
./drivers/net/i40e/i40e_ethdev.c:2687: (void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
469
./drivers/net/i40e/i40e_ethdev.c:2689: (void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
470
./drivers/net/i40e/i40e_ethdev.c:2700: (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
471
./drivers/net/i40e/i40e_ethdev.c:2829: lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
472
./drivers/net/i40e/i40e_ethdev.c:2870: lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
473
./drivers/net/i40e/i40e_ethdev.c:2965: mem->va = rte_zmalloc("i40e", size, 0);
474
./drivers/net/i40e/i40e_ethdev.c:3029: buf = rte_zmalloc("i40e", len, 0);
475
./drivers/net/i40e/i40e_ethdev.c:3174: rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
476
./drivers/net/i40e/i40e_ethdev.c:3216: entry = rte_zmalloc("i40e", sizeof(*entry), 0);
477
./drivers/net/i40e/i40e_ethdev.c:3350:i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
478
./drivers/net/i40e/i40e_ethdev.c:3397: entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
479
./drivers/net/i40e/i40e_ethdev.c:3484: (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
480
./drivers/net/i40e/i40e_ethdev.c:3523: (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
481
./drivers/net/i40e/i40e_ethdev.c:3628: veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
482
./drivers/net/i40e/i40e_ethdev.c:3730: (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
483
./drivers/net/i40e/i40e_ethdev.c:3743: f = rte_zmalloc("macv_filter", sizeof(*f), 0);
484
./drivers/net/i40e/i40e_ethdev.c:3749: (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
485
./drivers/net/i40e/i40e_ethdev.c:3757: (void)rte_memcpy(&filter.mac_addr,
486
./drivers/net/i40e/i40e_ethdev.c:3862: vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
487
./drivers/net/i40e/i40e_ethdev.c:3899: ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
488
./drivers/net/i40e/i40e_ethdev.c:3911: ret = i40e_res_pool_alloc(&pf->msix_pool,
489
./drivers/net/i40e/i40e_ethdev.c:3922: ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
490
./drivers/net/i40e/i40e_ethdev.c:3957: (void)rte_memcpy(&vsi->info, &ctxt.info,
491
./drivers/net/i40e/i40e_ethdev.c:3975: (void)rte_memcpy(&ctxt.info, &vsi->info,
492
./drivers/net/i40e/i40e_ethdev.c:3996: (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
493
./drivers/net/i40e/i40e_ethdev.c:3998: (void)rte_memcpy(&vsi->info.queue_mapping,
494
./drivers/net/i40e/i40e_ethdev.c:4004: (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
495
./drivers/net/i40e/i40e_ethdev.c:4128: memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
496
./drivers/net/i40e/i40e_ethdev.c:4138: (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
497
./drivers/net/i40e/i40e_ethdev.c:4192: (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
498
./drivers/net/i40e/i40e_ethdev.c:4306: ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
499
./drivers/net/i40e/i40e_ethdev.c:4649: pf->vmdq = rte_zmalloc("vmdq_info_struct",
500
./drivers/net/i40e/i40e_ethdev.c:4839: info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
501
./drivers/net/i40e/i40e_ethdev.c:5030: req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
502
./drivers/net/i40e/i40e_ethdev.c:5042: (void)rte_memcpy(req_list[i].mac_addr,
503
./drivers/net/i40e/i40e_ethdev.c:5105: req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
504
./drivers/net/i40e/i40e_ethdev.c:5117: (void)rte_memcpy(req_list[i].mac_addr,
505
./drivers/net/i40e/i40e_ethdev.c:5239: (void)rte_memcpy(&mv_f[i].macaddr,
506
./drivers/net/i40e/i40e_ethdev.c:5268: (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
507
./drivers/net/i40e/i40e_ethdev.c:5295: mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
508
./drivers/net/i40e/i40e_ethdev.c:5304: (void)rte_memcpy(&mv_f[i].macaddr,
509
./drivers/net/i40e/i40e_ethdev.c:5347: mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
510
./drivers/net/i40e/i40e_ethdev.c:5398: mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
511
./drivers/net/i40e/i40e_ethdev.c:5463: mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
512
./drivers/net/i40e/i40e_ethdev.c:5471: (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
513
./drivers/net/i40e/i40e_ethdev.c:5488: f = rte_zmalloc("macv_filter", sizeof(*f), 0);
514
./drivers/net/i40e/i40e_ethdev.c:5494: (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
515
./drivers/net/i40e/i40e_ethdev.c:5533: mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
516
./drivers/net/i40e/i40e_ethdev.c:5541: (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
517
./drivers/net/i40e/i40e_ethdev.c:5821: cld_filter = rte_zmalloc("tunnel_filter",
518
./drivers/net/i40e/i40e_ethdev.c:5831: (void)rte_memcpy(&pfilter->outer_mac, tunnel_filter->outer_mac,
519
./drivers/net/i40e/i40e_ethdev.c:5833: (void)rte_memcpy(&pfilter->inner_mac, tunnel_filter->inner_mac,
520
./drivers/net/i40e/i40e_ethdev.c:5839: (void)rte_memcpy(&pfilter->ipaddr.v4.data,
521
./drivers/net/i40e/i40e_ethdev.c:5844: (void)rte_memcpy(&pfilter->ipaddr.v6.data,
522
./drivers/net/i40e/i40e_ethdev.c:7497: rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
523
./drivers/net/i40e/i40e_ethdev.c:7549: rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
524
./drivers/net/i40e/i40e_ethdev.c:7629: mirr_rule = rte_zmalloc("i40e_mirror_rule",
525
./drivers/net/i40e/i40e_ethdev.c:8205: (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
526
./drivers/net/i40e/i40e_ethdev.c:8207: (void)rte_memcpy(&vsi->info.queue_mapping,
527
./drivers/net/i40e/i40e_rxtx.c:847:rte_rxmbuf_alloc(struct rte_mempool *mp)
528
./drivers/net/i40e/i40e_rxtx.c:851: m = __rte_mbuf_raw_alloc(mp);
529
./drivers/net/i40e/i40e_rxtx.c:1161:i40e_recv_pkts_bulk_alloc(void *rx_queue,
530
./drivers/net/i40e/i40e_rxtx.c:1222: nmb = rte_rxmbuf_alloc(rxq->mp);
531
./drivers/net/i40e/i40e_rxtx.c:1333: nmb = rte_rxmbuf_alloc(rxq->mp);
532
./drivers/net/i40e/i40e_rxtx.c:2730: struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mp);
533
./drivers/net/enic/enic_compat.h:75:#define kzalloc(size, flags) calloc(1, size)
534
./drivers/net/enic/base/vnic_wq.h:264:int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
535
./drivers/net/enic/base/vnic_intr.h:117:int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
536
./drivers/net/enic/base/vnic_rq.c:105:int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
537
./drivers/net/enic/base/vnic_rq.h:263:int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
538
./drivers/net/enic/base/vnic_dev.c:833: rte_memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
539
./drivers/net/enic/base/vnic_wq.c:128:int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
540
./drivers/net/enic/base/vnic_intr.c:43:int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
541
./drivers/net/enic/base/vnic_cq.c:55:int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
542
./drivers/net/enic/base/vnic_cq.h:138:int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
543
./drivers/net/enic/enic_main.c:84:enic_rxmbuf_alloc(struct rte_mempool *mp)
544
./drivers/net/enic/enic_main.c:88: m = __rte_mbuf_raw_alloc(mp);
545
./drivers/net/enic/enic_main.c:324: struct rte_mbuf *mbuf = enic_rxmbuf_alloc(rq->mp);
546
./drivers/net/enic/enic_main.c:337: hdr_mbuf = enic_rxmbuf_alloc(rq->mp);
547
./drivers/net/enic/enic_main.c:627: err = vnic_intr_alloc(enic->vdev, &enic->intr, 0);
548
./drivers/net/enic/enic_main.c:690: err = vnic_rq_alloc(enic->vdev, &enic->rq[queue_idx], queue_idx,
549
./drivers/net/enic/enic_main.c:698: err = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
550
./drivers/net/enic/enic_main.c:743: err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
551
./drivers/net/enic/enic_main.c:751: err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
552
./drivers/net/enic/enic_main.c:861: rte_memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));
553
./drivers/net/enic/enic_main.c:1028: eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0);
554
./drivers/net/enic/enic_clsf.c:136: key = rte_zmalloc("enic_fdir_node",
555
./drivers/net/nfp/nfp_net.c:946: memcpy(stats, &nfp_dev_stats, sizeof(*stats));
556
./drivers/net/nfp/nfp_net.c:1377: struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool);
557
./drivers/net/nfp/nfp_net.c:1740: new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
558
./drivers/net/nfp/nfp_net.c:2222: memcpy(&key, &rss_conf->rss_key[i], 1);
559
./drivers/net/nfp/nfp_net.c:2279: memcpy(&rss_conf->rss_key[i], &key, 1);
560
./drivers/net/nfp/nfp_net.c:2410: eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
561
./drivers/net/mlx5/mlx5_rxtx.c:297: memcpy(&(*linear)[offset],
562
./drivers/net/mlx5/mlx5_rxtx.c:772: rep = __rte_mbuf_raw_alloc(rxq->mp);
563
./drivers/net/mlx5/mlx5_rxtx.c:961: rep = __rte_mbuf_raw_alloc(rxq->mp);
564
./drivers/net/mlx5/mlx5_rss.c:125: memcpy(rss_conf->rss_key, key, key_len);
565
./drivers/net/mlx5/mlx5_rss.c:205: memcpy(rss_conf->rss_key,
566
./drivers/net/mlx5/mlx5.c:348: priv = rte_zmalloc("ethdev private structure",
567
./drivers/net/mlx5/mlx5.c:395: priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n,
568
./drivers/net/mlx5/mlx5_rxq.c:252: memcpy((void *)((uintptr_t)flow_attr + offset),
569
./drivers/net/mlx5/mlx5_rxq.c:390: ind_tables = rte_calloc(__func__, ind_tables_n,
570
./drivers/net/mlx5/mlx5_rxq.c:425: hash_rxqs = rte_calloc(__func__, hash_rxqs_n,
571
./drivers/net/mlx5/mlx5_rxq.c:597: * with rte_pktmbuf_alloc().
572
./drivers/net/mlx5/mlx5_rxq.c:635: buf = rte_pktmbuf_alloc(rxq->mp);
573
./drivers/net/mlx5/mlx5_rxq.c:643: /* Headroom is reserved by rte_pktmbuf_alloc(). */
574
./drivers/net/mlx5/mlx5_rxq.c:736: * with rte_pktmbuf_alloc().
575
./drivers/net/mlx5/mlx5_rxq.c:766: buf = rte_pktmbuf_alloc(rxq->mp);
576
./drivers/net/mlx5/mlx5_rxq.c:774: /* Headroom is reserved by rte_pktmbuf_alloc(). */
577
./drivers/net/mlx5/mlx5_rxq.c:969: pool = rte_malloc(__func__, (mbuf_n * sizeof(*pool)), 0);
578
./drivers/net/mlx5/mlx5_rxq.c:1117: buf = rte_pktmbuf_alloc(mp);
579
./drivers/net/mlx5/mlx5_mac.c:89: memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
580
./drivers/net/fm10k/fm10k_ethdev.c:2585: dev->data->mac_addrs = rte_zmalloc("fm10k",
581
./drivers/net/fm10k/fm10k_ethdev.c:2602: memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
582
./drivers/net/fm10k/base/fm10k_vf.c:255: memcpy(hw->mac.perm_addr, perm_addr, ETH_ALEN);
583
./drivers/net/fm10k/base/fm10k_vf.c:295: memcpy(hw->mac.perm_addr, perm_addr, ETH_ALEN);
584
./drivers/net/fm10k/base/fm10k_vf.c:296: memcpy(hw->mac.addr, perm_addr, ETH_ALEN);
585
./drivers/net/fm10k/base/fm10k_mbx.c:277: memcpy(fifo->buffer, msg + end, (len - end) << 2);
586
./drivers/net/fm10k/base/fm10k_mbx.c:282: memcpy(tail, msg, end << 2);
587
./drivers/net/fm10k/fm10k_rxtx_vec.c:585: memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
588
./drivers/net/null/rte_eth_null.c:113: bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
589
./drivers/net/null/rte_eth_null.c:139: bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
590
./drivers/net/null/rte_eth_null.c:142: rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
591
./drivers/net/null/rte_eth_null.c:184: rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
592
./drivers/net/null/rte_eth_null.c:446: rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);
593
./drivers/net/null/rte_eth_null.c:463: rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);
594
./drivers/net/null/rte_eth_null.c:547: rte_memcpy(internals->rss_key, default_rss_key, 40);
595
./drivers/net/xenvirt/rte_eth_xenvirt.c:82:rte_rxmbuf_alloc(struct rte_mempool *mp)
596
./drivers/net/xenvirt/rte_eth_xenvirt.c:86: m = __rte_mbuf_raw_alloc(mp);
597
./drivers/net/xenvirt/rte_eth_xenvirt.c:124: new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
598
./drivers/net/xenvirt/rte_eth_xenvirt.c:211: if (grefwatch_from_alloc(&gref_tmp, &ptr)) {
599
./drivers/net/xenvirt/rte_eth_xenvirt.c:295: m = rte_rxmbuf_alloc(rxvq->mpool);
600
./drivers/net/xenvirt/rte_eth_xenvirt.c:356: rte_memcpy(stats, &internals->eth_stats, sizeof(*stats));
601
./drivers/net/xenvirt/rte_eth_xenvirt.c:401: gref_arr = calloc(pg_num, sizeof(gref_arr[0]));
602
./drivers/net/xenvirt/rte_eth_xenvirt.c:402: pa_arr = calloc(pg_num, sizeof(pa_arr[0]));
603
./drivers/net/xenvirt/rte_eth_xenvirt.c:458: vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
604
./drivers/net/xenvirt/rte_eth_xenvirt.c:464: memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));
605
./drivers/net/xenvirt/rte_eth_xenvirt.c:468: vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
606
./drivers/net/xenvirt/rte_eth_xenvirt.c:474: memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));
607
./drivers/net/xenvirt/rte_eth_xenvirt.c:477: memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));
608
./drivers/net/xenvirt/rte_eth_xenvirt.c:579: args = rte_zmalloc(NULL, strlen(params) + 1, RTE_CACHE_LINE_SIZE);
609
./drivers/net/xenvirt/rte_eth_xenvirt.c:584: rte_memcpy(args, params, strlen(params));
610
./drivers/net/xenvirt/rte_eth_xenvirt.c:676: data->mac_addrs = rte_zmalloc("xen_virtio", ETHER_ADDR_LEN, 0);
611
./drivers/net/xenvirt/rte_eth_xenvirt.c:679: memcpy(&data->mac_addrs->addr_bytes, &dict.addr, sizeof(struct ether_addr));
612
./drivers/net/xenvirt/rte_xen_lib.c:162: arg = malloc(sizeof(*arg) + (pg_num - 1) * sizeof(uint32_t));
613
./drivers/net/xenvirt/rte_xen_lib.c:200:grefwatch_from_alloc(uint32_t *gref, void **pptr)
614
./drivers/net/xenvirt/rte_xen_lib.c:383: gref_tmp = malloc(count * sizeof(uint32_t));
615
./drivers/net/xenvirt/rte_mempool_gntalloc.c:120: pa_arr = calloc(pg_num, sizeof(pa_arr[0]));
616
./drivers/net/xenvirt/rte_mempool_gntalloc.c:121: gref_arr = calloc(pg_num, sizeof(gref_arr[0]));
617
./drivers/net/xenvirt/rte_mempool_gntalloc.c:122: gnt_arr = calloc(pg_num, sizeof(gnt_arr[0]));
618
./drivers/net/xenvirt/rte_xen_lib.h:108:grefwatch_from_alloc(uint32_t *gref, void **pptr);
619
./drivers/net/pcap/rte_eth_pcap.c:144: rte_memcpy(rte_pktmbuf_append(mbuf, len), data, len);
620
./drivers/net/pcap/rte_eth_pcap.c:150: m->next = rte_pktmbuf_alloc(mb_pool);
621
./drivers/net/pcap/rte_eth_pcap.c:164: rte_memcpy(rte_pktmbuf_append(m, len), data, len);
622
./drivers/net/pcap/rte_eth_pcap.c:181: rte_memcpy(data + data_len, rte_pktmbuf_mtod(mbuf, void *),
623
./drivers/net/pcap/rte_eth_pcap.c:215: mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool);
624
./drivers/net/pcap/rte_eth_pcap.c:225: rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
625
./drivers/net/bonding/rte_eth_bond_api.c:660: memcpy(slaves, internals->active_slaves, internals->active_slave_count);
626
./drivers/net/bonding/rte_eth_bond_8023ad.c:333: memcpy(&port->partner, &lacp->actor.port_params,
627
./drivers/net/bonding/rte_eth_bond_8023ad.c:585: lacp_pkt = rte_pktmbuf_alloc(port->mbuf_pool);
628
./drivers/net/bonding/rte_eth_bond_8023ad.c:611: memcpy(&hdr->lacpdu.actor.port_params, &port->actor,
629
./drivers/net/bonding/rte_eth_bond_8023ad.c:620: memcpy(&lacpdu->partner.port_params, &port->partner,
630
./drivers/net/bonding/rte_eth_bond_8023ad.c:862: memcpy(&port->actor, &initial, sizeof(struct port_params));
631
./drivers/net/bonding/rte_eth_bond_8023ad.c:867: memcpy(&port->partner, &initial, sizeof(struct port_params));
632
./drivers/net/bonding/rte_eth_bond_8023ad.c:1215: rte_memcpy(&info->actor, &port->actor, sizeof(port->actor));
633
./drivers/net/bonding/rte_eth_bond_8023ad.c:1218: rte_memcpy(&info->partner, &port->partner, sizeof(port->partner));
634
./drivers/net/bonding/rte_eth_bond_pmd.c:149: memcpy(slaves, internals->active_slaves,
635
./drivers/net/bonding/rte_eth_bond_pmd.c:411: memcpy(slaves, internals->active_slaves,
636
./drivers/net/bonding/rte_eth_bond_pmd.c:439: memcpy(&bufs[nb_pkts - tx_fail_total],
637
./drivers/net/bonding/rte_eth_bond_pmd.c:706: memcpy(slaves, internals->tlb_slaves_order,
638
./drivers/net/bonding/rte_eth_bond_pmd.c:817: upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
639
./drivers/net/bonding/rte_eth_bond_pmd.c:919: memcpy(slaves, internals->active_slaves,
640
./drivers/net/bonding/rte_eth_bond_pmd.c:945: memcpy(&bufs[nb_pkts - tx_fail_total],
641
./drivers/net/bonding/rte_eth_bond_pmd.c:992: memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);
642
./drivers/net/bonding/rte_eth_bond_pmd.c:1070: memcpy(slaves, internals->active_slaves,
643
./drivers/net/bonding/rte_eth_bond_pmd.c:1187: memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));
644
./drivers/net/bonding/rte_eth_bond_pmd.c:1466: memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
645
./drivers/net/bonding/rte_eth_bond_pmd.c:1670: memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
646
./drivers/net/bonding/rte_eth_bond_pmd.c:1694: memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));
647
./drivers/net/bonding/rte_eth_bond_pmd.c:2062: memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
648
./drivers/net/bonding/rte_eth_bond_pmd.c:2104: memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));
649
./drivers/net/bonding/rte_eth_bond_pmd.c:2116: memcpy(internals->rss_key, bond_rss_conf.rss_key,
650
./drivers/net/bonding/rte_eth_bond_pmd.c:2139: memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len);
651
./drivers/net/bonding/rte_eth_bond_pmd.c:2274: memcpy(internals->rss_key, default_rss_key, 40);
652
./drivers/net/bonding/rte_eth_bond_alb.c:143: memcpy(client_info->vlan, eth_h + 1, offset);
653
./drivers/net/bonding/rte_eth_bond_alb.c:193: memcpy(client_info->vlan, eth_h + 1, offset);
654
./drivers/net/bonding/rte_eth_bond_alb.c:209: memcpy(client_info->vlan, eth_h + 1, offset);
655
./drivers/net/bonding/rte_eth_bond_alb.c:243: memcpy(eth_h + 1, client_info->vlan,
656
./drivers/net/bnx2x/bnx2x.h:1490: (void)rte_memcpy(valp, BNX2X_SP(sc, wb_data[0]), (len32) * 4); \
657
./drivers/net/bnx2x/bnx2x.h:1495: (void)rte_memcpy(BNX2X_SP(sc, wb_data[0]), valp, (len32) * 4); \
658
./drivers/net/bnx2x/bnx2x.h:1510: rte_memcpy(GUNZIP_BUF(sc), data, len32 * 4); \
659
./drivers/net/bnx2x/bnx2x.h:1705:int bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size,
660
./drivers/net/bnx2x/bnx2x.c:164:bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma,
661
./drivers/net/bnx2x/bnx2x.c:172: sprintf(mz_name, "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg,
662
./drivers/net/bnx2x/bnx2x.c:175: sprintf(mz_name, "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg,
663
./drivers/net/bnx2x/bnx2x.c:2194: rte_memcpy(&tx_parse_bd->data.mac_addr.dst_hi,
664
./drivers/net/bnx2x/bnx2x.c:2196: rte_memcpy(&tx_parse_bd->data.mac_addr.dst_mid,
665
./drivers/net/bnx2x/bnx2x.c:2198: rte_memcpy(&tx_parse_bd->data.mac_addr.dst_lo,
666
./drivers/net/bnx2x/bnx2x.c:2200: rte_memcpy(&tx_parse_bd->data.mac_addr.src_hi,
667
./drivers/net/bnx2x/bnx2x.c:2202: rte_memcpy(&tx_parse_bd->data.mac_addr.src_mid,
668
./drivers/net/bnx2x/bnx2x.c:2204: rte_memcpy(&tx_parse_bd->data.mac_addr.src_lo,
669
./drivers/net/bnx2x/bnx2x.c:2349: sc->ilt = rte_malloc("", sizeof(struct ecore_ilt), RTE_CACHE_LINE_SIZE);
670
./drivers/net/bnx2x/bnx2x.c:2356: sc->ilt->lines = rte_calloc("",
671
./drivers/net/bnx2x/bnx2x.c:2417: if (bnx2x_dma_alloc(sc, sc->context[i].size,
672
./drivers/net/bnx2x/bnx2x.c:2496: if (bnx2x_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
673
./drivers/net/bnx2x/bnx2x.c:6634: (void)rte_memcpy(params.ind_table, rss_obj->ind_table,
674
./drivers/net/bnx2x/bnx2x.c:6701: (void)rte_memcpy(ramrod_param.user_req.u.mac.mac, mac,
675
./drivers/net/bnx2x/bnx2x.c:6909: (void)rte_memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
676
./drivers/net/bnx2x/bnx2x.c:8862: if (bnx2x_dma_alloc(sc, sizeof(struct host_sp_status_block),
677
./drivers/net/bnx2x/bnx2x.c:8874: if (bnx2x_dma_alloc(sc, BNX2X_PAGE_SIZE,
678
./drivers/net/bnx2x/bnx2x.c:8887: if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_slowpath),
679
./drivers/net/bnx2x/bnx2x.c:8901: if (bnx2x_dma_alloc(sc, BNX2X_PAGE_SIZE,
680
./drivers/net/bnx2x/bnx2x.c:8916: if (bnx2x_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
681
./drivers/net/bnx2x/bnx2x.c:8943: if (bnx2x_dma_alloc(sc, sizeof(union bnx2x_host_hc_status_block),
682
./drivers/net/bnx2x/bnx2x.c:9110: tmp = rte_malloc("", sizeof(struct bnx2x_prev_list_node),
683
./drivers/net/bnx2x/bnx2x.c:9536: cap = sc->pci_caps = rte_zmalloc("caps", sizeof(struct bnx2x_pci_cap),
684
./drivers/net/bnx2x/bnx2x.c:9566: cap->next = rte_zmalloc("pci_cap",
685
./drivers/net/bnx2x/bnx2x.c:9609: sc->firmware = rte_zmalloc("bnx2x_fw", st.st_size, RTE_CACHE_LINE_SIZE);
686
./drivers/net/bnx2x/bnx2x.c:11525: sc->init_ops = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE);
687
./drivers/net/bnx2x/bnx2x.c:11531: sc->init_ops_offsets = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE);
688
./drivers/net/bnx2x/bnx2x.c:11537: sc->init_data = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE);
689
./drivers/net/bnx2x/bnx2x.c:11552: sc->iro_array = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE);
690
./drivers/net/bnx2x/bnx2x_rxtx.c:15:bnx2x_rxmbuf_alloc(struct rte_mempool *mp)
691
./drivers/net/bnx2x/bnx2x_rxtx.c:19: m = __rte_mbuf_raw_alloc(mp);
692
./drivers/net/bnx2x/bnx2x_rxtx.c:151: mbuf = bnx2x_rxmbuf_alloc(mp);
693
./drivers/net/bnx2x/bnx2x_rxtx.c:278: txq = rte_zmalloc("ethdev TX queue", sizeof(struct bnx2x_tx_queue),
694
./drivers/net/bnx2x/bnx2x_rxtx.c:313: txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
695
./drivers/net/bnx2x/bnx2x_rxtx.c:408: new_mb = bnx2x_rxmbuf_alloc(rxq->mb_pool);
696
./drivers/net/bnx2x/ecore_sp.c:4637: rte_memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
697
./drivers/net/bnx2x/bnx2x_vfpf.c:58: rte_memcpy(&sc->link_params.mac_addr, bull->mac, ETH_ALEN);
698
./drivers/net/bnx2x/bnx2x_vfpf.c:60: rte_memcpy(&bull->vlan, &sc->old_bulletin.vlan, VLAN_HLEN);
699
./drivers/net/bnx2x/bnx2x_vfpf.c:203: memcpy(sc_resp, resp, sizeof(sc->acquire_resp));
700
./drivers/net/bnx2x/bnx2x_vfpf.c:301: (void)rte_memcpy(sc->link_params.mac_addr,
701
./drivers/net/bnx2x/bnx2x_vfpf.c:518: rte_memcpy(query->filters[0].mac, sc->link_params.mac_addr, ETH_ALEN);
702
./drivers/net/bnx2x/bnx2x_vfpf.c:529: rte_memcpy(sc->link_params.mac_addr, sc->pf2vf_bulletin->mac,
703
./drivers/net/bnx2x/bnx2x_vfpf.c:531: rte_memcpy(query->filters[0].mac, sc->pf2vf_bulletin->mac,
704
./drivers/net/bnx2x/bnx2x_vfpf.c:562: rte_memcpy(query->rss_key, params->rss_key, sizeof(params->rss_key));
705
./drivers/net/bnx2x/bnx2x_vfpf.c:565: rte_memcpy(query->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
706
./drivers/net/bnx2x/ecore_sp.h:116:#define ECORE_MEMCPY(_a, _b, _s) (void)rte_memcpy(_a, _b, _s)
707
./drivers/net/bnx2x/ecore_sp.h:129: rte_zmalloc("", _size, RTE_CACHE_LINE_SIZE)
708
./drivers/net/bnx2x/ecore_sp.h:132: rte_calloc("", _len, _size, RTE_CACHE_LINE_SIZE)
709
./drivers/net/bnx2x/ecore_sp.h:142: x = rte_malloc("", sizeof(struct bnx2x_dma), RTE_CACHE_LINE_SIZE); \
710
./drivers/net/bnx2x/ecore_sp.h:144: if (bnx2x_dma_alloc((struct bnx2x_softc *)sc, \
711
./drivers/net/bnx2x/bnx2x_stats.c:119: rte_memcpy(BNX2X_SP(sc, func_stats), &sc->func_stats,
712
./drivers/net/bnx2x/bnx2x_stats.c:822: rte_memcpy(old, new, sizeof(struct nig_stats));
713
./drivers/net/bnx2x/bnx2x_stats.c:824: rte_memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
714
./drivers/net/bnx2x/bnx2x_ethdev.c:125: if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
715
./drivers/net/bnx2x/bnx2x_ethdev.c:131: if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
716
./drivers/net/e1000/igb_rxtx.c:83:rte_rxmbuf_alloc(struct rte_mempool *mp)
717
./drivers/net/e1000/igb_rxtx.c:87: m = __rte_mbuf_raw_alloc(mp);
718
./drivers/net/e1000/igb_rxtx.c:847: nmb = rte_rxmbuf_alloc(rxq->mb_pool);
719
./drivers/net/e1000/igb_rxtx.c:1030: nmb = rte_rxmbuf_alloc(rxq->mb_pool);
720
./drivers/net/e1000/igb_rxtx.c:1335: txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
721
./drivers/net/e1000/igb_rxtx.c:1369: txq->sw_ring = rte_zmalloc("txq->sw_ring",
722
./drivers/net/e1000/igb_rxtx.c:1466: rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
723
./drivers/net/e1000/igb_rxtx.c:1504: rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
724
./drivers/net/e1000/igb_rxtx.c:1965: struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
725
./drivers/net/e1000/igb_pf.c:74: memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
726
./drivers/net/e1000/igb_pf.c:113: *vfinfo = rte_zmalloc("vf_info", sizeof(struct e1000_vf_info) * vf_num, 0);
727
./drivers/net/e1000/igb_pf.c:320: rte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN);
728
./drivers/net/e1000/igb_pf.c:336: rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
729
./drivers/net/e1000/base/e1000_vf.c:294: memcpy(hw->mac.perm_addr, addr, 6);
730
./drivers/net/e1000/base/e1000_vf.c:336: memcpy(msg_addr, addr, 6);
731
./drivers/net/e1000/base/e1000_nvm.c:1030: memcpy(pba->pba_block,
732
./drivers/net/e1000/base/e1000_nvm.c:1088: memcpy(&eeprom_buf[pba->word[1]],
733
./drivers/net/e1000/em_rxtx.c:82:rte_rxmbuf_alloc(struct rte_mempool *mp)
734
./drivers/net/e1000/em_rxtx.c:86: m = __rte_mbuf_raw_alloc(mp);
735
./drivers/net/e1000/em_rxtx.c:738: nmb = rte_rxmbuf_alloc(rxq->mb_pool);
736
./drivers/net/e1000/em_rxtx.c:918: nmb = rte_rxmbuf_alloc(rxq->mb_pool);
737
./drivers/net/e1000/em_rxtx.c:1240: if ((txq = rte_zmalloc("ethdev TX queue", sizeof(*txq),
738
./drivers/net/e1000/em_rxtx.c:1245: if ((txq->sw_ring = rte_zmalloc("txq->sw_ring",
739
./drivers/net/e1000/em_rxtx.c:1364: if ((rxq = rte_zmalloc("ethdev RX queue", sizeof(*rxq),
740
./drivers/net/e1000/em_rxtx.c:1369: if ((rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
741
./drivers/net/e1000/em_rxtx.c:1570: struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
742
./drivers/net/e1000/em_ethdev.c:279: eth_dev->data->mac_addrs = rte_zmalloc("e1000", ETHER_ADDR_LEN *
743
./drivers/net/e1000/em_ethdev.c:555: rte_zmalloc("intr_vec",
744
./drivers/net/e1000/igb_ethdev.c:726: eth_dev->data->mac_addrs = rte_zmalloc("e1000",
745
./drivers/net/e1000/igb_ethdev.c:892: eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
746
./drivers/net/e1000/igb_ethdev.c:1175: rte_zmalloc("intr_vec",
747
./drivers/net/e1000/igb_ethdev.c:3199: filter = rte_zmalloc("e1000_2tuple_filter",
748
./drivers/net/e1000/igb_ethdev.c:3347: flex_filter = rte_zmalloc("e1000_flex_filter",
749
./drivers/net/e1000/igb_ethdev.c:3354: memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len);
750
./drivers/net/e1000/igb_ethdev.c:3454: memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
751
./drivers/net/e1000/igb_ethdev.c:3455: memcpy(flex_filter.filter_info.mask, filter->mask,
752
./drivers/net/e1000/igb_ethdev.c:3662: filter = rte_zmalloc("e1000_5tuple_filter",
753
./drivers/net/ring/rte_eth_ring.c:530: info = rte_zmalloc("struct node_action_list",
754
./drivers/net/vmxnet3/vmxnet3_rxtx.c:96:rte_rxmbuf_alloc(struct rte_mempool *mp)
755
./drivers/net/vmxnet3/vmxnet3_rxtx.c:100: m = __rte_mbuf_raw_alloc(mp);
756
./drivers/net/vmxnet3/vmxnet3_rxtx.c:463: mbuf = rte_rxmbuf_alloc(rxq->mp);
757
./drivers/net/vmxnet3/vmxnet3_rxtx.c:721: txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue), RTE_CACHE_LINE_SIZE);
758
./drivers/net/vmxnet3/vmxnet3_rxtx.c:786: ring->buf_info = rte_zmalloc("tx_ring_buf_info",
759
./drivers/net/vmxnet3/vmxnet3_rxtx.c:829: rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue), RTE_CACHE_LINE_SIZE);
760
./drivers/net/vmxnet3/vmxnet3_rxtx.c:903: ring->buf_info = rte_zmalloc(mem_name, ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE);
761
./drivers/net/vmxnet3/vmxnet3_rxtx.c:996: memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key, dev_rss_conf->hashKeySize);
762
./drivers/net/vmxnet3/vmxnet3_ethdev.c:274: memcpy(hw->perm_addr , &mac_lo, 4);
763
./drivers/net/vmxnet3/vmxnet3_ethdev.c:275: memcpy(hw->perm_addr+4, &mac_hi, 2);
764
./drivers/net/vmxnet3/vmxnet3_ethdev.c:278: eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN *
765
./drivers/net/vmxnet3/vmxnet3_ethdev.c:784: memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
766
./drivers/net/vmxnet3/vmxnet3_ethdev.c:860: memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
767
./drivers/net/cxgbe/cxgbe_main.c:358: pi->rss = rte_zmalloc(NULL, pi->rss_size, 0);
768
./drivers/net/cxgbe/cxgbe_main.c:376: bufp += sprintf(bufp, "100/");
769
./drivers/net/cxgbe/cxgbe_main.c:378: bufp += sprintf(bufp, "1000/");
770
./drivers/net/cxgbe/cxgbe_main.c:380: bufp += sprintf(bufp, "10G/");
771
./drivers/net/cxgbe/cxgbe_main.c:382: bufp += sprintf(bufp, "40G/");
772
./drivers/net/cxgbe/cxgbe_main.c:385: sprintf(bufp, "BASE-%s",
773
./drivers/net/cxgbe/cxgbe_main.c:476: strcpy(config_name, "On Flash");
774
./drivers/net/cxgbe/cxgbe_main.c:514: strcpy(config_name, "Firmware Default");
775
./drivers/net/cxgbe/cxgbe_main.c:912: rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
776
./drivers/net/cxgbe/cxgbe_main.c:1171: pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
777
./drivers/net/cxgbe/base/t4_hw.c:354: __be64 *temp = (__be64 *)malloc(size * sizeof(char));
778
./drivers/net/cxgbe/base/t4_hw.c:368: memcpy(p, (const __be64 *)cmd, size);
779
./drivers/net/cxgbe/base/t4_hw.c:1825: memcpy(mac, c.mac, sizeof(c.mac));
780
./drivers/net/cxgbe/base/t4_hw.c:1828: memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
781
./drivers/net/cxgbe/base/t4_hw.c:1831: memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
782
./drivers/net/cxgbe/base/t4_hw.c:1834: memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
783
./drivers/net/cxgbe/base/t4_hw.c:1837: memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
784
./drivers/net/cxgbe/base/t4_hw.c:1982: memcpy(p->macaddr, addr, sizeof(p->macaddr));
785
./drivers/net/cxgbe/base/adapter.h:541:#define t4_os_alloc(_size) t4_alloc_mem((_size))
786
./drivers/net/cxgbe/sge.c:669: memcpy(sgl->sge, buf, part0);
787
./drivers/net/cxgbe/sge.c:671: rte_memcpy(q->desc, RTE_PTR_ADD((u8 *)buf, part0), part1);
788
./drivers/net/cxgbe/cxgbe_ethdev.c:825: adapter = rte_zmalloc(name, sizeof(*adapter), 0);
789
./drivers/net/af_packet/rte_eth_af_packet.c:151: mbuf = rte_pktmbuf_alloc(pkt_q->mb_pool);
790
./drivers/net/af_packet/rte_eth_af_packet.c:158: memcpy(rte_pktmbuf_mtod(mbuf, void *), pbuf, rte_pktmbuf_data_len(mbuf));
791
./drivers/net/af_packet/rte_eth_af_packet.c:211: memcpy(pbuf, rte_pktmbuf_mtod(mbuf, void*), rte_pktmbuf_data_len(mbuf));
792
./drivers/net/af_packet/rte_eth_af_packet.c:494: memcpy(ifr.ifr_name, pair->value, ifnamelen);
793
./drivers/net/af_packet/rte_eth_af_packet.c:516: memcpy(&(*internals)->eth_addr, ifr.ifr_hwaddr.sa_data, ETH_ALEN);
794
./drivers/net/virtio/virtio_ethdev.c:167: memcpy(vq->virtio_net_hdr_mz->addr, ctrl,
795
./drivers/net/virtio/virtio_ethdev.c:239: memcpy(&result, vq->virtio_net_hdr_mz->addr,
796
./drivers/net/virtio/virtio_ethdev.c:255: memcpy(ctrl.data, &nb_queues, sizeof(uint16_t));
797
./drivers/net/virtio/virtio_ethdev.c:321: vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
798
./drivers/net/virtio/virtio_ethdev.c:329: vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
799
./drivers/net/virtio/virtio_ethdev.c:334: vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
800
./drivers/net/virtio/virtio_ethdev.c:831: memcpy(ctrl.data, uc, len[0]);
801
./drivers/net/virtio/virtio_ethdev.c:834: memcpy(ctrl.data + len[0], mc, len[1]);
802
./drivers/net/virtio/virtio_ethdev.c:866: memcpy(&tbl->macs[tbl->entries++], addr, ETHER_ADDR_LEN);
803
./drivers/net/virtio/virtio_ethdev.c:897: memcpy(&tbl->macs[tbl->entries++], addrs + i, ETHER_ADDR_LEN);
804
./drivers/net/virtio/virtio_ethdev.c:908: memcpy(hw->mac_addr, mac_addr, ETHER_ADDR_LEN);
805
./drivers/net/virtio/virtio_ethdev.c:918: memcpy(ctrl.data, mac_addr, ETHER_ADDR_LEN);
806
./drivers/net/virtio/virtio_ethdev.c:936: memcpy(ctrl.data, &vlan_id, sizeof(vlan_id));
807
./drivers/net/virtio/virtio_ethdev.c:1280: eth_dev->data->mac_addrs = rte_zmalloc("virtio", VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN, 0);
808
./drivers/net/virtio/virtio_pci.h:100: * To allow us to malloc(9) each list individually, limit the number
809
./drivers/net/virtio/virtio_pci.h:103: * contigmalloc(9) for the larger allocations, similar to what
810
./drivers/net/virtio/virtio_pci.h:104: * bus_dmamem_alloc(9) does.
811
./drivers/net/virtio/virtio_rxtx.c:258:rte_rxmbuf_alloc(struct rte_mempool *mp)
812
./drivers/net/virtio/virtio_rxtx.c:262: m = __rte_mbuf_raw_alloc(mp);
813
./drivers/net/virtio/virtio_rxtx.c:321: m = rte_rxmbuf_alloc(vq->mpool);
814
./drivers/net/virtio/virtio_rxtx.c:638: new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
815
./drivers/net/virtio/virtio_rxtx.c:804: new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
816
./app/test-pipeline/pipeline_hash.c:480: memcpy(key, ipv6_dst, 16);
817
./app/test-pipeline/pipeline_acl.c:181: memcpy(table_acl_params.field_format, ipv4_field_formats,
818
./app/test-pipeline/pipeline_lpm_ipv6.c:160: memcpy(key.ip, &ip, sizeof(uint32_t));
819
./app/test-acl/main.c:792: memcpy(&cfg.defs, ipv6_defs, sizeof(ipv6_defs));
820
./app/test-acl/main.c:795: memcpy(&cfg.defs, ipv4_defs, sizeof(ipv4_defs));
821
./app/test/test_table_ports.c:80: mbuf[0] = (void *)rte_pktmbuf_alloc(pool);
822
./app/test/test_table_ports.c:93: mbuf[i] = rte_pktmbuf_alloc(pool);
823
./app/test/test_table_ports.c:160: mbuf[0] = rte_pktmbuf_alloc(pool);
824
./app/test/test_table_ports.c:175: mbuf[i] = rte_pktmbuf_alloc(pool);
825
./app/test/test_table_ports.c:191: mbuf[i] = rte_pktmbuf_alloc(pool);
826
./app/test/test_table_ports.c:205: mbuf[i] = rte_pktmbuf_alloc(pool);
827
./app/test/test_link_bonding_mode4.c:505: memcpy(&lacp->partner.port_params, &lacp->actor.port_params,
828
./app/test/test_link_bonding_mode4.c:1108: marker_pkt = rte_pktmbuf_alloc(test_params.mbuf_pool);
829
./app/test/test_acl.c:242: memcpy(&cfg->defs, ipv4_defs, sizeof(ipv4_defs));
830
./app/test/test_acl.c:779: memcpy(&cfg->defs, ipv4_defs, sizeof(ipv4_defs));
831
./app/test/test_acl.c:1164: memcpy(&param, &acl_param, sizeof(param));
832
./app/test/test_acl.c:1234: memcpy(&param, &acl_param, sizeof(param));
833
./app/test/test_acl.c:1245: memcpy(&rules[i], &acl_rule,
834
./app/test/test_acl.c:1308: memcpy(&rule, &acl_rule, sizeof(struct rte_acl_ipv4vlan_rule));
835
./app/test/test_acl.c:1416: memcpy(&param, &acl_param, sizeof(param));
836
./app/test/test_acl.c:1428: memcpy(&param, &acl_param, sizeof(param));
837
./app/test/test_acl.c:1440: memcpy(&param, &acl_param, sizeof(param));
838
./app/test/test_acl.c:1452: memcpy(&param, &acl_param, sizeof(param));
839
./app/test/test_acl.c:1479: memcpy(&param, &acl_param, sizeof(param));
840
./app/test/test_acl.c:1486: memcpy(&rule, &acl_rule, sizeof(rule));
841
./app/test/test_acl.c:1544: memcpy(&param, &acl_param, sizeof(param));
842
./app/test/test_acl.c:1644: memcpy(&param, &acl_param, sizeof(param));
843
./app/test/test_table_tables.c:54: mbuf = rte_pktmbuf_alloc(pool); \
844
./app/test/test_table_pipeline.c:455: m = rte_pktmbuf_alloc(pool);
845
./app/test/test_cryptodev_perf.c:84: struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
846
./app/test/test_cryptodev_perf.c:95: rte_memcpy(dst, string, t_len);
847
./app/test/test_cryptodev_perf.c:1751: rte_memcpy(ut_params->digest, data_params[0].expected.digest,
848
./app/test/test_cryptodev_perf.c:1754: struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
849
./app/test/test_cryptodev_perf.c:1772: rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
850
./app/test/test_cryptodev_perf.c:1925: rte_memcpy(ut_params->digest, data_params[index].expected.digest,
851
./app/test/test_cryptodev_perf.c:1928: struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
852
./app/test/test_cryptodev_perf.c:1947: rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
853
./app/test/test_ring.c:288: src = malloc(RING_SIZE*2*sizeof(void *));
854
./app/test/test_ring.c:298: dst = malloc(RING_SIZE*2*sizeof(void *));
855
./app/test/test_ring.c:496: src = malloc(RING_SIZE*2*sizeof(void *));
856
./app/test/test_ring.c:506: dst = malloc(RING_SIZE*2*sizeof(void *));
857
./app/test/test_ring.c:808: src = malloc(RING_SIZE*2*sizeof(void *));
858
./app/test/test_ring.c:817: dst = malloc(RING_SIZE*2*sizeof(void *));
859
./app/test/test_ring.c:1261: obj = rte_calloc("test_ring_basic_ex_malloc", RING_SIZE, sizeof(void *), 0);
860
./app/test/test_sched.c:168: in_mbufs[i] = rte_pktmbuf_alloc(mp);
861
./app/test/test_malloc.c:104: p1 = rte_zmalloc("dummy", 1000, align1);
862
./app/test/test_malloc.c:117: p2 = rte_malloc("dummy", 1000, align2);
863
./app/test/test_malloc.c:124: p3 = rte_malloc("dummy", 1000, align3);
864
./app/test/test_malloc.c:176: p1 = rte_zmalloc("dummy", 1000, align1);
865
./app/test/test_malloc.c:190: p2 = rte_calloc("dummy", 1000, 16, align2);
866
./app/test/test_malloc.c:192: p3 = rte_malloc("dummy", 1000, align3);
867
./app/test/test_malloc.c:409: char *ptr1 = rte_zmalloc(NULL, size1, RTE_CACHE_LINE_SIZE);
868
./app/test/test_malloc.c:439: char *ptr3 = rte_zmalloc(NULL, size3, RTE_CACHE_LINE_SIZE);
869
./app/test/test_malloc.c:477: char *ptr5 = rte_malloc(NULL, size5, RTE_CACHE_LINE_SIZE);
870
./app/test/test_malloc.c:499: char *ptr7 = rte_malloc(NULL, size7, orig_align);
871
./app/test/test_malloc.c:525: char *ptr9 = rte_malloc(NULL, size9, RTE_CACHE_LINE_SIZE);
872
./app/test/test_malloc.c:530: char *ptr10 = rte_malloc(NULL, size10, RTE_CACHE_LINE_SIZE);
873
./app/test/test_malloc.c:586: struct mem_list *entry = rte_malloc(NULL,
874
./app/test/test_malloc.c:626: char *data_ptr = rte_malloc(NULL, request_size, RTE_CACHE_LINE_SIZE);
875
./app/test/test_malloc.c:655: memcpy(save_buf, over_write_vals, sizeof(save_buf));
876
./app/test/test_malloc.c:660: memcpy(over_write_vals, save_buf, sizeof(save_buf));
877
./app/test/test_malloc.c:672: memcpy(save_buf, over_write_vals, sizeof(save_buf));
878
./app/test/test_malloc.c:677: memcpy(over_write_vals, save_buf, sizeof(save_buf));
879
./app/test/test_malloc.c:696:test_zero_aligned_alloc(void)
880
./app/test/test_malloc.c:698: char *p1 = rte_malloc(NULL,1024, 0);
881
./app/test/test_malloc.c:720: char *bad_ptr = rte_malloc(type, size, align);
882
./app/test/test_malloc.c:728: bad_ptr = rte_malloc(type, size, align);
883
./app/test/test_malloc.c:853:test_malloc(void)
884
./app/test/test_malloc.c:864: if (test_zero_aligned_alloc() < 0){
885
./app/test/test_malloc.c:865: printf("test_zero_aligned_alloc() failed\n");
886
./app/test/test_malloc.c:868: else printf("test_zero_aligned_alloc() passed\n");
887
./app/test/test_distributor.c:273:sanity_test_with_mbuf_alloc(struct rte_distributor *d, struct rte_mempool *p)
888
./app/test/test_distributor.c:545: if (sanity_test_with_mbuf_alloc(d, p) < 0)
889
./app/test/test_mempool.c:158: objtable = malloc(MEMPOOL_SIZE * sizeof(void *));
890
./app/test/test_mempool.c:362: obj = rte_calloc("test_mempool_basic_ex", MEMPOOL_SIZE , sizeof(void *), 0);
891
./app/test/virtual_pmd.c:220: rte_memcpy(stats, &dev_private->eth_stats, sizeof(*stats));
892
./app/test/virtual_pmd.c:610: eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);
893
./app/test/virtual_pmd.c:614: memcpy(eth_dev->data->mac_addrs, mac_addr,
894
./app/test/test_memcpy_perf.c:98: large_buf_read = rte_malloc("memcpy", LARGE_BUFFER_SIZE + ALIGNMENT_UNIT, ALIGNMENT_UNIT);
895
./app/test/test_memcpy_perf.c:102: large_buf_write = rte_malloc("memcpy", LARGE_BUFFER_SIZE + ALIGNMENT_UNIT, ALIGNMENT_UNIT);
896
./app/test/test_memcpy_perf.c:106: small_buf_read = rte_malloc("memcpy", SMALL_BUFFER_SIZE + ALIGNMENT_UNIT, ALIGNMENT_UNIT);
897
./app/test/test_memcpy_perf.c:110: small_buf_write = rte_malloc("memcpy", SMALL_BUFFER_SIZE + ALIGNMENT_UNIT, ALIGNMENT_UNIT);
898
./app/test/test_memcpy_perf.c:182: rte_memcpy(dst+dst_addrs[j], src+src_addrs[j], size);
899
./app/test/test_memcpy_perf.c:203: rte_memcpy(dst+dst_addrs[t], src+src_addrs[t], size); \
900
./app/test/test_memcpy_perf.c:211: memcpy(dst+dst_addrs[t], src+src_addrs[t], size); \
901
./app/test/test_memcpy_perf.c:312: printf("\n** rte_memcpy() - memcpy perf. tests (C = compile-time constant) **\n"
902
./app/test/test_hash_perf.c:114: sprintf(name, "test_hash%d_data", hashtest_key_lens[table_index]);
903
./app/test/test_hash_perf.c:116: sprintf(name, "test_hash%d", hashtest_key_lens[table_index]);
904
./app/test/test_hash_perf.c:150: memcpy(temp_key, keys[i], hashtest_key_lens[table_index]);
905
./app/test/test_hash_perf.c:154: memcpy(keys[i], keys[swap_idx], hashtest_key_lens[table_index]);
906
./app/test/test_hash_perf.c:158: memcpy(keys[swap_idx], temp_key, hashtest_key_lens[table_index]);
907
./app/test/test_hash_perf.c:588: keys = rte_zmalloc(NULL, ENTRIES * sizeof(*keys), 0);
908
./app/test/test_link_bonding.c:270: test_params->pkt_eth_hdr = malloc(sizeof(struct ether_hdr) +
909
./app/test/test_link_bonding.c:936: memcpy(&slave_mac_addr, slave_mac, sizeof(struct ether_addr));
910
./app/test/test_link_bonding.c:937: memcpy(&bonded_mac_addr, slave_mac, sizeof(struct ether_addr));
911
./app/test/test_link_bonding.c:4594: pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
912
./app/test/test_link_bonding.c:4595: memcpy(client_mac.addr_bytes, mac_client1, ETHER_ADDR_LEN);
913
./app/test/test_link_bonding.c:4604: pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
914
./app/test/test_link_bonding.c:4605: memcpy(client_mac.addr_bytes, mac_client2, ETHER_ADDR_LEN);
915
./app/test/test_link_bonding.c:4614: pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
916
./app/test/test_link_bonding.c:4615: memcpy(client_mac.addr_bytes, mac_client3, ETHER_ADDR_LEN);
917
./app/test/test_link_bonding.c:4624: pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
918
./app/test/test_link_bonding.c:4625: memcpy(client_mac.addr_bytes, mac_client4, ETHER_ADDR_LEN);
919
./app/test/test_link_bonding.c:4707: pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
920
./app/test/test_link_bonding.c:4708: memcpy(client_mac.addr_bytes, mac_client1, ETHER_ADDR_LEN);
921
./app/test/test_link_bonding.c:4718: pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
922
./app/test/test_link_bonding.c:4719: memcpy(client_mac.addr_bytes, mac_client2, ETHER_ADDR_LEN);
923
./app/test/test_link_bonding.c:4729: pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
924
./app/test/test_link_bonding.c:4730: memcpy(client_mac.addr_bytes, mac_client3, ETHER_ADDR_LEN);
925
./app/test/test_link_bonding.c:4740: pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
926
./app/test/test_link_bonding.c:4741: memcpy(client_mac.addr_bytes, mac_client4, ETHER_ADDR_LEN);
927
./app/test/test_link_bonding.c:4833: pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
928
./app/test/test_link_bonding.c:4834: memcpy(client_mac.addr_bytes, mac_client1, ETHER_ADDR_LEN);
929
./app/test/test_reorder.c:102: b = rte_malloc(NULL, size, 0);
930
./app/test/test_reorder.c:109: b = rte_malloc(NULL, size, 0);
931
./app/test/test_timer.c:319: timers = rte_malloc(NULL, sizeof(*timers) * NB_STRESS2_TIMERS, 0);
932
./app/test/test_lpm6.c:1612: memcpy(ip, large_route_table[i].ip, 16);
933
./app/test/test_lpm6.c:1620: memcpy(ip, large_ips_table[i].ip, 16);
934
./app/test/test_lpm6.c:1862: memcpy(ip_batch[i], large_ips_table[i].ip, 16);
935
./app/test/test_table.h:88: m = rte_pktmbuf_alloc(pool); \
936
./app/test/test_memcpy.c:91:test_single_memcpy(unsigned int off_src, unsigned int off_dst, size_t size)
937
./app/test/test_memcpy.c:105: ret = rte_memcpy(dest + off_dst, src + off_src, size);
938
./app/test/test_memcpy.c:107: printf("rte_memcpy() returned %p, not %p\n",
939
./app/test/test_memcpy.c:114: printf("rte_memcpy() failed for %u bytes (offsets=%u,%u): "
940
./app/test/test_memcpy.c:124: printf("rte_memcpy() failed for %u bytes (offsets=%u,%u): "
941
./app/test/test_memcpy.c:134: printf("rte_memcpy() failed for %u bytes (offsets=%u,%u): "
942
./app/test/test_memcpy.c:156: ret = test_single_memcpy(off_src, off_dst,
943
./app/test/test_memcpy.c:167:test_memcpy(void)
944
./app/test/commands.c:442: commands = malloc(commands_len);
945
./app/test/commands.c:448: ptr += sprintf(ptr, "%s#", t->command);
946
./app/test/test_kni.c:188: pkts_burst[nb_rx] = rte_pktmbuf_alloc(mp);
947
./app/test/test_kni.c:396: kni = rte_kni_alloc(mp, &conf, &ops);
948
./app/test/test_kni.c:449: kni = rte_kni_alloc(mp, &conf, &ops);
949
./app/test/test_kni.c:546: kni = rte_kni_alloc(NULL, &conf, &ops);
950
./app/test/test_kni.c:555: kni = rte_kni_alloc(mp, NULL, NULL);
951
./app/test/test_kni.c:575: kni = rte_kni_alloc(mp, &conf, &ops);
952
./app/test/packet_burst_generator.c:64: rte_memcpy(seg_buf, buf, (size_t) copy_len);
953
./app/test/packet_burst_generator.c:70: rte_memcpy(seg_buf, buf, (size_t) len);
954
./app/test/packet_burst_generator.c:77: rte_memcpy(rte_pktmbuf_mtod_offset(pkt, char *, offset), buf,
955
./app/test/packet_burst_generator.c:147: rte_memcpy(ip_hdr->src_addr, src_addr, sizeof(ip_hdr->src_addr));
956
./app/test/packet_burst_generator.c:148: rte_memcpy(ip_hdr->dst_addr, dst_addr, sizeof(ip_hdr->dst_addr));
957
./app/test/packet_burst_generator.c:223: pkt = rte_pktmbuf_alloc(mp);
958
./app/test/packet_burst_generator.c:234: pkt_seg->next = rte_pktmbuf_alloc(mp);
959
./app/test/test_hash.c:1001: memcpy(&params, &ut_params, sizeof(params));
960
./app/test/test_hash.c:1011: memcpy(&params, &ut_params, sizeof(params));
961
./app/test/test_hash.c:1021: memcpy(&params, &ut_params, sizeof(params));
962
./app/test/test_hash.c:1031: memcpy(&params, &ut_params, sizeof(params));
963
./app/test/test_hash.c:1058: memcpy(&params, &ut_params, sizeof(params));
964
./app/test/test_hash.c:1072: memcpy(&params, &ut_params, sizeof(params));
965
./app/test/test_hash.c:1088: memcpy(&params, &ut_params, sizeof(params));
966
./app/test/test_table_acl.c:455: memcpy(acl_params.field_format, ipv4_defs, sizeof(ipv4_defs));
967
./app/test/test_table_acl.c:681: mbuf = rte_pktmbuf_alloc(pool);
968
./app/test/test_table_acl.c:696: memcpy(rte_pktmbuf_mtod(mbuf, char *), &five_tuple,
969
./app/test/test_timer_perf.c:73: tms = rte_malloc(NULL, sizeof(*tms) * MAX_ITERATIONS, 0);
970
./app/test/test_mbuf.c:152: m = rte_pktmbuf_alloc(pktmbuf_pool);
971
./app/test/test_mbuf.c:195: m = rte_pktmbuf_alloc(pktmbuf_pool);
972
./app/test/test_mbuf.c:339: m = rte_pktmbuf_alloc(pktmbuf_pool);
973
./app/test/test_mbuf.c:367: m->next = rte_pktmbuf_alloc(pktmbuf_pool);
974
./app/test/test_mbuf.c:443: m = rte_pktmbuf_alloc(pktmbuf_pool);
975
./app/test/test_mbuf.c:454: clone = rte_pktmbuf_alloc(pktmbuf_pool2);
976
./app/test/test_mbuf.c:483: clone2 = rte_pktmbuf_alloc(pktmbuf_pool2);
977
./app/test/test_mbuf.c:552: m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
978
./app/test/test_mbuf.c:554: printf("rte_pktmbuf_alloc() failed (%u)\n", i);
979
./app/test/test_mbuf.c:559: extra = rte_pktmbuf_alloc(pktmbuf_pool);
980
./app/test/test_mbuf.c:593: m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
981
./app/test/test_mbuf.c:595: printf("rte_pktmbuf_alloc() failed (%u)\n", i);
982
./app/test/test_mbuf.c:613: m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
983
./app/test/test_mbuf.c:615: printf("rte_pktmbuf_alloc() failed (%u)\n", i);
984
./app/test/test_mbuf.c:646: m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
985
./app/test/test_mbuf.c:648: printf("rte_pktmbuf_alloc() failed (%u)\n", i);
986
./app/test/test_mbuf.c:717: i != n && (m = rte_pktmbuf_alloc(refcnt_pool)) != NULL;
987
./app/test/test_mbuf.c:877: buf = rte_pktmbuf_alloc(pktmbuf_pool);
988
./app/test/test_cryptodev.c:89: struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
989
./app/test/test_cryptodev.c:101: rte_memcpy(dst, string, t_len);
990
./app/test/test_cryptodev.c:793: ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
991
./app/test/test_cryptodev.c:813: rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
992
./app/test/test_cryptodev.c:863: ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
993
./app/test/test_cryptodev.c:903: rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
994
./app/test/test_cryptodev.c:955: rte_memcpy(ut_params->digest,
995
./app/test/test_cryptodev.c:984: ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
996
./app/test/test_cryptodev.c:1005: rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
997
./app/test/test_cryptodev.c:1092: ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
998
./app/test/test_cryptodev.c:1113: rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
999
./app/test/test_cryptodev.c:1165: rte_memcpy(ut_params->digest,
1000
./app/test/test_cryptodev.c:1194: ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
1001
./app/test/test_cryptodev.c:1215: rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
1002
./app/test/test_cryptodev.c:1310: ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
1003
./app/test/test_cryptodev.c:1331: rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
1004
./app/test/test_cryptodev.c:1438: rte_memcpy(ut_params->digest,
1005
./app/test/test_cryptodev.c:1443: ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
1006
./app/test/test_cryptodev.c:1465: rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
1007
./app/test/test_cryptodev.c:1548: ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
1008
./app/test/test_cryptodev.c:1565: rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
1009
./app/test/test_cryptodev.c:1612: rte_memcpy(ut_params->digest,
1010
./app/test/test_cryptodev.c:1641: ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
1011
./app/test/test_cryptodev.c:1657: rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
1012
./app/test/test_cryptodev.c:1763: sessions = rte_malloc(NULL, (sizeof(struct rte_cryptodev_session *) *
1013
./app/test/test_cryptodev.c:1802: struct rte_mbuf *dst_m = rte_pktmbuf_alloc(ts_params->mbuf_pool);
1014
./app/test/test_cryptodev.c:1824: rte_memcpy(ut_params->digest,
1015
./app/test/test_cryptodev.c:1829: ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
1016
./app/test/test_cryptodev.c:1851: rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
1017
./app/proc_info/main.c:256: xstats = malloc(sizeof(xstats[0]) * len);
1018
./app/test-pmd/icmpecho.c:248: sprintf(buf, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
1019
./app/test-pmd/mempool_anon.c:129: if ((pa = calloc(pg_num, sizeof (*pa))) != NULL &&
1020
./app/test-pmd/flowgen.c:93:tx_mbuf_alloc(struct rte_mempool *mp)
1021
./app/test-pmd/flowgen.c:97: m = __rte_mbuf_raw_alloc(mp);
1022
./app/test-pmd/flowgen.c:170: pkt = tx_mbuf_alloc(mbp);
1023
./app/test-pmd/csumonly.c:499: memcpy(dst, src, len);
1024
./app/test-pmd/csumonly.c:547: memcpy(seglen, tx_pkt_seg_lengths, nb_seg * sizeof(seglen[0]));
1025
./app/test-pmd/csumonly.c:566: p = rte_pktmbuf_alloc(mp);
1026
./app/test-pmd/txonly.c:90:tx_mbuf_alloc(struct rte_mempool *mp)
1027
./app/test-pmd/txonly.c:94: m = __rte_mbuf_raw_alloc(mp);
1028
./app/test-pmd/txonly.c:115: rte_memcpy(seg_buf, buf, (size_t) copy_len);
1029
./app/test-pmd/txonly.c:121: rte_memcpy(seg_buf, buf, (size_t) len);
1030
./app/test-pmd/txonly.c:128: rte_memcpy(rte_pktmbuf_mtod_offset(pkt, char *, offset),
1031
./app/test-pmd/txonly.c:228: pkt = tx_mbuf_alloc(mbp);
1032
./app/test-pmd/txonly.c:243: pkt_seg->next = tx_mbuf_alloc(mbp);
1033
./app/test-pmd/testpmd.c:485: fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1034
./app/test-pmd/testpmd.c:489: rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1035
./app/test-pmd/testpmd.c:493: fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1036
./app/test-pmd/testpmd.c:497: rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1037
./app/test-pmd/testpmd.c:663: fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1038
./app/test-pmd/testpmd.c:666: rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
1039
./app/test-pmd/testpmd.c:670: fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
1040
./app/test-pmd/testpmd.c:673: rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
1041
./app/test-pmd/testpmd.c:1951: memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1042
./app/test-pmd/testpmd.c:1973: ports = rte_zmalloc("testpmd: ports",
1043
./app/test-pmd/testpmd.c:1978: "rte_zmalloc(%d struct rte_port) failed\n",
1044
./app/test-pmd/config.c:245: xstats = malloc(sizeof(xstats[0]) * len);
1045
./app/test-pmd/config.c:2160: (void)rte_memcpy(&flex_conf->flex_mask[idx],
1046
./app/test-pmd/config.c:2190: (void)rte_memcpy(&flex_conf->flex_set[idx],
1047
./app/test-pmd/cmdline.c:6117: (void)rte_memcpy(&filter.mac_addr, &res->address, ETHER_ADDR_LEN);
1048
./app/test-pmd/cmdline.c:6650: memcpy(&(tunnel_filter_conf.ip_addr.ipv6_addr),
1049
./app/test-pmd/cmdline.c:7925: (void)rte_memcpy(&filter.mac_addr, &res->mac_addr,
1050
./app/test-pmd/cmdline.c:8106: (void)rte_memcpy(&(ip), \
1051
./app/test-pmd/cmdline.c:8226: (void)rte_memcpy(&entry.input.flow.mac_vlan_flow.mac_addr,
1052
./app/test-pmd/cmdline.c:8231: (void)rte_memcpy(&entry.input.flow.tunnel_flow.mac_addr,
1053
./app/test-pmd/cmdline.c:8240: (void)rte_memcpy(entry.input.flow_ext.flexbytes,
1054
./app/test-pmd/cmdline.c:8889: (void)rte_memcpy(&port->dev_conf.fdir_conf.flex_conf.flex_mask[0],
1055
./lib/librte_cryptodev/rte_cryptodev.c:162:rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
1056
./lib/librte_cryptodev/rte_cryptodev.c:228: int retval = rte_cryptodev_data_alloc(dev_id, &cryptodev_data,
1057
./lib/librte_cryptodev/rte_cryptodev.c:872: user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1058
./lib/librte_mbuf_offload/rte_mbuf_offload.h:203:__rte_pktmbuf_offload_raw_alloc(struct rte_mempool *mp)
1059
./lib/librte_mbuf_offload/rte_mbuf_offload.h:225:rte_pktmbuf_offload_alloc(struct rte_mempool *mpool,
1060
./lib/librte_mbuf_offload/rte_mbuf_offload.h:228: struct rte_mbuf_offload *ol = __rte_pktmbuf_offload_raw_alloc(mpool);
1061
./lib/librte_port/rte_port_frag.c:169: memcpy(pkts, &p->frags[p->pos_frags], n_pkts * sizeof(void *));
1062
./lib/librte_port/rte_port_frag.c:176: memcpy(pkts, &p->frags[p->pos_frags], p->n_frags * sizeof(void *));
1063
./lib/librte_port/rte_port_frag.c:237: memcpy(dst, src, p->metadata_size);
1064
./lib/librte_port/rte_port_frag.c:246: memcpy(&pkts[n_pkts_out], p->frags,
1065
./lib/librte_port/rte_port_frag.c:254: memcpy(&pkts[n_pkts_out], p->frags,
1066
./lib/librte_port/rte_port_frag.c:282: memcpy(stats, &p->stats, sizeof(p->stats));
1067
./lib/librte_port/rte_port_ethdev.c:126: memcpy(stats, &p->stats, sizeof(p->stats));
1068
./lib/librte_port/rte_port_ethdev.c:306: memcpy(stats, &p->stats, sizeof(p->stats));
1069
./lib/librte_port/rte_port_ethdev.c:515: memcpy(stats, &p->stats, sizeof(p->stats));
1070
./lib/librte_port/rte_port_ras.c:332: memcpy(stats, &p->stats, sizeof(p->stats));
1071
./lib/librte_port/rte_port_ring.c:154: memcpy(stats, &p->stats, sizeof(p->stats));
1072
./lib/librte_port/rte_port_ring.c:422: memcpy(stats, &p->stats, sizeof(p->stats));
1073
./lib/librte_port/rte_port_ring.c:743: memcpy(stats, &p->stats, sizeof(p->stats));
1074
./lib/librte_port/rte_port_source_sink.c:131: memcpy(stats, &p->stats, sizeof(p->stats));
1075
./lib/librte_port/rte_port_source_sink.c:229: memcpy(stats, &p->stats, sizeof(p->stats));
1076
./lib/librte_port/rte_port_sched.c:124: memcpy(stats, &p->stats, sizeof(p->stats));
1077
./lib/librte_port/rte_port_sched.c:298: memcpy(stats, &p->stats, sizeof(p->stats));
1078
./lib/librte_vhost/virtio-net.c:310: virtqueue = rte_malloc(NULL,
1079
./lib/librte_vhost/virtio-net.c:357: new_ll_dev = rte_zmalloc(NULL, sizeof(struct virtio_net_config_ll), 0);
1080
./lib/librte_vhost/virtio-net.c:590: memcpy(new_vq, old_vq, sizeof(*new_vq));
1081
./lib/librte_vhost/virtio-net.c:592: memcpy(new_ll_dev, old_ll_dev, sizeof(*new_ll_dev));
1082
./lib/librte_vhost/vhost_user/vhost-net-user.c:182: memcpy(fds, CMSG_DATA(cmsg), fdsize);
1083
./lib/librte_vhost/vhost_user/vhost-net-user.c:245: memcpy(CMSG_DATA(cmsg), fds, fdsize);
1084
./lib/librte_vhost/vhost_user/vhost-net-user.c:298: ctx = calloc(1, sizeof(*ctx));
1085
./lib/librte_vhost/vhost_user/vhost-net-user.c:463: vserver = calloc(sizeof(struct vhost_server), 1);
1086
./lib/librte_vhost/vhost_user/virtio-net-user.c:117: dev->mem = calloc(1,
1087
./lib/librte_vhost/vhost_cuse/virtio-net-cdev.c:177: memcpy(&procmap.prot, in[2], PROT_SZ);
1088
./lib/librte_vhost/vhost_cuse/virtio-net-cdev.c:178: memcpy(&procmap.fname, in[7], PATH_MAX);
1089
./lib/librte_vhost/vhost_cuse/virtio-net-cdev.c:288: dev->mem = calloc(1, sizeof(struct virtio_memory) +
1090
./lib/librte_vhost/vhost_rxtx.c:166: rte_memcpy((void *)(uintptr_t)(buff_addr + vb_offset),
1091
./lib/librte_vhost/vhost_rxtx.c:220: rte_memcpy((void *)(uintptr_t)buff_hdr_addr,
1092
./lib/librte_vhost/vhost_rxtx.c:293: rte_memcpy((void *)(uintptr_t)vb_hdr_addr,
1093
./lib/librte_vhost/vhost_rxtx.c:334: rte_memcpy((void *)(uintptr_t)(vb_addr + vb_offset),
1094
./lib/librte_vhost/vhost_rxtx.c:658: m = rte_pktmbuf_alloc(mbuf_pool);
1095
./lib/librte_vhost/vhost_rxtx.c:674: rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *, seg_offset),
1096
./lib/librte_vhost/vhost_rxtx.c:692: cur = rte_pktmbuf_alloc(mbuf_pool);
1097
./lib/librte_vhost/vhost_rxtx.c:724: cur = rte_pktmbuf_alloc(mbuf_pool);
1098
./lib/librte_pipeline/rte_pipeline.c:380: memcpy(&table->ops, params->ops, sizeof(struct rte_table_ops));
1099
./lib/librte_pipeline/rte_pipeline.c:451: memcpy(table->default_entry, default_entry, table->entry_size);
1100
./lib/librte_pipeline/rte_pipeline.c:481: memcpy(entry, table->default_entry, table->entry_size);
1101
./lib/librte_pipeline/rte_pipeline.c:856: memcpy(&port->ops, params->ops, sizeof(struct rte_port_in_ops));
1102
./lib/librte_pipeline/rte_pipeline.c:906: memcpy(&port->ops, params->ops, sizeof(struct rte_port_out_ops));
1103
./lib/librte_mempool/rte_dom0_mempool.c:109: pa = calloc(pg_num, sizeof (*pa));
1104
./lib/librte_mempool/rte_mempool.c:536: te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
1105
./lib/librte_mempool/rte_mempool.c:620: memcpy(mp->elt_pa, paddr, sizeof (mp->elt_pa[0]) * pg_num);
1106
./lib/librte_ether/rte_ethdev.c:141:rte_eth_dev_data_alloc(void)
1107
./lib/librte_ether/rte_ethdev.c:199: rte_eth_dev_data_alloc();
1108
./lib/librte_ether/rte_ethdev.c:266: eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
1109
./lib/librte_ether/rte_ethdev.c:423: strcpy(name, tmp);
1110
./lib/librte_ether/rte_ethdev.c:677: dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
1111
./lib/librte_ether/rte_ethdev.c:837: dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
1112
./lib/librte_ether/rte_ethdev.c:935: memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
1113
./lib/librte_ether/rte_ethdev.c:2481: user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1114
./lib/librte_ether/rte_ethdev.c:2853: struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
1115
./lib/librte_ether/rte_ethdev.c:2894: struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
1116
./lib/librte_ether/rte_ether.h:241: rte_memcpy(addr, p, ETHER_ADDR_LEN);
1117
./lib/librte_ivshmem/rte_ivshmem.c:273: e_local = malloc(sizeof(config->metadata->entry));
1118
./lib/librte_ivshmem/rte_ivshmem.c:276: ms_local = malloc(sizeof(config->memseg_cache));
1119
./lib/librte_ivshmem/rte_ivshmem.c:282: memcpy(e_local, config->metadata->entry, sizeof(config->metadata->entry));
1120
./lib/librte_ivshmem/rte_ivshmem.c:283: memcpy(ms_local, config->memseg_cache, sizeof(config->memseg_cache));
1121
./lib/librte_ivshmem/rte_ivshmem.c:371: memcpy(&tmp, &pages[biggest_idx], sizeof(struct rte_memseg));
1122
./lib/librte_ivshmem/rte_ivshmem.c:378: memcpy(&pages[j+1], &pages[j], sizeof(struct rte_memseg));
1123
./lib/librte_ivshmem/rte_ivshmem.c:385: memcpy(&pages[i], &tmp, sizeof(struct rte_memseg));
1124
./lib/librte_ivshmem/rte_ivshmem.c:454: memcpy(config->metadata->entry, e_local, sizeof(config->metadata->entry));
1125
./lib/librte_ivshmem/rte_ivshmem.c:455: memcpy(config->memseg_cache, ms_local, sizeof(config->memseg_cache));
1126
./lib/librte_ivshmem/rte_ivshmem.c:491: memcpy(&entry->mz, mz, sizeof(struct rte_memzone));
1127
./lib/librte_ring/rte_ring.c:177: te = rte_zmalloc("RING_TAILQ_ENTRY", sizeof(*te), 0);
1128
./lib/librte_eal/bsdapp/eal/eal_pci.c:174: *uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
1129
./lib/librte_eal/bsdapp/eal/eal_pci.c:182: memcpy(&(*uio_res)->pci_addr, &dev->addr, sizeof((*uio_res)->pci_addr));
1130
./lib/librte_eal/bsdapp/eal/eal_pci.c:207: maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0);
1131
./lib/librte_eal/bsdapp/eal/eal_pci.c:236: strcpy(maps[map_idx].path, devname);
1132
./lib/librte_eal/bsdapp/eal/eal_pci.c:253: dev = malloc(sizeof(*dev));
1133
./lib/librte_eal/bsdapp/eal/eal_pci.c:430: memcpy(buf, &pi.pi_data, len);
1134
./lib/librte_eal/bsdapp/eal/eal_pci.c:462: memcpy(&pi.pi_data, buf, len);
1135
./lib/librte_eal/bsdapp/eal/eal_memory.c:75: addr = malloc(internal_config.memory);
1136
./lib/librte_eal/bsdapp/eal/eal_hugepage_info.c:126: memcpy(tmp_hpi, hpi, sizeof(struct hugepage_info));
1137
./lib/librte_eal/bsdapp/eal/eal.c:195: memcpy(rte_mem_cfg_addr, &early_mem_config, sizeof(early_mem_config));
1138
./lib/librte_eal/bsdapp/contigmem/contigmem.c:143: contigmalloc(contigmem_buffer_size, M_CONTIGMEM, M_ZERO, 0,
1139
./lib/librte_eal/linuxapp/kni/kni_net.c:74: memcpy(dev->dev_addr, kni->lad_dev->dev_addr, ETH_ALEN);
1140
./lib/librte_eal/linuxapp/kni/kni_net.c:173: memcpy(skb_put(skb, len), data_kva, len);
1141
./lib/librte_eal/linuxapp/kni/kni_net.c:254: memcpy(alloc_data_kva, data_kva, len);
1142
./lib/librte_eal/linuxapp/kni/kni_net.c:331: memcpy(skb_put(skb, len), data_kva, len);
1143
./lib/librte_eal/linuxapp/kni/kni_net.c:346: memcpy(skb_put(skb, len), data_kva, len);
1144
./lib/librte_eal/linuxapp/kni/kni_net.c:429: memcpy(data_kva, skb->data, len);
1145
./lib/librte_eal/linuxapp/kni/kni_net.c:547: memcpy(kni->sync_kva, req, sizeof(struct rte_kni_request));
1146
./lib/librte_eal/linuxapp/kni/kni_net.c:569: memcpy(req, kni->sync_kva, sizeof(struct rte_kni_request));
1147
./lib/librte_eal/linuxapp/kni/kni_net.c:597: memcpy(eth->h_source, saddr ? saddr : dev->dev_addr, dev->addr_len);
1148
./lib/librte_eal/linuxapp/kni/kni_net.c:598: memcpy(eth->h_dest, daddr ? daddr : dev->dev_addr, dev->addr_len);
1149
./lib/librte_eal/linuxapp/kni/kni_net.c:615: memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
1150
./lib/librte_eal/linuxapp/kni/kni_net.c:616: memcpy(eth->h_dest, dev->dev_addr, dev->addr_len);
1151
./lib/librte_eal/linuxapp/kni/kni_net.c:634: memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1152
./lib/librte_eal/linuxapp/kni/kni_vhost.c:669: if (!(q = (struct kni_vhost_queue *)sk_alloc(
1153
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:488: memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN);
1154
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:982: memcpy(&adapter->mac_table[0].addr, hw->mac.perm_addr,
1155
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:1523: //memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
1156
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:2275: memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1157
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:2276: memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
1158
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:2377: hw = vmalloc(sizeof(struct ixgbe_hw));
1159
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:2655: memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
1160
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:2657: memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
1161
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:2671: memcpy(&adapter->mac_table[0].addr, hw->mac.perm_addr,
1162
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h:1417:#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
1163
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h:1675:#define vmalloc_node(a,b) vmalloc(a)
1164
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h:1976: memcpy(skb->data, from, len)
1165
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h:1978: memcpy(skb->data + offset, from, len)
1166
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h:2826: void *addr = vmalloc(size);
1167
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:829: eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
1168
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:840: memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
1169
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:866: eeprom_buff = kmalloc(max_len, GFP_KERNEL);
1170
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:898: memcpy(ptr, bytes, eeprom->len);
1171
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:991: tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring));
1172
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:1018: rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring));
1173
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:1181: memcpy(data, *ixgbe_gstrings_test,
1174
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:1186: memcpy(p, ixgbe_gstrings_net_stats[i].stat_string,
1175
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:1191: memcpy(p, ixgbe_gstrings_stats[i].stat_string,
1176
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:1196: sprintf(p, "tx_queue_%u_packets", i);
1177
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:1198: sprintf(p, "tx_queue_%u_bytes", i);
1178
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:1202: sprintf(p, "rx_queue_%u_packets", i);
1179
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:1204: sprintf(p, "rx_queue_%u_bytes", i);
1180
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:1209: sprintf(p, "tx_pb_%u_pxon", i);
1181
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:1211: sprintf(p, "tx_pb_%u_pxoff", i);
1182
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:1215: sprintf(p, "rx_pb_%u_pxon", i);
1183
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:1217: sprintf(p, "rx_pb_%u_pxoff", i);
1184
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:1222: sprintf(p, "VF %d Rx Packets", i);
1185
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:1224: sprintf(p, "VF %d Rx Bytes", i);
1186
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:1226: sprintf(p, "VF %d Tx Packets", i);
1187
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:1228: sprintf(p, "VF %d Tx Bytes", i);
1188
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:1230: sprintf(p, "VF %d MC Packets", i);
1189
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:2640: memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
1190
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.c:648: memcpy(dest, src, len);
1191
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.c:686: buf = kmalloc(len, gfp);
1192
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.c:688: memcpy(buf, s, len);
1193
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.c:697: void *ret = kmalloc(size, flags);
1194
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.c:760: adapter->config_space = kmalloc(size, GFP_KERNEL);
1195
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.c:823: memcpy(p, src, len);
1196
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:281: regbuf = kmalloc(reglen, GFP_USER);
1197
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:398: data = kmalloc(eeprom.len, GFP_USER);
1198
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:443: data = kmalloc(eeprom.len, GFP_USER);
1199
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:667: data = kmalloc(test.len * sizeof(u64), GFP_USER);
1200
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:714: data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
1201
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:760: data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER);
1202
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_hwmon.c:54: return sprintf(buf, "loc%u\n",
1203
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_hwmon.c:74: return sprintf(buf, "%u\n", value);
1204
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_hwmon.c:88: return sprintf(buf, "%u\n", value);
1205
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_hwmon.c:102: return sprintf(buf, "%u\n", value);
1206
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_hwmon.c:223: igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
1207
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h:1523:#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
1208
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h:1801:#define vmalloc_node(a,b) vmalloc(a)
1209
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h:2143: memcpy(skb->data, from, len)
1210
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h:2145: memcpy(skb->data + offset, from, len)
1211
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h:3224: void *addr = vmalloc(size);
1212
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:684: sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
1213
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:687: sprintf(q_vector->name, "%s-tx-%u", netdev->name,
1214
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:690: sprintf(q_vector->name, "%s-rx-%u", netdev->name,
1215
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:693: sprintf(q_vector->name, "%s-unused", netdev->name);
1216
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:938: adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
1217
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:1023: adapter->msix_entries = kcalloc(numvecs,
1218
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:2773: memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
1219
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:2775: memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
1220
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:2786: memcpy(&adapter->mac_table[0].addr, hw->mac.addr, netdev->addr_len);
1221
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:4220: memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1222
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:4221: memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
1223
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:4274: memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
1224
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:4276: memcpy(mta_list + (i++ * ETH_ALEN), ha->dmi_addr, ETH_ALEN);
1225
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:4287: memcpy(mta_list + (i++ * ETH_ALEN),
1226
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:4290: memcpy(mta_list + (i++ * ETH_ALEN),
1227
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:6641: memcpy(addr, vf_mac, 6);
1228
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:7208: memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
1229
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:7286: memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
1230
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:7678: memcpy(sh_info->frags + sh_info->nr_frags,
1231
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:9301: memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN);
1232
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:9332: memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
1233
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:9467: memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
1234
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:10001: memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
1235
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:10003: memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
1236
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:10014: memcpy(&adapter->mac_table[0].addr, hw->mac.addr, netdev->addr_len);
1237
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.c:213: memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1238
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:731: eeprom_buff = kmalloc(sizeof(u16) *
1239
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:753: memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
1240
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:780: eeprom_buff = kmalloc(max_len, GFP_KERNEL);
1241
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:804: memcpy(ptr, bytes, eeprom->len);
1242
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:892: temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
1243
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:894: temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
1244
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:910: memcpy(&temp_ring[i], adapter->tx_ring[i],
1245
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:927: memcpy(adapter->tx_ring[i], &temp_ring[i],
1246
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:936: memcpy(&temp_ring[i], adapter->rx_ring[i],
1247
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:954: memcpy(adapter->rx_ring[i], &temp_ring[i],
1248
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:2113: memcpy(data, *igb_gstrings_test,
1249
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:2118: memcpy(p, igb_gstrings_stats[i].stat_string,
1250
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:2123: memcpy(p, igb_gstrings_net_stats[i].stat_string,
1251
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:2128: sprintf(p, "tx_queue_%u_packets", i);
1252
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:2130: sprintf(p, "tx_queue_%u_bytes", i);
1253
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:2132: sprintf(p, "tx_queue_%u_restart", i);
1254
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:2136: sprintf(p, "rx_queue_%u_packets", i);
1255
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:2138: sprintf(p, "rx_queue_%u_bytes", i);
1256
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:2140: sprintf(p, "rx_queue_%u_drops", i);
1257
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:2142: sprintf(p, "rx_queue_%u_csum_err", i);
1258
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:2144: sprintf(p, "rx_queue_%u_alloc_failed", i);
1259
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:2146: sprintf(p, "rx_queue_%u_ipv4_packets", i);
1260
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:2148: sprintf(p, "rx_queue_%u_ipv4e_packets", i);
1261
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:2150: sprintf(p, "rx_queue_%u_ipv6_packets", i);
1262
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:2152: sprintf(p, "rx_queue_%u_ipv6e_packets", i);
1263
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:2154: sprintf(p, "rx_queue_%u_tcp_packets", i);
1264
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:2156: sprintf(p, "rx_queue_%u_udp_packets", i);
1265
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:2158: sprintf(p, "rx_queue_%u_sctp_packets", i);
1266
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:2160: sprintf(p, "rx_queue_%u_nfs_packets", i);
1267
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.c:598: memcpy(dest, src, len);
1268
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.c:672: buf = kmalloc(len, gfp);
1269
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.c:674: memcpy(buf, s, len);
1270
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.c:683: void *ret = kmalloc(size, flags);
1271
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.c:747: adapter->config_space = kmalloc(size, GFP_KERNEL);
1272
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.c:811: memcpy(p, src, len);
1273
./lib/librte_eal/linuxapp/kni/kni_misc.c:114: knet = kmalloc(sizeof(struct kni_net), GFP_KERNEL);
1274
./lib/librte_eal/linuxapp/eal/eal_pci.c:265: dev = malloc(sizeof(*dev));
1275
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:156: memcpy(&ms[i], &ms[i+1], sizeof(struct ivshmem_segment));
1276
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:362: memcpy(&ivshmem_config->segment[idx].entry, entry,
1277
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:617: memcpy(&ms_tbl[i], seg, sizeof(struct ivshmem_segment));
1278
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:651: memcpy(&ms_tbl[i].entry.mz, &mz, sizeof(struct rte_memzone));
1279
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:726: memcpy(&mcfg->memseg[i], &ms,
1280
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:781: memcpy(&mcfg->memzone[idx], &seg->entry.mz,
1281
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:815: te = rte_zmalloc("RING_TAILQ_ENTRY", sizeof(*te), 0);
1282
./lib/librte_eal/linuxapp/eal/eal_pci_uio.c:286: *uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
1283
./lib/librte_eal/linuxapp/eal/eal_pci_uio.c:294: memcpy(&(*uio_res)->pci_addr, &dev->addr, sizeof((*uio_res)->pci_addr));
1284
./lib/librte_eal/linuxapp/eal/eal_pci_uio.c:323: maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0);
1285
./lib/librte_eal/linuxapp/eal/eal_pci_uio.c:357: strcpy(maps[map_idx].path, devname);
1286
./lib/librte_eal/linuxapp/eal/eal_memory.c:745: memcpy(&tmp, &hugepg_tbl[compare_idx],
1287
./lib/librte_eal/linuxapp/eal/eal_memory.c:747: memcpy(&hugepg_tbl[compare_idx], &hugepg_tbl[i],
1288
./lib/librte_eal/linuxapp/eal/eal_memory.c:749: memcpy(&hugepg_tbl[i], &tmp, sizeof(struct hugepage_file));
1289
./lib/librte_eal/linuxapp/eal/eal_memory.c:789: memcpy(&dst[dst_pos], &src[src_pos], sizeof(struct hugepage_file));
1290
./lib/librte_eal/linuxapp/eal/eal_memory.c:1156: tmp_hp = malloc(nr_hugepages * sizeof(struct hugepage_file));
1291
./lib/librte_eal/linuxapp/eal/eal_log.c:76: memcpy(copybuf, buf, size);
1292
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:307: memcpy(&fd_ptr[RTE_INTR_VEC_RXTX_OFFSET], intr_handle->efds,
1293
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:441: callback = rte_zmalloc("interrupt callback list",
1294
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:467: if ((src = rte_zmalloc("interrupt source list",
1295
./lib/librte_eal/linuxapp/eal/eal.c:209: memcpy(rte_mem_cfg_addr, &early_mem_config, sizeof(early_mem_config));
1296
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:76: memcpy((chdr).__cmsg_data, &(fd), sizeof(fd));\
1297
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:79: memcpy(&(fd), (chdr).__cmsg_data, sizeof(fd))
1298
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:712: vfio_res = rte_zmalloc("VFIO_RES", sizeof(*vfio_res), 0);
1299
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:719: memcpy(&vfio_res->pci_addr, &dev->addr, sizeof(vfio_res->pci_addr));
1300
./lib/librte_eal/linuxapp/eal/eal_alarm.c:156: new_alarm = rte_zmalloc(NULL, sizeof(*new_alarm), 0);
1301
./lib/librte_eal/linuxapp/xen_dom0/dom0_mm_misc.c:405: rsv_mm_info = vmalloc(sizeof(struct memblock_info) * num_block);
1302
./lib/librte_eal/linuxapp/xen_dom0/dom0_mm_misc.c:505: memcpy(mm_data->name, meminfo->name, DOM0_NAME_MAX);
1303
./lib/librte_eal/linuxapp/xen_dom0/dom0_mm_misc.c:708: mm_data = vmalloc(sizeof(struct dom0_mm_data));
1304
./lib/librte_eal/common/malloc_heap.c:155:malloc_heap_alloc(struct malloc_heap *heap,
1305
./lib/librte_eal/common/malloc_heap.c:168: elem = malloc_elem_alloc(elem, size, align, bound);
1306
./lib/librte_eal/common/malloc_elem.c:206:malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
1307
./lib/librte_eal/common/malloc_heap.h:56:malloc_heap_alloc(struct malloc_heap *heap, const char *type, size_t size,
1308
./lib/librte_eal/common/eal_common_log.c:147: memcpy(hist_buf->buf, buf, size);
1309
./lib/librte_eal/common/eal_common_memzone.c:202: void *mz_addr = malloc_heap_alloc(&mcfg->malloc_heaps[socket], NULL,
1310
./lib/librte_eal/common/eal_common_memzone.c:211: mz_addr = malloc_heap_alloc(&mcfg->malloc_heaps[i],
1311
./lib/librte_eal/common/malloc_elem.h:162:malloc_elem_alloc(struct malloc_elem *elem, size_t size,
1312
./lib/librte_eal/common/eal_common_options.c:163: solib = malloc(sizeof(*solib));
1313
./lib/librte_eal/common/eal_common_options.c:165: RTE_LOG(ERR, EAL, "malloc(solib) failed\n");
1314
./lib/librte_eal/common/eal_common_options.c:665: rte_memcpy(&lcore_config[idx].cpuset, &cpuset,
1315
./lib/librte_eal/common/eal_common_devargs.c:89: devargs = malloc(sizeof(*devargs));
1316
./lib/librte_eal/common/include/generic/rte_memcpy.h:40: * Functions for vectorised implementation of memcpy().
1317
./lib/librte_eal/common/include/generic/rte_memcpy.h:133:rte_memcpy(void *dst, const void *src, size_t n);
1318
./lib/librte_eal/common/include/generic/rte_memcpy.h:138: * memcpy() function used by rte_memcpy macro
1319
./lib/librte_eal/common/include/arch/ppc_64/rte_memcpy.h:97:#define rte_memcpy(dst, src, n) \
1320
./lib/librte_eal/common/include/arch/ppc_64/rte_memcpy.h:99: memcpy((dst), (src), (n)) : \
1321
./lib/librte_eal/common/include/arch/x86/rte_memcpy.h:40: * Functions for SSE/AVX/AVX2 implementation of memcpy().
1322
./lib/librte_eal/common/include/arch/x86/rte_memcpy.h:68:rte_memcpy(void *dst, const void *src, size_t n) __attribute__((always_inline));
1323
./lib/librte_eal/common/include/arch/x86/rte_memcpy.h:196:rte_memcpy(void *dst, const void *src, size_t n)
1324
./lib/librte_eal/common/include/arch/x86/rte_memcpy.h:494:rte_memcpy(void *dst, const void *src, size_t n)
1325
./lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h:48: memcpy(dst, src, 16);
1326
./lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h:54: memcpy(dst, src, 32);
1327
./lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h:60: memcpy(dst, src, 48);
1328
./lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h:66: memcpy(dst, src, 64);
1329
./lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h:72: memcpy(dst, src, 128);
1330
./lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h:78: memcpy(dst, src, 256);
1331
./lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h:81:#define rte_memcpy(d, s, n) memcpy((d), (s), (n))
1332
./lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h:86: return memcpy(dst, src, n);
1333
./lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h:146:#define rte_memcpy(dst, src, n) \
1334
./lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h:148: memcpy((dst), (src), (n)) : \
1335
./lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h:283: memcpy(dst, src, 16);
1336
./lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h:289: memcpy(dst, src, 32);
1337
./lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h:295: memcpy(dst, src, 48);
1338
./lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h:301: memcpy(dst, src, 64);
1339
./lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h:307: memcpy(dst, src, 128);
1340
./lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h:313: memcpy(dst, src, 256);
1341
./lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h:317:rte_memcpy(void *dst, const void *src, size_t n)
1342
./lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h:319: return memcpy(dst, src, n);
1343
./lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h:325: return memcpy(dst, src, n);
1344
./lib/librte_eal/common/include/arch/tile/rte_memcpy.h:48: memcpy(dst, src, 16);
1345
./lib/librte_eal/common/include/arch/tile/rte_memcpy.h:54: memcpy(dst, src, 32);
1346
./lib/librte_eal/common/include/arch/tile/rte_memcpy.h:60: memcpy(dst, src, 48);
1347
./lib/librte_eal/common/include/arch/tile/rte_memcpy.h:66: memcpy(dst, src, 64);
1348
./lib/librte_eal/common/include/arch/tile/rte_memcpy.h:72: memcpy(dst, src, 128);
1349
./lib/librte_eal/common/include/arch/tile/rte_memcpy.h:78: memcpy(dst, src, 256);
1350
./lib/librte_eal/common/include/arch/tile/rte_memcpy.h:81:#define rte_memcpy(d, s, n) memcpy((d), (s), (n))
1351
./lib/librte_eal/common/include/arch/tile/rte_memcpy.h:86: return memcpy(dst, src, n);
1352
./lib/librte_eal/common/include/rte_malloc.h:75: * variable (in the same manner as malloc()).
1353
./lib/librte_eal/common/include/rte_malloc.h:85:rte_malloc(const char *type, size_t size, unsigned align);
1354
./lib/librte_eal/common/include/rte_malloc.h:90: * Equivalent to rte_malloc() except that the memory zone is
1355
./lib/librte_eal/common/include/rte_malloc.h:101: * variable (in the same manner as malloc()).
1356
./lib/librte_eal/common/include/rte_malloc.h:111:rte_zmalloc(const char *type, size_t size, unsigned align);
1357
./lib/librte_eal/common/include/rte_malloc.h:114: * Replacement function for calloc(), using huge-page memory. Memory area is
1358
./lib/librte_eal/common/include/rte_malloc.h:127: * variable (in the same manner as malloc()).
1359
./lib/librte_eal/common/include/rte_malloc.h:137:rte_calloc(const char *type, size_t num, size_t size, unsigned align);
1360
./lib/librte_eal/common/include/rte_malloc.h:150: * variable (in the same manner as malloc()).
1361
./lib/librte_eal/common/include/rte_malloc.h:173: * variable (in the same manner as malloc()).
1362
./lib/librte_eal/common/include/rte_malloc.h:179: * will behave the same as rte_malloc().
1363
./lib/librte_eal/common/include/rte_malloc.h:191: * Equivalent to rte_malloc() except that the memory zone is
1364
./lib/librte_eal/common/include/rte_malloc.h:201: * variable (in the same manner as malloc()).
1365
./lib/librte_eal/common/include/rte_malloc.h:207: * will behave the same as rte_zmalloc().
1366
./lib/librte_eal/common/include/rte_malloc.h:217: * Replacement function for calloc(), using huge-page memory. Memory area is
1367
./lib/librte_eal/common/include/rte_malloc.h:229: * variable (in the same manner as malloc()).
1368
./lib/librte_eal/common/include/rte_malloc.h:235: * will behave the same as rte_calloc().
1369
./lib/librte_eal/common/include/rte_malloc.h:248: * rte_malloc(), rte_zmalloc(), rte_calloc() or rte_realloc(). The behaviour of
1370
./lib/librte_eal/common/include/rte_malloc.h:266: * by a previous call to rte_malloc(), rte_zmalloc(), rte_calloc()
1371
./lib/librte_eal/common/rte_malloc.c:91: ret = malloc_heap_alloc(&mcfg->malloc_heaps[socket], type,
1372
./lib/librte_eal/common/rte_malloc.c:102: ret = malloc_heap_alloc(&mcfg->malloc_heaps[i], type,
1373
./lib/librte_eal/common/rte_malloc.c:115:rte_malloc(const char *type, size_t size, unsigned align)
1374
./lib/librte_eal/common/rte_malloc.c:137:rte_zmalloc(const char *type, size_t size, unsigned align)
1375
./lib/librte_eal/common/rte_malloc.c:155:rte_calloc(const char *type, size_t num, size_t size, unsigned align)
1376
./lib/librte_eal/common/rte_malloc.c:157: return rte_zmalloc(type, num * size, align);
1377
./lib/librte_eal/common/rte_malloc.c:167: return rte_malloc(NULL, size, align);
1378
./lib/librte_eal/common/rte_malloc.c:181: void *new_ptr = rte_malloc(NULL, size, align);
1379
./lib/librte_eal/common/rte_malloc.c:185: rte_memcpy(new_ptr, ptr, old_size < size ? old_size : size);
1380
./lib/librte_eal/common/rte_keepalive.c:95: keepcfg = rte_zmalloc("RTE_EAL_KEEPALIVE",
1381
./lib/librte_sched/rte_sched.c:626: port = rte_zmalloc("qos_params", mem_size, RTE_CACHE_LINE_SIZE);
1382
./lib/librte_sched/rte_sched.c:640: memcpy(port->qsize, params->qsize, sizeof(params->qsize));
1383
./lib/librte_sched/rte_sched.c:1003: memcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats));
1384
./lib/librte_sched/rte_sched.c:1032: memcpy(stats, &qe->stats, sizeof(struct rte_sched_queue_stats));
1385
./lib/librte_ip_frag/rte_ipv6_fragmentation.c:56: rte_memcpy(dst, src, sizeof(*dst));
1386
./lib/librte_ip_frag/rte_ipv6_fragmentation.c:134: out_pkt = rte_pktmbuf_alloc(pool_direct);
1387
./lib/librte_ip_frag/rte_ipv6_fragmentation.c:151: out_seg = rte_pktmbuf_alloc(pool_indirect);
1388
./lib/librte_ip_frag/rte_ipv4_fragmentation.c:57: rte_memcpy(dst, src, sizeof(*dst));
1389
./lib/librte_ip_frag/rte_ipv4_fragmentation.c:136: out_pkt = rte_pktmbuf_alloc(pool_direct);
1390
./lib/librte_ip_frag/rte_ipv4_fragmentation.c:153: out_seg = rte_pktmbuf_alloc(pool_indirect);
1391
./lib/librte_ip_frag/rte_ipv6_reassembly.c:166: rte_memcpy(&key.src_dst[0], ip_hdr->src_addr, 16);
1392
./lib/librte_ip_frag/rte_ipv6_reassembly.c:167: rte_memcpy(&key.src_dst[2], ip_hdr->dst_addr, 16);
1393
./lib/librte_reorder/rte_reorder.c:163: te = rte_zmalloc("REORDER_TAILQ_ENTRY", sizeof(*te), 0);
1394
./lib/librte_lpm/rte_lpm.c:188: te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
1395
./lib/librte_lpm/rte_lpm.c:385:tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)
1396
./lib/librte_lpm/rte_lpm.c:500: tbl8_group_index = tbl8_alloc(lpm->tbl8);
1397
./lib/librte_lpm/rte_lpm.c:537: tbl8_group_index = tbl8_alloc(lpm->tbl8);
1398
./lib/librte_lpm/rte_lpm6.c:189: te = rte_zmalloc("LPM6_TAILQ_ENTRY", sizeof(*te), 0);
1399
./lib/librte_lpm/rte_lpm6.c:324: rte_memcpy(lpm->rules_tbl[rule_index].ip, ip, RTE_LPM6_IPV6_ADDR_SIZE);
1400
./lib/librte_lpm/rte_lpm6.c:523: memcpy(masked_ip, ip, RTE_LPM6_IPV6_ADDR_SIZE);
1401
./lib/librte_lpm/rte_lpm6.c:711: memcpy(ip_masked, ip, RTE_LPM6_IPV6_ADDR_SIZE);
1402
./lib/librte_lpm/rte_lpm6.c:759: memcpy(ip_masked, ip, RTE_LPM6_IPV6_ADDR_SIZE);
1403
./lib/librte_lpm/rte_lpm6.c:819: memcpy(ip_masked, ips[i], RTE_LPM6_IPV6_ADDR_SIZE);
1404
./lib/librte_power/rte_power_acpi_cpufreq.c:456: rte_memcpy(freqs, pi->freqs, pi->nb_freqs * sizeof(uint32_t));
1405
./lib/librte_acl/rte_acl.c:247: te = rte_zmalloc("ACL_TAILQ_ENTRY", sizeof(*te), 0);
1406
./lib/librte_acl/rte_acl.c:291: memcpy(pos, rules, num * ctx->rule_sz);
1407
./lib/librte_acl/tb_mem.c:40: * Note, that tb_pool/tb_alloc() are not supposed to return NULL.
1408
./lib/librte_acl/tb_mem.c:44: * in the pool->fail before calling tb_alloc() for the given pool first time.
1409
./lib/librte_acl/tb_mem.c:55: block = calloc(1, size + sizeof(*pool->block));
1410
./lib/librte_acl/tb_mem.c:78:tb_alloc(struct tb_mem_pool *pool, size_t size)
1411
./lib/librte_acl/acl_bld.c:128:acl_build_alloc(struct acl_build_context *context, size_t n, size_t s)
1412
./lib/librte_acl/acl_bld.c:150: p = tb_alloc(&context->pool, alloc_size);
1413
./lib/librte_acl/acl_bld.c:192: node = acl_build_alloc(context, sizeof(struct rte_acl_node), 1);
1414
./lib/librte_acl/acl_bld.c:301: ptrs = acl_build_alloc(context, num_ptrs, sizeof(*ptrs));
1415
./lib/librte_acl/acl_bld.c:305: memcpy(ptrs, node->ptrs,
1416
./lib/librte_acl/acl_bld.c:426: next->ptrs = acl_build_alloc(context,
1417
./lib/librte_acl/acl_bld.c:449: next->mrt = acl_build_alloc(context, 1, sizeof(*next->mrt));
1418
./lib/librte_acl/acl_bld.c:450: memcpy(next->mrt, node->mrt, sizeof(*next->mrt));
1419
./lib/librte_acl/acl_bld.c:1000: end->mrt = acl_build_alloc(context, 1,
1420
./lib/librte_acl/acl_bld.c:1354: config = acl_build_alloc(context, 1, sizeof(*config));
1421
./lib/librte_acl/acl_bld.c:1355: memcpy(config, rule_sets[n]->config, sizeof(*config));
1422
./lib/librte_acl/acl_bld.c:1415: br = tb_alloc(&bcx->pool, sz);
1423
./lib/librte_acl/acl_bld.c:1452: memcpy(ctx->data_indexes + ofs, ctx->trie[i].data_index,
1424
./lib/librte_acl/tb_mem.h:69:void *tb_alloc(struct tb_mem_pool *pool, size_t size);
1425
./lib/librte_acl/acl_gen.c:112: memcpy(dst + node->dfa_gr64[i] * RTE_ACL_DFA_GR64_SIZE,
1426
./lib/librte_acl/acl_gen.c:402: memcpy(match + index->match_index, node->mrt,
1427
./lib/librte_acl/acl_gen.c:557: memcpy(ctx->trie, trie, sizeof(ctx->trie));
1428
./lib/librte_table/rte_table_lpm.c:243: memcpy(nht_entry, entry, lpm->entry_size);
1429
./lib/librte_table/rte_table_lpm.c:314: memcpy(entry, &lpm->nht[nht_pos * lpm->entry_size],
1430
./lib/librte_table/rte_table_lpm.c:367: memcpy(stats, &t->stats, sizeof(t->stats));
1431
./lib/librte_table/rte_table_stub.c:104: memcpy(stats, &t->stats, sizeof(t->stats));
1432
./lib/librte_table/rte_table_hash_lru.c:266: memcpy(data, entry, t->entry_size);
1433
./lib/librte_table/rte_table_hash_lru.c:295: memcpy(bkt_key, key, t->key_size);
1434
./lib/librte_table/rte_table_hash_lru.c:296: memcpy(data, entry, t->entry_size);
1435
./lib/librte_table/rte_table_hash_lru.c:314: memcpy(bkt_key, key, t->key_size);
1436
./lib/librte_table/rte_table_hash_lru.c:315: memcpy(data, entry, t->entry_size);
1437
./lib/librte_table/rte_table_hash_lru.c:353: memcpy(entry, data, t->entry_size);
1438
./lib/librte_table/rte_table_hash_lru.c:1074: memcpy(stats, &t->stats, sizeof(t->stats));
1439
./lib/librte_table/rte_table_hash_key8.c:222: memcpy(bucket_data, entry, f->entry_size);
1440
./lib/librte_table/rte_table_hash_key8.c:239: memcpy(bucket_data, entry, f->entry_size);
1441
./lib/librte_table/rte_table_hash_key8.c:251: memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
1442
./lib/librte_table/rte_table_hash_key8.c:288: memcpy(entry, bucket_data, f->entry_size);
1443
./lib/librte_table/rte_table_hash_key8.c:438: memcpy(bucket_data, entry, f->entry_size);
1444
./lib/librte_table/rte_table_hash_key8.c:460: memcpy(bucket_data, entry, f->entry_size);
1445
./lib/librte_table/rte_table_hash_key8.c:480: memcpy(&bucket->data[0], entry, f->entry_size);
1446
./lib/librte_table/rte_table_hash_key8.c:523: memcpy(entry, bucket_data,
1447
./lib/librte_table/rte_table_hash_key8.c:1421: memcpy(stats, &t->stats, sizeof(t->stats));
1448
./lib/librte_table/rte_table_acl.c:157: memcpy(&acl->cfg.defs[0], &p->field_format[0],
1449
./lib/librte_table/rte_table_acl.c:295: memcpy(&acl_rule.field[0],
1450
./lib/librte_table/rte_table_acl.c:321: memcpy(*entry_ptr, entry, acl->entry_size);
1451
./lib/librte_table/rte_table_acl.c:338: memcpy(rule_location, &acl_rule, acl->acl_params.rule_size);
1452
./lib/librte_table/rte_table_acl.c:359: memcpy(*entry_ptr, entry, acl->entry_size);
1453
./lib/librte_table/rte_table_acl.c:440: memcpy(entry, &acl->memory[pos * acl->entry_size],
1454
./lib/librte_table/rte_table_acl.c:533: memcpy(&acl_rule.field[0],
1455
./lib/librte_table/rte_table_acl.c:559: memcpy(entries_ptr[i], entries[i], acl->entry_size);
1456
./lib/librte_table/rte_table_acl.c:579: memcpy(rule_location, &acl_rule, acl->acl_params.rule_size);
1457
./lib/librte_table/rte_table_acl.c:627: memcpy(entries_ptr[i], entries[i], acl->entry_size);
1458
./lib/librte_table/rte_table_acl.c:746: memcpy(entries[i], &acl->memory[rule_pos[i] * acl->entry_size],
1459
./lib/librte_table/rte_table_acl.c:818: memcpy(stats, &acl->stats, sizeof(acl->stats));
1460
./lib/librte_table/rte_table_array.c:165: memcpy(table_entry, entry, t->entry_size);
1461
./lib/librte_table/rte_table_array.c:220: memcpy(stats, &array->stats, sizeof(array->stats));
1462
./lib/librte_table/rte_table_hash_key32.c:222: memcpy(bucket_data, entry, f->entry_size);
1463
./lib/librte_table/rte_table_hash_key32.c:239: memcpy(bucket_key, key, f->key_size);
1464
./lib/librte_table/rte_table_hash_key32.c:240: memcpy(bucket_data, entry, f->entry_size);
1465
./lib/librte_table/rte_table_hash_key32.c:252: memcpy(bucket->key[pos], key, f->key_size);
1466
./lib/librte_table/rte_table_hash_key32.c:253: memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
1467
./lib/librte_table/rte_table_hash_key32.c:291: memcpy(entry, bucket_data, f->entry_size);
1468
./lib/librte_table/rte_table_hash_key32.c:437: memcpy(bucket_data, entry, f->entry_size);
1469
./lib/librte_table/rte_table_hash_key32.c:458: memcpy(bucket_key, key, f->key_size);
1470
./lib/librte_table/rte_table_hash_key32.c:459: memcpy(bucket_data, entry, f->entry_size);
1471
./lib/librte_table/rte_table_hash_key32.c:478: memcpy(bucket->key[0], key, f->key_size);
1472
./lib/librte_table/rte_table_hash_key32.c:479: memcpy(&bucket->data[0], entry, f->entry_size);
1473
./lib/librte_table/rte_table_hash_key32.c:521: memcpy(entry, bucket_data,
1474
./lib/librte_table/rte_table_hash_key32.c:1116: memcpy(stats, &t->stats, sizeof(t->stats));
1475
./lib/librte_table/rte_table_hash_ext.c:298: memcpy(data, entry, t->entry_size);
1476
./lib/librte_table/rte_table_hash_ext.c:330: memcpy(bkt_key, key, t->key_size);
1477
./lib/librte_table/rte_table_hash_ext.c:331: memcpy(data, entry, t->entry_size);
1478
./lib/librte_table/rte_table_hash_ext.c:361: memcpy(bkt_key, key, t->key_size);
1479
./lib/librte_table/rte_table_hash_ext.c:362: memcpy(data, entry, t->entry_size);
1480
./lib/librte_table/rte_table_hash_ext.c:404: memcpy(entry, data, t->entry_size);
1481
./lib/librte_table/rte_table_hash_ext.c:1131: memcpy(stats, &t->stats, sizeof(t->stats));
1482
./lib/librte_table/rte_table_hash_key16.c:230: memcpy(bucket_data, entry, f->entry_size);
1483
./lib/librte_table/rte_table_hash_key16.c:247: memcpy(bucket_key, key, f->key_size);
1484
./lib/librte_table/rte_table_hash_key16.c:248: memcpy(bucket_data, entry, f->entry_size);
1485
./lib/librte_table/rte_table_hash_key16.c:260: memcpy(bucket->key[pos], key, f->key_size);
1486
./lib/librte_table/rte_table_hash_key16.c:261: memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
1487
./lib/librte_table/rte_table_hash_key16.c:299: memcpy(entry, bucket_data, f->entry_size);
1488
./lib/librte_table/rte_table_hash_key16.c:452: memcpy(bucket_data, entry, f->entry_size);
1489
./lib/librte_table/rte_table_hash_key16.c:471: memcpy(bucket_key, key, f->key_size);
1490
./lib/librte_table/rte_table_hash_key16.c:472: memcpy(bucket_data, entry, f->entry_size);
1491
./lib/librte_table/rte_table_hash_key16.c:490: memcpy(bucket->key[0], key, f->key_size);
1492
./lib/librte_table/rte_table_hash_key16.c:491: memcpy(&bucket->data[0], entry, f->entry_size);
1493
./lib/librte_table/rte_table_hash_key16.c:533: memcpy(entry, bucket_data,
1494
./lib/librte_table/rte_table_hash_key16.c:1469: memcpy(stats, &t->stats, sizeof(t->stats));
1495
./lib/librte_table/rte_table_lpm_ipv6.c:254: memcpy(nht_entry, entry, lpm->entry_size);
1496
./lib/librte_table/rte_table_lpm_ipv6.c:328: memcpy(entry, &lpm->nht[nht_pos * lpm->entry_size],
1497
./lib/librte_table/rte_table_lpm_ipv6.c:381: memcpy(stats, &t->stats, sizeof(t->stats));
1498
./lib/librte_cfgfile/rte_cfgfile.c:103: cfg = malloc(sizeof(*cfg) + sizeof(cfg->sections[0]) *
1499
./lib/librte_cfgfile/rte_cfgfile.c:163: cfg->sections[curr_section] = malloc(
1500
./lib/librte_cfgfile/rte_cfgfile.c:207: sect->entries[curr_entry] = malloc(
1501
./lib/librte_hash/rte_fbk_hash.c:146: te = rte_zmalloc("FBK_HASH_TAILQ_ENTRY", sizeof(*te), 0);
1502
./lib/librte_hash/rte_cuckoo_hash.c:236: te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
1503
./lib/librte_hash/rte_cuckoo_hash.c:632: rte_memcpy(new_k->key, key, h->key_len);
1504
./lib/librte_kvargs/rte_kvargs.c:196: kvlist = malloc(sizeof(*kvlist));
1505
./lib/librte_mbuf/rte_mbuf.h:1050: * Please use rte_pktmbuf_alloc().
1506
./lib/librte_mbuf/rte_mbuf.h:1058:static inline struct rte_mbuf *__rte_mbuf_raw_alloc(struct rte_mempool *mp)
1507
./lib/librte_mbuf/rte_mbuf.h:1121:#define rte_ctrlmbuf_alloc(mp) rte_pktmbuf_alloc(mp)
1508
./lib/librte_mbuf/rte_mbuf.h:1330:static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
1509
./lib/librte_mbuf/rte_mbuf.h:1333: if ((m = __rte_mbuf_raw_alloc(mp)) != NULL)
1510
./lib/librte_mbuf/rte_mbuf.h:1505: if (unlikely ((mc = rte_pktmbuf_alloc(mp)) == NULL))
1511
./lib/librte_mbuf/rte_mbuf.h:1519: (mi = rte_pktmbuf_alloc(mp)) != NULL);
1512
./lib/librte_mbuf/rte_mbuf.c:108: memcpy(mbp_priv, user_mbp_priv, sizeof(*mbp_priv));
1513
./lib/librte_kni/rte_kni.h:127:extern struct rte_kni *rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
1514
./lib/librte_kni/rte_kni.c:152:kni_memzone_pool_alloc(void)
1515
./lib/librte_kni/rte_kni.c:225: rte_malloc(NULL,
1516
./lib/librte_kni/rte_kni.c:316:rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
1517
./lib/librte_kni/rte_kni.c:338: slot = kni_memzone_pool_alloc();
1518
./lib/librte_kni/rte_kni.c:355: memcpy(&ctx->ops, ops, sizeof(struct rte_kni_ops));
1519
./lib/librte_kni/rte_kni.c:605: pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
1520
./lib/librte_kni/rte_kni.c:688: memcpy(&kni->ops, ops, sizeof(struct rte_kni_ops));
1521
./lib/librte_cmdline/cmdline_parse_etheraddr.c:166: memcpy(res, tmp, sizeof(struct ether_addr));
1522
./lib/librte_cmdline/cmdline_parse.c:151: memcpy(&token_hdr, token_p, sizeof(token_hdr));
1523
./lib/librte_cmdline/cmdline_parse.c:193: memcpy(&token_hdr, token_p, sizeof(token_hdr));
1524
./lib/librte_cmdline/cmdline_parse.c:309: memcpy(&f, &inst->f, sizeof(f));
1525
./lib/librte_cmdline/cmdline_parse.c:310: memcpy(&data, &inst->data, sizeof(data));
1526
./lib/librte_cmdline/cmdline_parse.c:397: memcpy(&token_hdr, token_p, sizeof(token_hdr));
1527
./lib/librte_cmdline/cmdline_parse.c:490: memcpy(&token_hdr, token_p, sizeof(token_hdr));
1528
./lib/librte_cmdline/cmdline_cirbuf.c:95: memcpy(cbuf->buf + cbuf->start - n + e, c, n);
1529
./lib/librte_cmdline/cmdline_cirbuf.c:102: memcpy(cbuf->buf, c + n - (cbuf->start + e) , cbuf->start + e);
1530
./lib/librte_cmdline/cmdline_cirbuf.c:103: memcpy(cbuf->buf + cbuf->maxlen - n + (cbuf->start + e), c,
1531
./lib/librte_cmdline/cmdline_cirbuf.c:126: memcpy(cbuf->buf + cbuf->end + !e, c, n);
1532
./lib/librte_cmdline/cmdline_cirbuf.c:133: memcpy(cbuf->buf + cbuf->end + !e, c, cbuf->maxlen -
1533
./lib/librte_cmdline/cmdline_cirbuf.c:135: memcpy(cbuf->buf, c + cbuf->maxlen - cbuf->end - 1 + e,
1534
./lib/librte_cmdline/cmdline_cirbuf.c:392: memcpy(c, cbuf->buf + cbuf->start , n);
1535
./lib/librte_cmdline/cmdline_cirbuf.c:398: memcpy(c, cbuf->buf + cbuf->start , n);
1536
./lib/librte_cmdline/cmdline_cirbuf.c:405: memcpy(c, cbuf->buf + cbuf->start , cbuf->maxlen - cbuf->start);
1537
./lib/librte_cmdline/cmdline_cirbuf.c:406: memcpy(c + cbuf->maxlen - cbuf->start, cbuf->buf,
1538
./lib/librte_cmdline/cmdline_cirbuf.c:430: memcpy(c, cbuf->buf + cbuf->end - n + 1, n);
1539
./lib/librte_cmdline/cmdline_cirbuf.c:436: memcpy(c, cbuf->buf + cbuf->end - n + 1, n);
1540
./lib/librte_cmdline/cmdline_cirbuf.c:443: memcpy(c + cbuf->maxlen - cbuf->start,
1541
./lib/librte_cmdline/cmdline_cirbuf.c:445: memcpy(c, cbuf->buf + cbuf->maxlen - n + cbuf->end +1,
1542
./lib/librte_cmdline/cmdline_parse_ipaddr.c:193: memcpy(dst, tmp, INADDRSZ);
1543
./lib/librte_cmdline/cmdline_parse_ipaddr.c:304: memcpy(dst, tmp, IN6ADDRSZ);
1544
./lib/librte_cmdline/cmdline_parse_ipaddr.c:360: memcpy(res, &ipaddr, sizeof(ipaddr));
1545
./lib/librte_cmdline/cmdline_parse_ipaddr.c:367: memcpy(res, &ipaddr, sizeof(ipaddr));
1546
./lib/librte_cmdline/cmdline_socket.c:99: memcpy(&term, &oldterm, sizeof(term));
1547
./lib/librte_cmdline/cmdline_socket.c:107: memcpy(&cl->oldterm, &oldterm, sizeof(term));
1548
./lib/librte_cmdline/cmdline_parse_string.c:223: memcpy(dstbuf, s, len);
1549
./lib/librte_cmdline/cmdline_parse_num.c:172: memcpy(&nd, &((struct cmdline_token_num *)tk)->num_data, sizeof(nd));
1550
./lib/librte_cmdline/cmdline_parse_num.c:391: memcpy(&nd, &((struct cmdline_token_num *)tk)->num_data, sizeof(nd));
1551
./lib/librte_cmdline/cmdline.c:137: cl = malloc(sizeof(struct cmdline));
1552
./lib/librte_cmdline/cmdline.c:190: buf = malloc(BUFSIZ);
1553
./lib/librte_cmdline/cmdline_rdline.c:122: memcpy(rdl->prompt, prompt, rdl->prompt_size);
1554
./lib/librte_cmdline/cmdline_rdline.c:181: memcpy(rdl->left_buf+len_l, rdl->right_buf, len_r);
1555
./drivers/net/enic/base/vnic_devcmd.h:271: * in: (u32)a0=new vlan rewrite mode
1556
./drivers/net/enic/base/vnic_devcmd.h:307: * in: (u16)a0=new default vlan
1559
./examples/l3fwd-acl/main.c:1031: FILE *fh = fopen(rule_path, "rb");
1560
./examples/netmap_compat/bridge/bridge.c:201:netmap_port_open(uint32_t idx)
1561
./examples/netmap_compat/bridge/bridge.c:209: port->fd = rte_netmap_open("/dev/netmap", O_RDWR);
1562
./examples/netmap_compat/bridge/bridge.c:303: err = netmap_port_open(i);
1563
./examples/netmap_compat/lib/compat_netmap.h:75:int rte_netmap_open(const char *pathname, int flags);
1564
./examples/netmap_compat/lib/compat_netmap.c:843:rte_netmap_open(__rte_unused const char *pathname, __rte_unused int flags)
1565
./examples/vhost_xen/vhost_monitor.c:97: xs = xs_daemon_open();
1566
./examples/vhost_xen/xenstore_parse.c:765: d_fd = open(XEN_GNTDEV_FNAME, O_RDWR);
1567
./examples/vhost_xen/xenstore_parse.c:778: xs = xs_daemon_open();
1568
./examples/l2fwd-ivshmem/host/host.c:183: file = fopen(path, "w");
1569
./examples/ip_pipeline/cpu_core_map.c:264: fd = fopen(FILE_LINUX_CPU_N_LCORES, "r");
1570
./examples/ip_pipeline/cpu_core_map.c:293: fd = fopen(buffer, "r");
1571
./examples/ip_pipeline/cpu_core_map.c:319: fd = fopen(buffer, "r");
1572
./examples/ip_pipeline/config_parse.c:2444: file = fopen(file_name, "w");
1573
./examples/ip_pipeline/pipeline/pipeline_firewall.c:284: f = fopen(filename, "r");
1574
./examples/ip_pipeline/pipeline/pipeline_firewall.c:471: f = fopen(filename, "r");
1575
./examples/ip_pipeline/pipeline/pipeline_master_be.c:109: int fd = open(app->script_file, O_RDONLY);
1576
./examples/ip_pipeline/pipeline/pipeline_common_fe.c:1219: fd = open(file_name, O_RDONLY);
1577
./examples/ethtool/ethtool-app/ethapp.c:246: fp_regs = fopen(params->opt, "wb");
1578
./examples/ethtool/ethtool-app/ethapp.c:286: fp_eeprom = fopen(params->opt, "wb");
1579
./examples/ethtool/ethtool-app/ethapp.c:389: stat = rte_ethtool_net_open(params->port);
1580
./examples/ethtool/lib/rte_ethtool.h:234:int rte_ethtool_net_open(uint8_t port_id);
1581
./examples/ethtool/lib/rte_ethtool.c:260:rte_ethtool_net_open(uint8_t port_id)
1582
./examples/exception_path/main.c:195: fd = open("/dev/net/tun", O_RDWR);
1583
./drivers/net/szedata2/rte_eth_szedata2.c:930: szedata_open(internals->sze_dev);
1584
./drivers/net/szedata2/rte_eth_szedata2.c:1028: szedata_open(internals->sze_dev);
1585
./drivers/net/mpipe/mpipe_tilegx.c:712: rc = gxio_mpipe_link_open(&priv->link, priv->context,
1586
./drivers/net/mlx4/mlx4.c:415: file = fopen(path, "rb");
1587
./drivers/net/mlx4/mlx4.c:484: file = fopen(path, "rb");
1588
./drivers/net/mlx4/mlx4.c:528: file = fopen(path, "wb");
1589
./drivers/net/mlx4/mlx4.c:5031: file = fopen(path, "rb");
1590
./drivers/net/enic/base/vnic_dev.h:168:int vnic_dev_open(struct vnic_dev *vdev, int arg);
1591
./drivers/net/enic/base/vnic_dev.c:576:int vnic_dev_open(struct vnic_dev *vdev, int arg)
1592
./drivers/net/enic/enic_main.c:827:static int enic_dev_open(struct enic *enic)
1593
./drivers/net/enic/enic_main.c:1069: err = enic_dev_open(enic);
1594
./drivers/net/mlx5/mlx5_ethdev.c:112: file = fopen(path, "rb");
1595
./drivers/net/mlx5/mlx5_ethdev.c:181: file = fopen(path, "rb");
1596
./drivers/net/mlx5/mlx5_ethdev.c:225: file = fopen(path, "wb");
1597
./drivers/net/mlx5/mlx5_ethdev.c:795: file = fopen(path, "rb");
1598
./drivers/net/xenvirt/rte_eth_xenvirt.c:749: if (gntalloc_open() != 0) {
1599
./drivers/net/xenvirt/rte_xen_lib.c:114: if ((fd = open(PAGEMAP_FNAME, O_RDONLY)) < 0 ||
1600
./drivers/net/xenvirt/rte_xen_lib.c:131:gntalloc_open(void)
1601
./drivers/net/xenvirt/rte_xen_lib.c:133: gntalloc_fd = open(XEN_GNTALLOC_FNAME, O_RDWR);
1602
./drivers/net/xenvirt/rte_xen_lib.c:283: xs = xs_domain_open();
1603
./drivers/net/xenvirt/rte_xen_lib.h:81:gntalloc_open(void);
1604
./drivers/net/pcap/rte_eth_pcap.c:684: * with pcap_dump_open(). We create big enough an Ethernet
1605
./drivers/net/pcap/rte_eth_pcap.c:695: if ((*dumper = pcap_dump_open(tx_pcap, pcap_filename)) == NULL) {
1606
./drivers/net/bnx2x/bnx2x.c:9597: f = open(fwname, O_RDONLY);
1607
./drivers/net/virtio/virtio_ethdev.c:974: f = fopen(filename, "r");
1608
./drivers/net/virtio/virtio_ethdev.c:1117: pci_dev->intr_handle.fd = open(dirname, O_RDWR);
1609
./drivers/net/virtio/virtio_ethdev.c:1147: fp = fopen("/proc/ioports", "r");
1610
./app/test-acl/main.c:509: f = fopen(config.trace_file, "r");
1611
./app/test-acl/main.c:817: f = fopen(config.rule_file, "r");
1612
./app/test/test_eal_fs.c:86: fd = fopen(filename,"w");
1613
./app/test/test_eal_fs.c:106: fd = fopen(filename,"w");
1614
./app/test/test_eal_fs.c:127: fd = fopen(filename,"w");
1615
./app/test/test_eal_fs.c:141: fd = fopen(filename,"w");
1616
./app/test/test_eal_fs.c:156: fd = fopen(filename,"w");
1617
./app/test/test_eal_fs.c:170: fd = fopen(filename,"w");
1618
./app/test/test_eal_flags.c:130: hugedir_handle = fopen("/proc/mounts", "r");
1619
./app/test/test_eal_flags.c:904: hugedir_handle = fopen("/proc/mounts", "r");
1620
./app/test/test_ivshmem.c:101: fd = open(pathname, O_RDWR, 0660);
1621
./app/test/test_power_acpi_cpufreq.c:68: f = fopen(fullpath, "r");
1622
./app/test-pmd/mempool_anon.c:66: if ((fd = open(PAGEMAP_FNAME, O_RDONLY)) < 0)
1623
./app/test-pmd/parameters.c:207: config_file = fopen(config_filename, "r");
1624
./lib/librte_vhost/vhost_cuse/vhost-net-cdev.c:84:vhost_net_open(fuse_req_t req, struct fuse_file_info *fi)
1625
./lib/librte_vhost/vhost_cuse/vhost-net-cdev.c:99: fuse_reply_open(req, fi);
1626
./lib/librte_vhost/vhost_cuse/virtio-net-cdev.c:112: fmap = fopen(mapfile, "r");
1627
./lib/librte_vhost/vhost_cuse/virtio-net-cdev.c:233: fd = open(memfile, O_RDWR);
1628
./lib/librte_vhost/vhost_cuse/eventfd_copy.c:57: eventfd_link = open(eventfd_cdev, O_RDWR);
1629
./lib/librte_ivshmem/rte_ivshmem.c:188: f = fopen("/proc/self/maps", "r");
1630
./lib/librte_ivshmem/rte_ivshmem.c:641: fd = open(pathname, O_RDWR | O_CREAT, 0660);
1631
./lib/librte_eal/bsdapp/eal/eal_pci.c:165: dev->intr_handle.fd = open(devname, O_RDWR);
1632
./lib/librte_eal/bsdapp/eal/eal_pci.c:217: fd = open(devname, O_RDWR);
1633
./lib/librte_eal/bsdapp/eal/eal_pci.c:367: fd = open("/dev/pci", O_RDONLY);
1634
./lib/librte_eal/bsdapp/eal/eal_pci.c:420: fd = open("/dev/pci", O_RDONLY);
1635
./lib/librte_eal/bsdapp/eal/eal_pci.c:464: fd = open("/dev/pci", O_RDONLY);
1636
./lib/librte_eal/bsdapp/eal/eal_memory.c:144: fd_hugepage_info = open(eal_hugepage_info_path(), O_RDONLY);
1637
./lib/librte_eal/bsdapp/eal/eal_memory.c:159: fd_hugepage = open(hpi->hugedir, O_RDWR);
1638
./lib/librte_eal/bsdapp/eal/eal_hugepage_info.c:54: int fd = open(filename, O_CREAT | O_RDWR, 0666);
1639
./lib/librte_eal/bsdapp/eal/eal_hugepage_info.c:97: fd = open(CONTIGMEM_DEV, O_RDWR);
1640
./lib/librte_eal/bsdapp/eal/eal.c:129: if ((f = fopen(filename, "r")) == NULL) {
1641
./lib/librte_eal/bsdapp/eal/eal.c:171: mem_cfg_fd = open(pathname, O_RDWR | O_CREAT, 0660);
1642
./lib/librte_eal/bsdapp/eal/eal.c:210: mem_cfg_fd = open(pathname, O_RDWR);
1643
./lib/librte_eal/bsdapp/eal/eal.c:234: if (((mem_cfg_fd = open(pathname, O_RDWR)) >= 0) &&
1644
./lib/librte_eal/bsdapp/eal/eal.c:482: fd = open("/dev/io", O_RDWR);
1645
./lib/librte_eal/bsdapp/contigmem/contigmem.c:200:contigmem_open(struct cdev *cdev, int fflags, int devtype,
1646
./lib/librte_eal/bsdapp/nic_uio/nic_uio.c:164:nic_uio_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
1647
./lib/librte_eal/linuxapp/kni/kni_net.c:67:kni_net_open(struct net_device *dev)
1648
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:1798:int ixgbe_open(struct net_device *netdev)
1649
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:1757: dev_open(netdev);
1650
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h:3520:int _kc_simple_open(struct inode *inode, struct file *file);
1651
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:131:static int igb_open(struct net_device *);
1652
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:3219:static int __igb_open(struct net_device *netdev, bool resuming)
1653
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:3324:static int igb_open(struct net_device *netdev)
1654
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:3326: return __igb_open(netdev, false);
1655
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:8996: err = __igb_open(netdev, true);
1656
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.c:36:int igb_vmdq_open(struct net_device *dev)
1657
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c:1822: dev_open(netdev);
1658
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.c:1261:int _kc_simple_open(struct inode *inode, struct file *file)
1659
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.h:32:int igb_vmdq_open(struct net_device *dev);
1660
./lib/librte_eal/linuxapp/kni/kni_misc.c:59:static int kni_open(struct inode *inode, struct file *file);
1661
./lib/librte_eal/linuxapp/kni/kni_misc.c:227:kni_open(struct inode *inode, struct file *file)
1662
./lib/librte_eal/linuxapp/eal/eal_timer.c:176: fd = open(DEV_HPET, O_RDONLY);
1663
./lib/librte_eal/linuxapp/eal/eal_timer.c:239: stream = fopen("/proc/cpuinfo", "r");
1664
./lib/librte_eal/linuxapp/eal/eal_pci.c:72: f = fopen(filename, "w");
1665
./lib/librte_eal/linuxapp/eal/eal_pci.c:210: f = fopen(filename, "r");
1666
./lib/librte_eal/linuxapp/eal/eal_pci.c:501: f = fopen(filename, "rw+");
1667
./lib/librte_eal/linuxapp/eal/eal_pci.c:541: f = fopen(filename, "rw+");
1668
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:474: fd = open(path, O_CREAT | O_RDWR, 0600);
1669
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:525: fd = open(path, O_RDONLY);
1670
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:662: fd_zero = open("/dev/zero", O_RDWR);
1671
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:691: fd = open(seg->path, O_RDWR);
1672
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:879: fd = open(path, O_RDWR);
1673
./lib/librte_eal/linuxapp/eal/eal_pci_uio.c:111: f = fopen(filename, "r");
1674
./lib/librte_eal/linuxapp/eal/eal_pci_uio.c:257: dev->intr_handle.fd = open(devname, O_RDWR);
1675
./lib/librte_eal/linuxapp/eal/eal_pci_uio.c:266: dev->intr_handle.uio_cfg_fd = open(cfgname, O_RDWR);
1676
./lib/librte_eal/linuxapp/eal/eal_pci_uio.c:333: fd = open(devname, O_RDWR);
1677
./lib/librte_eal/linuxapp/eal/eal_xen_memory.c:91: fd = open("/dev/zero", O_RDONLY);
1678
./lib/librte_eal/linuxapp/eal/eal_xen_memory.c:205: xen_fd = open(DOM0_MM_DEV, O_RDWR);
1679
./lib/librte_eal/linuxapp/eal/eal_xen_memory.c:323: xen_fd = open(DOM0_MM_DEV, O_RDWR);
1680
./lib/librte_eal/linuxapp/eal/eal_memory.c:128: int fd = open("/proc/self/pagemap", O_RDONLY);
1681
./lib/librte_eal/linuxapp/eal/eal_memory.c:172: fd = open("/proc/self/pagemap", O_RDONLY);
1682
./lib/librte_eal/linuxapp/eal/eal_memory.c:236: int retval, fd = open(RANDOMIZE_VA_SPACE_FILE, O_RDONLY);
1683
./lib/librte_eal/linuxapp/eal/eal_memory.c:275: fd = open("/dev/zero", O_RDONLY);
1684
./lib/librte_eal/linuxapp/eal/eal_memory.c:395: fd = open(hugepg_tbl[i].filepath, O_CREAT | O_RDWR, 0755);
1685
./lib/librte_eal/linuxapp/eal/eal_memory.c:511: fd = open(filepath, O_CREAT | O_RDWR, 0755);
1686
./lib/librte_eal/linuxapp/eal/eal_memory.c:637: f = fopen("/proc/self/numa_maps", "r");
1687
./lib/librte_eal/linuxapp/eal/eal_memory.c:762: int fd = open(filename, O_CREAT | O_RDWR, 0666);
1688
./lib/librte_eal/linuxapp/eal/eal_memory.c:896: fd = open(hp->filepath, O_RDWR);
1689
./lib/librte_eal/linuxapp/eal/eal_memory.c:1468: fd_zero = open("/dev/zero", O_RDONLY);
1690
./lib/librte_eal/linuxapp/eal/eal_memory.c:1473: fd_hugepage = open(eal_hugepage_info_path(), O_RDONLY);
1691
./lib/librte_eal/linuxapp/eal/eal_memory.c:1559: fd = open(hp[i].filepath, O_RDWR);
1692
./lib/librte_eal/linuxapp/eal/eal_hugepage_info.c:112: FILE *fd = fopen(proc_meminfo, "r");
1693
./lib/librte_eal/linuxapp/eal/eal_hugepage_info.c:148: FILE *fd = fopen(proc_mounts, "r");
1694
./lib/librte_eal/linuxapp/eal/eal_hugepage_info.c:320: hpi->lock_descriptor = open(hpi->hugedir, O_RDONLY);
1695
./lib/librte_eal/linuxapp/eal/eal.c:135: if ((f = fopen(filename, "r")) == NULL) {
1696
./lib/librte_eal/linuxapp/eal/eal.c:185: mem_cfg_fd = open(pathname, O_RDWR | O_CREAT, 0660);
1697
./lib/librte_eal/linuxapp/eal/eal.c:230: mem_cfg_fd = open(pathname, O_RDWR);
1698
./lib/librte_eal/linuxapp/eal/eal.c:281: if (((mem_cfg_fd = open(pathname, O_RDWR)) >= 0) &&
1699
./lib/librte_eal/linuxapp/eal/eal.c:911: FILE *fd = fopen("/proc/modules", "r");
1700
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:356: vfio_container_fd = open(VFIO_CONTAINER_PATH, O_RDWR);
1701
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:437: vfio_group_fd = open(filename, O_RDWR);
1702
./lib/librte_eal/linuxapp/xen_dom0/dom0_mm_misc.c:92:static int dom0_open(struct inode *inode, struct file *file);
1703
./lib/librte_eal/linuxapp/xen_dom0/dom0_mm_misc.c:621:dom0_open(struct inode *inode, struct file *file)
1704
./lib/librte_eal/common/eal_common_pci_uio.c:71: fd = open(uio_res->maps[i].path, O_RDWR);
1705
./lib/librte_eal/common/eal_common_options.c:232: solib->lib_handle = dlopen(solib->name, RTLD_NOW);
1706
./lib/librte_eal/common/include/arch/ppc_64/rte_cpuflags.h:146: auxv_fd = open("/proc/self/auxv", O_RDONLY);
1707
./lib/librte_eal/common/include/arch/arm/rte_cpuflags_32.h:144: auxv_fd = open("/proc/self/auxv", O_RDONLY);
1708
./lib/librte_eal/common/include/arch/arm/rte_cpuflags_64.h:107: auxv_fd = open("/proc/self/auxv", O_RDONLY);
1709
./lib/librte_power/guest_channel.c:77: fd = open(fd_path, O_RDWR);
1710
./lib/librte_power/rte_power_acpi_cpufreq.c:164: f = fopen(fullpath, "rw+");
1711
./lib/librte_power/rte_power_acpi_cpufreq.c:214: f = fopen(fullpath, "r");
1712
./lib/librte_power/rte_power_acpi_cpufreq.c:270: f = fopen(fullpath, "rw+");
1713
./lib/librte_power/rte_power_acpi_cpufreq.c:367: f = fopen(fullpath, "rw+");
1714
./lib/librte_cfgfile/rte_cfgfile.c:99: FILE *f = fopen(filename, "r");
1715
./lib/librte_kni/rte_kni.c:218: kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
1716
./lib/librte_cmdline/cmdline_socket.c:84: fd = open(path, O_RDONLY, 0);
1717
./lib/librte_cmdline/cmdline_socket.c:86: dprintf("open() failed\n");
1720
./examples/quota_watermark/qw/init.c:138: rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
1721
./examples/quota_watermark/qw/init.c:170: rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
1722
./examples/quota_watermark/qw/main.c:341: rte_panic("%s\n", rte_strerror(rte_errno));
1723
./examples/load_balancer/init.c:267: rte_panic("Algorithmic error (I/O RX rings)\n");
1724
./examples/load_balancer/init.c:279: rte_panic("Algorithmic error (worker input rings)\n");
1725
./examples/load_balancer/init.c:309: rte_panic("Algorithmic error (no I/O core to handle TX of port %u)\n",
1726
./examples/load_balancer/init.c:350: rte_panic("Algorithmic error (I/O TX rings)\n");
1727
./examples/load_balancer/config.c:891: rte_panic("Algorithmic error (too many worker lcores)\n");
1728
./examples/packet_ordering/main.c:628: rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
1729
./examples/packet_ordering/main.c:657: rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
1730
./examples/packet_ordering/main.c:662: rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
1731
./examples/packet_ordering/main.c:668: rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
1732
./examples/l2fwd-jobstats/main.c:999: lcore_id, rte_strerror(-ret));
1733
./examples/l2fwd-jobstats/main.c:1021: lcore_id, qconf->rx_port_list[i], rte_strerror(-ret));
1734
./examples/qos_meter/main.c:370: rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_rx, ret);
1735
./examples/qos_meter/main.c:376: rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_rx, ret);
1736
./examples/qos_meter/main.c:382: rte_exit(EXIT_FAILURE, "Port %d TX queue setup error (%d)\n", port_rx, ret);
1737
./examples/qos_meter/main.c:386: rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_tx, ret);
1738
./examples/qos_meter/main.c:392: rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_tx, ret);
1739
./examples/qos_meter/main.c:398: rte_exit(EXIT_FAILURE, "Port %d TX queue setup error (%d)\n", port_tx, ret);
1740
./examples/qos_meter/main.c:402: rte_exit(EXIT_FAILURE, "Port %d start error (%d)\n", port_rx, ret);
1741
./examples/qos_meter/main.c:406: rte_exit(EXIT_FAILURE, "Port %d start error (%d)\n", port_tx, ret);
1742
./examples/multi_process/l2fwd_fork/main.c:248: printf("sched_setaffinity failed:%s\n", strerror(errno));
1743
./examples/multi_process/l2fwd_fork/main.c:265: printf("sched_getaffinity failed:%s\n", strerror(errno));
1744
./examples/vm_power_manager/guest_cli/vm_power_cli_guest.h:45:int guest_channel_send_msg(struct channel_packet *pkt, unsigned lcore_id);
1745
./examples/vm_power_manager/guest_cli/vm_power_cli_guest.c:113: cmdline_printf(cl, "Error sending message: %s\n", strerror(ret));
1746
./examples/vm_power_manager/channel_manager.c:259: strerror(errno),
1747
./examples/vm_power_manager/channel_manager.c:271: "'%s'\n", strerror(errno), info->channel_path);
1748
./examples/vm_power_manager/channel_manager.c:278: "socket for '%s'\n", strerror(errno), info->channel_path);
1749
./examples/vm_power_manager/channel_manager.c:305: " for '%s'\n", strerror(errno), info->channel_path);
1750
./examples/vm_power_manager/channel_manager.c:371: CHANNEL_MGR_SOCKET_PATH, strerror(errno));
1751
./examples/vm_power_manager/channel_manager.c:475: "%s\n", socket_path, strerror(errno));
1752
./examples/vm_power_manager/channel_monitor.c:174: "error %s\n", strerror(errno));
1753
./examples/vm_power_manager/channel_monitor.c:222: chan_info->channel_path, strerror(err));
1754
./examples/vhost_xen/xenstore_parse.c:96: domid, refid, strerror(errno));
1755
./examples/vhost_xen/xenstore_parse.c:109: domid, refid, strerror(errno));
1756
./examples/performance-thread/common/lthread.c:230: perror("Failed to create scheduler");
1757
./examples/performance-thread/pthread_shim/pthread_shim.c:180: error_str = dlerror(); \
1758
./examples/ip_pipeline/init.c:869: "init error (%" PRId32 ")\n",
1759
./examples/ip_pipeline/init.c:898: "%s init error (%" PRId32 ")\n",
1760
./examples/ip_pipeline/init.c:924: "%s init error (%" PRId32 ")\n",
1761
./examples/ip_pipeline/init.c:1012: " init error (%" PRId32 ")\n",
1762
./examples/ip_pipeline/init.c:1036: "init error (% " PRId32 ")\n",
1763
./examples/ip_pipeline/pipeline/pipeline_master_be.c:129: rte_panic("CLI poll error (%" PRId32 ")\n", status);
1764
./examples/ethtool/ethtool-app/ethapp.c:488: strerror(-stat));
1765
./drivers/net/ixgbe/base/ixgbe_mbx.c:96:s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
1766
./drivers/net/ixgbe/base/ixgbe_mbx.c:104: ret_val = mbx->ops.check_for_msg(hw, mbx_id);
1767
./drivers/net/ixgbe/base/ixgbe_mbx.c:156:STATIC s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
1768
./drivers/net/ixgbe/base/ixgbe_mbx.c:166: while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
1769
./drivers/net/ixgbe/base/ixgbe_mbx.c:233: ret_val = ixgbe_poll_for_msg(hw, mbx_id);
1770
./drivers/net/ixgbe/base/ixgbe_mbx.h:143:s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
1771
./drivers/net/ixgbe/ixgbe_pf.c:384:ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
1772
./drivers/net/ixgbe/ixgbe_pf.c:422: ixgbe_vf_reset_msg(dev, vf);
1773
./drivers/net/ixgbe/ixgbe_pf.c:688: if (!ixgbe_check_for_msg(hw, vf))
1774
./drivers/net/mpipe/mpipe_tilegx.c:1437: if (unlikely(gxio_mpipe_idesc_has_error(idesc))) {
1775
./drivers/net/mlx4/mlx4.c:489: if (((size_t)ret < size) && (ferror(file)))
1776
./drivers/net/mlx4/mlx4.c:533: if (((size_t)ret < size) || (ferror(file)))
1777
./drivers/net/mlx4/mlx4.c:565: name, strerror(errno));
1778
./drivers/net/mlx4/mlx4.c:573: strerror(errno));
1779
./drivers/net/mlx4/mlx4.c:602: name, value_str, value, strerror(errno));
1780
./drivers/net/mlx4/mlx4.c:1769: (void *)dev, strerror(ret));
1781
./drivers/net/mlx4/mlx4.c:1780: (void *)dev, strerror(ret));
1782
./drivers/net/mlx4/mlx4.c:1819: (void *)dev, strerror(ret));
1783
./drivers/net/mlx4/mlx4.c:1836: (void *)dev, strerror(ret));
1784
./drivers/net/mlx4/mlx4.c:1842: (void *)dev, strerror(ret));
1785
./drivers/net/mlx4/mlx4.c:1851: (void *)dev, strerror(ret));
1786
./drivers/net/mlx4/mlx4.c:1858: (void *)dev, strerror(ret));
1787
./drivers/net/mlx4/mlx4.c:2452: (errno ? strerror(errno) : "Unknown error"));
1788
./drivers/net/mlx4/mlx4.c:2682: (errno ? strerror(errno) : "Unknown error"));
1789
./drivers/net/mlx4/mlx4.c:2740: (errno ? strerror(errno) : "Unknown error"));
1790
./drivers/net/mlx4/mlx4.c:3116: strerror(ret));
1791
./drivers/net/mlx4/mlx4.c:3520: ERROR("%p: cannot reset QP: %s", (void *)dev, strerror(err));
1792
./drivers/net/mlx4/mlx4.c:3526: ERROR("%p: cannot resize CQ: %s", (void *)dev, strerror(err));
1793
./drivers/net/mlx4/mlx4.c:3544: (void *)dev, strerror(err));
1794
./drivers/net/mlx4/mlx4.c:3625: strerror(err));
1795
./drivers/net/mlx4/mlx4.c:3634: (void *)dev, strerror(err));
1796
./drivers/net/mlx4/mlx4.c:3734: (void *)dev, strerror(ret));
1797
./drivers/net/mlx4/mlx4.c:3748: (void *)dev, strerror(ret));
1798
./drivers/net/mlx4/mlx4.c:3759: (void *)dev, strerror(ret));
1799
./drivers/net/mlx4/mlx4.c:3776: (void *)dev, strerror(ret));
1800
./drivers/net/mlx4/mlx4.c:3793: (void *)dev, strerror(ret));
1801
./drivers/net/mlx4/mlx4.c:3801: (void *)dev, strerror(ret));
1802
./drivers/net/mlx4/mlx4.c:3814: (void *)dev, strerror(ret));
1803
./drivers/net/mlx4/mlx4.c:3826: strerror(ret));
1804
./drivers/net/mlx4/mlx4.c:3836: (void *)dev, strerror(ret));
1805
./drivers/net/mlx4/mlx4.c:4037: (void *)dev, strerror(ret));
1806
./drivers/net/mlx4/mlx4.c:4617: WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
1807
./drivers/net/mlx4/mlx4.c:4626: strerror(errno));
1808
./drivers/net/mlx4/mlx4.c:4700: strerror(ret));
1809
./drivers/net/mlx4/mlx4.c:4786: strerror(ret));
1810
./drivers/net/mlx4/mlx4.c:4849: strerror(ret));
1811
./drivers/net/mlx4/mlx4.c:5412: ERROR("port query failed: %s", strerror(err));
1812
./drivers/net/mlx4/mlx4.c:5522: " (errno: %s)", strerror(errno));
1813
./drivers/net/i40e/i40e_pf.h:121:void i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
1814
./drivers/net/i40e/i40e_pf.c:902:i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
1815
./drivers/net/i40e/base/i40e_nvm.c:85: i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
1816
./drivers/net/i40e/base/i40e_nvm.c:120: i40e_debug(hw, I40E_DEBUG_NVM,
1817
./drivers/net/i40e/base/i40e_nvm.c:142: i40e_debug(hw, I40E_DEBUG_NVM,
1818
./drivers/net/i40e/base/i40e_nvm.c:205: i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
1819
./drivers/net/i40e/base/i40e_nvm.c:244: i40e_debug(hw, I40E_DEBUG_NVM,
1820
./drivers/net/i40e/base/i40e_nvm.c:269: i40e_debug(hw, I40E_DEBUG_NVM,
1821
./drivers/net/i40e/base/i40e_nvm.c:440: i40e_debug(hw, I40E_DEBUG_NVM,
1822
./drivers/net/i40e/base/i40e_nvm.c:445: i40e_debug(hw, I40E_DEBUG_NVM,
1823
./drivers/net/i40e/base/i40e_nvm.c:451: i40e_debug(hw, I40E_DEBUG_NVM,
1824
./drivers/net/i40e/base/i40e_nvm.c:792: i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n",
1825
./drivers/net/i40e/base/i40e_nvm.c:799: i40e_debug(hw, I40E_DEBUG_NVM,
1826
./drivers/net/i40e/base/i40e_nvm.c:833: i40e_debug(hw, I40E_DEBUG_NVM,
1827
./drivers/net/i40e/base/i40e_nvm.c:964: i40e_debug(hw, I40E_DEBUG_NVM,
1828
./drivers/net/i40e/base/i40e_nvm.c:1008: i40e_debug(hw, I40E_DEBUG_NVM,
1829
./drivers/net/i40e/base/i40e_nvm.c:1090: i40e_debug(hw, I40E_DEBUG_NVM,
1830
./drivers/net/i40e/base/i40e_nvm.c:1112: i40e_debug(hw, I40E_DEBUG_ALL,
1831
./drivers/net/i40e/base/i40e_nvm.c:1118: i40e_debug(hw, I40E_DEBUG_ALL,
1832
./drivers/net/i40e/base/i40e_nvm.c:1159: i40e_debug(hw, I40E_DEBUG_NVM,
1833
./drivers/net/i40e/base/i40e_nvm.c:1248: i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1834
./drivers/net/i40e/base/i40e_nvm.c:1257: i40e_debug(hw, I40E_DEBUG_NVM,
1835
./drivers/net/i40e/base/i40e_nvm.c:1273: i40e_debug(hw, I40E_DEBUG_NVM,
1836
./drivers/net/i40e/base/i40e_nvm.c:1288: i40e_debug(hw, I40E_DEBUG_NVM,
1837
./drivers/net/i40e/base/i40e_nvm.c:1316: i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1838
./drivers/net/i40e/base/i40e_nvm.c:1323: i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
1839
./drivers/net/i40e/base/i40e_nvm.c:1333: i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
1840
./drivers/net/i40e/base/i40e_nvm.c:1343: i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
1841
./drivers/net/i40e/base/i40e_nvm.c:1359: i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
1842
./drivers/net/i40e/base/i40e_nvm.c:1395: i40e_debug(hw, I40E_DEBUG_NVM,
1843
./drivers/net/i40e/base/i40e_nvm.c:1398: i40e_debug(hw, I40E_DEBUG_NVM,
1844
./drivers/net/i40e/base/i40e_nvm.c:1434: i40e_debug(hw, I40E_DEBUG_NVM,
1845
./drivers/net/i40e/base/i40e_nvm.c:1437: i40e_debug(hw, I40E_DEBUG_NVM,
1846
./drivers/net/i40e/base/i40e_nvm.c:1475: i40e_debug(hw, I40E_DEBUG_NVM,
1847
./drivers/net/i40e/base/i40e_nvm.c:1478: i40e_debug(hw, I40E_DEBUG_NVM,
1848
./drivers/net/i40e/base/i40e_prototype.h:116:enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
1849
./drivers/net/i40e/base/i40e_adminq.c:752: i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
1850
./drivers/net/i40e/base/i40e_adminq.c:822: i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
1851
./drivers/net/i40e/base/i40e_adminq.c:830: i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
1852
./drivers/net/i40e/base/i40e_adminq.c:864: i40e_debug(hw,
1853
./drivers/net/i40e/base/i40e_adminq.c:873: i40e_debug(hw,
1854
./drivers/net/i40e/base/i40e_adminq.c:888: i40e_debug(hw,
1855
./drivers/net/i40e/base/i40e_adminq.c:920: i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
1856
./drivers/net/i40e/base/i40e_adminq.c:956: i40e_debug(hw,
1857
./drivers/net/i40e/base/i40e_adminq.c:972: i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
1858
./drivers/net/i40e/base/i40e_adminq.c:984: i40e_debug(hw,
1859
./drivers/net/i40e/base/i40e_adminq.c:1070: i40e_debug(hw,
1860
./drivers/net/i40e/base/i40e_adminq.c:1085: i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
1861
./drivers/net/i40e/base/i40e_common.c:335: i40e_debug(hw, mask,
1862
./drivers/net/i40e/base/i40e_common.c:341: i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
1863
./drivers/net/i40e/base/i40e_common.c:344: i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
1864
./drivers/net/i40e/base/i40e_common.c:347: i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
1865
./drivers/net/i40e/base/i40e_common.c:352: i40e_debug(hw, mask, "AQ CMD Buffer:\n");
1866
./drivers/net/i40e/base/i40e_common.c:357: i40e_debug(hw, mask,
1867
./drivers/net/i40e/base/i40e_common.c:371: i40e_debug(hw, mask,
1868
./drivers/net/i40e/base/i40e_common.c:2104:enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
1869
./drivers/net/i40e/base/i40e_common.c:2575: i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
1870
./drivers/net/i40e/base/i40e_common.c:3544: i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
1871
./drivers/net/i40e/base/i40e_osdep.h:112:#define i40e_debug(h, m, s, ...) \
1872
./drivers/net/i40e/i40e_ethdev_vf.c:248:i40evf_parse_pfmsg(struct i40e_vf *vf,
1873
./drivers/net/i40e/i40e_ethdev_vf.c:304:i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
1874
./drivers/net/i40e/i40e_ethdev_vf.c:325: result = i40evf_parse_pfmsg(vf, &event, data);
1875
./drivers/net/i40e/i40e_ethdev_vf.c:345: ret = i40evf_read_pfmsg(dev, data);
1876
./drivers/net/i40e/i40e_ethdev.c:4831:i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
1877
./drivers/net/i40e/i40e_ethdev.c:4859: i40e_pf_host_handle_vf_msg(dev,
1878
./drivers/net/i40e/i40e_ethdev.c:4915: i40e_dev_handle_aq_msg(dev);
1879
./drivers/net/i40e/i40e_ethdev.c:4980: i40e_dev_handle_aq_msg(dev);
1880
./drivers/net/enic/enic_compat.h:67:#define pr_warn(y, args...) dev_warning(0, y, ##args)
1881
./drivers/net/enic/enic_compat.h:84:#define dev_debug(x, args...) dev_printk(DEBUG, args)
1882
./drivers/net/enic/base/vnic_dev.c:771: pr_warn("notify block %p still allocated.\n" \
1883
./drivers/net/enic/base/vnic_devcmd.h:522: STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */
1884
./drivers/net/enic/enic_main.c:126:static void enic_log_q_error(struct enic *enic)
1885
./drivers/net/enic/enic_main.c:573: enic_log_q_error(enic);
1886
./drivers/net/enic/enic_main.c:1052: dev_debug(enic, " Initializing ENIC PMD version %s\n", DRV_VERSION);
1887
./drivers/net/mlx5/mlx5_rxmode.c:92: (errno ? strerror(errno) : "Unknown error"));
1888
./drivers/net/mlx5/mlx5_rxmode.c:151: ERROR("cannot enable promiscuous mode: %s", strerror(ret));
1889
./drivers/net/mlx5/mlx5_rxmode.c:252: (errno ? strerror(errno) : "Unknown error"));
1890
./drivers/net/mlx5/mlx5_rxmode.c:315: ERROR("cannot enable allmulticast mode: %s", strerror(ret));
1891
./drivers/net/mlx5/mlx5_ethdev.c:186: if (((size_t)ret < size) && (ferror(file)))
1892
./drivers/net/mlx5/mlx5_ethdev.c:230: if (((size_t)ret < size) || (ferror(file)))
1893
./drivers/net/mlx5/mlx5_ethdev.c:262: name, strerror(errno));
1894
./drivers/net/mlx5/mlx5_ethdev.c:270: strerror(errno));
1895
./drivers/net/mlx5/mlx5_ethdev.c:299: name, value_str, value, strerror(errno));
1896
./drivers/net/mlx5/mlx5_ethdev.c:550: WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
1897
./drivers/net/mlx5/mlx5_ethdev.c:559: strerror(errno));
1898
./drivers/net/mlx5/mlx5_ethdev.c:629: strerror(ret));
1899
./drivers/net/mlx5/mlx5_ethdev.c:704: strerror(ret));
1900
./drivers/net/mlx5/mlx5_ethdev.c:765: strerror(ret));
1901
./drivers/net/mlx5/mlx5.c:329: ERROR("port query failed: %s", strerror(err));
1902
./drivers/net/mlx5/mlx5.c:410: " (errno: %s)", strerror(errno));
1903
./drivers/net/mlx5/mlx5_rxq.c:395: strerror(err));
1904
./drivers/net/mlx5/mlx5_rxq.c:421: err, strerror(err));
1905
./drivers/net/mlx5/mlx5_rxq.c:430: strerror(err));
1906
./drivers/net/mlx5/mlx5_rxq.c:472: strerror(err));
1907
./drivers/net/mlx5/mlx5_rxq.c:964: ERROR("%p: cannot reset WQ: %s", (void *)dev, strerror(err));
1908
./drivers/net/mlx5/mlx5_rxq.c:1026: (void *)dev, strerror(err));
1909
./drivers/net/mlx5/mlx5_rxq.c:1150: (void *)dev, strerror(ret));
1910
./drivers/net/mlx5/mlx5_rxq.c:1163: (void *)dev, strerror(ret));
1911
./drivers/net/mlx5/mlx5_rxq.c:1175: (void *)dev, strerror(ret));
1912
./drivers/net/mlx5/mlx5_rxq.c:1203: (void *)dev, strerror(ret));
1913
./drivers/net/mlx5/mlx5_rxq.c:1212: (void *)dev, strerror(ret));
1914
./drivers/net/mlx5/mlx5_rxq.c:1248: (void *)dev, strerror(ret));
1915
./drivers/net/mlx5/mlx5_txq.c:290: (void *)dev, strerror(ret));
1916
./drivers/net/mlx5/mlx5_txq.c:301: (void *)dev, strerror(ret));
1917
./drivers/net/mlx5/mlx5_txq.c:340: (void *)dev, strerror(ret));
1918
./drivers/net/mlx5/mlx5_txq.c:357: (void *)dev, strerror(ret));
1919
./drivers/net/mlx5/mlx5_txq.c:363: (void *)dev, strerror(ret));
1920
./drivers/net/mlx5/mlx5_txq.c:372: (void *)dev, strerror(ret));
1921
./drivers/net/mlx5/mlx5_txq.c:379: (void *)dev, strerror(ret));
1922
./drivers/net/mlx5/mlx5_trigger.c:85: (void *)priv, strerror(err));
1923
./drivers/net/mlx5/mlx5_mac.c:294: (errno ? strerror(errno) : "Unknown error"));
1924
./drivers/net/fm10k/base/fm10k_tlv.c:647:s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results,
1925
./drivers/net/fm10k/base/fm10k_tlv.h:168:s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results,
1926
./drivers/net/fm10k/base/fm10k_mbx.c:962:STATIC void fm10k_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err)
1927
./drivers/net/fm10k/base/fm10k_mbx.c:1333:STATIC s32 fm10k_mbx_process_error(struct fm10k_hw *hw,
1928
./drivers/net/fm10k/base/fm10k_mbx.c:1412: err = fm10k_mbx_process_error(hw, mbx);
1929
./drivers/net/fm10k/base/fm10k_mbx.c:1422: fm10k_mbx_create_error_msg(mbx, err);
1930
./drivers/net/fm10k/base/fm10k_mbx.c:1877:STATIC void fm10k_sm_mbx_process_error(struct fm10k_mbx_info *mbx)
1931
./drivers/net/fm10k/base/fm10k_mbx.c:1913:STATIC void fm10k_sm_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err)
1932
./drivers/net/fm10k/base/fm10k_mbx.c:1928: fm10k_sm_mbx_process_error(mbx);
1933
./drivers/net/fm10k/base/fm10k_mbx.c:2163: fm10k_sm_mbx_process_error(mbx);
1934
./drivers/net/fm10k/base/fm10k_mbx.c:2178: fm10k_sm_mbx_create_error_msg(mbx, err);
1935
./drivers/net/xenvirt/rte_mempool_gntalloc.c:190: arg.index, strerror(errno));
1936
./drivers/net/bonding/rte_eth_bond_8023ad.c:909: slave_id, mem_name, rte_strerror(rte_errno));
1937
./drivers/net/bonding/rte_eth_bond_8023ad.c:918: mem_name, rte_strerror(rte_errno));
1938
./drivers/net/bonding/rte_eth_bond_8023ad.c:928: mem_name, rte_strerror(rte_errno));
1939
./drivers/net/bonding/rte_eth_bond_pmd.c:315:mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h,
1940
./drivers/net/bonding/rte_eth_bond_pmd.c:374: mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
1941
./drivers/net/bonding/rte_eth_bond_pmd.c:380: mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
1942
./drivers/net/bonding/rte_eth_bond_pmd.c:727: mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
1943
./drivers/net/bonding/rte_eth_bond_pmd.c:855: mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
1944
./drivers/net/bonding/rte_eth_bond_pmd.c:872: mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
1945
./drivers/net/bnx2x/elink.c:12945:static uint8_t elink_analyze_link_error(struct elink_params *params,
1946
./drivers/net/bnx2x/elink.c:13051: elink_analyze_link_error(params, vars, lss_status,
1947
./drivers/net/bnx2x/elink.c:13070: elink_analyze_link_error(params, vars, lss_status,
1948
./drivers/net/bnx2x/elink.c:13099: led_change = elink_analyze_link_error(params, vars, value,
1949
./drivers/net/bnx2x/ecore_sp.h:821: * Negative value in case of an error (including an
1950
./drivers/net/e1000/igb_pf.c:283:igb_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
1951
./drivers/net/e1000/igb_pf.c:311: igb_vf_reset_msg(dev, vf);
1952
./drivers/net/e1000/igb_pf.c:526: if (!e1000_check_for_msg(hw, vf))
1953
./drivers/net/e1000/base/e1000_mbx.h:98:s32 e1000_check_for_msg(struct e1000_hw *, u16);
1954
./drivers/net/e1000/base/e1000_mbx.c:122:s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
1955
./drivers/net/e1000/base/e1000_mbx.c:130: ret_val = mbx->ops.check_for_msg(hw, mbx_id);
1956
./drivers/net/e1000/base/e1000_mbx.c:182:STATIC s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
1957
./drivers/net/e1000/base/e1000_mbx.c:192: while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
1958
./drivers/net/e1000/base/e1000_mbx.c:257: ret_val = e1000_poll_for_msg(hw, mbx_id);
1959
./drivers/net/cxgbe/cxgbe_main.c:462: dev_warn(adapter, "Firmware reset failed, error %d\n",
1960
./drivers/net/cxgbe/cxgbe_main.c:471: dev_warn(adapter, "Finding address for firmware config file in flash failed, error %d\n",
1961
./drivers/net/cxgbe/cxgbe_main.c:525: dev_warn(adapter, "Configuration File checksum mismatch: [fini] csum=%#x, computed csum=%#x\n",
1962
./drivers/net/cxgbe/cxgbe_main.c:549: dev_warn(adapter, "Unable to finalize Firmware Capabilities %d\n",
1963
./drivers/net/cxgbe/cxgbe_main.c:559: dev_warn(adapter, "Unable to do init0-tweaks %d\n", -ret);
1964
./drivers/net/cxgbe/cxgbe_main.c:569: dev_warn(adapter, "Initializing Firmware failed, error %d\n",
1965
./drivers/net/cxgbe/cxgbe_main.c:592: dev_warn(adapter, "\"%s\" configuration file error %d\n",
1966
./drivers/net/cxgbe/cxgbe_main.c:595: dev_debug(adapter, "%s: returning ret = %d ..\n", __func__, ret);
1967
./drivers/net/cxgbe/cxgbe_main.c:680: dev_debug(adap, "%s: adap->params.nports = %u\n", __func__,
1968
./drivers/net/cxgbe/cxgbe_main.c:798: dev_debug(adap, "%s: returning zero..\n", __func__);
1969
./drivers/net/cxgbe/cxgbe_main.c:956: dev_debug(adapter, "%s: pi->rss_size = %u; pi->n_rx_qsets = %u\n",
1970
./drivers/net/cxgbe/cxgbe_main.c:1113: dev_warn(adapter, "Incorrect SGE EGRESS QUEUES_PER_PAGE configuration, continuing in debug mode\n");
1971
./drivers/net/cxgbe/base/t4_hw.c:254:static void t4_report_fw_error(struct adapter *adap)
1972
./drivers/net/cxgbe/base/t4_hw.c:292: pr_warn("FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
1973
./drivers/net/cxgbe/base/t4_hw.c:402: t4_report_fw_error(adap);
1974
./drivers/net/cxgbe/base/t4_hw.c:445: t4_report_fw_error(adap);
1975
./drivers/net/cxgbe/base/t4_hw.c:560: t4_report_fw_error(adap);
1976
./drivers/net/cxgbe/base/t4_hw.c:787: dev_debug(adapter, "%s: p->cclk = %u\n", __func__, p->cclk);
1977
./drivers/net/cxgbe/base/t4_hw.c:1248: * an error (negative integer) or the mailbox of the Master PF.
1978
./drivers/net/cxgbe/base/t4_hw.c:1283: t4_report_fw_error(adap);
1979
./drivers/net/cxgbe/base/t4_hw.c:2196: dev_warn(adap, "Port %d link down, reason: %s\n",
1980
./drivers/net/cxgbe/base/t4_hw.c:2205: dev_warn(adap, "Unknown firmware reply %d\n", opcode);
1981
./drivers/net/cxgbe/base/t4_hw.c:2322: dev_warn(adapter, "WARNING!!! FLASH size %#x < %#x!!!\n",
1982
./drivers/net/cxgbe/cxgbe_compat.h:54:#define dev_warn(x, args...) dev_printf(WARNING, args)
1983
./drivers/net/cxgbe/cxgbe_compat.h:57:#define dev_debug(x, args...) dev_printf(DEBUG, args)
1984
./drivers/net/cxgbe/cxgbe_compat.h:59:#define dev_debug(x, args...) do { } while (0)
1985
./drivers/net/cxgbe/cxgbe_compat.h:94:#define pr_warn(y, args...) dev_warn(0, y, ##args)
1986
./drivers/net/cxgbe/cxgbe_compat.h:108: pr_warn("WARN_ON: \"" #x "\" at %s:%d\n", __func__, __LINE__); \
1987
./drivers/net/cxgbe/sge.c:399: dev_debug(adap, "%s: failed to allocated fl entries in bulk ..\n",
1988
./drivers/net/cxgbe/sge.c:411: dev_debug(adap, "%s: mbuf alloc failed\n", __func__);
1989
./drivers/net/cxgbe/sge.c:1114: dev_warn(adap, "%s: mapping err for coalesce\n",
1990
./drivers/net/cxgbe/sge.c:1135: dev_debug(adap, "%s: Tx ring %u full; credits = %d\n",
1991
./drivers/net/cxgbe/sge.c:1265: dev_debug(adapter, "%s: nelem = %zu; elem_size = %zu; sw_size = %zu; "
1992
./drivers/net/cxgbe/sge.c:1272: dev_debug(adapter, "%s: tz exists...returning existing..\n",
1993
./drivers/net/cxgbe/sge.c:1818: dev_warn(adap->pdev_dev, "Failed to set Congestion Manager Context for Ingress Queue %d: %d\n",
1994
./drivers/net/cxgbe/cxgbe_ethdev.c:412: dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);
1995
./drivers/net/cxgbe/cxgbe_ethdev.c:428: dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);
1996
./drivers/net/cxgbe/cxgbe_ethdev.c:451: dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
1997
./drivers/net/cxgbe/cxgbe_ethdev.c:469: dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
1998
./drivers/net/cxgbe/cxgbe_ethdev.c:485: dev_debug(adapter, "%s: txq->q.cntxt_id= %d err = %d\n",
1999
./drivers/net/cxgbe/cxgbe_ethdev.c:500: dev_debug(adapter, "%s: pi->port_id = %d; tx_queue_id = %d\n",
2000
./drivers/net/cxgbe/cxgbe_ethdev.c:515: dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
2001
./drivers/net/cxgbe/cxgbe_ethdev.c:535: dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
2002
./drivers/net/cxgbe/cxgbe_ethdev.c:564: dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
2003
./drivers/net/cxgbe/cxgbe_ethdev.c:593: dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
2004
./drivers/net/cxgbe/cxgbe_ethdev.c:619: dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u\n",
2005
./drivers/net/cxgbe/cxgbe_ethdev.c:634: dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
2006
./drivers/net/virtio/virtio_ethdev.c:1120: dirname, strerror(errno));
2007
./app/test-acl/main.c:759: n, rc, strerror(-rc));
2008
./app/test-acl/main.c:772: n, rc, strerror(-rc));
2009
./app/test/test_eal_fs.c:65: perror("mkstemp() failure");
2010
./app/test/test_eal_fs.c:70: perror("readlink() failure");
2011
./app/test/test_eal_fs.c:88: printf("line %d, Error opening %s: %s\n", __LINE__, filename, strerror(errno));
2012
./app/test/test_eal_fs.c:108: printf("line %d, Error opening %s: %s\n", __LINE__, filename, strerror(errno));
2013
./app/test/test_eal_fs.c:129: printf("line %d, Error opening %s: %s\n", __LINE__, filename, strerror(errno));
2014
./app/test/test_eal_fs.c:143: printf("line %d, Error opening %s: %s\n", __LINE__, filename, strerror(errno));
2015
./app/test/test_eal_fs.c:158: printf("line %d, Error opening %s: %s\n", __LINE__, filename, strerror(errno));
2016
./app/test/test_eal_fs.c:172: printf("line %d, Error opening %s: %s\n", __LINE__, filename, strerror(errno));
2017
./app/test/test_debug.c:136:test_debug(void)
2018
./app/test/test_link_bonding_mode4.c:407: rte_strerror(rte_errno));
2019
./app/test/test_link_bonding_mode4.c:416: rte_strerror(rte_errno));
2020
./app/test/test_eal_flags.c:147: printf("Error reading %s: %s\n", hugedir, strerror(errno));
2021
./app/test/test_eal_flags.c:173: dirent->d_name, strerror(errno));
2022
./app/test/test_eal_flags.c:189: dirent->d_name, strerror(errno));
2023
./app/test/test_eal_flags.c:244: printf("Error opening %s: %s\n", nodedir, strerror(errno));
2024
./app/test/test_eal_flags.c:839: /* With empty --syslog (should fail) */
2025
./app/test/test_eal_flags.c:947: /* With empty --syslog (should fail) */
2026
./app/test/test_memzone.c:424: rte_strerror(rte_errno));
2027
./app/test/test_memzone.c:461: rte_strerror(rte_errno));
2028
./app/test/test_red.c:294: perror("get_machclk_freq()");
2029
./app/test/test_errno.c:69: rte_retval = rte_strerror(std_errs[i]);
2030
./app/test/test_errno.c:70: libc_retval = strerror(std_errs[i]);
2031
./app/test/test_errno.c:80: rte_retval = rte_strerror(rte_errs[i]);
2032
./app/test/test_errno.c:81: libc_retval = strerror(rte_errs[i]);
2033
./app/test/test_errno.c:99: rte_retval = rte_strerror(RTE_MAX_ERRNO + 1);
2034
./app/test/test_errno.c:100: libc_retval = strerror(RTE_MAX_ERRNO + 1);
2035
./app/test/test_logs.c:76: RTE_LOG(DEBUG, TESTAPP2, "debug level message (not displayed)\n");
2036
./app/test/test_logs.c:86: RTE_LOG(DEBUG, TESTAPP2, "debug level message (not displayed)\n");
2037
./app/test/test_table_acl.c:507: n, ret, strerror(-ret));
2038
./app/test/test_table_acl.c:547: n, ret, strerror(-ret));
2039
./app/test/test_table_acl.c:583: n, ret, strerror(-ret));
2040
./app/test/test_table_acl.c:608: n, ret, strerror(-ret));
2041
./app/test/test_table_acl.c:635: n, ret, strerror(-ret));
2042
./app/test-pmd/parameters.c:209: perror("Failed to open eth config file\n");
2043
./app/test-pmd/config.c:330: port_id, queue_id, strerror(-rc), rc);
2044
./app/test-pmd/config.c:363: port_id, queue_id, strerror(-rc), rc);
2045
./app/test-pmd/cmdline.c:4347: printf("set_bond_mac_addr error: (%s)\n", strerror(-ret));
2046
./app/test-pmd/cmdline.c:4403: printf("set_bond_mac_addr error: (%s)\n", strerror(-ret));
2047
./app/test-pmd/cmdline.c:5886: printf("mac_addr_cmd error: (%s)\n", strerror(-ret));
2048
./app/test-pmd/cmdline.c:6365: printf("vf_mac_addr_cmd error: (%s)\n", strerror(-ret));
2049
./app/test-pmd/cmdline.c:6495: printf("queue_rate_limit_cmd error: (%s)\n", strerror(-ret));
2050
./app/test-pmd/cmdline.c:6565: printf("vf_rate_limit_cmd error: (%s)\n", strerror(-ret));
2051
./app/test-pmd/cmdline.c:6696: strerror(-ret));
2052
./app/test-pmd/cmdline.c:6788: printf("udp tunneling add error: (%s)\n", strerror(-ret));
2053
./app/test-pmd/cmdline.c:6961: printf("mirror rule add error: (%s)\n", strerror(-ret));
2054
./app/test-pmd/cmdline.c:7051: printf("mirror rule add error: (%s)\n", strerror(-ret));
2055
./app/test-pmd/cmdline.c:7109: printf("mirror rule remove error: (%s)\n", strerror(-ret));
2056
./app/test-pmd/cmdline.c:7282: strerror(-ret));
2057
./app/test-pmd/cmdline.c:7396: strerror(-ret));
2058
./app/test-pmd/cmdline.c:7574: strerror(-ret));
2059
./app/test-pmd/cmdline.c:7794: printf("flex filter setting error: (%s)\n", strerror(-ret));
2060
./app/test-pmd/cmdline.c:7945: strerror(-ret));
2061
./app/test-pmd/cmdline.c:8287: strerror(-ret));
2062
./app/test-pmd/cmdline.c:8605: strerror(-ret));
2063
./lib/librte_vhost/vhost_user/vhost-net-user.c:149:read_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
2064
./lib/librte_vhost/vhost_user/vhost-net-user.c:167: ret = recvmsg(sockfd, &msgh, 0);
2065
./lib/librte_vhost/vhost_user/vhost-net-user.c:192:read_vhost_message(int sockfd, struct VhostUserMsg *msg)
2066
./lib/librte_vhost/vhost_user/vhost-net-user.c:196: ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE,
2067
./lib/librte_vhost/vhost_user/vhost-net-user.c:221:send_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
2068
./lib/librte_vhost/vhost_user/vhost-net-user.c:252: ret = sendmsg(sockfd, &msgh, 0);
2069
./lib/librte_vhost/vhost_user/vhost-net-user.c:264:send_vhost_message(int sockfd, struct VhostUserMsg *msg)
2070
./lib/librte_vhost/vhost_user/vhost-net-user.c:275: ret = send_fd_message(sockfd, (char *)msg,
2071
./lib/librte_vhost/vhost_user/vhost-net-user.c:335: ret = read_vhost_message(connfd, &msg);
2072
./lib/librte_vhost/vhost_user/vhost-net-user.c:363: send_vhost_message(connfd, &msg);
2073
./lib/librte_vhost/vhost_user/vhost-net-user.c:373: send_vhost_message(connfd, &msg);
2074
./lib/librte_vhost/vhost_user/vhost-net-user.c:412: send_vhost_message(connfd, &msg);
2075
./lib/librte_vhost/vhost_user/vhost-net-user.c:431: send_vhost_message(connfd, &msg);
2076
./lib/librte_ether/rte_ethdev.h:2197: * - negative value on error (invalid port id)
2077
./lib/librte_eal/bsdapp/eal/eal_timer.c:69: RTE_LOG(WARNING, EAL, "%s\n", strerror(errno));
2078
./lib/librte_eal/bsdapp/eal/eal_timer.c:76: RTE_LOG(WARNING, EAL, "%s\n", strerror(errno));
2079
./lib/librte_eal/bsdapp/eal/eal_timer.c:82: RTE_LOG(WARNING, EAL, "%s\n", strerror(errno));
2080
./lib/librte_eal/bsdapp/eal/eal_pci.c:168: devname, strerror(errno));
2081
./lib/librte_eal/bsdapp/eal/eal_pci.c:210: strerror(errno));
2082
./lib/librte_eal/bsdapp/eal/eal_pci.c:220: devname, strerror(errno));
2083
./lib/librte_eal/bsdapp/eal/eal_pci.c:377: __func__, strerror(errno));
2084
./lib/librte_eal/linuxapp/kni/kni_vhost.c:357:kni_sock_sndmsg(struct kiocb *iocb, struct socket *sock,
2085
./lib/librte_eal/linuxapp/kni/kni_vhost.c:360:kni_sock_sndmsg(struct socket *sock,
2086
./lib/librte_eal/linuxapp/kni/kni_vhost.c:396:kni_sock_rcvmsg(struct kiocb *iocb, struct socket *sock,
2087
./lib/librte_eal/linuxapp/kni/kni_vhost.c:399:kni_sock_rcvmsg(struct socket *sock,
2088
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:726: e_warn(drv, "Packet Buffer(%i) can not provide enough"
2089
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:968: e_dev_warn("This device is a pre-production adapter/LOM. "
2090
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:2768: e_dev_warn("This device is a pre-production adapter/LOM. "
2091
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:2825: e_dev_warn("PCI-Express bandwidth available for this "
2092
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:2828: e_dev_warn("For optimal performance a x8 PCI-Express "
2093
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:2965:void ewarn(struct ixgbe_hw *hw, const char *st, u32 status)
2094
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:2969: netif_warn(adapter, drv, adapter->netdev, "%s", st);
2095
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_osdep.h:64:#define e_dev_warn(format, arg...) \
2096
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_osdep.h:65: dev_warn(pci_dev_to_dev(adapter->pdev), format, ## arg)
2097
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_osdep.h:74:#define e_warn(msglvl, format, arg...) \
2098
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_osdep.h:75: netif_warn(adapter, msglvl, adapter->netdev, format, ## arg)
2099
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_osdep.h:120:extern void ewarn(struct ixgbe_hw *hw, const char *str, u32 status);
2100
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_osdep.h:130:#define EWARN(H, W, S) ewarn(H, W, S)
2101
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_sriov.h:38:void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
2102
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_mbx.h:98:s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
2103
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h:1142:#define dev_warn(dev, fmt, args...) \
2104
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h:1345:static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr)
2105
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h:2295:#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr)
2106
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h:2624:#define netdev_warn(dev, format, args...) \
2107
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h:2671:#define netif_warn(priv, type, dev, fmt, args...) \
2108
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:1702: e_warn(drv, "Please take active VFS "
2109
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c:2782: e_warn(drv, "enabling UDP RSS: fragmented packets"
2110
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.h:81:s32 e1000_check_for_msg(struct e1000_hw *, u16);
2111
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c:114:s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
2112
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c:122: ret_val = mbx->ops.check_for_msg(hw, mbx_id);
2113
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c:174:static s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
2114
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c:184: while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
2115
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c:249: ret_val = e1000_poll_for_msg(hw, mbx_id);
2116
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h:1196:#define dev_warn(dev, fmt, args...) \
2117
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h:1441:static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr)
2118
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h:2487:#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr)
2119
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h:2957:#define netdev_warn(dev, format, args...) \
2120
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h:3004:#define netif_warn(priv, type, dev, fmt, args...) \
2121
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:975: dev_warn(pci_dev_to_dev(pdev),
2122
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:1036: dev_warn(pci_dev_to_dev(pdev), "Failed to initialize MSI-X interrupts. "
2123
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:1043: dev_warn(pci_dev_to_dev(pdev), "Failed to initialize MSI "
2124
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:5192: dev_warn(tx_ring->dev,
2125
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:5218: dev_warn(tx_ring->dev,
2126
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:5311: if (dma_mapping_error(tx_ring->dev, dma))
2127
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:6434: dev_warn(&adapter->pdev->dev,
2128
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:6437: dev_warn(&adapter->pdev->dev,
2129
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:6617:static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
2130
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:6699: igb_vf_reset_msg(adapter, vf);
2131
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:6774: if (!e1000_check_for_msg(hw, vf))
2132
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:8388: if (dma_mapping_error(rx_ring->dev, dma)) {
2133
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:8425: if (dma_mapping_error(rx_ring->dev, dma)) {
2134
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:9350: dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
2135
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:9352: dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
2136
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:9561: dev_warn(pci_dev_to_dev(adapter->pdev),
2137
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ptp.c:431: dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang");
2138
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ptp.c:451: pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
2139
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ptp.c:498: dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang");
2140
./lib/librte_eal/linuxapp/eal/eal_timer.c:179: strerror(errno));
2141
./lib/librte_eal/linuxapp/eal/eal_pci.c:459: __func__, strerror(errno));
2142
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:477: RTE_LOG(ERR, EAL, "Could not open %s: %s\n", path, strerror(errno));
2143
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:483: RTE_LOG(ERR, EAL, "Locking %s failed: %s\n", path, strerror(errno));
2144
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:489: RTE_LOG(ERR, EAL, "ftruncate failed: %s\n", strerror(errno));
2145
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:503: RTE_LOG(ERR, EAL, "Locking %s failed: %s \n", path, strerror(errno));
2146
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:533: path, strerror(errno));
2147
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:545: strerror(errno));
2148
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:567: RTE_LOG(ERR, EAL, "Locking %s failed: %s \n", path, strerror(errno));
2149
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:665: RTE_LOG(ERR, EAL, "Cannot open /dev/zero: %s\n", strerror(errno));
2150
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:695: strerror(errno));
2151
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:709: strerror(errno));
2152
./lib/librte_eal/linuxapp/eal/eal_ivshmem.c:930: strerror(errno));
2153
./lib/librte_eal/linuxapp/eal/eal_pci_uio.c:133: __func__, strerror(errno));
2154
./lib/librte_eal/linuxapp/eal/eal_pci_uio.c:260: devname, strerror(errno));
2155
./lib/librte_eal/linuxapp/eal/eal_pci_uio.c:269: cfgname, strerror(errno));
2156
./lib/librte_eal/linuxapp/eal/eal_pci_uio.c:326: strerror(errno));
2157
./lib/librte_eal/linuxapp/eal/eal_pci_uio.c:336: devname, strerror(errno));
2158
./lib/librte_eal/linuxapp/eal/eal_memory.c:134: strerror(errno));
2159
./lib/librte_eal/linuxapp/eal/eal_memory.c:175: __func__, strerror(errno));
2160
./lib/librte_eal/linuxapp/eal/eal_memory.c:183: __func__, strerror(errno));
2161
./lib/librte_eal/linuxapp/eal/eal_memory.c:189: __func__, strerror(errno));
2162
./lib/librte_eal/linuxapp/eal/eal_memory.c:290: strerror(errno));
2163
./lib/librte_eal/linuxapp/eal/eal_memory.c:398: strerror(errno));
2164
./lib/librte_eal/linuxapp/eal/eal_memory.c:406: strerror(errno));
2165
./lib/librte_eal/linuxapp/eal/eal_memory.c:422: __func__, strerror(errno));
2166
./lib/librte_eal/linuxapp/eal/eal_memory.c:513: RTE_LOG(ERR, EAL, "%s(): open failed: %s\n", __func__, strerror(errno));
2167
./lib/librte_eal/linuxapp/eal/eal_memory.c:537: RTE_LOG(ERR, EAL, "%s(): mmap failed: %s\n", __func__, strerror(errno));
2168
./lib/librte_eal/linuxapp/eal/eal_memory.c:553: __func__, strerror(errno));
2169
./lib/librte_eal/linuxapp/eal/eal_memory.c:814: __func__, hp->filepath, strerror(errno));
2170
./lib/librte_eal/linuxapp/eal/eal_memory.c:871: __func__, hp->filepath, strerror(errno));
2171
./lib/librte_eal/linuxapp/eal/eal_memory.c:899: hp->filepath, strerror(errno));
2172
./lib/librte_eal/linuxapp/eal/eal_memory.c:904: hp->filepath, strerror(errno));
2173
./lib/librte_eal/linuxapp/eal/eal_memory.c:1119: strerror(errno));
2174
./lib/librte_eal/linuxapp/eal/eal_memory.c:1511: mcfg->memseg[s].addr, strerror(errno));
2175
./lib/librte_eal/linuxapp/eal/eal_log.c:80: syslog(loglevel, "%s", copybuf);
2176
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:403: intr_handle->fd, strerror(errno));
2177
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:417: intr_handle->fd, strerror(errno));
2178
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:717: strerror(errno));
2179
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:828: intr_pipe.readfd, strerror(errno));
2180
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:847: src->intr_handle.fd, strerror(errno));
2181
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:936: fd, strerror(errno));
2182
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:1021: strerror(errno));
2183
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:1070: op, fd, strerror(errno));
2184
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:1161: errno, strerror(errno));
2185
./lib/librte_eal/linuxapp/eal/eal_hugepage_info.c:257: strerror(errno));
2186
./lib/librte_eal/linuxapp/eal/eal.c:914: " error %i (%s)\n", errno, strerror(errno));
2187
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:132: ret = sendmsg(socket, &hdr, 0);
2188
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:156: ret = recvmsg(socket, &hdr, 0);
2189
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:190: ret = sendmsg(socket, &hdr, 0);
2190
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:220: ret = recvmsg(socket, &hdr, 0);
2191
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:354: RTE_LOG(ERR, EAL, "Failed to bind socket: %s!\n", strerror(errno));
2192
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:361: RTE_LOG(ERR, EAL, "Failed to listen: %s!\n", strerror(errno));
2193
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:222: "error %i (%s)\n", errno, strerror(errno));
2194
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:244: "error %i (%s)\n", errno, strerror(errno));
2195
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:297: "error %i (%s)\n", errno, strerror(errno));
2196
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:316: "error %i (%s)\n", errno, strerror(errno));
2197
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:359: "error %i (%s)\n", errno, strerror(errno));
2198
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:368: "error %i (%s)\n", errno, strerror(errno));
2199
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:381: strerror(errno));
2200
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:442: strerror(errno));
2201
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:625: "error %i (%s)\n", pci_addr, errno, strerror(errno));
2202
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:649: "error %i (%s)\n", pci_addr, errno, strerror(errno));
2203
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:673: "error %i (%s)\n", pci_addr, errno, strerror(errno));
2204
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:694: "error %i (%s)\n", pci_addr, errno, strerror(errno));
2205
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:756: "error %i (%s)\n", pci_addr, errno, strerror(errno));
2206
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:844: strerror(errno));
2207
./lib/librte_eal/common/eal_common_errno.c:47:rte_strerror(int errnum)
2208
./lib/librte_eal/common/eal_common_pci.c:116: strerror(errno), mapaddr);
2209
./lib/librte_eal/common/eal_common_pci.c:134: strerror(errno));
2210
./lib/librte_eal/common/eal_common_pci_uio.c:74: uio_res->maps[i].path, strerror(errno));
2211
./lib/librte_eal/common/eal_common_options.c:189: path, strerror(errno));
2212
./lib/librte_eal/common/eal_common_options.c:234: RTE_LOG(ERR, EAL, "%s\n", dlerror());
2213
./lib/librte_eal/common/eal_common_options.c:684:eal_parse_syslog(const char *facility, struct internal_config *conf)
2214
./lib/librte_eal/common/eal_common_options.c:873: if (eal_parse_syslog(optarg, conf) < 0) {
2215
./lib/librte_eal/common/include/rte_errno.h:71:const char *rte_strerror(int errnum);
2216
./lib/librte_sched/rte_sched.c:2074: rte_panic("Algorithmic error (invalid state)\n");
2217
./lib/librte_power/rte_power_kvm_vm.c:97:send_msg(unsigned lcore_id, uint32_t scale_direction)
2218
./lib/librte_power/rte_power_kvm_vm.c:107: ret = guest_channel_send_msg(&pkt[lcore_id], lcore_id);
2219
./lib/librte_power/rte_power_kvm_vm.c:110: RTE_LOG(DEBUG, POWER, "Error sending message: %s\n", strerror(ret));
2220
./lib/librte_power/rte_power_kvm_vm.c:117: return send_msg(lcore_id, CPU_POWER_SCALE_UP);
2221
./lib/librte_power/rte_power_kvm_vm.c:123: return send_msg(lcore_id, CPU_POWER_SCALE_DOWN);
2222
./lib/librte_power/rte_power_kvm_vm.c:129: return send_msg(lcore_id, CPU_POWER_SCALE_MAX);
2223
./lib/librte_power/rte_power_kvm_vm.c:135: return send_msg(lcore_id, CPU_POWER_SCALE_MIN);
2224
./lib/librte_power/guest_channel.c:80: "%s\n", fd_path, strerror(errno));
2225
./lib/librte_power/guest_channel.c:105: ret = guest_channel_send_msg(&pkt, lcore_id);
2226
./lib/librte_power/guest_channel.c:108: "test: %s\n", fd_path, strerror(ret));
2227
./lib/librte_power/guest_channel.c:120:guest_channel_send_msg(struct channel_packet *pkt, unsigned lcore_id)
2228
./lib/librte_power/guest_channel.h:82:int guest_channel_send_msg(struct channel_packet *pkt, unsigned lcore_id);
2229
./lib/librte_cmdline/cmdline_parse.c:171: printf("Parse error(%s:%d): Token offset(%u) "
2232
./drivers/net/mlx4/mlx4.c:5130: const char *val = getenv(name);
2233
./drivers/net/mlx4/mlx4.c:5694: setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
2234
./drivers/net/mlx5/mlx5.c:559: setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
2235
./app/test/test.c:136: if ((recursive_call = getenv(RECURSIVE_ENV_VAR)) != NULL)
2236
./app/test/test_ivshmem.c:377: setenv(RTE_IVSHMEM_TEST_ID, &testid, 1);
2237
./app/test/test_ivshmem.c:400: testid = *(getenv(RTE_IVSHMEM_TEST_ID)) - FIRST_TEST;
2238
./app/test/process.h:92: if (setenv(RECURSIVE_ENV_VAR, env_value, 1) != 0)
2239
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:90: const char *home_dir = getenv("HOME");
2240
./lib/librte_eal/common/eal_filesystem.h:61: const char *home_dir = getenv("HOME");
2241
./lib/librte_eal/common/eal_filesystem.h:78: const char *home_dir = getenv("HOME");
2244
./examples/netmap_compat/bridge/bridge.c:215: err = rte_netmap_ioctl(port->fd, NIOCGINFO, &req);
2245
./examples/netmap_compat/bridge/bridge.c:225: err = rte_netmap_ioctl(port->fd, NIOCREGIF, &req);
2246
./examples/netmap_compat/bridge/bridge.c:366: err = rte_netmap_ioctl(ports.p[i].fd, NIOCUNREGIF, &req);
2247
./examples/netmap_compat/netmap/netmap_user.h:41: * value returned from ioctl(.., NIOCREG, ...) and the mmap region:
2248
./examples/netmap_compat/netmap/netmap_user.h:42: * ioctl(fd, NIOCREG, &req);
2249
./examples/netmap_compat/lib/compat_netmap.h:74:int rte_netmap_ioctl(int fd, uint32_t op, void *param);
2250
./examples/netmap_compat/lib/compat_netmap.c:776:int rte_netmap_ioctl(int fd, uint32_t op, void *param)
2251
./examples/vhost_xen/xenstore_parse.c:93: int rv = ioctl(d_fd, IOCTL_GNTDEV_MAP_GRANT_REF, &arg);
2252
./examples/vhost_xen/xenstore_parse.c:128: rv = ioctl(d_fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &arg);
2253
./examples/exception_path/main.c:207: ret = ioctl(fd, TUNSETIFF, (void *) &ifr);
2254
./drivers/net/mlx4/mlx4.c:609: * Perform ifreq ioctl() on associated Ethernet device.
2255
./drivers/net/mlx4/mlx4.c:614: * Request number to pass to ioctl().
2256
./drivers/net/mlx4/mlx4.c:630: ret = ioctl(sock, req, ifr);
2257
./drivers/net/mlx4/mlx4.c:4617: WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
2258
./drivers/net/mlx4/mlx4.c:4625: WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
2259
./drivers/net/mlx4/mlx4.c:4784: WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)"
2260
./drivers/net/mlx4/mlx4.c:4847: WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
2261
./drivers/net/mlx5/mlx5_ethdev.c:306: * Perform ifreq ioctl() on associated Ethernet device.
2262
./drivers/net/mlx5/mlx5_ethdev.c:311: * Request number to pass to ioctl().
2263
./drivers/net/mlx5/mlx5_ethdev.c:327: ret = ioctl(sock, req, ifr);
2264
./drivers/net/mlx5/mlx5_ethdev.c:550: WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
2265
./drivers/net/mlx5/mlx5_ethdev.c:558: WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
2266
./drivers/net/mlx5/mlx5_ethdev.c:702: WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)"
2267
./drivers/net/mlx5/mlx5_ethdev.c:763: WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
2268
./drivers/net/xenvirt/rte_xen_lib.c:169: rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_ALLOC_GREF, arg);
2269
./drivers/net/xenvirt/rte_xen_lib.c:181: ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, arg_d);
2270
./drivers/net/xenvirt/rte_xen_lib.c:215: rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_ALLOC_GREF, &arg);
2271
./drivers/net/xenvirt/rte_xen_lib.c:226: ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg_d);
2272
./drivers/net/xenvirt/rte_xen_lib.c:235:	rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_SET_UNMAP_NOTIFY, &notify);
2273
./drivers/net/xenvirt/rte_xen_lib.c:239: ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg_d);
2274
./drivers/net/xenvirt/rte_xen_lib.c:255: ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg_d);
2275
./drivers/net/xenvirt/rte_mempool_gntalloc.c:184: rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg);
2276
./drivers/net/xenvirt/rte_mempool_gntalloc.c:248: rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, arg);
2277
./drivers/net/xenvirt/rte_mempool_gntalloc.c:290: rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, arg);
2278
./drivers/net/af_packet/rte_eth_af_packet.c:502: if (ioctl(sockfd, SIOCGIFINDEX, &ifr) == -1) {
2279
./drivers/net/af_packet/rte_eth_af_packet.c:510: if (ioctl(sockfd, SIOCGIFHWADDR, &ifr) == -1) {
2280
./lib/librte_vhost/vhost_cuse/vhost-net-cdev.c:122: fuse_reply_ioctl(req, result, NULL, 0); \
2281
./lib/librte_vhost/vhost_cuse/vhost-net-cdev.c:146: fuse_reply_ioctl(req, result, NULL, 0);\
2282
./lib/librte_vhost/vhost_cuse/vhost-net-cdev.c:159: fuse_reply_ioctl(req, result, &(var), sizeof(type));\
2283
./lib/librte_vhost/vhost_cuse/vhost-net-cdev.c:173: fuse_reply_ioctl(req, result, &(var2), sizeof(type2));\
2284
./lib/librte_vhost/vhost_cuse/vhost-net-cdev.c:183:vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
2285
./lib/librte_vhost/vhost_cuse/vhost-net-cdev.c:205: fuse_reply_ioctl(req, result, NULL, 0);
2286
./lib/librte_vhost/vhost_cuse/vhost-net-cdev.c:252: fuse_reply_ioctl(req, result, NULL, 0);
2287
./lib/librte_vhost/vhost_cuse/vhost-net-cdev.c:262: fuse_reply_ioctl(req, result, NULL, 0);
2288
./lib/librte_vhost/vhost_cuse/vhost-net-cdev.c:313: fuse_reply_ioctl(req, -1, NULL, 0);
2289
./lib/librte_vhost/vhost_cuse/vhost-net-cdev.c:320: fuse_reply_ioctl(req, result, NULL, 0);
2290
./lib/librte_vhost/vhost_cuse/vhost-net-cdev.c:323: fuse_reply_ioctl(req, result, NULL, 0);
2291
./lib/librte_vhost/vhost_cuse/vhost-net-cdev.c:332: fuse_reply_ioctl(req, result, NULL, 0);
2292
./lib/librte_vhost/vhost_cuse/virtio-net-cdev.c:393: ret = ioctl(fd_tap, TUNGETIFF, &ifr);
2293
./lib/librte_vhost/vhost_cuse/eventfd_copy.c:95: ret = ioctl(eventfd_link, EVENTFD_COPY2, &eventfd_copy2);
2294
./lib/librte_vhost/eventfd_link/eventfd_link.c:229:eventfd_link_ioctl(struct file *f, unsigned int ioctl, unsigned long arg)
2295
./lib/librte_eal/bsdapp/eal/eal_pci.c:303: if (ioctl(dev_pci_fd, PCIOCGETBAR, &bar) < 0)
2296
./lib/librte_eal/bsdapp/eal/eal_pci.c:375: if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
2297
./lib/librte_eal/bsdapp/eal/eal_pci.c:426: if (ioctl(fd, PCIOCREAD, &pi) < 0)
2298
./lib/librte_eal/bsdapp/eal/eal_pci.c:470: if (ioctl(fd, PCIOCWRITE, &pi) < 0)
2299
./lib/librte_eal/linuxapp/kni/kni_net.c:486:kni_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2300
./lib/librte_eal/linuxapp/kni/kni_vhost.c:447:kni_sock_ioctl(struct socket *sock, unsigned int cmd,
2301
./lib/librte_eal/linuxapp/kni/kni_vhost.c:550:kni_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
2302
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:2292:static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2303
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:2297: return ethtool_ioctl(ifr);
2304
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe.h:867:extern int ethtool_ioctl(struct ifreq *ifr);
2305
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb.h:826:extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
2306
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb.h:830:extern int ethtool_ioctl(struct ifreq *);
2307
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:33: * the information ethtool needs. We fall back to calling do_ioctl()
2308
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:782:int ethtool_ioctl(struct ifreq *ifr)
2309
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:907:extern int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
2310
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:1097:int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
2311
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:162:static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
2312
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:8520:static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2313
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:8553:static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2314
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:8560: return igb_mii_ioctl(netdev, ifr, cmd);
2315
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:8564: return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
2316
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:8568: return ethtool_ioctl(ifr);
2317
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ptp.c:604:int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
2318
./lib/librte_eal/linuxapp/kni/kni_misc.c:61:static int kni_ioctl(struct inode *inode, unsigned int ioctl_num,
2319
./lib/librte_eal/linuxapp/kni/kni_misc.c:63:static int kni_compat_ioctl(struct inode *inode, unsigned int ioctl_num,
2320
./lib/librte_eal/linuxapp/kni/kni_misc.c:630:kni_ioctl(struct inode *inode,
2321
./lib/librte_eal/linuxapp/kni/kni_misc.c:661:kni_compat_ioctl(struct inode *inode,
2322
./lib/librte_eal/linuxapp/eal/eal_pci_init.h:94: * the group fd via an ioctl() call.
2323
./lib/librte_eal/linuxapp/eal/eal_pci_uio.c:130: ret = mknod(filename, S_IFCHR | S_IRUSR | S_IWUSR, dev);
2324
./lib/librte_eal/linuxapp/eal/eal_pci_uio.c:132: RTE_LOG(ERR, EAL, "%s(): mknod() failed %s\n",
2325
./lib/librte_eal/linuxapp/eal/eal_xen_memory.c:219: ret = ioctl(xen_fd, RTE_DOM0_IOCTL_PREPARE_MEMSEG, &meminfo);
2326
./lib/librte_eal/linuxapp/eal/eal_xen_memory.c:227: ret = ioctl(xen_fd, RTE_DOM0_IOCTL_GET_NUM_MEMSEG, &num_memseg);
2327
./lib/librte_eal/linuxapp/eal/eal_xen_memory.c:242: ret = ioctl(xen_fd, RTE_DOM0_IOCTL_GET_MEMSEG_INFO, seginfo);
2328
./lib/librte_eal/linuxapp/eal/eal_xen_memory.c:334: ret = ioctl(xen_fd, RTE_DOM0_IOCTL_ATTACH_TO_MEMSEG, name);
2329
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:159: ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
2330
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:176: ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
2331
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:203: ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
2332
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:219: ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
2333
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:248: ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
2334
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:274: ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
2335
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:310: ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
2336
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:337: ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
2337
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:218: ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU,
2338
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:240: ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
2339
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:294: ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
2340
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:364: ret = ioctl(vfio_container_fd, VFIO_GET_API_VERSION);
2341
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:376: ret = ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU);
2342
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:622: ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &group_status);
2343
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:645: ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
2344
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:680: vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD, pci_addr);
2345
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:691: ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_INFO, &device_info);
2346
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:752:		ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
2347
./lib/librte_eal/linuxapp/eal/eal_pci_vfio.c:876: ioctl(vfio_dev_fd, VFIO_DEVICE_RESET);
2348
./lib/librte_eal/linuxapp/xen_dom0/dom0_mm_misc.c:94:static int dom0_ioctl(struct file *file, unsigned int ioctl_num,
2349
./lib/librte_eal/linuxapp/xen_dom0/dom0_mm_misc.c:680:dom0_ioctl(struct file *file,
2350
./lib/librte_kni/rte_kni.c:429: ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
2351
./lib/librte_kni/rte_kni.c:469: if (ioctl(kni_fd, RTE_KNI_IOCTL_RELEASE, &dev_info) < 0) {
2354
./examples/cmdline/commands.c:136: if (res->obj->ip.family == AF_INET)
2355
./examples/cmdline/commands.c:206: if (o->ip.family == AF_INET)
2356
./examples/bond/main.c:432: if (res->ip.family == AF_INET)
2357
./examples/vm_power_manager/channel_manager.c:256: info->fd = socket(AF_UNIX, SOCK_STREAM, 0);
2358
./examples/vm_power_manager/channel_manager.c:263: sock_addr.sun_family = AF_UNIX;
2359
./examples/vm_power_manager/channel_manager.h:79: int fd; /**< AF_UNIX socket fd */
2360
./drivers/net/ixgbe/base/ixgbe_api.c:146: case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
2361
./drivers/net/ixgbe/base/ixgbe_api.c:147: case IXGBE_DEV_ID_82598AF_DUAL_PORT:
2362
./drivers/net/ixgbe/base/ixgbe_mbx.h:69: * PF. The reverse is true if it is IXGBE_PF_*.
2363
./drivers/net/ixgbe/base/ixgbe_mbx.h:121:#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
2364
./drivers/net/ixgbe/base/ixgbe_mbx.h:134:#define IXGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */
2365
./drivers/net/ixgbe/base/ixgbe_type.h:85:#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
2366
./drivers/net/ixgbe/base/ixgbe_type.h:86:#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
2367
./drivers/net/ixgbe/base/ixgbe_82598.c:377: case IXGBE_DEV_ID_82598AF_DUAL_PORT:
2368
./drivers/net/ixgbe/base/ixgbe_82598.c:378: case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
2369
./drivers/net/ixgbe/base/ixgbe_82598.c:1302: case IXGBE_DEV_ID_82598AF_DUAL_PORT:
2370
./drivers/net/ixgbe/base/ixgbe_82598.c:1303: case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
2371
./drivers/net/ixgbe/base/ixgbe_common.c:278: reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2372
./drivers/net/ixgbe/base/ixgbe_common.c:291: reg_cu |= IXGBE_TAF_ASM_PAUSE;
2373
./drivers/net/ixgbe/base/ixgbe_common.c:292: reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
2374
./drivers/net/ixgbe/base/ixgbe_common.c:312: reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
2375
./drivers/net/ixgbe/base/ixgbe_common.c:2986: IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2376
./drivers/net/ixgbe/base/ixgbe_common.c:2987: IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2377
./drivers/net/ixgbe/base/ixgbe_phy.h:113:#define IXGBE_TAF_SYM_PAUSE 0x400
2378
./drivers/net/ixgbe/base/ixgbe_phy.h:114:#define IXGBE_TAF_ASM_PAUSE 0x800
2379
./drivers/net/ixgbe/ixgbe_ethdev.c:3792:#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
2380
./drivers/net/mlx4/mlx4.c:624: int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
2381
./drivers/net/i40e/i40e_pf.h:34:#ifndef _I40E_PF_H_
2382
./drivers/net/i40e/i40e_pf.h:35:#define _I40E_PF_H_
2383
./drivers/net/i40e/i40e_pf.h:52: I40E_PF_VFR_INPROGRESS = 0,
2384
./drivers/net/i40e/i40e_pf.h:53: I40E_PF_VFR_COMPLETED = 1,
2385
./drivers/net/i40e/i40e_pf.h:128:#endif /* _I40E_PF_H_ */
2386
./drivers/net/i40e/i40e_pf.c:73: struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
2387
./drivers/net/i40e/i40e_pf.c:126: struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
2388
./drivers/net/i40e/i40e_pf.c:138: I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_INPROGRESS);
2389
./drivers/net/i40e/i40e_pf.c:204: I40E_WRITE_REG(hw, I40E_PF_PCI_CIAA, I40E_VF_PCI_ADDR |
2390
./drivers/net/i40e/i40e_pf.c:205: (abs_vf_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
2391
./drivers/net/i40e/i40e_pf.c:208: val = I40E_READ_REG(hw, I40E_PF_PCI_CIAD);
2392
./drivers/net/i40e/i40e_pf.c:219: I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_COMPLETED);
2393
./drivers/net/i40e/i40e_pf.c:251: struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
2394
./drivers/net/i40e/i40e_pf.c:289: struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
2395
./drivers/net/i40e/i40e_pf.c:401: ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2396
./drivers/net/i40e/i40e_pf.c:402: I40E_QTX_CTL_PF_INDX_MASK) |
2397
./drivers/net/i40e/i40e_pf.c:417: struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
2398
./drivers/net/i40e/i40e_pf.c:476: struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
2399
./drivers/net/i40e/i40e_pf.c:574: struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
2400
./drivers/net/i40e/i40e_pf.c:800: struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
2401
./drivers/net/i40e/i40e_pf.c:1014: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2402
./drivers/net/i40e/i40e_pf.c:1066: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2403
./drivers/net/i40e/i40e_fdir.c:194: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2404
./drivers/net/i40e/i40e_fdir.c:304: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2405
./drivers/net/i40e/i40e_fdir.c:345: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2406
./drivers/net/i40e/i40e_fdir.c:537: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2407
./drivers/net/i40e/i40e_fdir.c:595: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2408
./drivers/net/i40e/i40e_fdir.c:1188: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2409
./drivers/net/i40e/i40e_fdir.c:1305: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2410
./drivers/net/i40e/i40e_fdir.c:1347: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2411
./drivers/net/i40e/i40e_fdir.c:1364: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2412
./drivers/net/i40e/i40e_ethdev.h:96:#define PF_IS_V11(vf) \
2413
./drivers/net/i40e/i40e_ethdev.h:431: uint16_t vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
2414
./drivers/net/i40e/i40e_ethdev.h:625:/* I40E_PF_TO */
2415
./drivers/net/i40e/i40e_ethdev.h:626:#define I40E_PF_TO_HW(pf) \
2416
./drivers/net/i40e/i40e_ethdev.h:628:#define I40E_PF_TO_ADAPTER(pf) \
2417
./drivers/net/i40e/base/i40e_adminq_cmd.h:394:#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
2418
./drivers/net/i40e/base/i40e_adminq_cmd.h:2298:#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
2419
./drivers/net/i40e/base/i40e_hmc.c:262: I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
2420
./drivers/net/i40e/base/i40e_hmc.c:319: I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
2421
./drivers/net/i40e/base/i40e_hmc.c:367: I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
2422
./drivers/net/i40e/base/i40e_type.h:67:#define I40E_MAX_PF_VSI 64
2423
./drivers/net/i40e/base/i40e_type.h:68:#define I40E_MAX_PF_QP 128
2424
./drivers/net/i40e/base/i40e_type.h:72:#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
2425
./drivers/net/i40e/base/i40e_type.h:120:#define I40E_QTX_CTL_PF_QUEUE 0x2
2426
./drivers/net/i40e/base/i40e_type.h:1602:#define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */
2427
./drivers/net/i40e/base/i40e_lan_hmc.c:416: I40E_SET_PF_SD_ENTRY(hw,
2428
./drivers/net/i40e/base/i40e_lan_hmc.c:421: I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
2429
./drivers/net/i40e/base/i40e_prototype.h:98:#ifdef PF_DRIVER
2430
./drivers/net/i40e/base/i40e_prototype.h:452:#endif /* PF_DRIVER */
2431
./drivers/net/i40e/base/i40e_hmc.h:123: * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
2432
./drivers/net/i40e/base/i40e_hmc.h:129:#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
2433
./drivers/net/i40e/base/i40e_hmc.h:145: * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
2434
./drivers/net/i40e/base/i40e_hmc.h:150:#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
2435
./drivers/net/i40e/base/i40e_hmc.h:164: * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
2436
./drivers/net/i40e/base/i40e_hmc.h:169:#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
2437
./drivers/net/i40e/base/i40e_adminq.c:40:#ifdef PF_DRIVER
2438
./drivers/net/i40e/base/i40e_adminq.c:51:#endif /* PF_DRIVER */
2439
./drivers/net/i40e/base/i40e_adminq.c:72:#ifdef PF_DRIVER
2440
./drivers/net/i40e/base/i40e_adminq.c:74: hw->aq.asq.tail = I40E_PF_ATQT;
2441
./drivers/net/i40e/base/i40e_adminq.c:75: hw->aq.asq.head = I40E_PF_ATQH;
2442
./drivers/net/i40e/base/i40e_adminq.c:76: hw->aq.asq.len = I40E_PF_ATQLEN;
2443
./drivers/net/i40e/base/i40e_adminq.c:77: hw->aq.asq.bal = I40E_PF_ATQBAL;
2444
./drivers/net/i40e/base/i40e_adminq.c:78: hw->aq.asq.bah = I40E_PF_ATQBAH;
2445
./drivers/net/i40e/base/i40e_adminq.c:79: hw->aq.arq.tail = I40E_PF_ARQT;
2446
./drivers/net/i40e/base/i40e_adminq.c:80: hw->aq.arq.head = I40E_PF_ARQH;
2447
./drivers/net/i40e/base/i40e_adminq.c:81: hw->aq.arq.len = I40E_PF_ARQLEN;
2448
./drivers/net/i40e/base/i40e_adminq.c:82: hw->aq.arq.bal = I40E_PF_ARQBAL;
2449
./drivers/net/i40e/base/i40e_adminq.c:83: hw->aq.arq.bah = I40E_PF_ARQBAH;
2450
./drivers/net/i40e/base/i40e_adminq.c:321:#ifdef PF_DRIVER
2451
./drivers/net/i40e/base/i40e_adminq.c:325: I40E_PF_ATQLEN_ATQENABLE_MASK));
2452
./drivers/net/i40e/base/i40e_adminq.c:328: I40E_PF_ATQLEN_ATQENABLE_MASK));
2453
./drivers/net/i40e/base/i40e_adminq.c:330:#endif /* PF_DRIVER */
2454
./drivers/net/i40e/base/i40e_adminq.c:368:#ifdef PF_DRIVER
2455
./drivers/net/i40e/base/i40e_adminq.c:372: I40E_PF_ARQLEN_ARQENABLE_MASK));
2456
./drivers/net/i40e/base/i40e_adminq.c:375: I40E_PF_ARQLEN_ARQENABLE_MASK));
2457
./drivers/net/i40e/base/i40e_adminq.c:377:#endif /* PF_DRIVER */
2458
./drivers/net/i40e/base/i40e_adminq.c:602:#ifdef PF_DRIVER
2459
./drivers/net/i40e/base/i40e_adminq.c:636:#ifdef PF_DRIVER
2460
./drivers/net/i40e/base/i40e_adminq.c:690:#endif /* PF_DRIVER */
2461
./drivers/net/i40e/base/i40e_adminq.c:696:#ifdef PF_DRIVER
2462
./drivers/net/i40e/base/i40e_adminq.c:1039:#ifdef PF_DRIVER
2463
./drivers/net/i40e/base/i40e_adminq.c:1042: ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
2464
./drivers/net/i40e/base/i40e_adminq.c:1044: ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
2465
./drivers/net/i40e/base/i40e_adminq.c:1046:#endif /* PF_DRIVER */
2466
./drivers/net/i40e/base/i40e_adminq.c:1118:#ifdef PF_DRIVER
2467
./drivers/net/i40e/base/i40e_common.c:390:#ifdef PF_DRIVER
2468
./drivers/net/i40e/base/i40e_common.c:394: I40E_PF_ATQLEN_ATQENABLE_MASK);
2469
./drivers/net/i40e/base/i40e_common.c:397: I40E_PF_ATQLEN_ATQENABLE_MASK);
2470
./drivers/net/i40e/base/i40e_common.c:399:#endif /* PF_DRIVER */
2471
./drivers/net/i40e/base/i40e_common.c:988:#ifdef PF_DRIVER
2472
./drivers/net/i40e/base/i40e_common.c:1029: func_rid = rd32(hw, I40E_PF_FUNC_RID);
2473
./drivers/net/i40e/base/i40e_common.c:1284:#define I40E_PF_RESET_WAIT_COUNT 200
2474
./drivers/net/i40e/base/i40e_common.c:1322: for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
2475
./drivers/net/i40e/base/i40e_common.c:1347: for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
2476
./drivers/net/i40e/base/i40e_common.c:1385: num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
2477
./drivers/net/i40e/base/i40e_common.c:1386: I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
2478
./drivers/net/i40e/base/i40e_common.c:1400: val = rd32(hw, I40E_PF_VT_PFALLOC);
2479
./drivers/net/i40e/base/i40e_common.c:1401: i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
2480
./drivers/net/i40e/base/i40e_common.c:1402: I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
2481
./drivers/net/i40e/base/i40e_common.c:1403: j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
2482
./drivers/net/i40e/base/i40e_common.c:1404: I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
2483
./drivers/net/i40e/base/i40e_common.c:1405: if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
2484
./drivers/net/i40e/base/i40e_common.c:5635: max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
2485
./drivers/net/i40e/base/i40e_common.c:5638: min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
2486
./drivers/net/i40e/base/i40e_common.c:5691:#endif /* PF_DRIVER */
2487
./drivers/net/i40e/base/i40e_virtchnl.h:360: I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
2488
./drivers/net/i40e/base/i40e_virtchnl.h:362:#define I40E_PF_EVENT_SEVERITY_INFO 0
2489
./drivers/net/i40e/base/i40e_virtchnl.h:363:#define I40E_PF_EVENT_SEVERITY_ATTENTION 1
2490
./drivers/net/i40e/base/i40e_virtchnl.h:364:#define I40E_PF_EVENT_SEVERITY_ACTION_REQUIRED 2
2491
./drivers/net/i40e/base/i40e_virtchnl.h:365:#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
2492
./drivers/net/i40e/base/i40e_register.h:38:#ifdef PF_DRIVER
2493
./drivers/net/i40e/base/i40e_register.h:74:#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */
2494
./drivers/net/i40e/base/i40e_register.h:75:#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
2495
./drivers/net/i40e/base/i40e_register.h:76:#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT)
2496
./drivers/net/i40e/base/i40e_register.h:77:#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */
2497
./drivers/net/i40e/base/i40e_register.h:78:#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
2498
./drivers/net/i40e/base/i40e_register.h:79:#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT)
2499
./drivers/net/i40e/base/i40e_register.h:80:#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */
2500
./drivers/net/i40e/base/i40e_register.h:81:#define I40E_PF_ARQH_ARQH_SHIFT 0
2501
./drivers/net/i40e/base/i40e_register.h:82:#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT)
2502
./drivers/net/i40e/base/i40e_register.h:83:#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */
2503
./drivers/net/i40e/base/i40e_register.h:84:#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
2504
./drivers/net/i40e/base/i40e_register.h:85:#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT)
2505
./drivers/net/i40e/base/i40e_register.h:86:#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
2506
./drivers/net/i40e/base/i40e_register.h:87:#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT)
2507
./drivers/net/i40e/base/i40e_register.h:88:#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
2508
./drivers/net/i40e/base/i40e_register.h:89:#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT)
2509
./drivers/net/i40e/base/i40e_register.h:90:#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
2510
./drivers/net/i40e/base/i40e_register.h:91:#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
2511
./drivers/net/i40e/base/i40e_register.h:92:#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
2512
./drivers/net/i40e/base/i40e_register.h:93:#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
2513
./drivers/net/i40e/base/i40e_register.h:94:#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
2514
./drivers/net/i40e/base/i40e_register.h:95:#define I40E_PF_ARQT_ARQT_SHIFT 0
2515
./drivers/net/i40e/base/i40e_register.h:96:#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
2516
./drivers/net/i40e/base/i40e_register.h:97:#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */
2517
./drivers/net/i40e/base/i40e_register.h:98:#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
2518
./drivers/net/i40e/base/i40e_register.h:99:#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT)
2519
./drivers/net/i40e/base/i40e_register.h:100:#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */
2520
./drivers/net/i40e/base/i40e_register.h:101:#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
2521
./drivers/net/i40e/base/i40e_register.h:102:#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT)
2522
./drivers/net/i40e/base/i40e_register.h:103:#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */
2523
./drivers/net/i40e/base/i40e_register.h:104:#define I40E_PF_ATQH_ATQH_SHIFT 0
2524
./drivers/net/i40e/base/i40e_register.h:105:#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT)
2525
./drivers/net/i40e/base/i40e_register.h:106:#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */
2526
./drivers/net/i40e/base/i40e_register.h:107:#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
2527
./drivers/net/i40e/base/i40e_register.h:108:#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT)
2528
./drivers/net/i40e/base/i40e_register.h:109:#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
2529
./drivers/net/i40e/base/i40e_register.h:110:#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT)
2530
./drivers/net/i40e/base/i40e_register.h:111:#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
2531
./drivers/net/i40e/base/i40e_register.h:112:#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT)
2532
./drivers/net/i40e/base/i40e_register.h:113:#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
2533
./drivers/net/i40e/base/i40e_register.h:114:#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
2534
./drivers/net/i40e/base/i40e_register.h:115:#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
2535
./drivers/net/i40e/base/i40e_register.h:116:#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
2536
./drivers/net/i40e/base/i40e_register.h:117:#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
2537
./drivers/net/i40e/base/i40e_register.h:118:#define I40E_PF_ATQT_ATQT_SHIFT 0
2538
./drivers/net/i40e/base/i40e_register.h:119:#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
2539
./drivers/net/i40e/base/i40e_register.h:270:#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
2540
./drivers/net/i40e/base/i40e_register.h:271:#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT)
2541
./drivers/net/i40e/base/i40e_register.h:1301:#define I40E_QTX_CTL_PF_INDX_SHIFT 2
2542
./drivers/net/i40e/base/i40e_register.h:1302:#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT)
2543
./drivers/net/i40e/base/i40e_register.h:1564:#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
2544
./drivers/net/i40e/base/i40e_register.h:1565:#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
2545
./drivers/net/i40e/base/i40e_register.h:1568:#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
2546
./drivers/net/i40e/base/i40e_register.h:1569:#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
2547
./drivers/net/i40e/base/i40e_register.h:1620:#endif /* PF_DRIVER */
2548
./drivers/net/i40e/base/i40e_register.h:1643:#ifdef PF_DRIVER
2549
./drivers/net/i40e/base/i40e_register.h:1770:#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
2550
./drivers/net/i40e/base/i40e_register.h:1771:#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
2551
./drivers/net/i40e/base/i40e_register.h:1930:#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
2552
./drivers/net/i40e/base/i40e_register.h:1931:#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
2553
./drivers/net/i40e/base/i40e_register.h:1932:#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
2554
./drivers/net/i40e/base/i40e_register.h:1933:#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
2555
./drivers/net/i40e/base/i40e_register.h:1934:#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
2556
./drivers/net/i40e/base/i40e_register.h:1935:#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
2557
./drivers/net/i40e/base/i40e_register.h:1936:#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
2558
./drivers/net/i40e/base/i40e_register.h:1937:#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */
2559
./drivers/net/i40e/base/i40e_register.h:1938:#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
2560
./drivers/net/i40e/base/i40e_register.h:1939:#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
2561
./drivers/net/i40e/base/i40e_register.h:1940:#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
2562
./drivers/net/i40e/base/i40e_register.h:1941:#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
2563
./drivers/net/i40e/base/i40e_register.h:1942:#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */
2564
./drivers/net/i40e/base/i40e_register.h:1943:#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
2565
./drivers/net/i40e/base/i40e_register.h:1944:#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT)
2566
./drivers/net/i40e/base/i40e_register.h:1950:#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2
2567
./drivers/net/i40e/base/i40e_register.h:1951:#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT)
2568
./drivers/net/i40e/base/i40e_register.h:1962:#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0
2569
./drivers/net/i40e/base/i40e_register.h:1963:#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT)
2570
./drivers/net/i40e/base/i40e_register.h:1987:#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */
2571
./drivers/net/i40e/base/i40e_register.h:1988:#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
2572
./drivers/net/i40e/base/i40e_register.h:1989:#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
2573
./drivers/net/i40e/base/i40e_register.h:1997:#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0
2574
./drivers/net/i40e/base/i40e_register.h:1998:#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT)
2575
./drivers/net/i40e/base/i40e_register.h:2931:#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
2576
./drivers/net/i40e/base/i40e_register.h:2932:#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
2577
./drivers/net/i40e/base/i40e_register.h:3028:#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21
2578
./drivers/net/i40e/base/i40e_register.h:3029:#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT)
2579
./drivers/net/i40e/base/i40e_register.h:3034:#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */
2580
./drivers/net/i40e/base/i40e_register.h:3035:#define I40E_PF_MDET_RX_VALID_SHIFT 0
2581
./drivers/net/i40e/base/i40e_register.h:3036:#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT)
2582
./drivers/net/i40e/base/i40e_register.h:3037:#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */
2583
./drivers/net/i40e/base/i40e_register.h:3038:#define I40E_PF_MDET_TX_VALID_SHIFT 0
2584
./drivers/net/i40e/base/i40e_register.h:3039:#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT)
2585
./drivers/net/i40e/base/i40e_register.h:3040:#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */
2586
./drivers/net/i40e/base/i40e_register.h:3041:#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
2587
./drivers/net/i40e/base/i40e_register.h:3042:#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
2588
./drivers/net/i40e/base/i40e_register.h:3043:#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
2589
./drivers/net/i40e/base/i40e_register.h:3044:#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
2590
./drivers/net/i40e/base/i40e_register.h:3045:#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
2591
./drivers/net/i40e/base/i40e_register.h:3046:#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
2592
./drivers/net/i40e/base/i40e_register.h:3064:#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
2593
./drivers/net/i40e/base/i40e_register.h:3065:#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
2594
./drivers/net/i40e/base/i40e_register.h:3153:#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
2595
./drivers/net/i40e/base/i40e_register.h:3154:#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT)
2596
./drivers/net/i40e/base/i40e_register.h:3163:#endif /* PF_DRIVER */
2597
./drivers/net/i40e/base/i40e_register.h:3390:#ifdef PF_DRIVER
2598
./drivers/net/i40e/base/i40e_register.h:5259:#endif /* PF_DRIVER */
2599
./drivers/net/i40e/i40e_ethdev_vf.c:278: case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
2600
./drivers/net/i40e/i40e_ethdev_vf.c:479: if (PF_IS_V11(vf)) {
2601
./drivers/net/i40e/i40e_ethdev.c:700: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2602
./drivers/net/i40e/i40e_ethdev.c:748: hw->back = I40E_PF_TO_ADAPTER(pf);
2603
./drivers/net/i40e/i40e_ethdev.c:2522: mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
2604
./drivers/net/i40e/i40e_ethdev.c:2525: mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
2605
./drivers/net/i40e/i40e_ethdev.c:2658: hw = I40E_PF_TO_HW(pf);
2606
./drivers/net/i40e/i40e_ethdev.c:2723: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2607
./drivers/net/i40e/i40e_ethdev.c:3051: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2608
./drivers/net/i40e/i40e_ethdev.c:3167: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2609
./drivers/net/i40e/i40e_ethdev.c:3626: hw = I40E_PF_TO_HW(pf);
2610
./drivers/net/i40e/i40e_ethdev.c:3832: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2611
./drivers/net/i40e/i40e_ethdev.c:3869: vsi->adapter = I40E_PF_TO_ADAPTER(pf);
2612
./drivers/net/i40e/i40e_ethdev.c:4289: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2613
./drivers/net/i40e/i40e_ethdev.c:4623: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2614
./drivers/net/i40e/i40e_ethdev.c:5639: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2615
./drivers/net/i40e/i40e_ethdev.c:5718: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2616
./drivers/net/i40e/i40e_ethdev.c:5816: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2617
./drivers/net/i40e/i40e_ethdev.c:5891: for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
2618
./drivers/net/i40e/i40e_ethdev.c:5904: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2619
./drivers/net/i40e/i40e_ethdev.c:5946: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2620
./drivers/net/i40e/i40e_ethdev.c:6061: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2621
./drivers/net/i40e/i40e_ethdev.c:7142: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2622
./drivers/net/i40e/i40e_ethdev.c:8242: struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2623
./drivers/net/i40e/i40e_rxtx.c:2710: qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2624
./drivers/net/i40e/i40e_rxtx.c:2711: qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2625
./drivers/net/i40e/i40e_rxtx.c:2712: I40E_QTX_CTL_PF_INDX_MASK);
2626
./drivers/net/nfp/nfp_net_pmd.h:45:#define PCI_DEVICE_ID_NFP6000_PF_NIC 0x6000
2627
./drivers/net/nfp/nfp_net.c:2450: .device_id = PCI_DEVICE_ID_NFP6000_PF_NIC,
2628
./drivers/net/mlx5/mlx5_ethdev.c:321: int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
2629
./drivers/net/fm10k/fm10k_ethdev.c:2372: FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
2630
./drivers/net/fm10k/fm10k_ethdev.c:2373: FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
2631
./drivers/net/fm10k/fm10k_ethdev.c:2374: FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
2632
./drivers/net/fm10k/fm10k_ethdev.c:2375: FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
2633
./drivers/net/fm10k/fm10k_ethdev.c:2376: FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
2634
./drivers/net/fm10k/fm10k_ethdev.c:2377: FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
2635
./drivers/net/fm10k/base/fm10k_pf.c:390: fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE);
2636
./drivers/net/fm10k/base/fm10k_pf.c:391: fm10k_tlv_attr_put_le_struct(msg, FM10K_PF_ATTR_ID_MAC_UPDATE,
2637
./drivers/net/fm10k/base/fm10k_pf.c:476: fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_XCAST_MODES);
2638
./drivers/net/fm10k/base/fm10k_pf.c:477: fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_XCAST_MODE, xcast_mode);
2639
./drivers/net/fm10k/base/fm10k_pf.c:544: fm10k_tlv_msg_init(msg, enable ? FM10K_PF_MSG_ID_LPORT_CREATE :
2640
./drivers/net/fm10k/base/fm10k_pf.c:545: FM10K_PF_MSG_ID_LPORT_DELETE);
2641
./drivers/net/fm10k/base/fm10k_pf.c:546: fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_PORT, lport_msg);
2642
./drivers/net/fm10k/base/fm10k_pf.c:1656: fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_LPORT_MAP);
2643
./drivers/net/fm10k/base/fm10k_pf.c:1698: FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP),
2644
./drivers/net/fm10k/base/fm10k_pf.c:1721: err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_LPORT_MAP],
2645
./drivers/net/fm10k/base/fm10k_pf.c:1745: FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_UPDATE_PVID),
2646
./drivers/net/fm10k/base/fm10k_pf.c:1767: err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
2647
./drivers/net/fm10k/base/fm10k_pf.c:1807: FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
2648
./drivers/net/fm10k/base/fm10k_pf.c:1831: err = fm10k_tlv_attr_get_le_struct(results[FM10K_PF_ATTR_ID_ERR],
2649
./drivers/net/fm10k/base/fm10k_pf.c:1850: FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_TIMESTAMP,
2650
./drivers/net/fm10k/base/fm10k_pf.c:1856: FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_CLOCK_OWNER,
2651
./drivers/net/fm10k/base/fm10k_pf.c:1862: FM10K_TLV_ATTR_U64(FM10K_PF_ATTR_ID_MASTER_CLK_OFFSET),
2652
./drivers/net/fm10k/base/fm10k_pf.c:1907: results[FM10K_PF_ATTR_ID_1588_CLOCK_OWNER],
2653
./drivers/net/fm10k/base/fm10k_pf.c:2000: fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_MASTER_CLK_OFFSET);
2654
./drivers/net/fm10k/base/fm10k_pf.c:2001: fm10k_tlv_attr_put_u64(msg, FM10K_PF_ATTR_ID_MASTER_CLK_OFFSET, offset);
2655
./drivers/net/fm10k/base/fm10k_pf.c:2033: FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
2656
./drivers/net/fm10k/base/fm10k_pf.c:2034: FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
2657
./drivers/net/fm10k/base/fm10k_pf.c:2035: FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
2658
./drivers/net/fm10k/base/fm10k_pf.c:2036: FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
2659
./drivers/net/fm10k/base/fm10k_pf.c:2037: FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
2660
./drivers/net/fm10k/base/fm10k_pf.c:2038: FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
2661
./drivers/net/fm10k/base/fm10k_pf.c:2039: FM10K_PF_MSG_1588_CLOCK_OWNER_HANDLER(fm10k_msg_1588_clock_owner_pf),
2662
./drivers/net/fm10k/base/fm10k_mbx.h:49:#define FM10K_MBMEM_PF_XOR (FM10K_MBMEM_SM(0) ^ FM10K_MBMEM_PF(0))
2663
./drivers/net/fm10k/base/fm10k_mbx.h:215:#define FM10K_SM_MBX_FIFO_LEN (FM10K_MBMEM_PF_XOR - 1)
2664
./drivers/net/fm10k/base/fm10k_pf.h:34:#ifndef _FM10K_PF_H_
2665
./drivers/net/fm10k/base/fm10k_pf.h:35:#define _FM10K_PF_H_
2666
./drivers/net/fm10k/base/fm10k_pf.h:45: FM10K_PF_MSG_ID_TEST = 0x000, /* msg ID reserved */
2667
./drivers/net/fm10k/base/fm10k_pf.h:46: FM10K_PF_MSG_ID_XCAST_MODES = 0x001,
2668
./drivers/net/fm10k/base/fm10k_pf.h:47: FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE = 0x002,
2669
./drivers/net/fm10k/base/fm10k_pf.h:48: FM10K_PF_MSG_ID_LPORT_MAP = 0x100,
2670
./drivers/net/fm10k/base/fm10k_pf.h:49: FM10K_PF_MSG_ID_LPORT_CREATE = 0x200,
2671
./drivers/net/fm10k/base/fm10k_pf.h:50: FM10K_PF_MSG_ID_LPORT_DELETE = 0x201,
2672
./drivers/net/fm10k/base/fm10k_pf.h:51: FM10K_PF_MSG_ID_CONFIG = 0x300,
2673
./drivers/net/fm10k/base/fm10k_pf.h:52: FM10K_PF_MSG_ID_UPDATE_PVID = 0x400,
2674
./drivers/net/fm10k/base/fm10k_pf.h:53: FM10K_PF_MSG_ID_CREATE_FLOW_TABLE = 0x501,
2675
./drivers/net/fm10k/base/fm10k_pf.h:54: FM10K_PF_MSG_ID_DELETE_FLOW_TABLE = 0x502,
2676
./drivers/net/fm10k/base/fm10k_pf.h:55: FM10K_PF_MSG_ID_UPDATE_FLOW = 0x503,
2677
./drivers/net/fm10k/base/fm10k_pf.h:56: FM10K_PF_MSG_ID_DELETE_FLOW = 0x504,
2678
./drivers/net/fm10k/base/fm10k_pf.h:57: FM10K_PF_MSG_ID_SET_FLOW_STATE = 0x505,
2679
./drivers/net/fm10k/base/fm10k_pf.h:58: FM10K_PF_MSG_ID_GET_1588_INFO = 0x506,
2680
./drivers/net/fm10k/base/fm10k_pf.h:59: FM10K_PF_MSG_ID_1588_TIMESTAMP = 0x701,
2681
./drivers/net/fm10k/base/fm10k_pf.h:60: FM10K_PF_MSG_ID_1588_CLOCK_OWNER = 0x702,
2682
./drivers/net/fm10k/base/fm10k_pf.h:61: FM10K_PF_MSG_ID_MASTER_CLK_OFFSET = 0x703,
2683
./drivers/net/fm10k/base/fm10k_pf.h:65: FM10K_PF_ATTR_ID_ERR = 0x00,
2684
./drivers/net/fm10k/base/fm10k_pf.h:66: FM10K_PF_ATTR_ID_LPORT_MAP = 0x01,
2685
./drivers/net/fm10k/base/fm10k_pf.h:67: FM10K_PF_ATTR_ID_XCAST_MODE = 0x02,
2686
./drivers/net/fm10k/base/fm10k_pf.h:68: FM10K_PF_ATTR_ID_MAC_UPDATE = 0x03,
2687
./drivers/net/fm10k/base/fm10k_pf.h:69: FM10K_PF_ATTR_ID_VLAN_UPDATE = 0x04,
2688
./drivers/net/fm10k/base/fm10k_pf.h:70: FM10K_PF_ATTR_ID_CONFIG = 0x05,
2689
./drivers/net/fm10k/base/fm10k_pf.h:71: FM10K_PF_ATTR_ID_CREATE_FLOW_TABLE = 0x06,
2690
./drivers/net/fm10k/base/fm10k_pf.h:72: FM10K_PF_ATTR_ID_DELETE_FLOW_TABLE = 0x07,
2691
./drivers/net/fm10k/base/fm10k_pf.h:73: FM10K_PF_ATTR_ID_UPDATE_FLOW = 0x08,
2692
./drivers/net/fm10k/base/fm10k_pf.h:74: FM10K_PF_ATTR_ID_FLOW_STATE = 0x09,
2693
./drivers/net/fm10k/base/fm10k_pf.h:75: FM10K_PF_ATTR_ID_FLOW_HANDLE = 0x0A,
2694
./drivers/net/fm10k/base/fm10k_pf.h:76: FM10K_PF_ATTR_ID_DELETE_FLOW = 0x0B,
2695
./drivers/net/fm10k/base/fm10k_pf.h:77: FM10K_PF_ATTR_ID_PORT = 0x0C,
2696
./drivers/net/fm10k/base/fm10k_pf.h:78: FM10K_PF_ATTR_ID_UPDATE_PVID = 0x0D,
2697
./drivers/net/fm10k/base/fm10k_pf.h:79: FM10K_PF_ATTR_ID_1588_TIMESTAMP = 0x10,
2698
./drivers/net/fm10k/base/fm10k_pf.h:80: FM10K_PF_ATTR_ID_1588_CLOCK_OWNER = 0x12,
2699
./drivers/net/fm10k/base/fm10k_pf.h:81: FM10K_PF_ATTR_ID_MASTER_CLK_OFFSET = 0x14,
2700
./drivers/net/fm10k/base/fm10k_pf.h:143:#define FM10K_PF_MSG_LPORT_CREATE_HANDLER(func) \
2701
./drivers/net/fm10k/base/fm10k_pf.h:144: FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_LPORT_CREATE, NULL, func)
2702
./drivers/net/fm10k/base/fm10k_pf.h:145:#define FM10K_PF_MSG_LPORT_DELETE_HANDLER(func) \
2703
./drivers/net/fm10k/base/fm10k_pf.h:146: FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_LPORT_DELETE, NULL, func)
2704
./drivers/net/fm10k/base/fm10k_pf.h:149:#define FM10K_PF_MSG_LPORT_MAP_HANDLER(func) \
2705
./drivers/net/fm10k/base/fm10k_pf.h:150: FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_LPORT_MAP, \
2706
./drivers/net/fm10k/base/fm10k_pf.h:155:#define FM10K_PF_MSG_UPDATE_PVID_HANDLER(func) \
2707
./drivers/net/fm10k/base/fm10k_pf.h:156: FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_UPDATE_PVID, \
2708
./drivers/net/fm10k/base/fm10k_pf.h:161:#define FM10K_PF_MSG_ERR_HANDLER(msg, func) \
2709
./drivers/net/fm10k/base/fm10k_pf.h:162: FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_##msg, fm10k_err_msg_attr, func)
2710
./drivers/net/fm10k/base/fm10k_pf.h:165:#define FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(func) \
2711
./drivers/net/fm10k/base/fm10k_pf.h:166: FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_TIMESTAMP, \
2712
./drivers/net/fm10k/base/fm10k_pf.h:172:#define FM10K_PF_MSG_1588_CLOCK_OWNER_HANDLER(func) \
2713
./drivers/net/fm10k/base/fm10k_pf.h:173: FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_CLOCK_OWNER, \
2714
./drivers/net/fm10k/base/fm10k_pf.h:177:#define FM10K_PF_MSG_MASTER_CLK_OFFSET_HANDLER(func) \
2715
./drivers/net/fm10k/base/fm10k_pf.h:178: FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_MASTER_CLK_OFFSET, \
2716
./drivers/net/fm10k/base/fm10k_pf.h:189:#endif /* _FM10K_PF_H */
2717
./drivers/net/fm10k/base/fm10k_mbx.c:2221: mbx->mbmem_len = FM10K_MBMEM_PF_XOR;
2718
./drivers/net/bnx2x/bnx2x.h:621: BNX2X_PF_QUERY_IDX,
2719
./drivers/net/bnx2x/bnx2x.h:641:#define BNX2X_IGU_STAS_MSG_PF_CNT 4
2720
./drivers/net/bnx2x/bnx2x.c:843: REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)),
2721
./drivers/net/bnx2x/bnx2x.c:845: REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)),
2722
./drivers/net/bnx2x/bnx2x.c:847: REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)),
2723
./drivers/net/bnx2x/bnx2x.c:849: REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)),
2724
./drivers/net/bnx2x/bnx2x.c:5452: (BNX2X_IGU_STAS_MSG_PF_CNT * 4) +
2725
./drivers/net/bnx2x/bnx2x.c:5558: val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
2726
./drivers/net/bnx2x/bnx2x.c:5561: val &= ~(IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_SINGLE_ISR_EN);
2727
./drivers/net/bnx2x/bnx2x.c:5562: val |= (IGU_PF_CONF_MSI_MSIX_EN | IGU_PF_CONF_ATTN_BIT_EN);
2728
./drivers/net/bnx2x/bnx2x.c:5564: val |= IGU_PF_CONF_SINGLE_ISR_EN;
2729
./drivers/net/bnx2x/bnx2x.c:5567: val &= ~IGU_PF_CONF_INT_LINE_EN;
2730
./drivers/net/bnx2x/bnx2x.c:5568: val |= (IGU_PF_CONF_MSI_MSIX_EN |
2731
./drivers/net/bnx2x/bnx2x.c:5569: IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN);
2732
./drivers/net/bnx2x/bnx2x.c:5571: val &= ~IGU_PF_CONF_MSI_MSIX_EN;
2733
./drivers/net/bnx2x/bnx2x.c:5572: val |= (IGU_PF_CONF_INT_LINE_EN |
2734
./drivers/net/bnx2x/bnx2x.c:5573: IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN);
2735
./drivers/net/bnx2x/bnx2x.c:5578: REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
2736
./drivers/net/bnx2x/bnx2x.c:5582: val |= IGU_PF_CONF_FUNC_EN;
2737
./drivers/net/bnx2x/bnx2x.c:5587: REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
2738
./drivers/net/bnx2x/bnx2x.c:5638: uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
2739
./drivers/net/bnx2x/bnx2x.c:5640: val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
2740
./drivers/net/bnx2x/bnx2x.c:5641: IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_ATTN_BIT_EN);
2741
./drivers/net/bnx2x/bnx2x.c:5648: REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
2742
./drivers/net/bnx2x/bnx2x.c:5649: if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
2743
./drivers/net/bnx2x/bnx2x.c:7528: (uint8_t) ((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
2744
./drivers/net/bnx2x/bnx2x.c:7530: (uint8_t) ((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) &
2745
./drivers/net/bnx2x/bnx2x.c:8196: if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
2746
./drivers/net/bnx2x/bnx2x.c:9024: REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
2747
./drivers/net/bnx2x/bnx2x.c:9807: (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
2748
./drivers/net/bnx2x/bnx2x.c:9899: uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
2749
./drivers/net/bnx2x/bnx2x.c:9901: val &= ~IGU_PF_CONF_FUNC_EN;
2750
./drivers/net/bnx2x/bnx2x.c:9903: REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
2751
./drivers/net/bnx2x/bnx2x.c:10802: DORQ_REG_PF_USAGE_CNT,
2752
./drivers/net/bnx2x/bnx2x.c:10810: QM_REG_PF_USG_CNT_0 + 4 * SC_FUNC(sc),
2753
./drivers/net/bnx2x/bnx2x.c:10995: val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
2754
./drivers/net/bnx2x/bnx2x.c:10996: PMD_DRV_LOG(DEBUG, "IGU_REG_PCI_PF_MSI_EN is 0x%x", val);
2755
./drivers/net/bnx2x/bnx2x.c:10998: val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
2756
./drivers/net/bnx2x/bnx2x.c:10999: PMD_DRV_LOG(DEBUG, "IGU_REG_PCI_PF_MSIX_EN is 0x%x", val);
2757
./drivers/net/bnx2x/bnx2x.c:11001: val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
2758
./drivers/net/bnx2x/bnx2x.c:11002: PMD_DRV_LOG(DEBUG, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x", val);
2759
./drivers/net/bnx2x/bnx2x.c:11004: val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
2760
./drivers/net/bnx2x/bnx2x.c:11005: PMD_DRV_LOG(DEBUG, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x", val);
2761
./drivers/net/bnx2x/bnx2x.c:11007: val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
2762
./drivers/net/bnx2x/bnx2x.c:11008: PMD_DRV_LOG(DEBUG, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x", val);
2763
./drivers/net/bnx2x/bnx2x.c:11118: uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
2764
./drivers/net/bnx2x/bnx2x.c:11125: pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
2765
./drivers/net/bnx2x/bnx2x.c:11143: REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
2766
./drivers/net/bnx2x/bnx2x.c:11151: REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
2767
./drivers/net/bnx2x/bnx2x.c:11168: REG_WR(sc, QM_REG_PF_EN, 1);
2768
./drivers/net/bnx2x/bnx2x.c:11171: REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
2769
./drivers/net/bnx2x/bnx2x.c:11172: REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
2770
./drivers/net/bnx2x/bnx2x.c:11173: REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
2771
./drivers/net/bnx2x/bnx2x.c:11174: REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
2772
./drivers/net/bnx2x/ecore_sp.c:724: if (index > ECORE_LLH_CAM_MAX_PF_LINE)
2773
./drivers/net/bnx2x/ecore_fw_defs.h:28:#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
2774
./drivers/net/bnx2x/ecore_fw_defs.h:30:#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
2775
./drivers/net/bnx2x/ecore_fw_defs.h:32:#define CSTORM_VF_TO_PF_OFFSET(funcId) \
2776
./drivers/net/bnx2x/ecore_fw_defs.h:96:#define CSTORM_VF_TO_PF_OFFSET(funcId) \
2777
./drivers/net/bnx2x/ecore_fw_defs.h:134:#define TSTORM_VF_TO_PF_OFFSET(funcId) \
2778
./drivers/net/bnx2x/ecore_fw_defs.h:180:#define USTORM_VF_TO_PF_OFFSET(funcId) \
2779
./drivers/net/bnx2x/ecore_fw_defs.h:245:#define XSTORM_VF_TO_PF_OFFSET(funcId) \
2780
./drivers/net/bnx2x/ecore_hsi.h:272: #define SHARED_HW_CFG_PF_MSIX_MAX_NUM_MASK 0x0000007F
2781
./drivers/net/bnx2x/ecore_hsi.h:273: #define SHARED_HW_CFG_PF_MSIX_MAX_NUM_SHIFT 0
2782
./drivers/net/bnx2x/ecore_hsi.h:2341: #define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV 0x00000001
2783
./drivers/net/bnx2x/ecore_hsi.h:2490: #define PF_ALLOACTION_MSIX_VECTORS_MASK 0x000000ff /* real value, as PCI config space can show only maximum of 64 vectors */
2784
./drivers/net/bnx2x/ecore_hsi.h:2491: #define PF_ALLOACTION_MSIX_VECTORS_SHIFT 0
2785
./drivers/net/bnx2x/ecore_hsi.h:3291:#define DMAE_COMMAND_SRC_VFPF_SHIFT 6
2786
./drivers/net/bnx2x/ecore_hsi.h:3297:#define DMAE_COMMAND_DST_VFPF_SHIFT 14
2787
./drivers/net/bnx2x/ecore_hsi.h:3307:#define DMAE_COMMAND_SRC_VFPF_SHIFT 6
2788
./drivers/net/bnx2x/ecore_hsi.h:3313:#define DMAE_COMMAND_DST_VFPF_SHIFT 14
2789
./drivers/net/bnx2x/ecore_hsi.h:5575: EVENT_RING_OPCODE_VF_PF_CHANNEL,
2790
./drivers/net/bnx2x/ecore_hsi.h:5904: VF_PF_CHANNEL_NOT_READY /* Writing to VF/PF channel when it is not ready */,
2791
./drivers/net/bnx2x/ecore_hsi.h:6295: VF_PF_CHANNEL_STATE_READY /* Channel is ready to accept a message from VF */,
2792
./drivers/net/bnx2x/ecore_hsi.h:6296: VF_PF_CHANNEL_STATE_WAITING_FOR_ACK /* Channel waits for an ACK from PF */,
2793
./drivers/net/bnx2x/ecore_hsi.h:6297: MAX_VF_PF_CHANNEL_STATE};
2794
./drivers/net/bnx2x/ecore_sp.h:839: ECORE_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
2795
./drivers/net/bnx2x/bnx2x_stats.c:1385: cur_query_entry = &sc->fw_stats_req->query[BNX2X_PF_QUERY_IDX];
2796
./drivers/net/bnx2x/ecore_init.h:226:#define ECORE_PF_Q_NUM(q_num, port, vnic)\
2797
./drivers/net/bnx2x/ecore_init.h:253: ECORE_PF_Q_NUM(q_num, PORT_ID(sc), vnic);
2798
./drivers/net/bnx2x/bnx2x_vfpf.h:11:#ifndef BNX2X_VFPF_H
2799
./drivers/net/bnx2x/bnx2x_vfpf.h:12:#define BNX2X_VFPF_H
2800
./drivers/net/bnx2x/bnx2x_vfpf.h:315: BNX2X_VF_TLV_PF_RELEASE_VF,
2801
./drivers/net/bnx2x/bnx2x_vfpf.h:318: BNX2X_VF_TLV_PF_SET_MAC,
2802
./drivers/net/bnx2x/bnx2x_vfpf.h:319: BNX2X_VF_TLV_PF_SET_VLAN,
2803
./drivers/net/bnx2x/bnx2x_vfpf.h:334:#endif /* BNX2X_VFPF_H */
2804
./drivers/net/bnx2x/ecore_reg.h:123:#define CSEM_REG_VFPF_ERR_NUM \
2805
./drivers/net/bnx2x/ecore_reg.h:185:#define DORQ_REG_PF_USAGE_CNT \
2806
./drivers/net/bnx2x/ecore_reg.h:279:#define IGU_REG_PCI_PF_MSIX_EN \
2807
./drivers/net/bnx2x/ecore_reg.h:281:#define IGU_REG_PCI_PF_MSIX_FUNC_MASK \
2808
./drivers/net/bnx2x/ecore_reg.h:283:#define IGU_REG_PCI_PF_MSI_EN \
2809
./drivers/net/bnx2x/ecore_reg.h:287:#define IGU_REG_PF_CONFIGURATION \
2810
./drivers/net/bnx2x/ecore_reg.h:1029:#define PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR \
2811
./drivers/net/bnx2x/ecore_reg.h:1053:#define PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR \
2812
./drivers/net/bnx2x/ecore_reg.h:1057:#define PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR \
2813
./drivers/net/bnx2x/ecore_reg.h:1479:#define QM_REG_PF_EN \
2814
./drivers/net/bnx2x/ecore_reg.h:1481:#define QM_REG_PF_USG_CNT_0 \
2815
./drivers/net/bnx2x/ecore_reg.h:1591:#define TSEM_REG_VFPF_ERR_NUM \
2816
./drivers/net/bnx2x/ecore_reg.h:1661:#define USEM_REG_VFPF_ERR_NUM \
2817
./drivers/net/bnx2x/ecore_reg.h:1751:#define XSEM_REG_VFPF_ERR_NUM \
2818
./drivers/net/bnx2x/ecore_reg.h:2459:#define GRC_CONFIG_REG_PF_INIT_VF 0x624
2819
./drivers/net/bnx2x/ecore_reg.h:2460:#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK \
2820
./drivers/net/bnx2x/ecore_reg.h:2704:#define ME_REG_PF_NUM_SHIFT 0
2821
./drivers/net/bnx2x/ecore_reg.h:2705:#define ME_REG_PF_NUM \
2822
./drivers/net/bnx2x/ecore_reg.h:2706: (7L<<ME_REG_PF_NUM_SHIFT) /* Relative PF Num */
2823
./drivers/net/bnx2x/ecore_reg.h:2712:#define ME_REG_ABS_PF_NUM_SHIFT 16
2824
./drivers/net/bnx2x/ecore_reg.h:2713:#define ME_REG_ABS_PF_NUM \
2825
./drivers/net/bnx2x/ecore_reg.h:2714: (7L<<ME_REG_ABS_PF_NUM_SHIFT) /* Absolute PF Num */
2826
./drivers/net/bnx2x/ecore_reg.h:3590:#define IGU_PF_CONF_FUNC_EN (0x1<<0) /* function enable */
2827
./drivers/net/bnx2x/ecore_reg.h:3591:#define IGU_PF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
2828
./drivers/net/bnx2x/ecore_reg.h:3592:#define IGU_PF_CONF_INT_LINE_EN (0x1<<2) /* INT enable */
2829
./drivers/net/bnx2x/ecore_reg.h:3593:#define IGU_PF_CONF_ATTN_BIT_EN (0x1<<3) /* attention enable */
2830
./drivers/net/bnx2x/ecore_reg.h:3594:#define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
2831
./drivers/net/bnx2x/ecore_reg.h:3595:#define IGU_PF_CONF_SIMD_MODE (0x1<<5) /* simd all ones mode */
2832
./drivers/net/bnx2x/ecore_reg.h:3615:#define IGU_FID_ENCODE_IS_PF_SHIFT 6
2833
./drivers/net/bnx2x/ecore_reg.h:3617:#define IGU_FID_PF_NUM_MASK (0x7)
2834
./drivers/net/e1000/base/e1000_mbx.h:64: * PF. The reverse is true if it is E1000_PF_*.
2835
./drivers/net/e1000/base/e1000_mbx.h:89:#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
2836
./drivers/net/e1000/igb_ethdev.c:2755:#ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
2837
./drivers/net/cxgbe/cxgbe_main.c:984: t4_write_reg(adap, MYPF_REG(A_SGE_PF_GTS),
2838
./drivers/net/cxgbe/cxgbe_main.c:999: t4_write_reg(adap, MYPF_REG(A_SGE_PF_GTS),
2839
./drivers/net/cxgbe/base/t4_hw.c:297:#define X_CIM_PF_NOACCESS 0xeeeeeeee
2840
./drivers/net/cxgbe/base/t4_hw.c:356: u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
2841
./drivers/net/cxgbe/base/t4_hw.c:357: u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
2842
./drivers/net/cxgbe/base/t4_hw.c:509: if (v == X_CIM_PF_NOACCESS)
2843
./drivers/net/cxgbe/base/t4_hw.c:1001:#define PF_INTR_MASK (F_PFSW | F_PFCIM)
2844
./drivers/net/cxgbe/base/t4_hw.c:1030: t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
2845
./drivers/net/cxgbe/base/t4_hw.c:1046: t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2846
./drivers/net/cxgbe/base/t4_hw.c:2262: if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
2847
./drivers/net/cxgbe/base/t4_hw.c:2267: return (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS
2848
./drivers/net/cxgbe/base/t4fw_interface.h:984:#define VI_PF_NUM_STATS 17
2849
./drivers/net/cxgbe/base/t4fw_interface.h:986: FW_VI_PF_STAT_TX_BCAST_BYTES_IX,
2850
./drivers/net/cxgbe/base/t4fw_interface.h:987: FW_VI_PF_STAT_TX_BCAST_FRAMES_IX,
2851
./drivers/net/cxgbe/base/t4fw_interface.h:988: FW_VI_PF_STAT_TX_MCAST_BYTES_IX,
2852
./drivers/net/cxgbe/base/t4fw_interface.h:989: FW_VI_PF_STAT_TX_MCAST_FRAMES_IX,
2853
./drivers/net/cxgbe/base/t4fw_interface.h:990: FW_VI_PF_STAT_TX_UCAST_BYTES_IX,
2854
./drivers/net/cxgbe/base/t4fw_interface.h:991: FW_VI_PF_STAT_TX_UCAST_FRAMES_IX,
2855
./drivers/net/cxgbe/base/t4fw_interface.h:992: FW_VI_PF_STAT_TX_OFLD_BYTES_IX,
2856
./drivers/net/cxgbe/base/t4fw_interface.h:993: FW_VI_PF_STAT_TX_OFLD_FRAMES_IX,
2857
./drivers/net/cxgbe/base/t4fw_interface.h:994: FW_VI_PF_STAT_RX_BYTES_IX,
2858
./drivers/net/cxgbe/base/t4fw_interface.h:995: FW_VI_PF_STAT_RX_FRAMES_IX,
2859
./drivers/net/cxgbe/base/t4fw_interface.h:996: FW_VI_PF_STAT_RX_BCAST_BYTES_IX,
2860
./drivers/net/cxgbe/base/t4fw_interface.h:997: FW_VI_PF_STAT_RX_BCAST_FRAMES_IX,
2861
./drivers/net/cxgbe/base/t4fw_interface.h:998: FW_VI_PF_STAT_RX_MCAST_BYTES_IX,
2862
./drivers/net/cxgbe/base/t4fw_interface.h:999: FW_VI_PF_STAT_RX_MCAST_FRAMES_IX,
2863
./drivers/net/cxgbe/base/t4fw_interface.h:1000: FW_VI_PF_STAT_RX_UCAST_BYTES_IX,
2864
./drivers/net/cxgbe/base/t4fw_interface.h:1001: FW_VI_PF_STAT_RX_UCAST_FRAMES_IX,
2865
./drivers/net/cxgbe/base/t4fw_interface.h:1002: FW_VI_PF_STAT_RX_ERR_FRAMES_IX
2866
./drivers/net/cxgbe/base/t4_regs.h:34:#define MYPF_BASE 0x1b000
2867
./drivers/net/cxgbe/base/t4_regs.h:35:#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr))
2868
./drivers/net/cxgbe/base/t4_regs.h:40:#define PF_STRIDE 0x400
2869
./drivers/net/cxgbe/base/t4_regs.h:41:#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE)
2870
./drivers/net/cxgbe/base/t4_regs.h:42:#define PF_REG(idx, reg) (PF_BASE(idx) + (reg))
2871
./drivers/net/cxgbe/base/t4_regs.h:79:#define A_SGE_PF_KDOORBELL 0x0
2872
./drivers/net/cxgbe/base/t4_regs.h:104:#define A_SGE_PF_GTS 0x4
2873
./drivers/net/cxgbe/base/t4_regs.h:426:#define A_CIM_PF_MAILBOX_DATA 0x240
2874
./drivers/net/cxgbe/base/t4_regs.h:427:#define A_CIM_PF_MAILBOX_CTRL 0x280
2875
./drivers/net/cxgbe/base/t4_regs.h:438:#define A_CIM_PF_MAILBOX_CTRL_SHADOW_COPY 0x290
2876
./drivers/net/cxgbe/base/t4_regs.h:766:#define A_PL_PF_INT_ENABLE 0x3c4
2877
./drivers/net/cxgbe/sge.c:341: t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
2878
./drivers/net/cxgbe/sge.c:714: t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
2879
./drivers/net/cxgbe/sge.c:1546: t4_write_reg(q->adapter, MYPF_REG(A_SGE_PF_GTS),
2880
./drivers/net/af_packet/rte_eth_af_packet.c:58:#define ETH_AF_PACKET_IFACE_ARG "iface"
2881
./drivers/net/af_packet/rte_eth_af_packet.c:59:#define ETH_AF_PACKET_NUM_Q_ARG "qpairs"
2882
./drivers/net/af_packet/rte_eth_af_packet.c:60:#define ETH_AF_PACKET_BLOCKSIZE_ARG "blocksz"
2883
./drivers/net/af_packet/rte_eth_af_packet.c:61:#define ETH_AF_PACKET_FRAMESIZE_ARG "framesz"
2884
./drivers/net/af_packet/rte_eth_af_packet.c:62:#define ETH_AF_PACKET_FRAMECOUNT_ARG "framecnt"
2885
./drivers/net/af_packet/rte_eth_af_packet.c:103: struct pkt_rx_queue rx_queue[RTE_PMD_AF_PACKET_MAX_RINGS];
2886
./drivers/net/af_packet/rte_eth_af_packet.c:104: struct pkt_tx_queue tx_queue[RTE_PMD_AF_PACKET_MAX_RINGS];
2887
./drivers/net/af_packet/rte_eth_af_packet.c:108: ETH_AF_PACKET_IFACE_ARG,
2888
./drivers/net/af_packet/rte_eth_af_packet.c:109: ETH_AF_PACKET_NUM_Q_ARG,
2889
./drivers/net/af_packet/rte_eth_af_packet.c:110: ETH_AF_PACKET_BLOCKSIZE_ARG,
2890
./drivers/net/af_packet/rte_eth_af_packet.c:111: ETH_AF_PACKET_FRAMESIZE_ARG,
2891
./drivers/net/af_packet/rte_eth_af_packet.c:112: ETH_AF_PACKET_FRAMECOUNT_ARG,
2892
./drivers/net/af_packet/rte_eth_af_packet.c:116:static const char *drivername = "AF_PACKET PMD";
2893
./drivers/net/af_packet/rte_eth_af_packet.c:139: * Reads the given number of packets from the AF_PACKET socket one by
2894
./drivers/net/af_packet/rte_eth_af_packet.c:405: * Opens an AF_PACKET socket
2895
./drivers/net/af_packet/rte_eth_af_packet.c:414: /* Open an AF_PACKET socket... */
2896
./drivers/net/af_packet/rte_eth_af_packet.c:415: *sockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
2897
./drivers/net/af_packet/rte_eth_af_packet.c:417: RTE_LOG(ERR, PMD, "Could not open AF_PACKET socket\n");
2898
./drivers/net/af_packet/rte_eth_af_packet.c:453: if (strstr(pair->key, ETH_AF_PACKET_IFACE_ARG) != NULL)
2899
./drivers/net/af_packet/rte_eth_af_packet.c:458: "%s: no interface specified for AF_PACKET ethdev\n",
2900
./drivers/net/af_packet/rte_eth_af_packet.c:464: "%s: creating AF_PACKET-backed ethdev on numa socket %u\n",
2901
./drivers/net/af_packet/rte_eth_af_packet.c:519: sockaddr.sll_family = AF_PACKET;
2902
./drivers/net/af_packet/rte_eth_af_packet.c:532: /* Open an AF_PACKET socket for this queue... */
2903
./drivers/net/af_packet/rte_eth_af_packet.c:533: qsockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
2904
./drivers/net/af_packet/rte_eth_af_packet.c:536: "%s: could not open AF_PACKET socket\n",
2905
./drivers/net/af_packet/rte_eth_af_packet.c:546: "%s: could not set PACKET_VERSION on AF_PACKET "
2906
./drivers/net/af_packet/rte_eth_af_packet.c:557: "AF_PACKET socket for %s\n", name, pair->value);
2907
./drivers/net/af_packet/rte_eth_af_packet.c:568: "on AF_PACKET socket for %s\n", name,
2908
./drivers/net/af_packet/rte_eth_af_packet.c:577: "%s: could not set PACKET_RX_RING on AF_PACKET "
2909
./drivers/net/af_packet/rte_eth_af_packet.c:585: "%s: could not set PACKET_TX_RING on AF_PACKET "
2910
./drivers/net/af_packet/rte_eth_af_packet.c:598: "%s: call to mmap failed on AF_PACKET socket for %s\n",
2911
./drivers/net/af_packet/rte_eth_af_packet.c:632: "%s: could not bind AF_PACKET socket to %s\n",
2912
./drivers/net/af_packet/rte_eth_af_packet.c:642: "%s: could not set PACKET_FANOUT on AF_PACKET socket "
2913
./drivers/net/af_packet/rte_eth_af_packet.c:725: if (strstr(pair->key, ETH_AF_PACKET_NUM_Q_ARG) != NULL) {
2914
./drivers/net/af_packet/rte_eth_af_packet.c:728: qpairs > RTE_PMD_AF_PACKET_MAX_RINGS) {
2915
./drivers/net/af_packet/rte_eth_af_packet.c:736: if (strstr(pair->key, ETH_AF_PACKET_BLOCKSIZE_ARG) != NULL) {
2916
./drivers/net/af_packet/rte_eth_af_packet.c:746: if (strstr(pair->key, ETH_AF_PACKET_FRAMESIZE_ARG) != NULL) {
2917
./drivers/net/af_packet/rte_eth_af_packet.c:756: if (strstr(pair->key, ETH_AF_PACKET_FRAMECOUNT_ARG) != NULL) {
2918
./drivers/net/af_packet/rte_eth_af_packet.c:770: "%s: AF_PACKET MMAP frame size exceeds block size!\n",
2919
./drivers/net/af_packet/rte_eth_af_packet.c:778: "%s: invalid AF_PACKET MMAP parameters\n", name);
2920
./drivers/net/af_packet/rte_eth_af_packet.c:782: RTE_LOG(INFO, PMD, "%s: AF_PACKET MMAP parameters:\n", name);
2921
./drivers/net/af_packet/rte_eth_af_packet.c:823: if (rte_kvargs_count(kvlist, ETH_AF_PACKET_IFACE_ARG) == 1) {
2922
./drivers/net/af_packet/rte_eth_af_packet.c:825: ret = rte_kvargs_process(kvlist, ETH_AF_PACKET_IFACE_ARG,
2923
./drivers/net/af_packet/rte_eth_af_packet.h:34:#ifndef _RTE_ETH_AF_PACKET_H_
2924
./drivers/net/af_packet/rte_eth_af_packet.h:35:#define _RTE_ETH_AF_PACKET_H_
2925
./drivers/net/af_packet/rte_eth_af_packet.h:41:#define RTE_PMD_AF_PACKET_MAX_RINGS 16
2926
./app/test/test_cmdline_ipaddr.c:111: {"0.0.0.0", {AF_INET, {IP4(0,0,0,0)}, 0},
2927
./app/test/test_cmdline_ipaddr.c:113: {"0.0.0.0/0", {AF_INET, {IP4(0,0,0,0)}, 0},
2928
./app/test/test_cmdline_ipaddr.c:115: {"0.0.0.0/24", {AF_INET, {IP4(0,0,0,0)}, 24},
2929
./app/test/test_cmdline_ipaddr.c:117: {"192.168.1.0/24", {AF_INET, {IP4(192,168,1,0)}, 24},
2930
./app/test/test_cmdline_ipaddr.c:119: {"012.34.56.78/24", {AF_INET, {IP4(12,34,56,78)}, 24},
2931
./app/test/test_cmdline_ipaddr.c:121: {"34.56.78.90/1", {AF_INET, {IP4(34,56,78,90)}, 1},
2932
./app/test/test_cmdline_ipaddr.c:123: {"::", {AF_INET6, {IP6(0,0,0,0,0,0,0,0)}, 0},
2933
./app/test/test_cmdline_ipaddr.c:125: {"::1", {AF_INET6, {IP6(0,0,0,0,0,0,0,1)}, 0},
2934
./app/test/test_cmdline_ipaddr.c:127: {"::1/32", {AF_INET6, {IP6(0,0,0,0,0,0,0,1)}, 32},
2935
./app/test/test_cmdline_ipaddr.c:129: {"::/32", {AF_INET6, {IP6(0,0,0,0,0,0,0,0)}, 32},
2936
./app/test/test_cmdline_ipaddr.c:132: {"1234:5678:90ab:cdef:4321:8765:BA09:FEDC", {AF_INET6,
2937
./app/test/test_cmdline_ipaddr.c:136: {"1234::1234/64", {AF_INET6,
2938
./app/test/test_cmdline_ipaddr.c:140: {"1234::/64", {AF_INET6,
2939
./app/test/test_cmdline_ipaddr.c:144: {"1:1::1/32", {AF_INET6,
2940
./app/test/test_cmdline_ipaddr.c:148: {"1:2:3:4::/64", {AF_INET6,
2941
./app/test/test_cmdline_ipaddr.c:152: {"::ffff:192.168.1.0/64", {AF_INET6,
2942
./app/test/test_cmdline_ipaddr.c:157: {"1::2:3:4:5:6:7", {AF_INET6,
2943
./app/test/test_cmdline_ipaddr.c:320: case AF_INET:
2944
./app/test/test_cmdline_ipaddr.c:326: case AF_INET6:
2945
./app/test/test_cmdline_ipaddr.c:350: case AF_INET:
2946
./app/test/test_cmdline_ipaddr.c:356: case AF_INET6:
2947
./app/test-pmd/cmdline.c:6645: if (res->ip_value.family == AF_INET) {
2948
./app/test-pmd/cmdline.c:7536: if (res->dst_ip_value.family == AF_INET)
2949
./app/test-pmd/cmdline.c:7547: if (res->src_ip_value.family == AF_INET)
2950
./app/test-pmd/cmdline.c:8095: if ((ip_addr).family == AF_INET) \
2951
./app/test-pmd/cmdline.c:8105: if ((ip_addr).family == AF_INET6) \
2952
./lib/librte_vhost/vhost_user/vhost-net-user.c:120: sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
2953
./lib/librte_vhost/vhost_user/vhost-net-user.c:126: un.sun_family = AF_UNIX;
2954
./lib/librte_eal/linuxapp/kni/kni_vhost.c:254: (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock->flags) &&
2955
./lib/librte_eal/linuxapp/kni/kni_vhost.c:602: ((struct sockaddr_ll*)addr)->sll_family = AF_PACKET;
2956
./lib/librte_eal/linuxapp/kni/kni_vhost.c:622: !test_and_clear_bit(SOCK_ASYNC_NOSPACE,
2957
./lib/librte_eal/linuxapp/kni/kni_vhost.c:670: net, AF_UNSPEC, GFP_KERNEL, &kni_raw_proto)))
2958
./lib/librte_eal/linuxapp/kni/kni_vhost.c:673: err = sock_create_lite(AF_UNSPEC, SOCK_RAW, IPPROTO_RAW, &q->sock);
2959
./lib/librte_eal/linuxapp/kni/kni_vhost.c:706: q->sock->type = SOCK_RAW;
2960
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.c:85: case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
2961
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.c:86: case IXGBE_DEV_ID_82598AF_DUAL_PORT:
2962
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:95: {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT)},
2963
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:96: {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT)},
2964
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_mbx.h:63: * PF. The reverse is true if it is IXGBE_PF_*.
2965
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_mbx.h:88:#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
2966
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_type.h:40:#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
2967
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_type.h:41:#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
2968
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.c:349: case IXGBE_DEV_ID_82598AF_DUAL_PORT:
2969
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.c:350: case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
2970
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.c:1209: case IXGBE_DEV_ID_82598AF_DUAL_PORT:
2971
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.c:1210: case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
2972
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe.h:145:#define IXGBE_MAX_PF_MACVLANS 15
2973
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:220: reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2974
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:233: reg_cu |= IXGBE_TAF_ASM_PAUSE;
2975
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:234: reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
2976
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:254: reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
2977
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:2546: IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2978
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:2547: IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2979
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.h:62:#define IXGBE_TAF_SYM_PAUSE 0x400
2980
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.h:63:#define IXGBE_TAF_ASM_PAUSE 0x800
2981
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb.h:153:#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */
2982
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.h:47: * PF. The reverse is true if it is E1000_PF_*.
2983
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.h:72:#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
2984
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:1981: adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
2985
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:2227: br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
2986
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:6132: ping = E1000_PF_CONTROL_MSG;
2987
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:6570: adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
2988
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:6610: if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
2989
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:6716: if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
2990
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:9345: adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
2991
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:243: socket_fd = socket(AF_UNIX, SOCK_SEQPACKET, 0);
2992
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:250: addr.sun_family = AF_UNIX;
2993
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:339: socket_fd = socket(AF_UNIX, SOCK_SEQPACKET, 0);
2994
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:346: addr.sun_family = AF_UNIX;
2995
./lib/librte_eal/common/include/rte_pci_dev_ids.h:399:#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
2996
./lib/librte_eal/common/include/rte_pci_dev_ids.h:400:#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
2997
./lib/librte_eal/common/include/rte_pci_dev_ids.h:445:RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT)
2998
./lib/librte_eal/common/include/rte_pci_dev_ids.h:447: IXGBE_DEV_ID_82598AF_SINGLE_PORT)
2999
./lib/librte_cmdline/cmdline_parse_ipaddr.c:137: case AF_INET:
3000
./lib/librte_cmdline/cmdline_parse_ipaddr.c:139: case AF_INET6:
3001
./lib/librte_cmdline/cmdline_parse_ipaddr.c:356: my_inet_pton(AF_INET, ip_str, &ipaddr.addr.ipv4) == 1 &&
3002
./lib/librte_cmdline/cmdline_parse_ipaddr.c:358: ipaddr.family = AF_INET;
3003
./lib/librte_cmdline/cmdline_parse_ipaddr.c:364: my_inet_pton(AF_INET6, ip_str, &ipaddr.addr.ipv6) == 1) {
3004
./lib/librte_cmdline/cmdline_parse_ipaddr.c:365: ipaddr.family = AF_INET6;
3005
./examples/quota_watermark/qw/main.c:351: /* Start pipeline_connect() on all the available slave lcore but the last */
3006
./examples/kni/main.c:341: f_stop = rte_atomic32_read(&kni_stop);
3007
./examples/kni/main.c:351: f_stop = rte_atomic32_read(&kni_stop);
3008
./examples/packet_ordering/main.c:345:rx_thread(struct rte_ring *ring_out)
3009
./examples/packet_ordering/main.c:397:worker_thread(void *args_ptr)
3010
./examples/packet_ordering/main.c:462:send_thread(struct send_thread_args *args)
3011
./examples/packet_ordering/main.c:541:tx_thread(struct rte_ring *ring_in)
3012
./examples/packet_ordering/main.c:677: /* Start worker_thread() on all the available slave cores but the last 1 */
3013
./examples/packet_ordering/main.c:684: /* Start tx_thread() on the last slave core */
3014
./examples/packet_ordering/main.c:689: /* Start send_thread() on the last slave core */
3015
./examples/packet_ordering/main.c:694: /* Start rx_thread() on the master core */
3016
./examples/packet_ordering/main.c:695: rx_thread(rx_to_workers);
3017
./examples/l2fwd-jobstats/main.c:587: rte_atomic16_read(&qconf->stats_read_pending);
3018
./examples/tep_termination/main.c:1121: rx_total = rte_atomic64_read(
3019
./examples/tep_termination/main.c:1123: rx = rte_atomic64_read(
3020
./examples/tep_termination/main.c:1126: rx_ip_csum = rte_atomic64_read(
3021
./examples/tep_termination/main.c:1128: rx_l4_csum = rte_atomic64_read(
3022
./examples/vm_power_manager/guest_cli/vm_power_cli_guest.h:43:int guest_channel_host_connect(unsigned lcore_id);
3023
./examples/vm_power_manager/guest_cli/vm_power_cli_guest.h:47:void guest_channel_host_disconnect(unsigned lcore_id);
3024
./examples/vm_power_manager/channel_manager.c:231: return rte_atomic64_read(&vm_info->pcpu_mask[vcpu]);
3025
./examples/vm_power_manager/channel_manager.c:256: info->fd = socket(AF_UNIX, SOCK_STREAM, 0);
3026
./examples/vm_power_manager/channel_manager.c:281: ret = connect(info->fd, (struct sockaddr *)&sock_addr,
3027
./examples/vm_power_manager/channel_manager.c:612: info->pcpu_mask[i] = rte_atomic64_read(&vm_info->pcpu_mask[i]);
3028
./examples/vm_power_manager/channel_monitor.c:215: n_bytes = read(chan_info->fd, buffer, buffer_len);
3029
./examples/vhost/main.c:1438: eventfd_write(vq->callfd, (eventfd_t)1);
3030
./examples/vhost/main.c:1633: eventfd_write(vq->callfd, (eventfd_t)1);
3031
./examples/vhost/main.c:1781: eventfd_write(vq->callfd, (eventfd_t)1);
3032
./examples/vhost/main.c:2817: rx_total = rte_atomic64_read(
3033
./examples/vhost/main.c:2819: rx = rte_atomic64_read(
3034
./examples/netmap_compat/lib/compat_netmap.c:658: (netmap.mem = rte_zmalloc_socket(__func__, sz,
3035
./examples/vhost_xen/vhost_monitor.c:441: status = xs_read(watch.xs, th, path, &len);
3036
./examples/vhost_xen/vhost_monitor.c:523: buf = xs_read(watch.xs, th, vec[XS_WATCH_PATH],&len);
3037
./examples/vhost_xen/xenstore_parse.c:179: buf = xs_read(xs, XBT_NULL, path, len);
3038
./examples/vhost_xen/main.c:1396: rx_total = rte_atomic64_read(&dev_statistics[device_fh].rx_total);
3039
./examples/vhost_xen/main.c:1397: rx = rte_atomic64_read(&dev_statistics[device_fh].rx);
3040
./examples/ip_reassembly/main.c:859: if ((mtb = rte_zmalloc_socket(__func__, sz, RTE_CACHE_LINE_SIZE,
3041
./examples/performance-thread/l3fwd-thread/main.c:1880: while (rte_atomic16_read(&rx_counter) < n_rx_thread)
3042
./examples/performance-thread/l3fwd-thread/main.c:1883: while (rte_atomic16_read(&tx_counter) < n_tx_thread)
3043
./examples/performance-thread/l3fwd-thread/main.c:2206: while (rte_atomic16_read(&rx_counter) < n_rx_thread)
3044
./examples/performance-thread/common/lthread_diag.h:110:#define DIAG_COUNT(o, x) rte_atomic64_read(&((o)->count_##x))
3045
./examples/performance-thread/common/lthread_diag_api.h:217: * p2 = the lthread(s) that are signalled, or error code
3046
./examples/performance-thread/common/lthread_pool.h:136: struct qnode_pool *p = rte_malloc_socket(NULL,
3047
./examples/performance-thread/common/lthread_pool.h:143: p->stub = rte_malloc_socket(NULL,
3048
./examples/performance-thread/common/lthread_pool.h:277: n = rte_malloc_socket(NULL,
3049
./examples/performance-thread/common/lthread_sched.c:278: rte_calloc_socket(NULL, 1, sizeof(struct lthread_sched),
3050
./examples/performance-thread/common/lthread_sched.c:322: return (int)rte_atomic16_read(&num_schedulers);
3051
./examples/performance-thread/common/lthread_sched.c:330: return (int)rte_atomic16_read(&active_schedulers);
3052
./examples/performance-thread/common/lthread_sched.c:360: while (rte_atomic16_read(&active_schedulers) <
3053
./examples/performance-thread/common/lthread_sched.c:361: rte_atomic16_read(&num_schedulers))
3054
./examples/performance-thread/common/lthread_sched.c:483: while (rte_atomic16_read(&active_schedulers) <
3055
./examples/performance-thread/common/lthread_sched.c:484: rte_atomic16_read(&num_schedulers))
3056
./examples/performance-thread/common/lthread_sched.c:502: while (rte_atomic16_read(&active_schedulers) > 0)
3057
./examples/performance-thread/common/lthread_objcache.h:71: rte_malloc_socket(NULL, sizeof(struct lthread_objcache),
3058
./examples/performance-thread/common/lthread_objcache.h:128: rte_zmalloc_socket(NULL, obj_size,
3059
./examples/performance-thread/common/lthread_mutex.c:159: } while ((rte_atomic64_read(&m->count) == 1) &&
3060
./examples/performance-thread/common/lthread_mutex.c:229: while (rte_atomic64_read(&m->count) > 0) {
3061
./examples/performance-thread/common/lthread_queue.h:124: new_queue = rte_malloc_socket(NULL, sizeof(struct lthread_queue),
3062
./examples/performance-thread/pthread_shim/main.c:86:void *helloworld_pthread(void *arg);
3063
./examples/performance-thread/pthread_shim/main.c:87:void *helloworld_pthread(void *arg)
3064
./examples/performance-thread/pthread_shim/main.c:149:static void initial_lthread(void *args);
3065
./examples/performance-thread/pthread_shim/main.c:150:static void initial_lthread(void *args __attribute__((unused)))
3066
./examples/qos_sched/app_thread.c:82:app_rx_thread(struct thread_conf **confs)
3067
./examples/qos_sched/app_thread.c:105: rte_sched_port_pkt_write(rx_mbufs[i], subport, pipe,
3068
./examples/qos_sched/app_thread.c:172:app_tx_thread(struct thread_conf **confs)
3069
./examples/qos_sched/app_thread.c:210:app_worker_thread(struct thread_conf **confs)
3070
./examples/qos_sched/app_thread.c:244:app_mixed_thread(struct thread_conf **confs)
3071
./examples/qos_sched/main.h:177:void app_rx_thread(struct thread_conf **qconf);
3072
./examples/qos_sched/main.h:178:void app_tx_thread(struct thread_conf **qconf);
3073
./examples/qos_sched/main.h:179:void app_worker_thread(struct thread_conf **qconf);
3074
./examples/qos_sched/main.h:180:void app_mixed_thread(struct thread_conf **qconf);
3075
./examples/qos_sched/main.c:133: app_rx_thread(rx_confs);
3076
./examples/qos_sched/main.c:148: app_mixed_thread(wt_confs);
3077
./examples/qos_sched/main.c:163: app_tx_thread(tx_confs);
3078
./examples/qos_sched/main.c:170: app_worker_thread(wt_confs);
3079
./examples/ip_pipeline/app.h:922:int app_thread(void *arg);
3080
./examples/ip_pipeline/thread.c:176:app_thread(void *arg)
3081
./examples/ip_pipeline/cpu_core_map.c:452:cpu_core_map_get_n_cores_per_socket(struct cpu_core_map *map)
3082
./examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c:245: /* Read (entry), write (entry, color) */
3083
./examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c:330: /* Read (entry), compute, write (entry) */
3084
./examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c:365: /* Read (entry), write (entry, color) */
3085
./examples/ip_pipeline/pipeline/pipeline_common_be.c:67: rsp->status = rte_pipeline_port_in_stats_read(p->p,
3086
./examples/ip_pipeline/pipeline/pipeline_common_be.c:91: rsp->status = rte_pipeline_port_out_stats_read(p->p,
3087
./examples/ip_pipeline/pipeline/pipeline_common_be.c:115: rsp->status = rte_pipeline_table_stats_read(p->p,
3088
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:474:app_pipeline_fa_flow_policer_stats_read(struct app_params *app,
3089
./examples/ip_pipeline/pipeline/pipeline_flow_actions.c:1604: status = app_pipeline_fa_flow_policer_stats_read(app,
3090
./examples/ip_pipeline/pipeline/pipeline_passthrough_be.c:88: /* Read (dma_src), compute (dma_dst), write (dma_dst) */
3091
./examples/ip_pipeline/pipeline/pipeline_passthrough_be.c:92: /* Read (dma_dst), compute (hash), write (hash) */
3092
./examples/ip_pipeline/pipeline/pipeline_passthrough_be.c:137: /* Read (dma_src), compute (dma_dst), write (dma_dst) */
3093
./examples/ip_pipeline/pipeline/pipeline_passthrough_be.c:145: /* Read (dma_dst), compute (hash), write (hash) */
3094
./examples/ip_pipeline/pipeline/pipeline_flow_actions.h:69:app_pipeline_fa_flow_policer_stats_read(struct app_params *app,
3095
./examples/ip_pipeline/cpu_core_map.h:53:cpu_core_map_get_n_cores_per_socket(struct cpu_core_map *map);
3096
./examples/ethtool/ethtool-app/ethapp.c:251: if ((int)fwrite(buf_data,
3097
./examples/ethtool/ethtool-app/ethapp.c:309: if (fwrite(bytes_eeprom,
3098
./examples/exception_path/main.c:248: /* Ignore return val from write() */
3099
./examples/exception_path/main.c:249: int ret = write(tap_fd,
3100
./examples/exception_path/main.c:278: ret = read(tap_fd, rte_pktmbuf_mtod(m, void *),
3101
./drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c:179: qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
3102
./drivers/crypto/qat/qat_qp.c:216: if (rte_atomic16_read(&(qp->inflights16)) == 0) {
3103
./drivers/net/ixgbe/ixgbe_rxtx.c:2068: txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
3104
./drivers/net/ixgbe/ixgbe_rxtx.c:2115: txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
3105
./drivers/net/ixgbe/ixgbe_rxtx.c:2360: rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
3106
./drivers/net/ixgbe/ixgbe_rxtx.c:2437: rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
3107
./drivers/net/ixgbe/ixgbe_rxtx.c:2454: rte_zmalloc_socket("rxq->sw_sc_ring",
3108
./drivers/net/ixgbe/base/ixgbe_vf.c:593: if (mbx->ops.read(hw, &in_msg, 1, 0))
3109
./drivers/net/ixgbe/base/ixgbe_phy.c:1217: ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
3110
./drivers/net/ixgbe/base/ixgbe_phy.c:1223: ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
3111
./drivers/net/ixgbe/base/ixgbe_phy.c:1238: ret_val = hw->eeprom.ops.read(hw, data_offset,
3112
./drivers/net/ixgbe/base/ixgbe_phy.c:1244: ret_val = hw->eeprom.ops.read(hw, data_offset,
3113
./drivers/net/ixgbe/base/ixgbe_phy.c:1912: if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
3114
./drivers/net/ixgbe/base/ixgbe_phy.c:1929: if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
3115
./drivers/net/ixgbe/base/ixgbe_phy.c:1935: if (hw->eeprom.ops.read(hw, *list_offset, data_offset))
3116
./drivers/net/ixgbe/base/ixgbe_phy.c:1945: if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
3117
./drivers/net/ixgbe/base/ixgbe_mbx.c:58: ret_val = mbx->ops.read(hw, msg, size, mbx_id);
3118
./drivers/net/ixgbe/base/ixgbe_mbx.c:84: ret_val = mbx->ops.write(hw, msg, size, mbx_id);
3119
./drivers/net/ixgbe/base/ixgbe_mbx.c:237: ret_val = mbx->ops.read(hw, msg, size, mbx_id);
3120
./drivers/net/ixgbe/base/ixgbe_mbx.c:265: ret_val = mbx->ops.write(hw, msg, size, mbx_id);
3121
./drivers/net/ixgbe/base/ixgbe_82598.c:1337: hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
3122
./drivers/net/ixgbe/base/ixgbe_82598.c:1340: hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
3123
./drivers/net/ixgbe/base/ixgbe_common.c:242: ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, ®_bp);
3124
./drivers/net/ixgbe/base/ixgbe_common.c:345: ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
3125
./drivers/net/ixgbe/base/ixgbe_common.c:608: ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
3126
./drivers/net/ixgbe/base/ixgbe_common.c:614: ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
3127
./drivers/net/ixgbe/base/ixgbe_common.c:660: ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
3128
./drivers/net/ixgbe/base/ixgbe_common.c:682: ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
3129
./drivers/net/ixgbe/base/ixgbe_common.c:709: ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
3130
./drivers/net/ixgbe/base/ixgbe_common.c:719: ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
3131
./drivers/net/ixgbe/base/ixgbe_common.c:894: ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
3132
./drivers/net/ixgbe/base/ixgbe_common.c:2117: if (hw->eeprom.ops.read(hw, i, &word)) {
3133
./drivers/net/ixgbe/base/ixgbe_common.c:2126: if (hw->eeprom.ops.read(hw, i, &pointer)) {
3134
./drivers/net/ixgbe/base/ixgbe_common.c:2135: if (hw->eeprom.ops.read(hw, pointer, &length)) {
3135
./drivers/net/ixgbe/base/ixgbe_common.c:2144: if (hw->eeprom.ops.read(hw, j, &word)) {
3136
./drivers/net/ixgbe/base/ixgbe_common.c:2178: status = hw->eeprom.ops.read(hw, 0, &checksum);
3137
./drivers/net/ixgbe/base/ixgbe_common.c:2190: status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
3138
./drivers/net/ixgbe/base/ixgbe_common.c:2224: status = hw->eeprom.ops.read(hw, 0, &checksum);
3139
./drivers/net/ixgbe/base/ixgbe_common.c:2236: status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
3140
./drivers/net/ixgbe/base/ixgbe_common.c:3371: ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3141
./drivers/net/ixgbe/base/ixgbe_common.c:3378: ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3142
./drivers/net/ixgbe/base/ixgbe_common.c:3409: ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3143
./drivers/net/ixgbe/base/ixgbe_common.c:3416: ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3144
./drivers/net/ixgbe/base/ixgbe_common.c:3450: ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3145
./drivers/net/ixgbe/base/ixgbe_common.c:3493: ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
3146
./drivers/net/ixgbe/base/ixgbe_common.c:3546: hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3147
./drivers/net/ixgbe/base/ixgbe_common.c:4136: if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
3148
./drivers/net/ixgbe/base/ixgbe_common.c:4145: if (hw->eeprom.ops.read(hw, offset, &caps))
3149
./drivers/net/ixgbe/base/ixgbe_common.c:4152: if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
3150
./drivers/net/ixgbe/base/ixgbe_common.c:4158: if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
3151
./drivers/net/ixgbe/base/ixgbe_common.c:4189: status = hw->eeprom.ops.read(hw, offset, &caps);
3152
./drivers/net/ixgbe/base/ixgbe_common.c:4197: status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
3153
./drivers/net/ixgbe/base/ixgbe_common.c:4206: status = hw->eeprom.ops.read(hw, offset, &flags);
3154
./drivers/net/ixgbe/base/ixgbe_common.c:4297: hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
3155
./drivers/net/ixgbe/base/ixgbe_common.c:4687: status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
3156
./drivers/net/ixgbe/base/ixgbe_common.c:4696: status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
3157
./drivers/net/ixgbe/base/ixgbe_common.c:4711: status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
3158
./drivers/net/ixgbe/base/ixgbe_common.c:4766: if (hw->eeprom.ops.read(hw, offset, &ets_offset))
3159
./drivers/net/ixgbe/base/ixgbe_common.c:4772: if (hw->eeprom.ops.read(hw, offset, &ets_cfg))
3160
./drivers/net/ixgbe/base/ixgbe_common.c:4784: if (hw->eeprom.ops.read(hw, offset, &ets_sensor)) {
3161
./drivers/net/ixgbe/base/ixgbe_x550.c:2540: status = hw->eeprom.ops.read(hw, 0, &checksum);
3162
./drivers/net/ixgbe/base/ixgbe_x540.c:572: status = hw->eeprom.ops.read(hw, 0, &checksum);
3163
./drivers/net/ixgbe/base/ixgbe_x540.c:633: status = hw->eeprom.ops.read(hw, 0, &checksum);
3164
./drivers/net/ixgbe/base/ixgbe_82599.c:194: if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
3165
./drivers/net/ixgbe/base/ixgbe_82599.c:199: if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
3166
./drivers/net/ixgbe/base/ixgbe_82599.c:211: ret_val = hw->mac.ops.prot_autoc_write(hw,
3167
./drivers/net/ixgbe/base/ixgbe_82599.c:961: status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
3168
./drivers/net/ixgbe/base/ixgbe_82599.c:1147: status = hw->mac.ops.prot_autoc_write(hw,
3169
./drivers/net/ixgbe/base/ixgbe_82599.c:2268: if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
3170
./drivers/net/ixgbe/base/ixgbe_82599.c:2278: if (hw->eeprom.ops.read(hw, (fw_offset +
3171
./drivers/net/ixgbe/base/ixgbe_82599.c:2292: if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
3172
./drivers/net/ixgbe/base/ixgbe_82599.c:2323: status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
3173
./drivers/net/ixgbe/base/ixgbe_82599.c:2330: status = hw->eeprom.ops.read(hw, (fw_offset +
3174
./drivers/net/ixgbe/base/ixgbe_82599.c:2339: status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
3175
./drivers/net/szedata2/rte_eth_szedata2.c:1423: data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
3176
./drivers/net/szedata2/rte_eth_szedata2.c:1427: *internals = rte_zmalloc_socket(name, sizeof(**internals), 0,
3177
./drivers/net/mpipe/mpipe_tilegx.c:459: while (rte_atomic32_read(&priv->dp_count) != 0) {
3178
./drivers/net/mlx4/mlx4.c:470:priv_sysfs_read(const struct priv *priv, const char *entry,
3179
./drivers/net/mlx4/mlx4.c:487: ret = fread(buf, 1, size, file);
3180
./drivers/net/mlx4/mlx4.c:514:priv_sysfs_write(const struct priv *priv, const char *entry,
3181
./drivers/net/mlx4/mlx4.c:531: ret = fwrite(buf, 1, size, file);
3182
./drivers/net/mlx4/mlx4.c:562: ret = priv_sysfs_read(priv, name, value_str, (sizeof(value_str) - 1));
3183
./drivers/net/mlx4/mlx4.c:599: ret = priv_sysfs_write(priv, name, value_str, (sizeof(value_str) - 1));
3184
./drivers/net/mlx4/mlx4.c:624: int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
3185
./drivers/net/mlx4/mlx4.c:901: txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0,
3186
./drivers/net/mlx4/mlx4.c:987: rte_calloc_socket("TXQ", 1, sizeof(*elts), 0, txq->socket);
3187
./drivers/net/mlx4/mlx4.c:989: rte_calloc_socket("TXQ", 1, sizeof(*elts_linear), 0,
3188
./drivers/net/mlx4/mlx4.c:1951: txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0, socket);
3189
./drivers/net/mlx4/mlx4.c:2027: rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0,
3190
./drivers/net/mlx4/mlx4.c:2173: rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0,
3191
./drivers/net/mlx4/mlx4.c:3445: * thread (such as a control thread), may corrupt the pool.
3192
./drivers/net/mlx4/mlx4.c:3926: rxq = rte_calloc_socket("RXQ", 1, sizeof(*rxq), 0, socket);
3193
./drivers/net/i40e/base/i40e_adminq_cmd.h:1945:/* Direct write (direct 0x0900)
3194
./drivers/net/i40e/base/i40e_adminq_cmd.h:1946: * Direct read (direct 0x0902)
3195
./drivers/net/i40e/base/i40e_adminq_cmd.h:1957:/* Indirect write (indirect 0x0901)
3196
./drivers/net/i40e/base/i40e_adminq_cmd.h:1958: * Indirect read (indirect 0x0903)
3197
./drivers/net/i40e/base/i40e_adminq_cmd.h:1970:/* Done alternate write (direct 0x0904)
3198
./drivers/net/i40e/base/i40e_nvm.c:212: * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
3199
./drivers/net/i40e/base/i40e_nvm.c:230: * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
3200
./drivers/net/i40e/base/i40e_nvm.c:280: * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
3201
./drivers/net/i40e/base/i40e_nvm.c:301: * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
3202
./drivers/net/i40e/base/i40e_nvm.c:322: * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
3203
./drivers/net/i40e/base/i40e_nvm.c:355: * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
3204
./drivers/net/i40e/base/i40e_nvm.c:728:STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
3205
./drivers/net/i40e/base/i40e_nvm.c:731:STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
3206
./drivers/net/i40e/base/i40e_nvm.c:870: status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
3207
./drivers/net/i40e/base/i40e_nvm.c:881: status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
3208
./drivers/net/i40e/base/i40e_nvm.c:911: status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
3209
./drivers/net/i40e/base/i40e_nvm.c:927: status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
3210
./drivers/net/i40e/base/i40e_nvm.c:998: status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
3211
./drivers/net/i40e/base/i40e_nvm.c:1002: status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
3212
./drivers/net/i40e/base/i40e_nvm.c:1043: status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
3213
./drivers/net/i40e/base/i40e_nvm.c:1049: status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
3214
./drivers/net/i40e/base/i40e_nvm.c:1376:STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
3215
./drivers/net/i40e/base/i40e_nvm.c:1455:STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
3216
./drivers/net/i40e/base/i40e_prototype.h:318:enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw,
3217
./drivers/net/i40e/base/i40e_prototype.h:394:enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
3218
./drivers/net/i40e/base/i40e_prototype.h:399:enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw,
3219
./drivers/net/i40e/base/i40e_common.c:1046:STATIC enum i40e_status_code i40e_aq_mac_address_read(struct i40e_hw *hw,
3220
./drivers/net/i40e/base/i40e_common.c:1073:enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw,
3221
./drivers/net/i40e/base/i40e_common.c:1109: status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
3222
./drivers/net/i40e/base/i40e_common.c:1130: status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
3223
./drivers/net/i40e/base/i40e_common.c:3152: * @length: length of the section to be read (in bytes from the offset)
3224
./drivers/net/i40e/base/i40e_common.c:5273:enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw,
3225
./drivers/net/i40e/base/i40e_common.c:5346:enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
3226
./drivers/net/i40e/base/i40e_common.c:5643: status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
3227
./drivers/net/i40e/i40e_rxtx.c:2143: rxq = rte_zmalloc_socket("i40e rx queue",
3228
./drivers/net/i40e/i40e_rxtx.c:2194: rte_zmalloc_socket("i40e rx sw ring",
3229
./drivers/net/i40e/i40e_rxtx.c:2433: txq = rte_zmalloc_socket("i40e tx queue",
3230
./drivers/net/i40e/i40e_rxtx.c:2477: rte_zmalloc_socket("i40e tx sw ring",
3231
./drivers/net/i40e/i40e_rxtx.c:2945: txq = rte_zmalloc_socket("i40e fdir tx queue",
3232
./drivers/net/i40e/i40e_rxtx.c:2999: rxq = rte_zmalloc_socket("i40e fdir rx queue",
3233
./drivers/net/nfp/nfp_net.c:160:nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr)
3234
./drivers/net/nfp/nfp_net.c:1291: rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
3235
./drivers/net/nfp/nfp_net.c:1339: rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
3236
./drivers/net/nfp/nfp_net.c:1456: txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
3237
./drivers/net/nfp/nfp_net.c:1497: txq->txbufs = rte_zmalloc_socket("txq->txbufs",
3238
./drivers/net/nfp/nfp_net.c:1866: qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);
3239
./drivers/net/mlx5/mlx5_ethdev.c:167:priv_sysfs_read(const struct priv *priv, const char *entry,
3240
./drivers/net/mlx5/mlx5_ethdev.c:184: ret = fread(buf, 1, size, file);
3241
./drivers/net/mlx5/mlx5_ethdev.c:211:priv_sysfs_write(const struct priv *priv, const char *entry,
3242
./drivers/net/mlx5/mlx5_ethdev.c:228: ret = fwrite(buf, 1, size, file);
3243
./drivers/net/mlx5/mlx5_ethdev.c:259: ret = priv_sysfs_read(priv, name, value_str, (sizeof(value_str) - 1));
3244
./drivers/net/mlx5/mlx5_ethdev.c:296: ret = priv_sysfs_write(priv, name, value_str, (sizeof(value_str) - 1));
3245
./drivers/net/mlx5/mlx5_ethdev.c:321: int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
3246
./drivers/net/mlx5/mlx5_rxq.c:608: rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0,
3247
./drivers/net/mlx5/mlx5_rxq.c:746: rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0,
3248
./drivers/net/mlx5/mlx5_rxq.c:905: * thread (such as a control thread), may corrupt the pool.
3249
./drivers/net/mlx5/mlx5_rxq.c:1342: rxq = rte_calloc_socket("RXQ", 1, sizeof(*rxq), 0, socket);
3250
./drivers/net/mlx5/mlx5_txq.c:84: rte_calloc_socket("TXQ", 1, sizeof(*elts), 0, txq->socket);
3251
./drivers/net/mlx5/mlx5_txq.c:86: rte_calloc_socket("TXQ", 1, sizeof(*elts_linear), 0,
3252
./drivers/net/mlx5/mlx5_txq.c:472: txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0, socket);
3253
./drivers/net/fm10k/fm10k_ethdev.c:1664: q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
3254
./drivers/net/fm10k/fm10k_ethdev.c:1683: q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
3255
./drivers/net/fm10k/fm10k_ethdev.c:1824: q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
3256
./drivers/net/fm10k/fm10k_ethdev.c:1843: q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
3257
./drivers/net/fm10k/fm10k_ethdev.c:1873: q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
3258
./drivers/net/fm10k/fm10k_ethdev.c:2401: return hw->mbx.ops.connect(hw, &hw->mbx);
3259
./drivers/net/fm10k/fm10k_ethdev.c:2408: hw->mbx.ops.disconnect(hw, &hw->mbx);
3260
./drivers/net/fm10k/base/fm10k_pf.c:1008: vf_info->mbx.ops.disconnect(hw, &vf_info->mbx);
3261
./drivers/net/fm10k/base/fm10k_mbx.h:76: * +----------+ connect() +----------+
3262
./drivers/net/fm10k/base/fm10k_mbx.h:85: * +----------+ disconnect() +----------+
3263
./drivers/net/fm10k/base/fm10k_mbx.h:94: * this state is for the system to make the connect() call for the
3264
./drivers/net/fm10k/base/fm10k_mbx.h:105: * transition to disconnect on a call to disconnect();
3265
./drivers/net/fm10k/base/fm10k_mbx.c:825:STATIC s32 fm10k_mbx_read(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
3266
./drivers/net/fm10k/base/fm10k_mbx.c:854:STATIC void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
3267
./drivers/net/fm10k/base/fm10k_mbx.c:1192:STATIC s32 fm10k_mbx_process_connect(struct fm10k_hw *hw,
3268
./drivers/net/fm10k/base/fm10k_mbx.c:1281:STATIC s32 fm10k_mbx_process_disconnect(struct fm10k_hw *hw,
3269
./drivers/net/fm10k/base/fm10k_mbx.c:1392: err = fm10k_mbx_read(hw, mbx);
3270
./drivers/net/fm10k/base/fm10k_mbx.c:1403: err = fm10k_mbx_process_connect(hw, mbx);
3271
./drivers/net/fm10k/base/fm10k_mbx.c:1409: err = fm10k_mbx_process_disconnect(hw, mbx);
3272
./drivers/net/fm10k/base/fm10k_mbx.c:1425: fm10k_mbx_write(hw, mbx);
3273
./drivers/net/fm10k/base/fm10k_mbx.c:1443:STATIC void fm10k_mbx_disconnect(struct fm10k_hw *hw,
3274
./drivers/net/fm10k/base/fm10k_mbx.c:1484:STATIC s32 fm10k_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
3275
./drivers/net/fm10k/base/fm10k_mbx.c:1513: fm10k_mbx_write(hw, mbx);
3276
./drivers/net/fm10k/base/fm10k_mbx.c:1749:STATIC s32 fm10k_sm_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
3277
./drivers/net/fm10k/base/fm10k_mbx.c:1777: fm10k_mbx_write(hw, mbx);
3278
./drivers/net/fm10k/base/fm10k_mbx.c:1797:STATIC void fm10k_sm_mbx_disconnect(struct fm10k_hw *hw,
3279
./drivers/net/fm10k/base/fm10k_mbx.c:2154: err = fm10k_mbx_read(hw, mbx);
3280
./drivers/net/fm10k/base/fm10k_mbx.c:2181: fm10k_mbx_write(hw, mbx);
3281
./drivers/net/null/rte_eth_null.c:248: dummy_packet = rte_zmalloc_socket(NULL,
3282
./drivers/net/null/rte_eth_null.c:281: dummy_packet = rte_zmalloc_socket(NULL,
3283
./drivers/net/null/rte_eth_null.c:516: data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
3284
./drivers/net/null/rte_eth_null.c:520: internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
3285
./drivers/net/xenvirt/rte_eth_xenvirt.c:220: xenstore_write(key_str, val_str);
3286
./drivers/net/xenvirt/rte_eth_xenvirt.c:240: xenstore_write(key_str, val_str);
3287
./drivers/net/xenvirt/rte_eth_xenvirt.c:278: if (xenstore_write(key_str, val_str))
3288
./drivers/net/xenvirt/rte_eth_xenvirt.c:429: if (rv == -1 || xenstore_write(key_str, val_str) == -1) {
3289
./drivers/net/xenvirt/rte_eth_xenvirt.c:658: data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
3290
./drivers/net/xenvirt/rte_eth_xenvirt.c:662: internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
3291
./drivers/net/xenvirt/rte_xen_lib.c:115: (rc = pread(fd, pa, nb, ofs)) < 0 ||
3292
./drivers/net/xenvirt/rte_xen_lib.c:288: buf = xs_read(xs, XBT_NULL, "domid", &len);
3293
./drivers/net/xenvirt/rte_xen_lib.c:332:xenstore_write(const char *key_str, const char *val_str)
3294
./drivers/net/xenvirt/rte_xen_lib.c:349: if (xs_write(xs, XBT_NULL, grant_path, val_str, len) == false) {
3295
./drivers/net/xenvirt/rte_xen_lib.c:434: if (xenstore_write(key_str, val_str) == -1)
3296
./drivers/net/xenvirt/rte_xen_lib.c:442: if (xenstore_write(key_str, val_str) == -1)
3297
./drivers/net/xenvirt/rte_xen_lib.h:99:xenstore_write(const char *key_str, const char *val_str);
3298
./drivers/net/pcap/rte_eth_pcap.c:817: data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
3299
./drivers/net/pcap/rte_eth_pcap.c:821: *internals = rte_zmalloc_socket(name, sizeof(**internals), 0, numa_node);
3300
./drivers/net/bonding/rte_eth_bond_api.c:189: internals = rte_zmalloc_socket(name, sizeof(*internals), 0, socket_id);
3301
./drivers/net/bonding/rte_eth_bond_api.c:210: eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0,
3302
./drivers/net/bonding/rte_eth_bond_pmd.c:1660: rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
3303
./drivers/net/bonding/rte_eth_bond_pmd.c:1684: rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
3304
./drivers/net/bnx2x/bnx2x.h:1951:static inline int pci_read(struct bnx2x_softc *sc, size_t addr,
3305
./drivers/net/bnx2x/bnx2x.c:525:uint32_t elink_cb_reg_read(struct bnx2x_softc *sc, uint32_t reg_addr)
3306
./drivers/net/bnx2x/bnx2x.c:530:void elink_cb_reg_write(struct bnx2x_softc *sc, uint32_t reg_addr, uint32_t val)
3307
./drivers/net/bnx2x/bnx2x.c:585:static int bnx2x_gpio_read(struct bnx2x_softc *sc, int gpio_num, uint8_t port)
3308
./drivers/net/bnx2x/bnx2x.c:610:bnx2x_gpio_write(struct bnx2x_softc *sc, int gpio_num, uint32_t mode, uint8_t port)
3309
./drivers/net/bnx2x/bnx2x.c:661:bnx2x_gpio_mult_write(struct bnx2x_softc *sc, uint8_t pins, uint32_t mode)
3310
./drivers/net/bnx2x/bnx2x.c:704:bnx2x_gpio_int_write(struct bnx2x_softc *sc, int gpio_num, uint32_t mode,
3311
./drivers/net/bnx2x/bnx2x.c:751:elink_cb_gpio_read(struct bnx2x_softc * sc, uint16_t gpio_num, uint8_t port)
3312
./drivers/net/bnx2x/bnx2x.c:753: return bnx2x_gpio_read(sc, gpio_num, port);
3313
./drivers/net/bnx2x/bnx2x.c:756:uint8_t elink_cb_gpio_write(struct bnx2x_softc * sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */
3314
./drivers/net/bnx2x/bnx2x.c:759: return bnx2x_gpio_write(sc, gpio_num, mode, port);
3315
./drivers/net/bnx2x/bnx2x.c:763:elink_cb_gpio_mult_write(struct bnx2x_softc * sc, uint8_t pins,
3316
./drivers/net/bnx2x/bnx2x.c:766: return bnx2x_gpio_mult_write(sc, pins, mode);
3317
./drivers/net/bnx2x/bnx2x.c:769:uint8_t elink_cb_gpio_int_write(struct bnx2x_softc * sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */
3318
./drivers/net/bnx2x/bnx2x.c:772: return bnx2x_gpio_int_write(sc, gpio_num, mode, port);
3319
./drivers/net/bnx2x/bnx2x.c:5801: pci_read(sc, (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), &pmcsr,
3320
./drivers/net/bnx2x/bnx2x.c:7568:static uint32_t bnx2x_pcie_capability_read(struct bnx2x_softc *sc, int reg)
3321
./drivers/net/bnx2x/bnx2x.c:7579: pci_read(sc, (caps->addr + reg), &ret, 2);
3322
./drivers/net/bnx2x/bnx2x.c:7590: return (bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA) &
3323
./drivers/net/bnx2x/bnx2x.c:7620: link_status = bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA);
3324
./drivers/net/bnx2x/bnx2x.c:8349: * Enable internal target-read (in case we are probed after PF
3325
./drivers/net/bnx2x/bnx2x.c:9544: pci_read(sc, PCI_STATUS, &status, 2);
3326
./drivers/net/bnx2x/bnx2x.c:9547: pci_read(sc, PCIR_STATUS, &status, 2);
3327
./drivers/net/bnx2x/bnx2x.c:9555: pci_read(sc, PCI_CAPABILITY_LIST, &pci_cap.next, 1);
3328
./drivers/net/bnx2x/bnx2x.c:9557: pci_read(sc, PCIR_CAP_PTR, &pci_cap.next, 1);
3329
./drivers/net/bnx2x/bnx2x.c:9561: pci_read(sc, pci_cap.next & ~3, &pci_cap, 2);
3330
./drivers/net/bnx2x/bnx2x.c:9616: if (read(f, sc->firmware, st.st_size) != st.st_size) {
3331
./drivers/net/bnx2x/bnx2x.c:9717: pci_read(sc,
3332
./drivers/net/bnx2x/bnx2x.c:9913: devctl = bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL);
3333
./drivers/net/bnx2x/elink.c:894: elink_cl45_write(_sc, _phy, \
3334
./drivers/net/bnx2x/elink.c:900: elink_cl45_read(_sc, _phy, \
3335
./drivers/net/bnx2x/elink.c:1109: elink_cb_gpio_write(sc, gpio_num, (uint8_t) val, gpio_port);
3336
./drivers/net/bnx2x/elink.c:1123: *val = elink_cb_gpio_read(sc, gpio_num, gpio_port);
3337
./drivers/net/bnx2x/elink.c:1261: elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MODE,
3338
./drivers/net/bnx2x/elink.c:1278: elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MAC_MATCH, val);
3339
./drivers/net/bnx2x/elink.c:1283: elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MAC_MATCH + 4, val);
3340
./drivers/net/bnx2x/elink.c:1635: elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MODE,
3341
./drivers/net/bnx2x/elink.c:1676: elink_cb_reg_write(sc, emac_base + EMAC_REG_RX_PFC_MODE, 0);
3342
./drivers/net/bnx2x/elink.c:1680: elink_cb_reg_write(sc, emac_base + EMAC_REG_RX_PFC_MODE,
3343
./drivers/net/bnx2x/elink.c:1685: elink_cb_reg_write(sc, emac_base + EMAC_REG_RX_PFC_PARAM,
3344
./drivers/net/bnx2x/elink.c:1692: elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_RX_MODE, val);
3345
./drivers/net/bnx2x/elink.c:1700: elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MODE, val);
3346
./drivers/net/bnx2x/elink.c:1706: elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_RX_MTU_SIZE,
3347
./drivers/net/bnx2x/elink.c:2405:static elink_status_t elink_cl22_write(struct bnx2x_softc *sc,
3348
./drivers/net/bnx2x/elink.c:2439:static elink_status_t elink_cl22_read(struct bnx2x_softc *sc,
3349
./drivers/net/bnx2x/elink.c:2480:static elink_status_t elink_cl45_read(struct bnx2x_softc *sc,
3350
./drivers/net/bnx2x/elink.c:2545: elink_cl45_read(sc, phy, devad, 0xf, &temp_val);
3351
./drivers/net/bnx2x/elink.c:2555:static elink_status_t elink_cl45_write(struct bnx2x_softc *sc,
3352
./drivers/net/bnx2x/elink.c:2618: elink_cl45_read(sc, phy, devad, 0xf, &temp_val);
3353
./drivers/net/bnx2x/elink.c:2778: elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x0);
3354
./drivers/net/bnx2x/elink.c:2805: elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, val);
3355
./drivers/net/bnx2x/elink.c:2833: elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, &adv);
3356
./drivers/net/bnx2x/elink.c:2834: elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LP_EEE_ADV, &lp);
3357
./drivers/net/bnx2x/elink.c:2900:static elink_status_t elink_bsc_read(struct elink_params *params,
3358
./drivers/net/bnx2x/elink.c:2987:static void elink_cl45_read_or_write(struct bnx2x_softc *sc,
3359
./drivers/net/bnx2x/elink.c:2992: elink_cl45_read(sc, phy, devad, reg, &val);
3360
./drivers/net/bnx2x/elink.c:2993: elink_cl45_write(sc, phy, devad, reg, val | or_val);
3361
./drivers/net/bnx2x/elink.c:2996:static void elink_cl45_read_and_write(struct bnx2x_softc *sc,
3362
./drivers/net/bnx2x/elink.c:3002: elink_cl45_read(sc, phy, devad, reg, &val);
3363
./drivers/net/bnx2x/elink.c:3003: elink_cl45_write(sc, phy, devad, reg, val & and_val);
3364
./drivers/net/bnx2x/elink.c:3247: elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
3365
./drivers/net/bnx2x/elink.c:3264: elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
3366
./drivers/net/bnx2x/elink.c:3304: elink_cl22_read(sc, phy, 0x4, &ld_pause);
3367
./drivers/net/bnx2x/elink.c:3305: elink_cl22_read(sc, phy, 0x5, &lp_pause);
3368
./drivers/net/bnx2x/elink.c:3309: elink_cl45_read(sc, phy,
3369
./drivers/net/bnx2x/elink.c:3316: elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
3370
./drivers/net/bnx2x/elink.c:3318: elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
3371
./drivers/net/bnx2x/elink.c:3321: elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
3372
./drivers/net/bnx2x/elink.c:3323: elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
3373
./drivers/net/bnx2x/elink.c:3333: elink_cl45_read(sc, phy,
3374
./drivers/net/bnx2x/elink.c:3336: elink_cl45_read(sc, phy,
3375
./drivers/net/bnx2x/elink.c:3413: elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
3376
./drivers/net/bnx2x/elink.c:3417: elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
3377
./drivers/net/bnx2x/elink.c:3451: elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
3378
./drivers/net/bnx2x/elink.c:3465: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3379
./drivers/net/bnx2x/elink.c:3467: elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
3380
./drivers/net/bnx2x/elink.c:3479: elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
3381
./drivers/net/bnx2x/elink.c:3505: elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
3382
./drivers/net/bnx2x/elink.c:3508: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3383
./drivers/net/bnx2x/elink.c:3512: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3384
./drivers/net/bnx2x/elink.c:3523: elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, addr, 0x1);
3385
./drivers/net/bnx2x/elink.c:3535: elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
3386
./drivers/net/bnx2x/elink.c:3543: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3387
./drivers/net/bnx2x/elink.c:3548: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3388
./drivers/net/bnx2x/elink.c:3551: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3389
./drivers/net/bnx2x/elink.c:3553: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3390
./drivers/net/bnx2x/elink.c:3557: elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
3391
./drivers/net/bnx2x/elink.c:3561: elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
3392
./drivers/net/bnx2x/elink.c:3572: elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
3393
./drivers/net/bnx2x/elink.c:3581: elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
3394
./drivers/net/bnx2x/elink.c:3585: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3395
./drivers/net/bnx2x/elink.c:3595: elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
3396
./drivers/net/bnx2x/elink.c:3599: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3397
./drivers/net/bnx2x/elink.c:3631: elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
3398
./drivers/net/bnx2x/elink.c:3639: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3399
./drivers/net/bnx2x/elink.c:3642: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3400
./drivers/net/bnx2x/elink.c:3645: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3401
./drivers/net/bnx2x/elink.c:3648: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3402
./drivers/net/bnx2x/elink.c:3653: elink_cl45_write(sc, phy, MDIO_PMA_DEVAD,
3403
./drivers/net/bnx2x/elink.c:3656: elink_cl45_write(sc, phy, MDIO_PMA_DEVAD,
3404
./drivers/net/bnx2x/elink.c:3660: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3405
./drivers/net/bnx2x/elink.c:3664: elink_cl45_write(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_TX66_CONTROL, 0x9);
3406
./drivers/net/bnx2x/elink.c:3667: elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
3407
./drivers/net/bnx2x/elink.c:3671: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3408
./drivers/net/bnx2x/elink.c:3673: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3409
./drivers/net/bnx2x/elink.c:3687: elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
3410
./drivers/net/bnx2x/elink.c:3691: elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
3411
./drivers/net/bnx2x/elink.c:3695: elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
3412
./drivers/net/bnx2x/elink.c:3698: elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
3413
./drivers/net/bnx2x/elink.c:3702: elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
3414
./drivers/net/bnx2x/elink.c:3706: elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
3415
./drivers/net/bnx2x/elink.c:3710: elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
3416
./drivers/net/bnx2x/elink.c:3715: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3417
./drivers/net/bnx2x/elink.c:3717: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3418
./drivers/net/bnx2x/elink.c:3722: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3419
./drivers/net/bnx2x/elink.c:3757: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3420
./drivers/net/bnx2x/elink.c:3762: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3421
./drivers/net/bnx2x/elink.c:3765: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3422
./drivers/net/bnx2x/elink.c:3770: elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
3423
./drivers/net/bnx2x/elink.c:3774: elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
3424
./drivers/net/bnx2x/elink.c:3780: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3425
./drivers/net/bnx2x/elink.c:3784: elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
3426
./drivers/net/bnx2x/elink.c:3788: elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
3427
./drivers/net/bnx2x/elink.c:3802: elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
3428
./drivers/net/bnx2x/elink.c:3807: elink_cl45_read_and_write(sc, phy, MDIO_PMA_DEVAD,
3429
./drivers/net/bnx2x/elink.c:3809: elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
3430
./drivers/net/bnx2x/elink.c:3811: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3431
./drivers/net/bnx2x/elink.c:3815: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3432
./drivers/net/bnx2x/elink.c:3819: elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
3433
./drivers/net/bnx2x/elink.c:3822: elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
3434
./drivers/net/bnx2x/elink.c:3825: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3435
./drivers/net/bnx2x/elink.c:3829: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3436
./drivers/net/bnx2x/elink.c:3831: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3437
./drivers/net/bnx2x/elink.c:3838: elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
3438
./drivers/net/bnx2x/elink.c:3848: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3439
./drivers/net/bnx2x/elink.c:3852: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3440
./drivers/net/bnx2x/elink.c:3855: elink_cl45_write(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_SCW0, 0xE070);
3441
./drivers/net/bnx2x/elink.c:3857: elink_cl45_write(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_SCW1, 0xC0D0);
3442
./drivers/net/bnx2x/elink.c:3859: elink_cl45_write(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_SCW2, 0xA0B0);
3443
./drivers/net/bnx2x/elink.c:3861: elink_cl45_write(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_SCW3, 0x8090);
3444
./drivers/net/bnx2x/elink.c:3863: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3445
./drivers/net/bnx2x/elink.c:3866: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3446
./drivers/net/bnx2x/elink.c:3869: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3447
./drivers/net/bnx2x/elink.c:3872: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3448
./drivers/net/bnx2x/elink.c:3876: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3449
./drivers/net/bnx2x/elink.c:3880: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3450
./drivers/net/bnx2x/elink.c:3884: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3451
./drivers/net/bnx2x/elink.c:3888: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3452
./drivers/net/bnx2x/elink.c:3902: elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
3453
./drivers/net/bnx2x/elink.c:3909: elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
3454
./drivers/net/bnx2x/elink.c:3914: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3455
./drivers/net/bnx2x/elink.c:3936: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3456
./drivers/net/bnx2x/elink.c:3941: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3457
./drivers/net/bnx2x/elink.c:3947: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3458
./drivers/net/bnx2x/elink.c:3954: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3459
./drivers/net/bnx2x/elink.c:3958: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3460
./drivers/net/bnx2x/elink.c:3960: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3461
./drivers/net/bnx2x/elink.c:3965: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3462
./drivers/net/bnx2x/elink.c:3970: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3463
./drivers/net/bnx2x/elink.c:3980: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3464
./drivers/net/bnx2x/elink.c:3986: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3465
./drivers/net/bnx2x/elink.c:3988: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3466
./drivers/net/bnx2x/elink.c:4016: elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
3467
./drivers/net/bnx2x/elink.c:4020: elink_cl45_write(sc, phy, wc_regs[i].devad, wc_regs[i].reg,
3468
./drivers/net/bnx2x/elink.c:4024: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3469
./drivers/net/bnx2x/elink.c:4079: gpio_val = elink_cb_gpio_read(sc, gpio_num, gpio_port);
3470
./drivers/net/bnx2x/elink.c:4096: elink_cl45_read(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_0,
3471
./drivers/net/bnx2x/elink.c:4127: elink_cl45_read(sc, phy, MDIO_WC_DEVAD, 0x81d1,
3472
./drivers/net/bnx2x/elink.c:4141: elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
3473
./drivers/net/bnx2x/elink.c:4317: elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
3474
./drivers/net/bnx2x/elink.c:4320: elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
3475
./drivers/net/bnx2x/elink.c:4327: elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
3476
./drivers/net/bnx2x/elink.c:4330: elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
3477
./drivers/net/bnx2x/elink.c:4334: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3478
./drivers/net/bnx2x/elink.c:4339: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3479
./drivers/net/bnx2x/elink.c:4342: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3480
./drivers/net/bnx2x/elink.c:4351: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3481
./drivers/net/bnx2x/elink.c:4375: elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
3482
./drivers/net/bnx2x/elink.c:4380: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3483
./drivers/net/bnx2x/elink.c:4385: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3484
./drivers/net/bnx2x/elink.c:4392: elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
3485
./drivers/net/bnx2x/elink.c:4395: elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
3486
./drivers/net/bnx2x/elink.c:5363: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3487
./drivers/net/bnx2x/elink.c:5365: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3488
./drivers/net/bnx2x/elink.c:5371: elink_cl45_read(sc, phy, MDIO_WC_DEVAD, 1, &temp_link_up);
3489
./drivers/net/bnx2x/elink.c:5372: elink_cl45_read(sc, phy, MDIO_WC_DEVAD, 1, &link_up);
3490
./drivers/net/bnx2x/elink.c:5379: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3491
./drivers/net/bnx2x/elink.c:5387: elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
3492
./drivers/net/bnx2x/elink.c:5389: elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
3493
./drivers/net/bnx2x/elink.c:5397: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3494
./drivers/net/bnx2x/elink.c:5405: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3495
./drivers/net/bnx2x/elink.c:5421: elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
3496
./drivers/net/bnx2x/elink.c:5432: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3497
./drivers/net/bnx2x/elink.c:5445: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3498
./drivers/net/bnx2x/elink.c:5448: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3499
./drivers/net/bnx2x/elink.c:5669: elink_cl22_read(sc, phy, MDIO_PMA_REG_CTRL, &ctrl);
3500
./drivers/net/bnx2x/elink.c:5671: elink_cl45_read(sc, phy,
3501
./drivers/net/bnx2x/elink.c:5871: elink_cl45_write(sc, phy,
3502
./drivers/net/bnx2x/elink.c:5876: elink_cl45_write(sc, phy,
3503
./drivers/net/bnx2x/elink.c:5893: elink_cl45_read(sc, phy, 5,
3504
./drivers/net/bnx2x/elink.c:5897: elink_cl45_write(sc, phy, 5,
3505
./drivers/net/bnx2x/elink.c:5938: tmp = elink_cb_reg_read(sc, emac_base + EMAC_REG_EMAC_LED);
3506
./drivers/net/bnx2x/elink.c:5947: elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_LED, tmp);
3507
./drivers/net/bnx2x/elink.c:5969: elink_cb_reg_read(sc,
3508
./drivers/net/bnx2x/elink.c:5972: elink_cb_reg_write(sc,
3509
./drivers/net/bnx2x/elink.c:6003: elink_cb_reg_read(sc,
3510
./drivers/net/bnx2x/elink.c:6005: elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_LED,
3511
./drivers/net/bnx2x/elink.c:6033: tmp = elink_cb_reg_read(sc, emac_base + EMAC_REG_EMAC_LED);
3512
./drivers/net/bnx2x/elink.c:6034: elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_LED,
3513
./drivers/net/bnx2x/elink.c:6145: elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1,
3514
./drivers/net/bnx2x/elink.c:6147: elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
3515
./drivers/net/bnx2x/elink.c:6561: elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1,
3516
./drivers/net/bnx2x/elink.c:6564: elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1,
3517
./drivers/net/bnx2x/elink.c:6584: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
3518
./drivers/net/bnx2x/elink.c:6586: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
3519
./drivers/net/bnx2x/elink.c:6598: elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_STATUS, &val);
3520
./drivers/net/bnx2x/elink.c:6599: elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_STATUS, &val);
3521
./drivers/net/bnx2x/elink.c:6625: elink_cl45_read(sc, phy,
3522
./drivers/net/bnx2x/elink.c:6629: elink_cl45_read(sc, phy,
3523
./drivers/net/bnx2x/elink.c:6653: elink_cl45_write(sc, phy,
3524
./drivers/net/bnx2x/elink.c:6657: elink_cl45_write(sc, phy,
3525
./drivers/net/bnx2x/elink.c:6660: elink_cl45_write(sc, phy,
3526
./drivers/net/bnx2x/elink.c:6664: elink_cl45_write(sc, phy,
3527
./drivers/net/bnx2x/elink.c:6670: elink_cl45_write(sc, phy,
3528
./drivers/net/bnx2x/elink.c:6690: elink_cl45_read(sc, phy,
3529
./drivers/net/bnx2x/elink.c:6693: elink_cl45_read(sc, phy,
3530
./drivers/net/bnx2x/elink.c:6703: elink_cl45_write(sc, phy,
3531
./drivers/net/bnx2x/elink.c:6724: elink_cl45_read(sc, phy,
3532
./drivers/net/bnx2x/elink.c:6732: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER2, &val);
3533
./drivers/net/bnx2x/elink.c:6746: elink_cl45_read(sc, phy,
3534
./drivers/net/bnx2x/elink.c:6760: elink_cl45_read(sc, phy,
3535
./drivers/net/bnx2x/elink.c:6778: elink_cl45_read(sc, phy,
3536
./drivers/net/bnx2x/elink.c:6800: elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
3537
./drivers/net/bnx2x/elink.c:6801: elink_cl45_write(sc, phy,
3538
./drivers/net/bnx2x/elink.c:6803: elink_cl45_write(sc, phy,
3539
./drivers/net/bnx2x/elink.c:6805: elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
3540
./drivers/net/bnx2x/elink.c:6814: elink_cl45_read(sc, phy,
3541
./drivers/net/bnx2x/elink.c:6837: elink_cl45_write(sc, phy,
3542
./drivers/net/bnx2x/elink.c:6850: elink_cl45_write(sc, phy,
3543
./drivers/net/bnx2x/elink.c:6853: elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
3544
./drivers/net/bnx2x/elink.c:6873: elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
3545
./drivers/net/bnx2x/elink.c:6876: elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1,
3546
./drivers/net/bnx2x/elink.c:6882: elink_cl45_read(sc, phy,
3547
./drivers/net/bnx2x/elink.c:6885: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
3548
./drivers/net/bnx2x/elink.c:6894: elink_cl45_read(sc, phy,
3549
./drivers/net/bnx2x/elink.c:6897: elink_cl45_write(sc, phy,
3550
./drivers/net/bnx2x/elink.c:6910: elink_cl45_read(sc, phy,
3551
./drivers/net/bnx2x/elink.c:6912: elink_cl45_write(sc, phy,
3552
./drivers/net/bnx2x/elink.c:6921: elink_cl45_write(sc, phy,
3553
./drivers/net/bnx2x/elink.c:6947: elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val);
3554
./drivers/net/bnx2x/elink.c:6948: elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, &tmp1);
3555
./drivers/net/bnx2x/elink.c:6955: elink_cl45_read(sc, phy,
3556
./drivers/net/bnx2x/elink.c:6968: elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, tmp1);
3557
./drivers/net/bnx2x/elink.c:6971: elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &tmp1);
3558
./drivers/net/bnx2x/elink.c:6972: elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD,
3559
./drivers/net/bnx2x/elink.c:6977: elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
3560
./drivers/net/bnx2x/elink.c:6984: elink_cl45_write(sc, phy,
3561
./drivers/net/bnx2x/elink.c:6989: elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, &tmp1);
3562
./drivers/net/bnx2x/elink.c:6991: elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, tmp1);
3563
./drivers/net/bnx2x/elink.c:6997: elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
3564
./drivers/net/bnx2x/elink.c:7013: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
3565
./drivers/net/bnx2x/elink.c:7018: elink_cl45_read(sc, phy, MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
3566
./drivers/net/bnx2x/elink.c:7019: elink_cl45_read(sc, phy, MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1);
3567
./drivers/net/bnx2x/elink.c:7022: elink_cl45_read(sc, phy,
3568
./drivers/net/bnx2x/elink.c:7026: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
3569
./drivers/net/bnx2x/elink.c:7031: elink_cl45_read(sc, phy, MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
3570
./drivers/net/bnx2x/elink.c:7034: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
3571
./drivers/net/bnx2x/elink.c:7035: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
3572
./drivers/net/bnx2x/elink.c:7043: elink_cl45_read(sc, phy,
3573
./drivers/net/bnx2x/elink.c:7045: elink_cl45_read(sc, phy,
3574
./drivers/net/bnx2x/elink.c:7049: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
3575
./drivers/net/bnx2x/elink.c:7050: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
3576
./drivers/net/bnx2x/elink.c:7060: elink_cl45_write(sc, phy,
3577
./drivers/net/bnx2x/elink.c:7065: elink_cl45_write(sc, phy,
3578
./drivers/net/bnx2x/elink.c:7069: elink_cl45_read(sc, phy,
3579
./drivers/net/bnx2x/elink.c:7099: elink_cl45_read(sc, phy,
3580
./drivers/net/bnx2x/elink.c:7112: elink_cl45_write(sc, phy,
3581
./drivers/net/bnx2x/elink.c:7122: elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
3582
./drivers/net/bnx2x/elink.c:7147: elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
3583
./drivers/net/bnx2x/elink.c:7162: elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
3584
./drivers/net/bnx2x/elink.c:7166: elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
3585
./drivers/net/bnx2x/elink.c:7169: elink_cl45_write(sc, phy,
3586
./drivers/net/bnx2x/elink.c:7171: elink_cl45_write(sc, phy,
3587
./drivers/net/bnx2x/elink.c:7173: elink_cl45_write(sc, phy,
3588
./drivers/net/bnx2x/elink.c:7175: elink_cl45_write(sc, phy, MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1);
3589
./drivers/net/bnx2x/elink.c:7189: elink_cl45_read(sc, phy,
3590
./drivers/net/bnx2x/elink.c:7193: elink_cl45_read(sc, phy,
3591
./drivers/net/bnx2x/elink.c:7197: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
3592
./drivers/net/bnx2x/elink.c:7199: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xc809, &val1);
3593
./drivers/net/bnx2x/elink.c:7200: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xc809, &val1);
3594
./drivers/net/bnx2x/elink.c:7234: elink_cl45_write(sc, phy,
3595
./drivers/net/bnx2x/elink.c:7271: elink_cl45_read(sc, phy,
3596
./drivers/net/bnx2x/elink.c:7280: elink_cl45_write(sc, phy,
3597
./drivers/net/bnx2x/elink.c:7298: elink_cb_gpio_write(sc, gpio_pin, gpio_mode, gpio_port);
3598
./drivers/net/bnx2x/elink.c:7336: elink_cl45_write(sc, phy,
3599
./drivers/net/bnx2x/elink.c:7341: elink_cl45_write(sc, phy,
3600
./drivers/net/bnx2x/elink.c:7346: elink_cl45_write(sc, phy,
3601
./drivers/net/bnx2x/elink.c:7352: elink_cl45_read(sc, phy,
3602
./drivers/net/bnx2x/elink.c:7371: elink_cl45_read(sc, phy,
3603
./drivers/net/bnx2x/elink.c:7379: elink_cl45_read(sc, phy,
3604
./drivers/net/bnx2x/elink.c:7443: rc = elink_bsc_read(params, sc, dev_addr, addr32, 0, byte_cnt,
3605
./drivers/net/bnx2x/elink.c:7479: elink_cl45_write(sc, phy,
3606
./drivers/net/bnx2x/elink.c:7485: elink_cl45_read(sc, phy,
3607
./drivers/net/bnx2x/elink.c:7489: elink_cl45_write(sc, phy,
3608
./drivers/net/bnx2x/elink.c:7495: elink_cl45_write(sc, phy,
3609
./drivers/net/bnx2x/elink.c:7499: elink_cl45_write(sc, phy,
3610
./drivers/net/bnx2x/elink.c:7504: elink_cl45_write(sc, phy,
3611
./drivers/net/bnx2x/elink.c:7514: elink_cl45_read(sc, phy,
3612
./drivers/net/bnx2x/elink.c:7533: elink_cl45_read(sc, phy,
3613
./drivers/net/bnx2x/elink.c:7541: elink_cl45_read(sc, phy,
3614
./drivers/net/bnx2x/elink.c:7875: elink_cl45_write(sc, phy,
3615
./drivers/net/bnx2x/elink.c:7885: elink_cl45_read(sc, phy,
3616
./drivers/net/bnx2x/elink.c:7892: elink_cl45_write(sc, phy,
3617
./drivers/net/bnx2x/elink.c:7906: elink_cl45_write(sc, phy,
3618
./drivers/net/bnx2x/elink.c:7908: elink_cl45_write(sc, phy,
3619
./drivers/net/bnx2x/elink.c:7910: elink_cl45_write(sc, phy,
3620
./drivers/net/bnx2x/elink.c:7913: elink_cl45_write(sc, phy,
3621
./drivers/net/bnx2x/elink.c:7925: elink_cl45_read(sc, phy,
3622
./drivers/net/bnx2x/elink.c:7929: elink_cl45_write(sc, phy,
3623
./drivers/net/bnx2x/elink.c:7934: elink_cl45_read(sc, phy,
3624
./drivers/net/bnx2x/elink.c:7937: elink_cl45_write(sc, phy,
3625
./drivers/net/bnx2x/elink.c:7942: elink_cl45_write(sc, phy,
3626
./drivers/net/bnx2x/elink.c:7965: elink_cl45_write(sc, phy,
3627
./drivers/net/bnx2x/elink.c:7968: elink_cl45_write(sc, phy,
3628
./drivers/net/bnx2x/elink.c:7970: elink_cl45_write(sc, phy,
3629
./drivers/net/bnx2x/elink.c:7973: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
3630
./drivers/net/bnx2x/elink.c:7983: elink_cl45_write(sc, phy,
3631
./drivers/net/bnx2x/elink.c:8019: elink_cb_gpio_write(sc, gpio_pin, gpio_mode, gpio_port);
3632
./drivers/net/bnx2x/elink.c:8100: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3633
./drivers/net/bnx2x/elink.c:8118: elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
3634
./drivers/net/bnx2x/elink.c:8121: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3635
./drivers/net/bnx2x/elink.c:8228: gpio_val = elink_cb_gpio_read(sc, gpio_num, gpio_port);
3636
./drivers/net/bnx2x/elink.c:8236: elink_cb_gpio_int_write(sc, gpio_num,
3637
./drivers/net/bnx2x/elink.c:8247: elink_cl45_read(sc, phy,
3638
./drivers/net/bnx2x/elink.c:8263: elink_cb_gpio_int_write(sc, gpio_num,
3639
./drivers/net/bnx2x/elink.c:8282: elink_cl45_read(sc, phy,
3640
./drivers/net/bnx2x/elink.c:8284: elink_cl45_read(sc, phy,
3641
./drivers/net/bnx2x/elink.c:8287: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, &val);
3642
./drivers/net/bnx2x/elink.c:8292: elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, val);
3643
./drivers/net/bnx2x/elink.c:8307: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
3644
./drivers/net/bnx2x/elink.c:8313: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
3645
./drivers/net/bnx2x/elink.c:8314: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
3646
./drivers/net/bnx2x/elink.c:8317: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
3647
./drivers/net/bnx2x/elink.c:8318: elink_cl45_read(sc, phy,
3648
./drivers/net/bnx2x/elink.c:8320: elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
3649
./drivers/net/bnx2x/elink.c:8321: elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
3650
./drivers/net/bnx2x/elink.c:8340: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
3651
./drivers/net/bnx2x/elink.c:8342: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
3652
./drivers/net/bnx2x/elink.c:8362: elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
3653
./drivers/net/bnx2x/elink.c:8366: elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
3654
./drivers/net/bnx2x/elink.c:8371: elink_cl45_read(sc, phy,
3655
./drivers/net/bnx2x/elink.c:8386: elink_cl45_read(sc, phy, MDIO_XS_DEVAD, reg, &val);
3656
./drivers/net/bnx2x/elink.c:8393: elink_cl45_write(sc, phy, MDIO_XS_DEVAD, reg, val);
3657
./drivers/net/bnx2x/elink.c:8400: elink_cl45_write(sc, phy,
3658
./drivers/net/bnx2x/elink.c:8403: elink_cl45_write(sc, phy,
3659
./drivers/net/bnx2x/elink.c:8406: elink_cl45_write(sc, phy,
3660
./drivers/net/bnx2x/elink.c:8413: elink_cl45_write(sc, phy,
3661
./drivers/net/bnx2x/elink.c:8417: elink_cl45_write(sc, phy,
3662
./drivers/net/bnx2x/elink.c:8420: elink_cl45_write(sc, phy,
3663
./drivers/net/bnx2x/elink.c:8423: elink_cl45_write(sc, phy,
3664
./drivers/net/bnx2x/elink.c:8427: elink_cl45_write(sc, phy,
3665
./drivers/net/bnx2x/elink.c:8429: elink_cl45_write(sc, phy,
3666
./drivers/net/bnx2x/elink.c:8431: elink_cl45_write(sc, phy,
3667
./drivers/net/bnx2x/elink.c:8448: elink_cl45_read(sc, phy,
3668
./drivers/net/bnx2x/elink.c:8452: elink_cl45_write(sc, phy,
3669
./drivers/net/bnx2x/elink.c:8475: elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001);
3670
./drivers/net/bnx2x/elink.c:8486: elink_cl45_write(sc, phy,
3671
./drivers/net/bnx2x/elink.c:8490: elink_cl45_write(sc, phy,
3672
./drivers/net/bnx2x/elink.c:8495: elink_cl45_write(sc, phy,
3673
./drivers/net/bnx2x/elink.c:8498: elink_cl45_write(sc, phy,
3674
./drivers/net/bnx2x/elink.c:8507: elink_cl45_write(sc, phy,
3675
./drivers/net/bnx2x/elink.c:8522: elink_cl45_read(sc, phy,
3676
./drivers/net/bnx2x/elink.c:8541: elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1 << 15);
3677
./drivers/net/bnx2x/elink.c:8555: elink_cl45_write(sc, phy,
3678
./drivers/net/bnx2x/elink.c:8557: elink_cl45_write(sc, phy,
3679
./drivers/net/bnx2x/elink.c:8559: elink_cl45_write(sc, phy,
3680
./drivers/net/bnx2x/elink.c:8561: elink_cl45_write(sc, phy,
3681
./drivers/net/bnx2x/elink.c:8572: elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20);
3682
./drivers/net/bnx2x/elink.c:8573: elink_cl45_write(sc, phy,
3683
./drivers/net/bnx2x/elink.c:8575: elink_cl45_write(sc, phy,
3684
./drivers/net/bnx2x/elink.c:8577: elink_cl45_write(sc, phy,
3685
./drivers/net/bnx2x/elink.c:8579: elink_cl45_write(sc, phy,
3686
./drivers/net/bnx2x/elink.c:8584: elink_cl45_write(sc, phy,
3687
./drivers/net/bnx2x/elink.c:8586: elink_cl45_write(sc, phy,
3688
./drivers/net/bnx2x/elink.c:8590: elink_cl45_write(sc, phy,
3689
./drivers/net/bnx2x/elink.c:8600: elink_cl45_write(sc, phy,
3690
./drivers/net/bnx2x/elink.c:8605: elink_cl45_write(sc, phy,
3691
./drivers/net/bnx2x/elink.c:8621: elink_cl45_write(sc, phy,
3692
./drivers/net/bnx2x/elink.c:8654: elink_cl45_read(sc, phy,
3693
./drivers/net/bnx2x/elink.c:8658: elink_cl45_write(sc, phy,
3694
./drivers/net/bnx2x/elink.c:8660: elink_cl45_read(sc, phy,
3695
./drivers/net/bnx2x/elink.c:8664: elink_cl45_write(sc, phy,
3696
./drivers/net/bnx2x/elink.c:8680: elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1,
3697
./drivers/net/bnx2x/elink.c:8693: elink_cl45_write(sc, phy,
3698
./drivers/net/bnx2x/elink.c:8695: elink_cl45_write(sc, phy,
3699
./drivers/net/bnx2x/elink.c:8697: elink_cl45_read(sc, phy,
3700
./drivers/net/bnx2x/elink.c:8704: elink_cl45_read(sc, phy,
3701
./drivers/net/bnx2x/elink.c:8708: elink_cl45_write(sc, phy,
3702
./drivers/net/bnx2x/elink.c:8720: elink_cl45_write(sc, phy,
3703
./drivers/net/bnx2x/elink.c:8722: elink_cl45_write(sc, phy,
3704
./drivers/net/bnx2x/elink.c:8728: elink_cl45_write(sc, phy,
3705
./drivers/net/bnx2x/elink.c:8731: elink_cl45_write(sc, phy,
3706
./drivers/net/bnx2x/elink.c:8733: elink_cl45_write(sc, phy,
3707
./drivers/net/bnx2x/elink.c:8735: elink_cl45_write(sc, phy,
3708
./drivers/net/bnx2x/elink.c:8759: elink_cl45_read(sc, phy,
3709
./drivers/net/bnx2x/elink.c:8768: elink_cl45_write(sc, phy,
3710
./drivers/net/bnx2x/elink.c:8776: elink_cl45_read(sc, phy,
3711
./drivers/net/bnx2x/elink.c:8779: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
3712
./drivers/net/bnx2x/elink.c:8788: elink_cl45_write(sc, phy,
3713
./drivers/net/bnx2x/elink.c:8792: elink_cl45_write(sc, phy,
3714
./drivers/net/bnx2x/elink.c:8809: elink_cl45_read(sc, phy,
3715
./drivers/net/bnx2x/elink.c:8814: elink_cl45_write(sc, phy,
3716
./drivers/net/bnx2x/elink.c:8817: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
3717
./drivers/net/bnx2x/elink.c:8819: elink_cl45_write(sc, phy, MDIO_PMA_DEVAD,
3718
./drivers/net/bnx2x/elink.c:8835: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
3719
./drivers/net/bnx2x/elink.c:8852: elink_cl45_write(sc, phy,
3720
./drivers/net/bnx2x/elink.c:8859: elink_cl45_read(sc, phy,
3721
./drivers/net/bnx2x/elink.c:8876: elink_cl45_write(sc, phy,
3722
./drivers/net/bnx2x/elink.c:8885: elink_cl45_read(sc, phy,
3723
./drivers/net/bnx2x/elink.c:8917: elink_cl45_read(sc, phy,
3724
./drivers/net/bnx2x/elink.c:8923: elink_cl45_read(sc, phy,
3725
./drivers/net/bnx2x/elink.c:8931: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
3726
./drivers/net/bnx2x/elink.c:8936: elink_cl45_read(sc, phy,
3727
./drivers/net/bnx2x/elink.c:8944: elink_cl45_read(sc, phy,
3728
./drivers/net/bnx2x/elink.c:8962: elink_cl45_write(sc, phy,
3729
./drivers/net/bnx2x/elink.c:8966: elink_cl45_read(sc, phy,
3730
./drivers/net/bnx2x/elink.c:8971: elink_cl45_write(sc, phy,
3731
./drivers/net/bnx2x/elink.c:8975: elink_cl45_read(sc, phy,
3732
./drivers/net/bnx2x/elink.c:8988: elink_cl45_write(sc, phy,
3733
./drivers/net/bnx2x/elink.c:9001: elink_cl45_read(sc, phy,
3734
./drivers/net/bnx2x/elink.c:9026: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
3735
./drivers/net/bnx2x/elink.c:9029: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
3736
./drivers/net/bnx2x/elink.c:9045: elink_cl45_read(sc, phy,
3737
./drivers/net/bnx2x/elink.c:9055: elink_cl45_write(sc, phy,
3738
./drivers/net/bnx2x/elink.c:9073: elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0);
3739
./drivers/net/bnx2x/elink.c:9095: elink_cl45_read(sc, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
3740
./drivers/net/bnx2x/elink.c:9102: elink_cl45_write(sc, phy, reg_set[i].devad,
3741
./drivers/net/bnx2x/elink.c:9106: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xA818, &val);
3742
./drivers/net/bnx2x/elink.c:9119: elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
3743
./drivers/net/bnx2x/elink.c:9120: elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
3744
./drivers/net/bnx2x/elink.c:9121: elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
3745
./drivers/net/bnx2x/elink.c:9123: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xA818, &val);
3746
./drivers/net/bnx2x/elink.c:9136: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
3747
./drivers/net/bnx2x/elink.c:9138: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
3748
./drivers/net/bnx2x/elink.c:9159: elink_cl45_read(sc, phy,
3749
./drivers/net/bnx2x/elink.c:9164: elink_cl45_write(sc, phy,
3750
./drivers/net/bnx2x/elink.c:9168: elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
3751
./drivers/net/bnx2x/elink.c:9178: elink_cl45_read_or_write(sc, phy,
3752
./drivers/net/bnx2x/elink.c:9215: elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000);
3753
./drivers/net/bnx2x/elink.c:9218: elink_cl45_read(sc, phy,
3754
./drivers/net/bnx2x/elink.c:9223: elink_cl45_read(sc, phy,
3755
./drivers/net/bnx2x/elink.c:9226: elink_cl45_read(sc, phy,
3756
./drivers/net/bnx2x/elink.c:9246: elink_cl45_write(sc, phy,
3757
./drivers/net/bnx2x/elink.c:9293: elink_cl45_write(sc, phy,
3758
./drivers/net/bnx2x/elink.c:9304: elink_cl45_write(sc, phy,
3759
./drivers/net/bnx2x/elink.c:9310: elink_cl45_write(sc, phy,
3760
./drivers/net/bnx2x/elink.c:9323: elink_cl45_write(sc, phy,
3761
./drivers/net/bnx2x/elink.c:9334: elink_cl45_read_or_write(sc, phy,
3762
./drivers/net/bnx2x/elink.c:9338: elink_cl45_write(sc, phy,
3763
./drivers/net/bnx2x/elink.c:9341: elink_cl45_write(sc, phy,
3764
./drivers/net/bnx2x/elink.c:9354: elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
3765
./drivers/net/bnx2x/elink.c:9361: elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1 << 15);
3766
./drivers/net/bnx2x/elink.c:9376: elink_cl45_write(sc, phy, MDIO_CTL_DEVAD,
3767
./drivers/net/bnx2x/elink.c:9380: elink_cl45_read(sc, phy, MDIO_CTL_DEVAD,
3768
./drivers/net/bnx2x/elink.c:9393: elink_cl45_write(sc, phy, MDIO_CTL_DEVAD,
3769
./drivers/net/bnx2x/elink.c:9397: elink_cl45_write(sc, phy, MDIO_CTL_DEVAD,
3770
./drivers/net/bnx2x/elink.c:9400: elink_cl45_read(sc, phy, MDIO_CTL_DEVAD,
3771
./drivers/net/bnx2x/elink.c:9414: elink_cl45_read(sc, phy, MDIO_CTL_DEVAD,
3772
./drivers/net/bnx2x/elink.c:9418: elink_cl45_write(sc, phy, MDIO_CTL_DEVAD,
3773
./drivers/net/bnx2x/elink.c:9511: elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
3774
./drivers/net/bnx2x/elink.c:9514: elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
3775
./drivers/net/bnx2x/elink.c:9524: elink_cb_gpio_mult_write(sc, reset_gpios,
3776
./drivers/net/bnx2x/elink.c:9589: elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_3,
3777
./drivers/net/bnx2x/elink.c:9593: elink_cl45_write(sc, phy,
3778
./drivers/net/bnx2x/elink.c:9614: elink_cl45_read(sc, phy, MDIO_CTL_DEVAD,
3779
./drivers/net/bnx2x/elink.c:9653: elink_cl45_write(sc, phy, MDIO_CTL_DEVAD,
3780
./drivers/net/bnx2x/elink.c:9689: elink_cl45_read(sc, phy, MDIO_CTL_DEVAD,
3781
./drivers/net/bnx2x/elink.c:9695: elink_cl45_write(sc, phy, MDIO_CTL_DEVAD,
3782
./drivers/net/bnx2x/elink.c:9699: elink_cl45_read(sc, phy, MDIO_CTL_DEVAD,
3783
./drivers/net/bnx2x/elink.c:9731: elink_cl45_read_and_write(sc, phy,
3784
./drivers/net/bnx2x/elink.c:9750: elink_cl45_read(sc, phy, MDIO_AN_DEVAD, 0xFFFA, &val1);
3785
./drivers/net/bnx2x/elink.c:9751: elink_cl45_read(sc, phy,
3786
./drivers/net/bnx2x/elink.c:9765: elink_cl45_write(sc, phy,
3787
./drivers/net/bnx2x/elink.c:9770: elink_cl45_read(sc, phy,
3788
./drivers/net/bnx2x/elink.c:9791: elink_cl45_read(sc, phy,
3789
./drivers/net/bnx2x/elink.c:9810: elink_cl45_read(sc, phy,
3790
./drivers/net/bnx2x/elink.c:9817: elink_cl45_read(sc, phy,
3791
./drivers/net/bnx2x/elink.c:9832: elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
3792
./drivers/net/bnx2x/elink.c:9850: elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
3793
./drivers/net/bnx2x/elink.c:9860: elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
3794
./drivers/net/bnx2x/elink.c:9889: elink_cb_gpio_write(params->sc, MISC_REGISTERS_GPIO_1,
3795
./drivers/net/bnx2x/elink.c:9891: elink_cb_gpio_write(params->sc, MISC_REGISTERS_GPIO_1,
3796
./drivers/net/bnx2x/elink.c:9898: elink_cl45_write(params->sc, phy,
3797
./drivers/net/bnx2x/elink.c:9900: elink_cl45_write(params->sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1);
3798
./drivers/net/bnx2x/elink.c:9916: elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_3,
3799
./drivers/net/bnx2x/elink.c:9919: elink_cl45_read(sc, phy,
3800
./drivers/net/bnx2x/elink.c:9923: elink_cl45_write(sc, phy,
3801
./drivers/net/bnx2x/elink.c:9950: elink_cl45_write(sc, phy,
3802
./drivers/net/bnx2x/elink.c:9954: elink_cl45_write(sc, phy,
3803
./drivers/net/bnx2x/elink.c:9958: elink_cl45_write(sc, phy,
3804
./drivers/net/bnx2x/elink.c:9962: elink_cl45_write(sc, phy,
3805
./drivers/net/bnx2x/elink.c:9967: elink_cl45_write(sc, phy,
3806
./drivers/net/bnx2x/elink.c:9980: elink_cl45_write(sc, phy,
3807
./drivers/net/bnx2x/elink.c:9984: elink_cl45_write(sc, phy,
3808
./drivers/net/bnx2x/elink.c:9988: elink_cl45_write(sc, phy,
3809
./drivers/net/bnx2x/elink.c:9992: elink_cl45_write(sc, phy,
3810
./drivers/net/bnx2x/elink.c:9997: elink_cl45_write(sc, phy,
3811
./drivers/net/bnx2x/elink.c:10015: elink_cl45_write(sc, phy,
3812
./drivers/net/bnx2x/elink.c:10029: elink_cl45_read(sc, phy,
3813
./drivers/net/bnx2x/elink.c:10035: elink_cl45_write(sc, phy,
3814
./drivers/net/bnx2x/elink.c:10040: elink_cl45_write(sc, phy,
3815
./drivers/net/bnx2x/elink.c:10044: elink_cl45_write(sc, phy,
3816
./drivers/net/bnx2x/elink.c:10048: elink_cl45_write(sc, phy,
3817
./drivers/net/bnx2x/elink.c:10052: elink_cl45_write(sc, phy,
3818
./drivers/net/bnx2x/elink.c:10056: elink_cl45_write(sc, phy,
3819
./drivers/net/bnx2x/elink.c:10074: elink_cl45_write(sc, phy,
3820
./drivers/net/bnx2x/elink.c:10090: elink_cl45_read(sc, phy,
3821
./drivers/net/bnx2x/elink.c:10100: elink_cl45_write(sc, phy,
3822
./drivers/net/bnx2x/elink.c:10107: elink_cl45_write(sc, phy,
3823
./drivers/net/bnx2x/elink.c:10111: elink_cl45_write(sc, phy,
3824
./drivers/net/bnx2x/elink.c:10115: elink_cl45_write(sc, phy,
3825
./drivers/net/bnx2x/elink.c:10119: elink_cl45_write(sc, phy,
3826
./drivers/net/bnx2x/elink.c:10132: elink_cl45_write(sc, phy,
3827
./drivers/net/bnx2x/elink.c:10137: elink_cl45_read(sc, phy,
3828
./drivers/net/bnx2x/elink.c:10142: elink_cl45_write(sc, phy,
3829
./drivers/net/bnx2x/elink.c:10149: elink_cl45_write(sc, phy,
3830
./drivers/net/bnx2x/elink.c:10168: elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
3831
./drivers/net/bnx2x/elink.c:10186: elink_cl22_write(sc, phy,
3832
./drivers/net/bnx2x/elink.c:10189: elink_cl22_read(sc, phy, MDIO_REG_GPHY_SHADOW, &temp);
3833
./drivers/net/bnx2x/elink.c:10192: elink_cl22_write(sc, phy,
3834
./drivers/net/bnx2x/elink.c:10196: elink_cl22_write(sc, phy,
3835
./drivers/net/bnx2x/elink.c:10234: elink_cl22_write(sc, phy, MDIO_PMA_REG_CTRL, 0x8000);
3836
./drivers/net/bnx2x/elink.c:10242: elink_cl22_write(sc, phy,
3837
./drivers/net/bnx2x/elink.c:10245: elink_cl22_read(sc, phy, MDIO_REG_GPHY_SHADOW, &temp);
3838
./drivers/net/bnx2x/elink.c:10247: elink_cl22_write(sc, phy,
3839
./drivers/net/bnx2x/elink.c:10264: elink_cl22_read(sc, phy, 0x09, &an_1000_val);
3840
./drivers/net/bnx2x/elink.c:10266: elink_cl22_read(sc, phy, 0x04, &an_10_100_val);
3841
./drivers/net/bnx2x/elink.c:10268: elink_cl22_read(sc, phy, MDIO_PMA_REG_CTRL, &autoneg_val);
3842
./drivers/net/bnx2x/elink.c:10289: elink_cl22_write(sc, phy, 0x09, an_1000_val);
3843
./drivers/net/bnx2x/elink.c:10290: elink_cl22_read(sc, phy, 0x09, &an_1000_val);
3844
./drivers/net/bnx2x/elink.c:10324: elink_cl22_write(sc, phy, 0x18, (1 << 15 | 1 << 9 | 7 << 0));
3845
./drivers/net/bnx2x/elink.c:10329: elink_cl22_write(sc, phy, 0x18, (1 << 15 | 1 << 9 | 7 << 0));
3846
./drivers/net/bnx2x/elink.c:10336: elink_cl22_write(sc, phy, MDIO_REG_GPHY_EXP_ACCESS,
3847
./drivers/net/bnx2x/elink.c:10339: elink_cl22_read(sc, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, &temp);
3848
./drivers/net/bnx2x/elink.c:10341: elink_cl22_write(sc, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, temp);
3849
./drivers/net/bnx2x/elink.c:10376: elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
3850
./drivers/net/bnx2x/elink.c:10381: elink_cl22_write(sc, phy, 0x04, an_10_100_val | fc_val);
3851
./drivers/net/bnx2x/elink.c:10386: elink_cl22_write(sc, phy, MDIO_PMA_REG_CTRL, autoneg_val);
3852
./drivers/net/bnx2x/elink.c:10397: elink_cl22_write(sc, phy,
3853
./drivers/net/bnx2x/elink.c:10399: elink_cl22_read(sc, phy, MDIO_REG_GPHY_SHADOW, &temp);
3854
./drivers/net/bnx2x/elink.c:10417: elink_cl22_write(sc, phy,
3855
./drivers/net/bnx2x/elink.c:10433: elink_cl22_write(sc, phy, MDIO_PMA_REG_CTRL, 0x800);
3856
./drivers/net/bnx2x/elink.c:10459: elink_cl22_read(sc, phy, MDIO_REG_GPHY_AUX_STATUS, &legacy_status);
3857
./drivers/net/bnx2x/elink.c:10463: elink_cl22_read(sc, phy, MDIO_REG_INTR_STATUS, &val);
3858
./drivers/net/bnx2x/elink.c:10497: elink_cl22_read(sc, phy, 0x01, &val);
3859
./drivers/net/bnx2x/elink.c:10501: elink_cl22_read(sc, phy, 0x06, &val);
3860
./drivers/net/bnx2x/elink.c:10513: elink_cl22_read(sc, phy, 0x5, &val);
3861
./drivers/net/bnx2x/elink.c:10531: elink_cl22_read(sc, phy, 0xa, &val);
3862
./drivers/net/bnx2x/elink.c:10558: elink_cl22_write(sc, phy, 0x09, 3 << 11);
3863
./drivers/net/bnx2x/elink.c:10565: elink_cl22_read(sc, phy, 0x00, &val);
3864
./drivers/net/bnx2x/elink.c:10568: elink_cl22_write(sc, phy, 0x00, val);
3865
./drivers/net/bnx2x/elink.c:10574: elink_cl22_write(sc, phy, 0x18, 7);
3866
./drivers/net/bnx2x/elink.c:10575: elink_cl22_read(sc, phy, 0x18, &val);
3867
./drivers/net/bnx2x/elink.c:10576: elink_cl22_write(sc, phy, 0x18, val | (1 << 10) | (1 << 15));
3868
./drivers/net/bnx2x/elink.c:10595: elink_cl45_write(sc, phy,
3869
./drivers/net/bnx2x/elink.c:10608: elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
3870
./drivers/net/bnx2x/elink.c:10614: elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x1);
3871
./drivers/net/bnx2x/elink.c:10616: elink_cl45_write(sc, phy,
3872
./drivers/net/bnx2x/elink.c:10621: elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, &val);
3873
./drivers/net/bnx2x/elink.c:10623: elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, val);
3874
./drivers/net/bnx2x/elink.c:10626: elink_cl45_read(sc, phy,
3875
./drivers/net/bnx2x/elink.c:10629: elink_cl45_read(sc, phy,
3876
./drivers/net/bnx2x/elink.c:10644: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
3877
./drivers/net/bnx2x/elink.c:10645: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
3878
./drivers/net/bnx2x/elink.c:10647: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
3879
./drivers/net/bnx2x/elink.c:10648: elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
3880
./drivers/net/bnx2x/elink.c:10653: elink_cl45_read(sc, phy,
3881
./drivers/net/bnx2x/elink.c:10689: elink_cb_gpio_write(params->sc, MISC_REGISTERS_GPIO_2,
3882
./drivers/net/bnx2x/elink.c:10692: elink_cb_gpio_write(params->sc, MISC_REGISTERS_GPIO_1,
3883
./drivers/net/bnx2x/elink.c:10713: elink_cl45_write(sc, phy,
3884
./drivers/net/bnx2x/elink.c:12512: elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
3885
./drivers/net/bnx2x/elink.c:12516: elink_cl45_write(sc, &phy[port],
3886
./drivers/net/bnx2x/elink.c:12545: elink_cl45_read(sc, phy_blk[port],
3887
./drivers/net/bnx2x/elink.c:12550: elink_cl45_write(sc, phy_blk[port],
3888
./drivers/net/bnx2x/elink.c:12564: elink_cl45_read(sc, phy_blk[port],
3889
./drivers/net/bnx2x/elink.c:12568: elink_cl45_write(sc, phy_blk[port],
3890
./drivers/net/bnx2x/elink.c:12575: elink_cl45_read(sc, phy_blk[port],
3891
./drivers/net/bnx2x/elink.c:12578: elink_cl45_write(sc, phy_blk[port],
3892
./drivers/net/bnx2x/elink.c:12583: elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
3893
./drivers/net/bnx2x/elink.c:12627: elink_cl45_write(sc, &phy,
3894
./drivers/net/bnx2x/elink.c:12631: elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_0,
3895
./drivers/net/bnx2x/elink.c:12715: elink_cb_gpio_write(sc, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
3896
./drivers/net/bnx2x/elink.c:12718: elink_cb_gpio_write(sc, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
3897
./drivers/net/bnx2x/elink.c:12754: elink_cl45_write(sc, &phy[port],
3898
./drivers/net/bnx2x/elink.c:12779: elink_cl45_write(sc, phy_blk[port],
3899
./drivers/net/bnx2x/elink.c:12795: elink_cb_gpio_mult_write(sc, reset_gpios,
3900
./drivers/net/bnx2x/elink.c:12798: elink_cb_gpio_mult_write(sc, reset_gpios,
3901
./drivers/net/bnx2x/elink.c:13162: elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
3902
./drivers/net/bnx2x/elink.c:13164: elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
3903
./drivers/net/bnx2x/elink.c:13325: elink_cb_gpio_write(sc, gpio_num, MISC_REGISTERS_GPIO_INPUT_HI_Z,
3904
./drivers/net/bnx2x/bnx2x_rxtx.c:88: rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct bnx2x_rx_queue),
3905
./drivers/net/bnx2x/bnx2x_rxtx.c:139: rxq->sw_ring = rte_zmalloc_socket("sw_ring", dma_size,
3906
./drivers/net/bnx2x/elink.h:32:extern uint32_t elink_cb_reg_read(struct bnx2x_softc *sc, uint32_t reg_addr);
3907
./drivers/net/bnx2x/elink.h:33:extern void elink_cb_reg_write(struct bnx2x_softc *sc, uint32_t reg_addr, uint32_t val);
3908
./drivers/net/bnx2x/elink.h:36:extern uint8_t elink_cb_gpio_write(struct bnx2x_softc *sc,
3909
./drivers/net/bnx2x/elink.h:39:extern uint8_t elink_cb_gpio_mult_write(struct bnx2x_softc *sc,
3910
./drivers/net/bnx2x/elink.h:43:extern uint32_t elink_cb_gpio_read(struct bnx2x_softc *sc, uint16_t gpio_num, uint8_t port);
3911
./drivers/net/bnx2x/elink.h:44:extern uint8_t elink_cb_gpio_int_write(struct bnx2x_softc *sc,
3912
./drivers/net/e1000/base/e1000_vf.c:562: if (mbx->ops.read(hw, &in_msg, 1, 0))
3913
./drivers/net/e1000/base/e1000_82540.c:359: * read (flush). This is to protect against some strange
3914
./drivers/net/e1000/base/e1000_82540.c:505: ret_val = hw->nvm.ops.read(hw, NVM_SERDES_AMPLITUDE, 1, &nvm_data);
3915
./drivers/net/e1000/base/e1000_82540.c:597: ret_val = hw->nvm.ops.read(hw, NVM_PHY_CLASS_WORD, 1, &nvm_data);
3916
./drivers/net/e1000/base/e1000_82540.c:699: ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
3917
./drivers/net/e1000/base/e1000_api.h:114:s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
3918
./drivers/net/e1000/base/e1000_mbx.c:85: ret_val = mbx->ops.read(hw, msg, size, mbx_id);
3919
./drivers/net/e1000/base/e1000_mbx.c:110: ret_val = mbx->ops.write(hw, msg, size, mbx_id);
3920
./drivers/net/e1000/base/e1000_mbx.c:261: ret_val = mbx->ops.read(hw, msg, size, mbx_id);
3921
./drivers/net/e1000/base/e1000_mbx.c:288: ret_val = mbx->ops.write(hw, msg, size, mbx_id);
3922
./drivers/net/e1000/base/e1000_82571.c:1459: ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &data);
3923
./drivers/net/e1000/base/e1000_82571.c:1816: ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3924
./drivers/net/e1000/base/e1000_82571.c:1908: ret_val = nvm->ops.read(hw, 0x10, 1, &data);
3925
./drivers/net/e1000/base/e1000_82571.c:1920: ret_val = nvm->ops.read(hw, 0x23, 1, &data);
3926
./drivers/net/e1000/base/e1000_82571.c:1926: ret_val = nvm->ops.write(hw, 0x23, 1, &data);
3927
./drivers/net/e1000/base/e1000_82543.c:1030: ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &data);
3928
./drivers/net/e1000/base/e1000_82542.c:576: ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
3929
./drivers/net/e1000/base/e1000_api.c:896:s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
3930
./drivers/net/e1000/base/e1000_api.c:1273: return hw->nvm.ops.read(hw, offset, words, data);
3931
./drivers/net/e1000/base/e1000_api.c:1291: return hw->nvm.ops.write(hw, offset, words, data);
3932
./drivers/net/e1000/base/e1000_i210.c:826: ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3933
./drivers/net/e1000/base/e1000_82575.c:1685: ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
3934
./drivers/net/e1000/base/e1000_82575.c:1930: ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3935
./drivers/net/e1000/base/e1000_82575.c:2435: ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
3936
./drivers/net/e1000/base/e1000_82575.c:2588: ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
3937
./drivers/net/e1000/base/e1000_82575.c:2625: ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
3938
./drivers/net/e1000/base/e1000_82575.c:2633: ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
3939
./drivers/net/e1000/base/e1000_82575.c:2659: ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
3940
./drivers/net/e1000/base/e1000_82575.c:2699: ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
3941
./drivers/net/e1000/base/e1000_82575.c:2708: ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
3942
./drivers/net/e1000/base/e1000_nvm.c:792: ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
3943
./drivers/net/e1000/base/e1000_nvm.c:798: ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
3944
./drivers/net/e1000/base/e1000_nvm.c:843: ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
3945
./drivers/net/e1000/base/e1000_nvm.c:864: ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data);
3946
./drivers/net/e1000/base/e1000_nvm.c:899: ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
3947
./drivers/net/e1000/base/e1000_nvm.c:905: ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
3948
./drivers/net/e1000/base/e1000_nvm.c:917: ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
3949
./drivers/net/e1000/base/e1000_nvm.c:951: ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
3950
./drivers/net/e1000/base/e1000_nvm.c:961: ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
3951
./drivers/net/e1000/base/e1000_nvm.c:1205: ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
3952
./drivers/net/e1000/base/e1000_nvm.c:1238: ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
3953
./drivers/net/e1000/base/e1000_nvm.c:1246: ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
3954
./drivers/net/e1000/base/e1000_nvm.c:1298: hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
3955
./drivers/net/e1000/base/e1000_nvm.c:1303: hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
3956
./drivers/net/e1000/base/e1000_nvm.c:1319: hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
3957
./drivers/net/e1000/base/e1000_nvm.c:1321: hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
3958
./drivers/net/e1000/base/e1000_nvm.c:1325: hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
3959
./drivers/net/e1000/base/e1000_nvm.c:1327: hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
3960
./drivers/net/e1000/base/e1000_nvm.c:1347: hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
3961
./drivers/net/e1000/base/e1000_nvm.c:1350: hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
3962
./drivers/net/e1000/base/e1000_nvm.c:1372: hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
3963
./drivers/net/e1000/base/e1000_nvm.c:1373: hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
3964
./drivers/net/e1000/base/e1000_nvm.c:1377: hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
3965
./drivers/net/e1000/base/e1000_nvm.c:1378: hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
3966
./drivers/net/e1000/base/e1000_mac.c:403: ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data);
3967
./drivers/net/e1000/base/e1000_mac.c:417: ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
3968
./drivers/net/e1000/base/e1000_mac.c:438: ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
3969
./drivers/net/e1000/base/e1000_mac.c:959: ret_val = hw->nvm.ops.read(hw,
3970
./drivers/net/e1000/base/e1000_mac.c:964: ret_val = hw->nvm.ops.read(hw,
3971
./drivers/net/e1000/base/e1000_mac.c:1815: ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3972
./drivers/net/e1000/base/e1000_82541.c:180: ret_val = nvm->ops.read(hw, NVM_CFG, 1, &size);
3973
./drivers/net/e1000/base/e1000_82541.c:405: * read (flush). This is to protect against some strange
3974
./drivers/net/e1000/base/e1000_ich8lan.c:2140: ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
3975
./drivers/net/e1000/base/e1000_ich8lan.c:2145: ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
3976
./drivers/net/e1000/base/e1000_ich8lan.c:3769: ret_val = hw->nvm.ops.read(hw, word, 1, &data);
3977
./drivers/net/e1000/base/e1000_ich8lan.c:3775: ret_val = hw->nvm.ops.write(hw, word, 1, &data);
3978
./drivers/net/e1000/base/e1000_ich8lan.c:4042: ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3979
./drivers/net/e1000/igb_ethdev.c:4669: return nvm->ops.read(hw, first, length, data);
3980
./drivers/net/e1000/igb_ethdev.c:4692: return nvm->ops.write(hw, first, length, data);
3981
./drivers/net/ring/rte_eth_ring.c:288: data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
3982
./drivers/net/ring/rte_eth_ring.c:294: data->rx_queues = rte_zmalloc_socket(name, sizeof(void *) * nb_rx_queues,
3983
./drivers/net/ring/rte_eth_ring.c:301: data->tx_queues = rte_zmalloc_socket(name, sizeof(void *) * nb_tx_queues,
3984
./drivers/net/ring/rte_eth_ring.c:308: internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
3985
./drivers/net/cxgbe/cxgbe_main.c:1154: data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
3986
./drivers/net/cxgbe/base/t4_hw.c:817:static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
3987
./drivers/net/cxgbe/base/t4_hw.c:846:static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
3988
./drivers/net/cxgbe/base/t4_hw.c:883: ret = sf1_write(adapter, 4, 1, 0, addr);
3989
./drivers/net/cxgbe/base/t4_hw.c:887: ret = sf1_read(adapter, 1, 1, 0, data);
3990
./drivers/net/cxgbe/base/t4_hw.c:892: ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
3991
./drivers/net/cxgbe/base/t4_hw.c:2291: ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
3992
./drivers/net/cxgbe/base/t4_hw.c:2293: ret = sf1_read(adapter, 3, 0, 1, &info);
3993
./drivers/net/cxgbe/sge.c:1289: s = rte_zmalloc_socket(z_name_sw, nelem * sw_size,
3994
./drivers/net/af_packet/rte_eth_af_packet.c:225: if (sendto(pkt_q->sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1)
3995
./drivers/net/af_packet/rte_eth_af_packet.c:415: *sockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
3996
./drivers/net/af_packet/rte_eth_af_packet.c:471: data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
3997
./drivers/net/af_packet/rte_eth_af_packet.c:475: *internals = rte_zmalloc_socket(name, sizeof(**internals),
3998
./drivers/net/af_packet/rte_eth_af_packet.c:533: qsockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
3999
./drivers/net/af_packet/rte_eth_af_packet.c:606: rx_queue->rd = rte_zmalloc_socket(name, rdsize, 0, numa_node);
4000
./drivers/net/af_packet/rte_eth_af_packet.c:620: tx_queue->rd = rte_zmalloc_socket(name, rdsize, 0, numa_node);
4001
./drivers/net/af_packet/rte_eth_af_packet.c:629: rc = bind(qsockfd, (const struct sockaddr*)&sockaddr, sizeof(sockaddr));
4002
./drivers/net/virtio/virtio_ethdev.c:323: vq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
4003
./app/test-pipeline/runtime.c:113: worker_mbuf = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
4004
./app/test-acl/main.c:502: config.traces = rte_zmalloc_socket(name, sz, RTE_CACHE_LINE_SIZE,
4005
./app/test/test_func_reentrancy.c:90: while (rte_atomic32_read(&synchro) == 0); \
4006
./app/test/test_atomic.c:73: * (rte_atomic32_read(&val) == 0)`` which is triggered by the main test
4007
./app/test/test_atomic.c:112: while (rte_atomic32_read(&synchro) == 0)
4008
./app/test/test_atomic.c:148: while (rte_atomic32_read(&synchro) == 0)
4009
./app/test/test_atomic.c:169: while (rte_atomic32_read(&synchro) == 0)
4010
./app/test/test_atomic.c:208: while (rte_atomic32_read(&synchro) == 0)
4011
./app/test/test_atomic.c:235: while (rte_atomic32_read(&synchro) == 0)
4012
./app/test/test_atomic.c:270: if (rte_atomic16_read(&a16) != 1UL << 10) {
4013
./app/test/test_atomic.c:275: if (rte_atomic32_read(&a32) != 1UL << 10) {
4014
./app/test/test_atomic.c:280: if (rte_atomic64_read(&a64) != 1ULL << 33) {
4015
./app/test/test_atomic.c:296: if (rte_atomic64_read(&count) != NUM_ATOMIC_TYPES) {
4016
./app/test/test_atomic.c:313: if (rte_atomic64_read(&count) != 0) {
4017
./app/test/test_atomic.c:347: if (rte_atomic64_read(&count) != NUM_ATOMIC_TYPES) {
4018
./app/test/test_atomic.c:369: if (rte_atomic64_read(&count) != NUM_ATOMIC_TYPES) {
4019
./app/test/test_sched.c:127: rte_sched_port_pkt_write(mbuf, SUBPORT, PIPE, TC, QUEUE, e_RTE_METER_YELLOW);
4020
./app/test/test_pmd_perf.c:585: rte_calloc_socket("poll_burst",
4021
./app/test/test_pmd_perf.c:596: while (!rte_atomic64_read(&start))
4022
./app/test/test_pmd_perf.c:794: rte_calloc_socket("tx_buff",
4023
./app/test/test_malloc.c:306: void *p1 = rte_malloc_socket("stats", size , align, socket);
4024
./app/test/test_malloc.c:327: void *p2 = rte_malloc_socket("add", size ,align, socket);
4025
./app/test/test_malloc.c:332: void *p3 = rte_malloc_socket("add2", size,align, socket);
4026
./app/test/test_malloc.c:743:is_mem_on_socket(int32_t socket)
4027
./app/test/test_malloc.c:760:addr_to_socket(void * addr)
4028
./app/test/test_malloc.c:774:/* Test using rte_[c|m|zm]alloc_socket() on a specific socket */
4029
./app/test/test_malloc.c:776:test_alloc_single_socket(int32_t socket)
4030
./app/test/test_malloc.c:785: /* Test rte_calloc_socket() */
4031
./app/test/test_malloc.c:786: mem = rte_calloc_socket(type, size, sizeof(char), align, socket);
4032
./app/test/test_malloc.c:789: if (addr_to_socket(mem) != desired_socket) {
4033
./app/test/test_malloc.c:795: /* Test rte_malloc_socket() */
4034
./app/test/test_malloc.c:796: mem = rte_malloc_socket(type, size, align, socket);
4035
./app/test/test_malloc.c:799: if (addr_to_socket(mem) != desired_socket) {
4036
./app/test/test_malloc.c:804: /* Test rte_zmalloc_socket() */
4037
./app/test/test_malloc.c:805: mem = rte_zmalloc_socket(type, size, align, socket);
4038
./app/test/test_malloc.c:808: if (addr_to_socket(mem) != desired_socket) {
4039
./app/test/test_malloc.c:818:test_alloc_socket(void)
4040
./app/test/test_malloc.c:823: if (test_alloc_single_socket(SOCKET_ID_ANY) < 0)
4041
./app/test/test_malloc.c:827: if (is_mem_on_socket(i)) {
4042
./app/test/test_malloc.c:829: if (test_alloc_single_socket(i) < 0) {
4043
./app/test/test_malloc.c:830: printf("Fail: rte_malloc_socket(..., %u) did not succeed\n",
4044
./app/test/test_malloc.c:836: if (test_alloc_single_socket(i) == 0) {
4045
./app/test/test_malloc.c:837: printf("Fail: rte_malloc_socket(..., %u) succeeded\n",
4046
./app/test/test_malloc.c:944: ret = test_alloc_socket();
4047
./app/test/test_malloc.c:946: printf("test_alloc_socket() failed\n");
4048
./app/test/test_malloc.c:949: else printf("test_alloc_socket() passed\n");
4049
./app/test/test_mempool_perf.c:151: while (rte_atomic32_read(&synchro) == 0);
4050
./app/test/virtual_pmd.c:132: rx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
4051
./app/test/virtual_pmd.c:164: tx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
4052
./app/test/virtual_pmd.c:550: pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, socket_id);
4053
./app/test/virtual_pmd.c:554: eth_drv = rte_zmalloc_socket(name, sizeof(*eth_drv), 0, socket_id);
4054
./app/test/virtual_pmd.c:558: pci_drv = rte_zmalloc_socket(name, sizeof(*pci_drv), 0, socket_id);
4055
./app/test/virtual_pmd.c:562: id_table = rte_zmalloc_socket(name, sizeof(*id_table), 0, socket_id);
4056
./app/test/virtual_pmd.c:567: dev_private = rte_zmalloc_socket(name, sizeof(*dev_private), 0, socket_id);
4057
./app/test/test_interrupts.c:136: if (write(pfds.writefd, "1", 1) < 0)
4058
./app/test/test_memcpy_perf.c:172:do_uncached_write(uint8_t *dst, int is_dst_cached,
4059
./app/test/test_memcpy_perf.c:310: do_uncached_write(large_buf_write, 0, small_buf_read, 1, SMALL_BUFFER_SIZE);
4060
./app/test/test_link_bonding.c:1432: refcnt = rte_mbuf_refcnt_read(mbufs[i]);
4061
./app/test/test_spinlock.c:145: while (rte_atomic32_read(&synchro) == 0);
4062
./app/test/test_timer.c:256: while (rte_atomic16_read(&slave_state[i]) != SLAVE_RUNNING)
4063
./app/test/test_timer.c:267: while (rte_atomic16_read(&slave_state[i]) != SLAVE_FINISHED)
4064
./app/test/test_timer.c:277: while (rte_atomic16_read(&slave_state[lcore_id]) != SLAVE_RUN_SIGNAL)
4065
./app/test/test_timer.c:360: my_collisions = rte_atomic32_read(&collisions);
4066
./app/test/test_mbuf.c:359: if (rte_mbuf_refcnt_read(m) != 2)
4067
./app/test/test_mbuf.c:387: if (rte_mbuf_refcnt_read(m) != 2)
4068
./app/test/test_mbuf.c:390: if (rte_mbuf_refcnt_read(m->next) != 2)
4069
./app/test/test_mbuf.c:407: if (rte_mbuf_refcnt_read(m) != 3)
4070
./app/test/test_mbuf.c:410: if (rte_mbuf_refcnt_read(m->next) != 3)
4071
./app/test/test_mbuf.c:478: if (rte_mbuf_refcnt_read(m) != 2)
4072
./app/test/test_mbuf.c:507: if (rte_mbuf_refcnt_read(m) != 3)
4073
./app/test/test_hash_scaling.c:169: rte_atomic64_read(&gcycles)/
4074
./app/test-pmd/mempool_anon.c:69: if ((rc = pread(fd, pa, nb, ofs)) < 0 || (rc -= nb) != 0) {
4075
./app/test-pmd/testpmd.c:2043: rc = read(0, &c, 1);
4076
./app/test-pmd/config.c:562: reg_v = port_id_pci_reg_read(port_id, reg_off);
4077
./app/test-pmd/config.c:588: reg_v = port_id_pci_reg_read(port_id, reg_off);
4078
./app/test-pmd/config.c:606: reg_v = port_id_pci_reg_read(port_id, reg_off);
4079
./app/test-pmd/config.c:626: reg_v = port_id_pci_reg_read(port_id, reg_off);
4080
./app/test-pmd/config.c:631: port_id_pci_reg_write(port_id, reg_off, reg_v);
4081
./app/test-pmd/config.c:668: reg_v = port_id_pci_reg_read(port_id, reg_off);
4082
./app/test-pmd/config.c:671: port_id_pci_reg_write(port_id, reg_off, reg_v);
4083
./app/test-pmd/config.c:682: port_id_pci_reg_write(port_id, reg_off, reg_v);
4084
./app/test-pmd/testpmd.h:430:port_pci_reg_read(struct rte_port *port, uint32_t reg_off)
4085
./app/test-pmd/testpmd.h:442:#define port_id_pci_reg_read(pt_id, reg_off) \
4086
./app/test-pmd/testpmd.h:443: port_pci_reg_read(&ports[(pt_id)], (reg_off))
4087
./app/test-pmd/testpmd.h:446:port_pci_reg_write(struct rte_port *port, uint32_t reg_off, uint32_t reg_v)
4088
./app/test-pmd/testpmd.h:456:#define port_id_pci_reg_write(pt_id, reg_off, reg_value) \
4089
./app/test-pmd/testpmd.h:457: port_pci_reg_write(&ports[(pt_id)], (reg_off), (reg_value))
4090
./lib/librte_cryptodev/rte_cryptodev.c:300: rte_zmalloc_socket("cryptodev device private",
4091
./lib/librte_cryptodev/rte_cryptodev.c:342: rte_zmalloc_socket(
4092
./lib/librte_cryptodev/rte_cryptodev.c:481: dev->data->queue_pairs = rte_zmalloc_socket(
4093
./lib/librte_port/rte_port_frag.c:119: port = rte_zmalloc_socket("PORT", sizeof(*port), RTE_CACHE_LINE_SIZE,
4094
./lib/librte_port/rte_port_frag.c:275:rte_port_frag_reader_stats_read(void *port,
4095
./lib/librte_port/rte_port_ethdev.c:80: port = rte_zmalloc_socket("PORT", sizeof(*port),
4096
./lib/librte_port/rte_port_ethdev.c:119:static int rte_port_ethdev_reader_stats_read(void *port,
4097
./lib/librte_port/rte_port_ethdev.c:179: port = rte_zmalloc_socket("PORT", sizeof(*port),
4098
./lib/librte_port/rte_port_ethdev.c:299:static int rte_port_ethdev_writer_stats_read(void *port,
4099
./lib/librte_port/rte_port_ethdev.c:360: port = rte_zmalloc_socket("PORT", sizeof(*port),
4100
./lib/librte_port/rte_port_ethdev.c:508:static int rte_port_ethdev_writer_nodrop_stats_read(void *port,
4101
./lib/librte_port/rte_port_ras.c:117: port = rte_zmalloc_socket("PORT", sizeof(*port),
4102
./lib/librte_port/rte_port_ras.c:325:rte_port_ras_writer_stats_read(void *port,
4103
./lib/librte_port/rte_port_ring.c:83: port = rte_zmalloc_socket("PORT", sizeof(*port),
4104
./lib/librte_port/rte_port_ring.c:147:rte_port_ring_reader_stats_read(void *port,
4105
./lib/librte_port/rte_port_ring.c:209: port = rte_zmalloc_socket("PORT", sizeof(*port),
4106
./lib/librte_port/rte_port_ring.c:415:rte_port_ring_writer_stats_read(void *port,
4107
./lib/librte_port/rte_port_ring.c:478: port = rte_zmalloc_socket("PORT", sizeof(*port),
4108
./lib/librte_port/rte_port_ring.c:736:rte_port_ring_writer_nodrop_stats_read(void *port,
4109
./lib/librte_port/rte_port_source_sink.c:79: port = rte_zmalloc_socket("PORT", sizeof(*port),
4110
./lib/librte_port/rte_port_source_sink.c:124:rte_port_source_stats_read(void *port,
4111
./lib/librte_port/rte_port_source_sink.c:166: port = rte_zmalloc_socket("PORT", sizeof(*port),
4112
./lib/librte_port/rte_port_source_sink.c:222:rte_port_sink_stats_read(void *port, struct rte_port_out_stats *stats,
4113
./lib/librte_port/rte_port_sched.c:78: port = rte_zmalloc_socket("PORT", sizeof(*port),
4114
./lib/librte_port/rte_port_sched.c:117:rte_port_sched_reader_stats_read(void *port,
4115
./lib/librte_port/rte_port_sched.c:177: port = rte_zmalloc_socket("PORT", sizeof(*port),
4116
./lib/librte_port/rte_port_sched.c:291:rte_port_sched_writer_stats_read(void *port,
4117
./lib/librte_vhost/virtio-net.c:581: new_ll_dev = rte_malloc_socket(NULL,
4118
./lib/librte_vhost/virtio-net.c:584: new_vq = rte_malloc_socket(NULL,
4119
./lib/librte_vhost/vhost_user/vhost-net-user.c:111:uds_socket(const char *path)
4120
./lib/librte_vhost/vhost_user/vhost-net-user.c:120: sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
4121
./lib/librte_vhost/vhost_user/vhost-net-user.c:128: ret = bind(sockfd, (struct sockaddr *)&un, sizeof(un));
4122
./lib/librte_vhost/vhost_user/vhost-net-user.c:136: ret = listen(sockfd, MAX_VIRTIO_BACKLOG);
4123
./lib/librte_vhost/vhost_user/vhost-net-user.c:167: ret = recvmsg(sockfd, &msgh, 0);
4124
./lib/librte_vhost/vhost_user/vhost-net-user.c:207: ret = read(sockfd, &msg->payload, msg->size);
4125
./lib/librte_vhost/vhost_user/vhost-net-user.c:252: ret = sendmsg(sockfd, &msgh, 0);
4126
./lib/librte_vhost/vhost_user/vhost-net-user.c:469: vserver->listenfd = uds_socket(path);
4127
./lib/librte_vhost/vhost_user/fd_man.c:215: * thread(now rte_vhost_driver_unregister) calls fdset_del concurrently, it
4128
./lib/librte_vhost/vhost_rxtx.c:245: eventfd_write(vq->callfd, (eventfd_t)1);
4129
./lib/librte_vhost/vhost_rxtx.c:550: eventfd_write(vq->callfd, (eventfd_t)1);
4130
./lib/librte_vhost/vhost_rxtx.c:778: eventfd_write(vq->callfd, (eventfd_t)1);
4131
./lib/librte_pipeline/rte_pipeline.c:218: p = rte_zmalloc_socket("PIPELINE", sizeof(struct rte_pipeline),
4132
./lib/librte_pipeline/rte_pipeline.c:358: default_entry = (struct rte_pipeline_table_entry *) rte_zmalloc_socket(
4133
./lib/librte_pipeline/rte_pipeline.c:1524:int rte_pipeline_port_in_stats_read(struct rte_pipeline *p, uint32_t port_id,
4134
./lib/librte_pipeline/rte_pipeline.c:1561:int rte_pipeline_port_out_stats_read(struct rte_pipeline *p, uint32_t port_id,
4135
./lib/librte_pipeline/rte_pipeline.c:1595:int rte_pipeline_table_stats_read(struct rte_pipeline *p, uint32_t table_id,
4136
./lib/librte_pipeline/rte_pipeline.h:549:int rte_pipeline_table_stats_read(struct rte_pipeline *p, uint32_t table_id,
4137
./lib/librte_pipeline/rte_pipeline.h:683:int rte_pipeline_port_in_stats_read(struct rte_pipeline *p, uint32_t port_id,
4138
./lib/librte_pipeline/rte_pipeline.h:821:int rte_pipeline_port_out_stats_read(struct rte_pipeline *p, uint32_t port_id,
4139
./lib/librte_ether/rte_ether.h:387: if (rte_mbuf_refcnt_read(*m) > 1) {
4140
./lib/librte_eal/bsdapp/eal/eal_thread.c:84: n = write(m2s, &c, 1);
4141
./lib/librte_eal/bsdapp/eal/eal_thread.c:90: n = read(s2m, &c, 1);
4142
./lib/librte_eal/bsdapp/eal/eal_thread.c:164: n = read(m2s, &c, 1);
4143
./lib/librte_eal/bsdapp/eal/eal_thread.c:175: n = write(s2m, &c, 1);
4144
./lib/librte_eal/bsdapp/eal/eal.c:438:eal_check_mem_on_local_socket(void)
4145
./lib/librte_eal/bsdapp/eal/eal.c:570: eal_check_mem_on_local_socket();
4146
./lib/librte_eal/linuxapp/kni/kni_vhost.c:586: sk_set_socket(&q->sk, NULL);
4147
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.c:818: ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
4148
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.c:824: ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
4149
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.c:837: hw->eeprom.ops.read(hw, data_offset++,
4150
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.c:840: hw->eeprom.ops.read(hw, data_offset, &eword);
4151
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.c:1252: hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset);
4152
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.c:1264: hw->eeprom.ops.read(hw, *list_offset, &sfp_id);
4153
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.c:1269: hw->eeprom.ops.read(hw, *list_offset, data_offset);
4154
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.c:1278: if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
4155
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h:1292:#define page_count(p) atomic_read(&(p)->count)
4156
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.c:1242: hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
4157
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.c:1245: hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
4158
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:539: ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
4159
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:545: ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
4160
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:591: ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
4161
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:613: ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
4162
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:1741: if (hw->eeprom.ops.read(hw, i, &word) != 0) {
4163
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:1750: hw->eeprom.ops.read(hw, i, &pointer);
4164
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:1754: hw->eeprom.ops.read(hw, pointer, &length);
4165
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:1758: hw->eeprom.ops.read(hw, j, &word);
4166
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:1790: status = hw->eeprom.ops.read(hw, 0, &checksum);
4167
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:1795: hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
4168
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:1828: status = hw->eeprom.ops.read(hw, 0, &checksum);
4169
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:1832: status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
4170
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:2873: hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
4171
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:2916: hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
4172
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:2956: hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
4173
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:3509: hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
4174
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:3518: hw->eeprom.ops.read(hw, offset, &caps);
4175
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:3524: hw->eeprom.ops.read(hw, offset, wwnn_prefix);
4176
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:3527: hw->eeprom.ops.read(hw, offset, wwpn_prefix);
4177
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:3550: status = hw->eeprom.ops.read(hw, offset, &caps);
4178
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:3558: status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4179
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:3567: status = hw->eeprom.ops.read(hw, offset, &flags);
4180
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:3654: hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
4181
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:3974: status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
4182
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:3983: status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
4183
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:3998: status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
4184
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:4049: hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
4185
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:4053: hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
4186
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c:4063: hw->eeprom.ops.read(hw, (ets_offset + 1 + i), &ets_sensor);
4187
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.c:541: status = hw->eeprom.ops.read(hw, 0, &checksum);
4188
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.c:596: status = hw->eeprom.ops.read(hw, 0, &checksum);
4189
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c:171: hw->eeprom.ops.read(hw, ++data_offset, &data_value);
4190
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c:175: hw->eeprom.ops.read(hw, ++data_offset, &data_value);
4191
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c:2077: hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
4192
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c:2083: hw->eeprom.ops.read(hw, (fw_offset +
4193
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c:2091: hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
4194
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c:2115: status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
4195
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c:2122: status = hw->eeprom.ops.read(hw, (fw_offset +
4196
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c:2131: status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
4197
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:948: advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
4198
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:958: bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
4199
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:959: lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA);
4200
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:1017: advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
4201
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:1028: mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
4202
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:1033: bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
4203
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:1035: mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr);
4204
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:1042: bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
4205
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:1052: mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp);
4206
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:1062: mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
4207
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:1063: if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS)
4208
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:1074: bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
4209
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:1078: mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr);
4210
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:1119: mii_if->mdio_read(mii_if->dev, mii_data->phy_id,
4211
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:1156: mii_if->mdio_write(mii_if->dev, mii_data->phy_id,
4212
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.h:99:s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
4213
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c:77: ret_val = mbx->ops.read(hw, msg, size, mbx_id);
4214
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c:102: ret_val = mbx->ops.write(hw, msg, size, mbx_id);
4215
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c:253: ret_val = mbx->ops.read(hw, msg, size, mbx_id);
4216
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c:280: ret_val = mbx->ops.write(hw, msg, size, mbx_id);
4217
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h:1341:#define page_count(p) atomic_read(&(p)->count)
4218
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:2826: hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
4219
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c:10057: hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
4220
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.c:692:s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
4221
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.c:1042: return hw->nvm.ops.read(hw, offset, words, data);
4222
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.c:1060: return hw->nvm.ops.write(hw, offset, words, data);
4223
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.c:819: ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4224
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c:1655: ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
4225
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c:1900: ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4226
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c:2400: ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
4227
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c:2547: ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
4228
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c:2584: ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
4229
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c:2592: ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
4230
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c:2618: ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
4231
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c:2658: ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
4232
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c:2667: ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
4233
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c:600: ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
4234
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c:606: ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
4235
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c:651: ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
4236
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c:672: ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data);
4237
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c:707: ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
4238
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c:713: ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
4239
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c:725: ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
4240
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c:793: ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
4241
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c:826: ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
4242
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c:834: ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
4243
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c:879: hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
4244
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c:891: hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
4245
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c:909: hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
4246
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c:913: hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
4247
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c:915: hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
4248
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c:937: hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
4249
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c:959: hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
4250
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c:960: hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
4251
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.c:313: ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data);
4252
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.c:324: ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
4253
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.c:345: ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
4254
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.c:824: ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
4255
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.c:1670: ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4256
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ptp.c:303: now = timecounter_read(&igb->tc);
4257
./lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ptp.c:341: ns = timecounter_read(&igb->tc);
4258
./lib/librte_eal/linuxapp/kni/kni_misc.c:269: down_write(&knet->kni_list_lock);
4259
./lib/librte_eal/linuxapp/kni/kni_misc.c:283: up_write(&knet->kni_list_lock);
4260
./lib/librte_eal/linuxapp/kni/kni_misc.c:302: down_read(&knet->kni_list_lock);
4261
./lib/librte_eal/linuxapp/kni/kni_misc.c:313: up_read(&knet->kni_list_lock);
4262
./lib/librte_eal/linuxapp/kni/kni_misc.c:426: down_read(&knet->kni_list_lock);
4263
./lib/librte_eal/linuxapp/kni/kni_misc.c:429: up_read(&knet->kni_list_lock);
4264
./lib/librte_eal/linuxapp/kni/kni_misc.c:433: up_read(&knet->kni_list_lock);
4265
./lib/librte_eal/linuxapp/kni/kni_misc.c:571: kthread_bind(kni->pthread, kni->core_id);
4266
./lib/librte_eal/linuxapp/kni/kni_misc.c:575: down_write(&knet->kni_list_lock);
4267
./lib/librte_eal/linuxapp/kni/kni_misc.c:577: up_write(&knet->kni_list_lock);
4268
./lib/librte_eal/linuxapp/kni/kni_misc.c:604: down_write(&knet->kni_list_lock);
4269
./lib/librte_eal/linuxapp/kni/kni_misc.c:622: up_write(&knet->kni_list_lock);
4270
./lib/librte_eal/linuxapp/eal/eal_pci.c:82: if (fwrite(buf, n, 1, f) == 0) {
4271
./lib/librte_eal/linuxapp/eal/eal_pci_uio.c:59: return pread(intr_handle->uio_cfg_fd, buf, len, offset);
4272
./lib/librte_eal/linuxapp/eal/eal_pci_uio.c:66: return pwrite(intr_handle->uio_cfg_fd, buf, len, offset);
4273
./lib/librte_eal/linuxapp/eal/eal_pci_uio.c:75:	ret = pread(dev_fd, &reg, sizeof(reg), PCI_COMMAND);
4274
./lib/librte_eal/linuxapp/eal/eal_pci_uio.c:88:	ret = pwrite(dev_fd, &reg, sizeof(reg), PCI_COMMAND);
4275
./lib/librte_eal/linuxapp/eal/eal_thread.c:84: n = write(m2s, &c, 1);
4276
./lib/librte_eal/linuxapp/eal/eal_thread.c:90: n = read(s2m, &c, 1);
4277
./lib/librte_eal/linuxapp/eal/eal_thread.c:164: n = read(m2s, &c, 1);
4278
./lib/librte_eal/linuxapp/eal/eal_thread.c:175: n = write(s2m, &c, 1);
4279
./lib/librte_eal/linuxapp/eal/eal_memory.c:187: if (read(fd, &page, sizeof(uint64_t)) < 0) {
4280
./lib/librte_eal/linuxapp/eal/eal_memory.c:239: retval = read(fd, &c, 1);
4281
./lib/librte_eal/linuxapp/eal/eal_memory.c:627:find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
4282
./lib/librte_eal/linuxapp/eal/eal_memory.c:949:calc_num_pages_per_socket(uint64_t * memory,
4283
./lib/librte_eal/linuxapp/eal/eal_memory.c:1192: if (find_numasocket(&tmp_hp[hp_offset], hpi) < 0){
4284
./lib/librte_eal/linuxapp/eal/eal_memory.c:1269: nr_hugepages = calc_num_pages_per_socket(memory,
4285
./lib/librte_eal/linuxapp/eal/eal_log.c:57:console_log_write(__attribute__((unused)) void *c, const char *buf, size_t size)
4286
./lib/librte_eal/linuxapp/eal/eal_log.c:67: ret = fwrite(buf, 1, size, stdout);
4287
./lib/librte_eal/linuxapp/eal/eal_log.c:117:early_log_write(__attribute__((unused)) void *c, const char *buf, size_t size)
4288
./lib/librte_eal/linuxapp/eal/eal_log.c:120: ret = fwrite(buf, size, 1, stdout);
4289
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:353: if (pread(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
4290
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:361: if (pwrite(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
4291
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:377: if (pread(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
4292
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:385: if (pwrite(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
4293
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:400: if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
4294
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:414: if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
4295
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:489: if (write(intr_pipe.writefd, "1", 1) < 0)
4296
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:552: if (ret >= 0 && write(intr_pipe.writefd, "1", 1) < 0) {
4297
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:663: int r = read(intr_pipe.readfd, buf.charbuf,
4298
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:709: bytes_read = read(events[n].data.fd, &buf, bytes_read);
4299
./lib/librte_eal/linuxapp/eal/eal_interrupts.c:929: nbytes = read(fd, &buf, bytes_read);
4300
./lib/librte_eal/linuxapp/eal/eal.c:674:eal_check_mem_on_local_socket(void)
4301
./lib/librte_eal/linuxapp/eal/eal.c:819: eal_check_mem_on_local_socket();
4302
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:132: ret = sendmsg(socket, &hdr, 0);
4303
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:156: ret = recvmsg(socket, &hdr, 0);
4304
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:190: ret = sendmsg(socket, &hdr, 0);
4305
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:220: ret = recvmsg(socket, &hdr, 0);
4306
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:243: socket_fd = socket(AF_UNIX, SOCK_SEQPACKET, 0);
4307
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:254: if (connect(socket_fd, (struct sockaddr *) &addr, sockaddr_len) == 0)
4308
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:268:pci_vfio_mp_sync_thread(void __rte_unused * arg)
4309
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:339: socket_fd = socket(AF_UNIX, SOCK_SEQPACKET, 0);
4310
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:352: ret = bind(socket_fd, (struct sockaddr *) &addr, sockaddr_len);
4311
./lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c:359: ret = listen(socket_fd, 50);
4312
./lib/librte_eal/common/malloc_heap.h:45:malloc_get_numa_socket(void)
4313
./lib/librte_eal/common/eal_common_log.c:254: if (fwrite(hist_buf->buf, hist_buf->size, 1, out) == 0) {
4314
./lib/librte_eal/common/eal_common_memzone.c:197: socket = malloc_get_numa_socket();
4315
./lib/librte_eal/common/include/generic/rte_atomic.h:176:rte_atomic16_read(const rte_atomic16_t *v)
4316
./lib/librte_eal/common/include/generic/rte_atomic.h:429:rte_atomic32_read(const rte_atomic32_t *v)
4317
./lib/librte_eal/common/include/generic/rte_atomic.h:697:rte_atomic64_read(rte_atomic64_t *v);
4318
./lib/librte_eal/common/include/generic/rte_atomic.h:701:rte_atomic64_read(rte_atomic64_t *v)
4319
./lib/librte_eal/common/include/arch/ppc_64/rte_cpuflags.h:148: while (read(auxv_fd, &auxv,
4320
./lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h:258:rte_atomic64_read(rte_atomic64_t *v)
4321
./lib/librte_eal/common/include/arch/x86/rte_atomic_32.h:106:rte_atomic64_read(rte_atomic64_t *v)
4322
./lib/librte_eal/common/include/arch/x86/rte_atomic_64.h:73:rte_atomic64_read(rte_atomic64_t *v)
4323
./lib/librte_eal/common/include/arch/arm/rte_cpuflags_32.h:146: while (read(auxv_fd, &auxv,
4324
./lib/librte_eal/common/include/arch/arm/rte_cpuflags_64.h:109: while (read(auxv_fd, &auxv,
4325
./lib/librte_eal/common/include/rte_malloc.h:186:rte_malloc_socket(const char *type, size_t size, unsigned align, int socket);
4326
./lib/librte_eal/common/include/rte_malloc.h:214:rte_zmalloc_socket(const char *type, size_t size, unsigned align, int socket);
4327
./lib/librte_eal/common/include/rte_malloc.h:242:rte_calloc_socket(const char *type, size_t num, size_t size, unsigned align, int socket);
4328
./lib/librte_eal/common/rte_malloc.c:69:rte_malloc_socket(const char *type, size_t size, unsigned align, int socket_arg)
4329
./lib/librte_eal/common/rte_malloc.c:83: socket = malloc_get_numa_socket();
4330
./lib/librte_eal/common/rte_malloc.c:117: return rte_malloc_socket(type, size, align, SOCKET_ID_ANY);
4331
./lib/librte_eal/common/rte_malloc.c:124:rte_zmalloc_socket(const char *type, size_t size, unsigned align, int socket)
4332
./lib/librte_eal/common/rte_malloc.c:126: void *ptr = rte_malloc_socket(type, size, align, socket);
4333
./lib/librte_eal/common/rte_malloc.c:139: return rte_zmalloc_socket(type, size, align, SOCKET_ID_ANY);
4334
./lib/librte_eal/common/rte_malloc.c:146:rte_calloc_socket(const char *type, size_t num, size_t size, unsigned align, int socket)
4335
./lib/librte_eal/common/rte_malloc.c:148: return rte_zmalloc_socket(type, num * size, align, socket);
4336
./lib/librte_sched/rte_sched.h:372:rte_sched_port_pkt_write(struct rte_mbuf *pkt,
4337
./lib/librte_sched/rte_bitmap.h:497:__rte_bitmap_scan_read(struct rte_bitmap *bmp, uint32_t *pos, uint64_t *slab)
4338
./lib/librte_sched/rte_bitmap.h:541: if (__rte_bitmap_scan_read(bmp, pos, slab)) {
4339
./lib/librte_sched/rte_bitmap.h:548: __rte_bitmap_scan_read(bmp, pos, slab);
4340
./lib/librte_sched/rte_sched.c:948:rte_sched_port_pkt_write(struct rte_mbuf *pkt,
4341
./lib/librte_ip_frag/rte_ip_frag_common.c:90: if ((tbl = rte_zmalloc_socket(__func__, sz, RTE_CACHE_LINE_SIZE,
4342
./lib/librte_reorder/rte_reorder.c:172: b = rte_zmalloc_socket("REORDER_BUFFER", bufsize, 0, socket_id);
4343
./lib/librte_lpm/rte_lpm.c:195: lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
4344
./lib/librte_lpm/rte_lpm6.c:196: lpm = (struct rte_lpm6 *)rte_zmalloc_socket(mem_name, (size_t)mem_size,
4345
./lib/librte_lpm/rte_lpm6.c:205: lpm->rules_tbl = (struct rte_lpm6_rule *)rte_zmalloc_socket(NULL,
4346
./lib/librte_power/rte_power_kvm_vm.c:59: return guest_channel_host_connect(FD_PATH, lcore_id);
4347
./lib/librte_power/rte_power_kvm_vm.c:65: guest_channel_host_disconnect(lcore_id);
4348
./lib/librte_power/guest_channel.c:55:guest_channel_host_connect(const char *path, unsigned lcore_id)
4349
./lib/librte_power/guest_channel.c:136: ret = write(global_fds[lcore_id], buffer, buffer_len);
4350
./lib/librte_power/guest_channel.c:151:guest_channel_host_disconnect(unsigned lcore_id)
4351
./lib/librte_power/guest_channel.h:56:int guest_channel_host_connect(const char *path, unsigned lcore_id);
4352
./lib/librte_power/guest_channel.h:66:void guest_channel_host_disconnect(unsigned lcore_id);
4353
./lib/librte_acl/rte_acl.c:254: ctx = rte_zmalloc_socket(name, sz, RTE_CACHE_LINE_SIZE, param->socket_id);
4354
./lib/librte_acl/acl_gen.c:509: mem = rte_zmalloc_socket(ctx->name, total_size, RTE_CACHE_LINE_SIZE,
4355
./lib/librte_table/rte_table_lpm.c:116: lpm = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE,
4356
./lib/librte_table/rte_table_lpm.c:362:rte_table_lpm_stats_read(void *table, struct rte_table_stats *stats, int clear)
4357
./lib/librte_table/rte_table_stub.c:68: stub = rte_zmalloc_socket("TABLE", size, RTE_CACHE_LINE_SIZE,
4358
./lib/librte_table/rte_table_stub.c:99:rte_table_stub_stats_read(void *table, struct rte_table_stats *stats, int clear)
4359
./lib/librte_table/rte_table_hash_lru.c:176: t = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
4360
./lib/librte_table/rte_table_hash_lru.c:1069:rte_table_hash_lru_stats_read(void *table, struct rte_table_stats *stats, int clear)
4361
./lib/librte_table/rte_table_hash_key8.c:142: f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
4362
./lib/librte_table/rte_table_hash_key8.c:354: f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
4363
./lib/librte_table/rte_table_hash_key8.c:1416:rte_table_hash_key8_stats_read(void *table, struct rte_table_stats *stats, int clear)
4364
./lib/librte_table/rte_table_acl.c:130: acl = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE,
4365
./lib/librte_table/rte_table_acl.c:813:rte_table_acl_stats_read(void *table, struct rte_table_stats *stats, int clear)
4366
./lib/librte_table/rte_table_array.c:94: t = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
4367
./lib/librte_table/rte_table_array.c:215:rte_table_array_stats_read(void *table, struct rte_table_stats *stats, int clear)
4368
./lib/librte_table/rte_table_hash_key32.c:146: f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
4369
./lib/librte_table/rte_table_hash_key32.c:359: f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
4370
./lib/librte_table/rte_table_hash_key32.c:1111:rte_table_hash_key32_stats_read(void *table, struct rte_table_stats *stats, int clear)
4371
./lib/librte_table/rte_table_hash_ext.c:204: t = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
4372
./lib/librte_table/rte_table_hash_ext.c:1126:rte_table_hash_ext_stats_read(void *table, struct rte_table_stats *stats, int clear)
4373
./lib/librte_table/rte_table_hash_key16.c:146: f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
4374
./lib/librte_table/rte_table_hash_key16.c:366: f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
4375
./lib/librte_table/rte_table_hash_key16.c:1464:rte_table_hash_key16_stats_read(void *table, struct rte_table_stats *stats, int clear)
4376
./lib/librte_table/rte_table_lpm_ipv6.c:122: lpm = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE,
4377
./lib/librte_table/rte_table_lpm_ipv6.c:376:rte_table_lpm_ipv6_stats_read(void *table, struct rte_table_stats *stats, int clear)
4378
./lib/librte_hash/rte_fbk_hash.c:153: ht = (struct rte_fbk_hash_table *)rte_zmalloc_socket(hash_name, mem_size,
4379
./lib/librte_hash/rte_cuckoo_hash.c:242: h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
4380
./lib/librte_hash/rte_cuckoo_hash.c:253: buckets = rte_zmalloc_socket(NULL,
4381
./lib/librte_hash/rte_cuckoo_hash.c:278: k = rte_zmalloc_socket(NULL, key_tbl_size,
4382
./lib/librte_hash/rte_cuckoo_hash.c:335: h->local_free_slots = rte_zmalloc_socket(NULL,
4383
./lib/librte_mbuf/rte_mbuf.h:752: * rte_mbuf_refcnt_update(), rte_mbuf_refcnt_read(), and
4384
./lib/librte_mbuf/rte_mbuf.h:948:rte_mbuf_refcnt_read(const struct rte_mbuf *m)
4385
./lib/librte_mbuf/rte_mbuf.h:950: return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic));
4386
./lib/librte_mbuf/rte_mbuf.h:985: if (likely(rte_mbuf_refcnt_read(m) == 1)) {
4387
./lib/librte_mbuf/rte_mbuf.h:1009:rte_mbuf_refcnt_read(const struct rte_mbuf *m)
4388
./lib/librte_mbuf/rte_mbuf.h:1065: RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
4389
./lib/librte_mbuf/rte_mbuf.h:1081: RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
4390
./lib/librte_mbuf/rte_mbuf.h:1358: rte_mbuf_refcnt_read(mi) == 1);
4391
./lib/librte_mbuf/rte_mbuf.c:194: uint16_t cnt = rte_mbuf_refcnt_read(m);
4392
./lib/librte_cmdline/cmdline.c:115: ret = write(cl->s_out, &c, 1);
4393
./lib/librte_cmdline/cmdline.c:202: write(cl->s_out, buf, ret);
4394
./lib/librte_cmdline/cmdline.c:274: read_status = read(cl->s_in, &c, 1);
4395
./lib/librte_cmdline/cmdline.c:296: if (read(cl->s_in, &c, 1) <= 0)
4398
./examples/l3fwd/main.c:628: /* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
4399
./examples/l3fwd/main.c:1143: * From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2:
4400
./examples/l3fwd-acl/main.c:1350: /* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
4401
./examples/l3fwd-power/main.c:495: /* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
4402
./examples/netmap_compat/netmap/netmap.h:41: * http://info.iet.unipi.it/~luigi/netmap/
4403
./examples/performance-thread/l3fwd-thread/main.c:748: /* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
4404
./examples/performance-thread/l3fwd-thread/main.c:1298: * From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2:
4405
./examples/performance-thread/common/lthread_pool.h:38: * http://www.1024cores.net
4406
./examples/performance-thread/common/lthread_queue.h:38: * http://www.1024cores.net
4407
./examples/l3fwd-vf/main.c:356: /* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
4408
./app/test/test_lpm_routes.h:45: * Source = http://psp1.iit.cnr.it/~mcsoft/ast/ast.html
4409
./lib/librte_ether/rte_ether.h:84: * See http://standards.ieee.org/regauth/groupmac/tutorial.html
4410
./lib/librte_sched/rte_approx.c:46: * http://people.ksp.sk/~misof/publications/2007approx.pdf
4411
./lib/librte_sched/rte_red.h:156: * http://software.intel.com/en-us/articles/fast-random-number-generator-on-the-intel-pentiumr-4-processor/
4412
./lib/librte_hash/rte_jhash.h:58: * http://burtleburtle.net/bob/hash/
4413
./lib/librte_mbuf/rte_mbuf.h:53: * http://www.kohala.com/start/tcpipiv2.html
4416
./examples/performance-thread/common/lthread.h:36: * https://github.com/halayli/lthread which carrys the following license.
4417
./examples/performance-thread/common/lthread_int.h:36: * https://github.com/halayli/lthread which carrys the following license.
4418
./examples/performance-thread/common/lthread_cond.h:36: * https://github.com/halayli/lthread which carrys the following license.
4419
./examples/performance-thread/common/lthread.c:36: * https://github.com/halayli/lthread which carrys the following license.
4420
./examples/performance-thread/common/lthread_sched.h:36: * https://github.com/halayli/lthread which carrys the following license.
4421
./examples/performance-thread/common/arch/x86/ctx.c:35: * https://github.com/halayli/lthread which carries the following license.
4422
./examples/performance-thread/common/lthread_sched.c:36: * https://github.com/halayli/lthread which carrys the following license.
4423
./examples/performance-thread/common/lthread_cond.c:36: * https://github.com/halayli/lthread which carrys the following license.
4424
./examples/performance-thread/common/lthread_api.h:36: * https://github.com/halayli/lthread which carrys the following license.
4425
./drivers/net/bonding/rte_eth_bond.h:86: * https://www.kernel.org/doc/Documentation/networking/bonding.txt.
4428
./drivers/net/ixgbe/ixgbe_ethdev.c:197:static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
4429
./drivers/net/ixgbe/ixgbe_ethdev.c:3638:ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
4430
./drivers/net/i40e/base/i40e_dcb.c:568: * @bridgetype: bridge type for the query (remote)
4431
./drivers/net/i40e/i40e_ethdev_vf.c:145:static int i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
4432
./drivers/net/i40e/i40e_ethdev_vf.c:2165:i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
4433
./drivers/net/i40e/i40e_ethdev.c:324:static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
4434
./drivers/net/i40e/i40e_ethdev.c:2852:i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
4435
./drivers/net/nfp/nfp_net.c:2133:nfp_net_reta_query(struct rte_eth_dev *dev,
4436
./drivers/net/mlx5/mlx5.h:190:int mlx5_dev_rss_reta_query(struct rte_eth_dev *,
4437
./drivers/net/mlx5/mlx5_rss.c:263:priv_dev_rss_reta_query(struct priv *priv,
4438
./drivers/net/mlx5/mlx5_rss.c:338:mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
4439
./drivers/net/mlx5/mlx5_rss.c:346: ret = priv_dev_rss_reta_query(priv, reta_conf, reta_size);
4440
./drivers/net/fm10k/fm10k_ethdev.c:1947:fm10k_reta_query(struct rte_eth_dev *dev,
4441
./drivers/net/null/rte_eth_null.c:411:eth_rss_reta_query(struct rte_eth_dev *dev,
4442
./drivers/net/bonding/rte_eth_bond_pmd.c:2078:bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
4443
./drivers/net/bnx2x/ecore_sp.c:216: rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags);
4444
./drivers/net/bnx2x/ecore_sp.h:961: * @param cmd command to execute (ECORE_MCAST_CMD_X, see above)
4445
./drivers/net/e1000/igb_ethdev.c:174:static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
4446
./drivers/net/e1000/igb_ethdev.c:2968:eth_igb_rss_reta_query(struct rte_eth_dev *dev,
4447
./app/test/test_link_bonding_rssconf.c:297: TEST_ASSERT_SUCCESS(rte_eth_dev_rss_reta_query(test_params.bond_port_id,
4448
./app/test/test_link_bonding_rssconf.c:313: TEST_ASSERT_SUCCESS(rte_eth_dev_rss_reta_query(port->port_id,
4449
./app/test-pmd/config.c:935: ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
4450
./lib/librte_ether/rte_ethdev.c:1914:rte_eth_dev_rss_reta_query(uint8_t port_id,
4451
./lib/librte_ether/rte_ethdev.h:2985:int rte_eth_dev_rss_reta_query(uint8_t port,
4454
./examples/vm_power_manager/channel_manager.h:56:#define CHANNEL_MGR_SOCKET_PATH "/tmp/powermonitor/"
4455
./examples/l2fwd-ivshmem/host/host.c:103:#define QEMU_CMD_FMT "/tmp/ivshmem_qemu_cmdline_%s"
4456
./app/test/test_eal_fs.c:48: char file_template[] = "/tmp/eal_test_XXXXXX";
4458
== priv_cmds (sudo, gksu, pkexec) ==
4464
== comments (XXX, FIXME, TODO) ==
4465
./examples/quota_watermark/qw/main.c:301: /* TODO: Check if nb_dq_pkts == nb_tx_pkts? */
4466
./examples/l3fwd-acl/main.c:791: * XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX (where X - is a hexedecimal digit).
4467
./examples/vhost_xen/xenstore_parse.c:190: * FIXME, 0 is reserved by system, use it as terminator.
4468
./examples/performance-thread/common/lthread_diag_api.h:74: * the LT_DIAG_LTHREAD_XXX, LT_DIAG_MUTEX_XXX or LT_DIAG_COND_XXX events
4469
./examples/qos_sched/app_thread.c:52: * Destination IP 0.0.XXX.0 defines traffic class
4470
./examples/qos_sched/app_thread.c:53: * Destination IP host (0.0.0.XXX) defines queue
4471
./drivers/net/ixgbe/ixgbe_rxtx.c:1600: * TODO:
4472
./drivers/net/ixgbe/ixgbe_rxtx.c:3714: /* FIXME if support DCB/RSS together with VMDq & SRIOV */
4473
./drivers/net/ixgbe/ixgbe_rxtx.c:3761: * FIXME if support DCB together with VMDq & SRIOV
4474
./drivers/net/ixgbe/ixgbe_rxtx.c:4075: * TODO: Consider setting the Receive Descriptor Minimum
4475
./drivers/net/ixgbe/ixgbe_bypass_api.h:101: // TODO:
4476
./drivers/net/mlx4/mlx4.h:144: * XXX somewhat undefined behavior, but works.
4477
./drivers/net/mlx4/mlx4.c:4180: /* XXX race condition if mlx4_rx_burst() is still running. */
4478
./drivers/net/mlx4/mlx4.c:4194: /* XXX race condition if mlx4_tx_burst() is still running. */
4479
./drivers/net/mlx4/mlx4.c:4238: /* FIXME: we should ask the device for these values. */
4480
./drivers/net/mlx4/mlx4.c:4332: /* FIXME: retrieve and add hardware counters. */
4481
./drivers/net/mlx4/mlx4.c:4369: /* FIXME: reset hardware counters. */
4482
./drivers/net/mlx4/mlx4.c:5625: * XXX if something went wrong in the loop above, there is a resource
4483
./drivers/net/i40e/base/i40e_dcb.h:181: * TODO: The below structures related LLDP/DCBX variables
4484
./drivers/net/enic/base/vnic_wq.h:143: |XXXXXXXXXXXXXXXXXXXX| 32-byte cacheline a
4485
./drivers/net/enic/base/vnic_wq.h:145: |XXXXXXXXXX | cacheline b
4486
./drivers/net/enic/base/vnic_wq.h:152: | XXXXXXXXXXXXX|
4487
./drivers/net/enic/base/vnic_wq.h:162: |eeeeeeeXXXXXXXXXXXXX| "e" is empty space, which we add to len
4488
./drivers/net/enic/base/vnic_devcmd.h:254: * (u32)a1=CMD_PERBI_XXX */
4489
./drivers/net/nfp/nfp_net_ctrl.h:90: * TODO:
4490
./drivers/net/nfp/nfp_net_ctrl.h:146: * TODO:
4491
./drivers/net/mlx5/mlx5_stats.c:106: /* FIXME: retrieve and add hardware counters. */
4492
./drivers/net/mlx5/mlx5_stats.c:141: /* FIXME: reset hardware counters. */
4493
./drivers/net/mlx5/mlx5_ethdev.c:490: /* FIXME: we should ask the device for these values. */
4494
./drivers/net/mlx5/mlx5_ethdev.c:520: /* FIXME: RETA update/query API expects the callee to know the size of
4495
./drivers/net/mlx5/mlx5.c:99: /* XXX race condition if mlx5_rx_burst() is still running. */
4496
./drivers/net/mlx5/mlx5.c:113: /* XXX race condition if mlx5_tx_burst() is still running. */
4497
./drivers/net/mlx5/mlx5.c:484: * XXX if something went wrong in the loop above, there is a resource
4498
./drivers/net/fm10k/base/fm10k_osdep.h:44:/* TODO: this does not look like it should be used... */
4499
./drivers/net/xenvirt/rte_eth_xenvirt.c:168: /* TODO drop tx_pkts if it contains multiple segments */
4500
./drivers/net/xenvirt/rte_eth_xenvirt.c:740:/*TODO: Support multiple process model */
4501
./drivers/net/xenvirt/virtqueue.h:235: * TODO: save one desc here?
4502
./drivers/net/bnx2x/bnx2x.h:1013: unsigned long link_report_flags; /* BNX2X_LINK_REPORT_XXX flags */
4503
./drivers/net/bnx2x/ecore_sp.c:751: * @opcode: CLASSIFY_RULE_OPCODE_XXX
4504
./drivers/net/bnx2x/ecore_sp.c:779: * @type: ECORE_FILTER_XXX_PENDING
4505
./drivers/net/bnx2x/ecore_sp.c:2070: /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
4506
./drivers/net/bnx2x/ecore_sp.c:4943: /* Handle the beginning of COMMON_XXX pases separately... */
4507
./drivers/net/bnx2x/ecore_hsi.h:293: /* Use the PIN_CFG_XXX defines on top */
4508
./drivers/net/bnx2x/ecore_hsi.h:430: module. Use the PIN_CFG_XXX defines on top */
4509
./drivers/net/bnx2x/ecore_hsi.h:440: present or not. Use the PIN_CFG_XXX defines on top */
4510
./drivers/net/bnx2x/ecore_hsi.h:445: module. Use the PIN_CFG_XXX defines on top */
4511
./drivers/net/bnx2x/ecore_hsi.h:451: * PIN_CFG_XXX defines on top
4512
./drivers/net/bnx2x/ecore_hsi.h:457: /* The output pin which reset the PHY. Use the PIN_CFG_XXX defines on
4513
./drivers/net/bnx2x/ecore_hsi.h:463: * The output pin which powers down the PHY. Use the PIN_CFG_XXX
4514
./drivers/net/bnx2x/ecore_hsi.h:477: * Use the PIN_CFG_XXX defines on top
4515
./drivers/net/bnx2x/ecore_hsi.h:2499: * SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed).
4516
./drivers/net/e1000/igb_rxtx.c:1994: * FIXME if support RSS together with VMDq & SRIOV
4517
./drivers/net/e1000/base/e1000_i210.c:93: s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
4518
./drivers/net/e1000/igb_regs.h:177:/* FIXME: reading igb_regs_interrupt results side-effect which doesn't
4519
./drivers/net/e1000/em_rxtx.c:1628: * XXX TEMPORARY WORKAROUND: on some systems with 82573
4520
./drivers/net/e1000/igb_ethdev.c:3810: /* XXX: not bigger than max_rx_pktlen */
4521
./drivers/net/vmxnet3/vmxnet3_rxtx.c:401: /* TODO: Add transmit checksum offload here */
4522
./drivers/net/cxgbe/cxgbe_main.c:1021: /* TODO: deadman watchdog ?? */
4523
./drivers/net/cxgbe/sge.c:123: * XXX We shouldn't depend on being able to use these indices.
4524
./drivers/net/cxgbe/sge.c:124: * XXX Especially when some other Master PF has initialized the
4525
./drivers/net/cxgbe/sge.c:125: * XXX adapter or we use the Firmware Configuration File. We
4526
./drivers/net/cxgbe/sge.c:126: * XXX should really search through the Host Buffer Size register
4527
./drivers/net/cxgbe/sge.c:127: * XXX array for the appropriately sized buffer indices.
4528
./drivers/net/cxgbe/sge.c:1865: * TODO: For flow-control, queue may be stopped waiting to reclaim
4529
./drivers/net/cxgbe/sge.c:2109: * XXX Note that we should really read through the Host Buffer Size
4530
./drivers/net/cxgbe/sge.c:2110: * XXX register array and find the indices of the Buffer Sizes which
4531
./drivers/net/cxgbe/sge.c:2111: * XXX meet our needs!
4532
./drivers/net/cxgbe/cxgbe_ethdev.c:155: /* XXX: For now we support one MAC/port */
4533
./drivers/net/cxgbe/cxgbe_ethdev.c:157: device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */
4534
./drivers/net/cxgbe/cxgbe_ethdev.c:199: /* TODO: address filters ?? */
4535
./drivers/net/cxgbe/cxgbe_ethdev.c:210: /* TODO: address filters ?? */
4536
./drivers/net/virtio/virtio_rxtx_simple.c:298:/* TODO: vq->tx_free_cnt could mean num of free slots so we could avoid shift */
4537
./app/test-acl/main.c:404: * XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX (where X - is a hexedecimal digit).
4538
./app/test/test_eal_fs.c:48: char file_template[] = "/tmp/eal_test_XXXXXX";
4539
./app/test/test_malloc.c:933: /* TODO: uncomment following line once type limits are valid */
4540
./app/test-pmd/csumonly.c:407: /* XXX implement CRC32c, example available in
4541
./app/test-pmd/parameters.c:184: printf(" --txqflags=0xXXXXXXXX: hexadecimal bitmask of TX queue flags "
4542
./lib/librte_vhost/virtio-net.c:738: * FIXME: VHOST_SET_VRING_CALL is the first per-vring message
4543
./lib/librte_vhost/vhost_user/virtio-net-user.c:307: * TODO: cleanup the vring, it isn't usable since here.
4544
./lib/librte_vhost/vhost_cuse/vhost-net-cdev.c:233: /*TODO fix race condition.*/
4545
./lib/librte_vhost/vhost_cuse/eventfd_copy.c:87: /* TODO: check this earlier rather than fail until VM boots! */
4546
./lib/librte_vhost/vhost_rxtx.c:109: /* TODO: Allow to disable cmpset if no concurrency in application. */
4547
./lib/librte_eal/bsdapp/eal/eal_pci.c:276: /* TODO: get max_vfs */
4548
./lib/librte_eal/bsdapp/eal/eal_memory.c:56: /* XXX not implemented. This function is only used by
4549
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c:1522: /* XXX does this need to be initialized even w/o DCB? */
4550
./lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.c:288: /* FIXME:
4551
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c:789: * XXX: This can be pushed down into the ethtool_* handlers that
4552
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.c:87: s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
4553
./lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c:1040: s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
4554
./lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.c:288: /* FIXME:
4555
./lib/librte_eal/common/eal_common_options.c:207: /* XXX this ignores failures from readdir() itself */
4556
./lib/librte_eal/common/include/rte_pci.h:295: * The input string to be parsed. Should have the format XXXX:XX:XX.X
4557
./lib/librte_eal/common/rte_malloc.c:243: * TODO: Set limit to memory that can be allocated to memory type
4558
./lib/librte_cmdline/cmdline_parse_etheraddr.c:84:/* the format can be either XX:XX:XX:XX:XX:XX or XXXX:XXXX:XXXX */
4559
./lib/librte_cmdline/cmdline_parse_etheraddr.c:123: /* Support the format XXXX:XXXX:XXXX */
4560
./lib/librte_cmdline/cmdline_cirbuf.c:237:/* XXX we could do a better algorithm here... */
4561
./lib/librte_cmdline/cmdline_cirbuf.c:258:/* XXX we could do a better algorithm here... */
4562
./lib/librte_cmdline/cmdline_rdline.h:218: * XXX error case when the buffer is full ?
4563
./lib/librte_cmdline/cmdline_vt100.c:161: else if (c >= 060 && c <= 0177) { /* XXX 0177 ? */
4565
== unsafe input mechanisms ==