~ubuntu-branches/debian/wheezy/linux-2.6/wheezy

« back to all changes in this revision

Viewing changes to net/batman-adv/originator.c

  • Committer: Bazaar Package Importer
  • Author(s): Ben Hutchings, Ben Hutchings, Aurelien Jarno
  • Date: 2011-06-07 12:14:05 UTC
  • mfrom: (43.1.9 sid)
  • Revision ID: james.westby@ubuntu.com-20110607121405-i3h1rd7nrnd2b73h
Tags: 2.6.39-2
[ Ben Hutchings ]
* [x86] Enable BACKLIGHT_APPLE, replacing BACKLIGHT_MBP_NVIDIA
  (Closes: #627492)
* cgroups: Disable memory resource controller by default. Allow it
  to be enabled using kernel parameter 'cgroup_enable=memory'.
* rt2800usb: Enable support for more USB devices including
  Linksys WUSB600N (Closes: #596626) (this change was accidentally
  omitted from 2.6.39-1)
* [x86] Remove Celeron from list of processors supporting PAE. Most
  'Celeron M' models do not.
* Update debconf template translations:
  - Swedish (Martin Bagge) (Closes: #628932)
  - French (David Prévot) (Closes: #628191)
* aufs: Update for 2.6.39 (Closes: #627837)
* Add stable 2.6.39.1, including:
  - ext4: don't set PageUptodate in ext4_end_bio()
  - pata_cmd64x: fix boot crash on parisc (Closes: #622997, #622745)
  - ext3: Fix fs corruption when make_indexed_dir() fails
  - netfilter: nf_ct_sip: validate Content-Length in TCP SIP messages
  - sctp: fix race between sctp_bind_addr_free() and
    sctp_bind_addr_conflict()
  - sctp: fix memory leak of the ASCONF queue when free asoc
  - md/bitmap: fix saving of events_cleared and other state
  - cdc_acm: Fix oops when Droids MuIn LCD is connected
  - cx88: Fix conversion from BKL to fine-grained locks (Closes: #619827)
  - keys: Set cred->user_ns in key_replace_session_keyring (CVE-2011-2184)
  - tmpfs: fix race between truncate and writepage
  - nfs41: Correct offset for LAYOUTCOMMIT
  - xen/mmu: fix a race window causing leave_mm BUG()
  - ext4: fix possible use-after-free in ext4_remove_li_request()
  For the complete list of changes, see:
   http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.39.1
* Bump ABI to 2
* netfilter: Enable IP_SET, IP_SET_BITMAP_IP, IP_SET_BITMAP_IPMAC,
  IP_SET_BITMAP_PORT, IP_SET_HASH_IP, IP_SET_HASH_IPPORT,
  IP_SET_HASH_IPPORTIP, IP_SET_HASH_IPPORTNET, IP_SET_HASH_NET,
  IP_SET_HASH_NETPORT, IP_SET_LIST_SET, NETFILTER_XT_SET as modules
  (Closes: #629401)

[ Aurelien Jarno ]
* [mipsel/loongson-2f] Disable SCSI_LPFC to work around GCC ICE.

Show diffs side-by-side

added added

removed removed

Lines of Context:
1
1
/*
2
 
 * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
 
2
 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
3
3
 *
4
4
 * Marek Lindner, Simon Wunderlich
5
5
 *
44
44
        if (bat_priv->orig_hash)
45
45
                return 1;
46
46
 
47
 
        spin_lock_bh(&bat_priv->orig_hash_lock);
48
47
        bat_priv->orig_hash = hash_new(1024);
49
48
 
50
49
        if (!bat_priv->orig_hash)
51
50
                goto err;
52
51
 
53
 
        spin_unlock_bh(&bat_priv->orig_hash_lock);
54
52
        start_purge_timer(bat_priv);
55
53
        return 1;
56
54
 
57
55
err:
58
 
        spin_unlock_bh(&bat_priv->orig_hash_lock);
59
56
        return 0;
60
57
}
61
58
 
62
 
struct neigh_node *
63
 
create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
64
 
                uint8_t *neigh, struct batman_if *if_incoming)
 
59
static void neigh_node_free_rcu(struct rcu_head *rcu)
 
60
{
 
61
        struct neigh_node *neigh_node;
 
62
 
 
63
        neigh_node = container_of(rcu, struct neigh_node, rcu);
 
64
        kfree(neigh_node);
 
65
}
 
66
 
 
67
void neigh_node_free_ref(struct neigh_node *neigh_node)
 
68
{
 
69
        if (atomic_dec_and_test(&neigh_node->refcount))
 
70
                call_rcu(&neigh_node->rcu, neigh_node_free_rcu);
 
71
}
 
72
 
 
73
struct neigh_node *create_neighbor(struct orig_node *orig_node,
 
74
                                   struct orig_node *orig_neigh_node,
 
75
                                   uint8_t *neigh,
 
76
                                   struct hard_iface *if_incoming)
65
77
{
66
78
        struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
67
79
        struct neigh_node *neigh_node;
73
85
        if (!neigh_node)
74
86
                return NULL;
75
87
 
76
 
        INIT_LIST_HEAD(&neigh_node->list);
 
88
        INIT_HLIST_NODE(&neigh_node->list);
 
89
        INIT_LIST_HEAD(&neigh_node->bonding_list);
77
90
 
78
91
        memcpy(neigh_node->addr, neigh, ETH_ALEN);
79
92
        neigh_node->orig_node = orig_neigh_node;
80
93
        neigh_node->if_incoming = if_incoming;
81
94
 
82
 
        list_add_tail(&neigh_node->list, &orig_node->neigh_list);
 
95
        /* extra reference for return */
 
96
        atomic_set(&neigh_node->refcount, 2);
 
97
 
 
98
        spin_lock_bh(&orig_node->neigh_list_lock);
 
99
        hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
 
100
        spin_unlock_bh(&orig_node->neigh_list_lock);
83
101
        return neigh_node;
84
102
}
85
103
 
86
 
static void free_orig_node(void *data, void *arg)
 
104
static void orig_node_free_rcu(struct rcu_head *rcu)
87
105
{
88
 
        struct list_head *list_pos, *list_pos_tmp;
89
 
        struct neigh_node *neigh_node;
90
 
        struct orig_node *orig_node = (struct orig_node *)data;
91
 
        struct bat_priv *bat_priv = (struct bat_priv *)arg;
 
106
        struct hlist_node *node, *node_tmp;
 
107
        struct neigh_node *neigh_node, *tmp_neigh_node;
 
108
        struct orig_node *orig_node;
 
109
 
 
110
        orig_node = container_of(rcu, struct orig_node, rcu);
 
111
 
 
112
        spin_lock_bh(&orig_node->neigh_list_lock);
 
113
 
 
114
        /* for all bonding members ... */
 
115
        list_for_each_entry_safe(neigh_node, tmp_neigh_node,
 
116
                                 &orig_node->bond_list, bonding_list) {
 
117
                list_del_rcu(&neigh_node->bonding_list);
 
118
                neigh_node_free_ref(neigh_node);
 
119
        }
92
120
 
93
121
        /* for all neighbors towards this originator ... */
94
 
        list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) {
95
 
                neigh_node = list_entry(list_pos, struct neigh_node, list);
96
 
 
97
 
                list_del(list_pos);
98
 
                kfree(neigh_node);
 
122
        hlist_for_each_entry_safe(neigh_node, node, node_tmp,
 
123
                                  &orig_node->neigh_list, list) {
 
124
                hlist_del_rcu(&neigh_node->list);
 
125
                neigh_node_free_ref(neigh_node);
99
126
        }
100
127
 
 
128
        spin_unlock_bh(&orig_node->neigh_list_lock);
 
129
 
101
130
        frag_list_free(&orig_node->frag_list);
102
 
        hna_global_del_orig(bat_priv, orig_node, "originator timed out");
 
131
        hna_global_del_orig(orig_node->bat_priv, orig_node,
 
132
                            "originator timed out");
103
133
 
104
134
        kfree(orig_node->bcast_own);
105
135
        kfree(orig_node->bcast_own_sum);
106
136
        kfree(orig_node);
107
137
}
108
138
 
 
139
void orig_node_free_ref(struct orig_node *orig_node)
 
140
{
 
141
        if (atomic_dec_and_test(&orig_node->refcount))
 
142
                call_rcu(&orig_node->rcu, orig_node_free_rcu);
 
143
}
 
144
 
109
145
void originator_free(struct bat_priv *bat_priv)
110
146
{
111
 
        if (!bat_priv->orig_hash)
 
147
        struct hashtable_t *hash = bat_priv->orig_hash;
 
148
        struct hlist_node *node, *node_tmp;
 
149
        struct hlist_head *head;
 
150
        spinlock_t *list_lock; /* spinlock to protect write access */
 
151
        struct orig_node *orig_node;
 
152
        int i;
 
153
 
 
154
        if (!hash)
112
155
                return;
113
156
 
114
157
        cancel_delayed_work_sync(&bat_priv->orig_work);
115
158
 
116
 
        spin_lock_bh(&bat_priv->orig_hash_lock);
117
 
        hash_delete(bat_priv->orig_hash, free_orig_node, bat_priv);
118
159
        bat_priv->orig_hash = NULL;
119
 
        spin_unlock_bh(&bat_priv->orig_hash_lock);
 
160
 
 
161
        for (i = 0; i < hash->size; i++) {
 
162
                head = &hash->table[i];
 
163
                list_lock = &hash->list_locks[i];
 
164
 
 
165
                spin_lock_bh(list_lock);
 
166
                hlist_for_each_entry_safe(orig_node, node, node_tmp,
 
167
                                          head, hash_entry) {
 
168
 
 
169
                        hlist_del_rcu(node);
 
170
                        orig_node_free_ref(orig_node);
 
171
                }
 
172
                spin_unlock_bh(list_lock);
 
173
        }
 
174
 
 
175
        hash_destroy(hash);
120
176
}
121
177
 
122
178
/* this function finds or creates an originator entry for the given
127
183
        int size;
128
184
        int hash_added;
129
185
 
130
 
        orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
131
 
                                                   compare_orig, choose_orig,
132
 
                                                   addr));
133
 
 
 
186
        orig_node = orig_hash_find(bat_priv, addr);
134
187
        if (orig_node)
135
188
                return orig_node;
136
189
 
141
194
        if (!orig_node)
142
195
                return NULL;
143
196
 
144
 
        INIT_LIST_HEAD(&orig_node->neigh_list);
145
 
 
 
197
        INIT_HLIST_HEAD(&orig_node->neigh_list);
 
198
        INIT_LIST_HEAD(&orig_node->bond_list);
 
199
        spin_lock_init(&orig_node->ogm_cnt_lock);
 
200
        spin_lock_init(&orig_node->bcast_seqno_lock);
 
201
        spin_lock_init(&orig_node->neigh_list_lock);
 
202
 
 
203
        /* extra reference for return */
 
204
        atomic_set(&orig_node->refcount, 2);
 
205
 
 
206
        orig_node->bat_priv = bat_priv;
146
207
        memcpy(orig_node->orig, addr, ETH_ALEN);
147
208
        orig_node->router = NULL;
148
209
        orig_node->hna_buff = NULL;
151
212
        orig_node->batman_seqno_reset = jiffies - 1
152
213
                                        - msecs_to_jiffies(RESET_PROTECTION_MS);
153
214
 
 
215
        atomic_set(&orig_node->bond_candidates, 0);
 
216
 
154
217
        size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS;
155
218
 
156
219
        orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
166
229
        if (!orig_node->bcast_own_sum)
167
230
                goto free_bcast_own;
168
231
 
169
 
        hash_added = hash_add(bat_priv->orig_hash, compare_orig, choose_orig,
170
 
                              orig_node);
 
232
        hash_added = hash_add(bat_priv->orig_hash, compare_orig,
 
233
                              choose_orig, orig_node, &orig_node->hash_entry);
171
234
        if (hash_added < 0)
172
235
                goto free_bcast_own_sum;
173
236
 
185
248
                                 struct orig_node *orig_node,
186
249
                                 struct neigh_node **best_neigh_node)
187
250
{
188
 
        struct list_head *list_pos, *list_pos_tmp;
 
251
        struct hlist_node *node, *node_tmp;
189
252
        struct neigh_node *neigh_node;
190
253
        bool neigh_purged = false;
191
254
 
192
255
        *best_neigh_node = NULL;
193
256
 
 
257
        spin_lock_bh(&orig_node->neigh_list_lock);
 
258
 
194
259
        /* for all neighbors towards this originator ... */
195
 
        list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) {
196
 
                neigh_node = list_entry(list_pos, struct neigh_node, list);
 
260
        hlist_for_each_entry_safe(neigh_node, node, node_tmp,
 
261
                                  &orig_node->neigh_list, list) {
197
262
 
198
263
                if ((time_after(jiffies,
199
264
                        neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
200
265
                    (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
 
266
                    (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
201
267
                    (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {
202
268
 
203
 
                        if (neigh_node->if_incoming->if_status ==
204
 
                                                        IF_TO_BE_REMOVED)
 
269
                        if ((neigh_node->if_incoming->if_status ==
 
270
                                                                IF_INACTIVE) ||
 
271
                            (neigh_node->if_incoming->if_status ==
 
272
                                                        IF_NOT_IN_USE) ||
 
273
                            (neigh_node->if_incoming->if_status ==
 
274
                                                        IF_TO_BE_REMOVED))
205
275
                                bat_dbg(DBG_BATMAN, bat_priv,
206
276
                                        "neighbor purge: originator %pM, "
207
277
                                        "neighbor: %pM, iface: %s\n",
215
285
                                        (neigh_node->last_valid / HZ));
216
286
 
217
287
                        neigh_purged = true;
218
 
                        list_del(list_pos);
219
 
                        kfree(neigh_node);
 
288
 
 
289
                        hlist_del_rcu(&neigh_node->list);
 
290
                        bonding_candidate_del(orig_node, neigh_node);
 
291
                        neigh_node_free_ref(neigh_node);
220
292
                } else {
221
293
                        if ((!*best_neigh_node) ||
222
294
                            (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
223
295
                                *best_neigh_node = neigh_node;
224
296
                }
225
297
        }
 
298
 
 
299
        spin_unlock_bh(&orig_node->neigh_list_lock);
226
300
        return neigh_purged;
227
301
}
228
302
 
245
319
                                      best_neigh_node,
246
320
                                      orig_node->hna_buff,
247
321
                                      orig_node->hna_buff_len);
248
 
                        /* update bonding candidates, we could have lost
249
 
                         * some candidates. */
250
 
                        update_bonding_candidates(bat_priv, orig_node);
251
322
                }
252
323
        }
253
324
 
257
328
static void _purge_orig(struct bat_priv *bat_priv)
258
329
{
259
330
        struct hashtable_t *hash = bat_priv->orig_hash;
260
 
        struct hlist_node *walk, *safe;
 
331
        struct hlist_node *node, *node_tmp;
261
332
        struct hlist_head *head;
262
 
        struct element_t *bucket;
 
333
        spinlock_t *list_lock; /* spinlock to protect write access */
263
334
        struct orig_node *orig_node;
264
335
        int i;
265
336
 
266
337
        if (!hash)
267
338
                return;
268
339
 
269
 
        spin_lock_bh(&bat_priv->orig_hash_lock);
270
 
 
271
340
        /* for all origins... */
272
341
        for (i = 0; i < hash->size; i++) {
273
342
                head = &hash->table[i];
274
 
 
275
 
                hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
276
 
                        orig_node = bucket->data;
277
 
 
 
343
                list_lock = &hash->list_locks[i];
 
344
 
 
345
                spin_lock_bh(list_lock);
 
346
                hlist_for_each_entry_safe(orig_node, node, node_tmp,
 
347
                                          head, hash_entry) {
278
348
                        if (purge_orig_node(bat_priv, orig_node)) {
279
349
                                if (orig_node->gw_flags)
280
350
                                        gw_node_delete(bat_priv, orig_node);
281
 
                                hlist_del(walk);
282
 
                                kfree(bucket);
283
 
                                free_orig_node(orig_node, bat_priv);
 
351
                                hlist_del_rcu(node);
 
352
                                orig_node_free_ref(orig_node);
 
353
                                continue;
284
354
                        }
285
355
 
286
356
                        if (time_after(jiffies, orig_node->last_frag_packet +
287
357
                                                msecs_to_jiffies(FRAG_TIMEOUT)))
288
358
                                frag_list_free(&orig_node->frag_list);
289
359
                }
 
360
                spin_unlock_bh(list_lock);
290
361
        }
291
362
 
292
 
        spin_unlock_bh(&bat_priv->orig_hash_lock);
293
 
 
294
363
        gw_node_purge(bat_priv);
295
364
        gw_election(bat_priv);
296
365
 
318
387
        struct net_device *net_dev = (struct net_device *)seq->private;
319
388
        struct bat_priv *bat_priv = netdev_priv(net_dev);
320
389
        struct hashtable_t *hash = bat_priv->orig_hash;
321
 
        struct hlist_node *walk;
 
390
        struct hlist_node *node, *node_tmp;
322
391
        struct hlist_head *head;
323
 
        struct element_t *bucket;
324
392
        struct orig_node *orig_node;
325
393
        struct neigh_node *neigh_node;
326
394
        int batman_count = 0;
348
416
                   "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
349
417
                   "outgoingIF", "Potential nexthops");
350
418
 
351
 
        spin_lock_bh(&bat_priv->orig_hash_lock);
352
 
 
353
419
        for (i = 0; i < hash->size; i++) {
354
420
                head = &hash->table[i];
355
421
 
356
 
                hlist_for_each_entry(bucket, walk, head, hlist) {
357
 
                        orig_node = bucket->data;
358
 
 
 
422
                rcu_read_lock();
 
423
                hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
359
424
                        if (!orig_node->router)
360
425
                                continue;
361
426
 
374
439
                                   neigh_node->addr,
375
440
                                   neigh_node->if_incoming->net_dev->name);
376
441
 
377
 
                        list_for_each_entry(neigh_node, &orig_node->neigh_list,
378
 
                                            list) {
 
442
                        hlist_for_each_entry_rcu(neigh_node, node_tmp,
 
443
                                                 &orig_node->neigh_list, list) {
379
444
                                seq_printf(seq, " %pM (%3i)", neigh_node->addr,
380
445
                                                neigh_node->tq_avg);
381
446
                        }
383
448
                        seq_printf(seq, "\n");
384
449
                        batman_count++;
385
450
                }
 
451
                rcu_read_unlock();
386
452
        }
387
453
 
388
 
        spin_unlock_bh(&bat_priv->orig_hash_lock);
389
 
 
390
454
        if ((batman_count == 0))
391
455
                seq_printf(seq, "No batman nodes in range ...\n");
392
456
 
423
487
        return 0;
424
488
}
425
489
 
426
 
int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
 
490
int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num)
427
491
{
428
 
        struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
 
492
        struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
429
493
        struct hashtable_t *hash = bat_priv->orig_hash;
430
 
        struct hlist_node *walk;
 
494
        struct hlist_node *node;
431
495
        struct hlist_head *head;
432
 
        struct element_t *bucket;
433
496
        struct orig_node *orig_node;
434
 
        int i;
 
497
        int i, ret;
435
498
 
436
499
        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
437
500
         * if_num */
438
 
        spin_lock_bh(&bat_priv->orig_hash_lock);
439
 
 
440
501
        for (i = 0; i < hash->size; i++) {
441
502
                head = &hash->table[i];
442
503
 
443
 
                hlist_for_each_entry(bucket, walk, head, hlist) {
444
 
                        orig_node = bucket->data;
 
504
                rcu_read_lock();
 
505
                hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
 
506
                        spin_lock_bh(&orig_node->ogm_cnt_lock);
 
507
                        ret = orig_node_add_if(orig_node, max_if_num);
 
508
                        spin_unlock_bh(&orig_node->ogm_cnt_lock);
445
509
 
446
 
                        if (orig_node_add_if(orig_node, max_if_num) == -1)
 
510
                        if (ret == -1)
447
511
                                goto err;
448
512
                }
 
513
                rcu_read_unlock();
449
514
        }
450
515
 
451
 
        spin_unlock_bh(&bat_priv->orig_hash_lock);
452
516
        return 0;
453
517
 
454
518
err:
455
 
        spin_unlock_bh(&bat_priv->orig_hash_lock);
 
519
        rcu_read_unlock();
456
520
        return -ENOMEM;
457
521
}
458
522
 
508
572
        return 0;
509
573
}
510
574
 
511
 
int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
 
575
int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
512
576
{
513
 
        struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
 
577
        struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
514
578
        struct hashtable_t *hash = bat_priv->orig_hash;
515
 
        struct hlist_node *walk;
 
579
        struct hlist_node *node;
516
580
        struct hlist_head *head;
517
 
        struct element_t *bucket;
518
 
        struct batman_if *batman_if_tmp;
 
581
        struct hard_iface *hard_iface_tmp;
519
582
        struct orig_node *orig_node;
520
583
        int i, ret;
521
584
 
522
585
        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
523
586
         * if_num */
524
 
        spin_lock_bh(&bat_priv->orig_hash_lock);
525
 
 
526
587
        for (i = 0; i < hash->size; i++) {
527
588
                head = &hash->table[i];
528
589
 
529
 
                hlist_for_each_entry(bucket, walk, head, hlist) {
530
 
                        orig_node = bucket->data;
531
 
 
 
590
                rcu_read_lock();
 
591
                hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
 
592
                        spin_lock_bh(&orig_node->ogm_cnt_lock);
532
593
                        ret = orig_node_del_if(orig_node, max_if_num,
533
 
                                        batman_if->if_num);
 
594
                                        hard_iface->if_num);
 
595
                        spin_unlock_bh(&orig_node->ogm_cnt_lock);
534
596
 
535
597
                        if (ret == -1)
536
598
                                goto err;
537
599
                }
 
600
                rcu_read_unlock();
538
601
        }
539
602
 
540
603
        /* renumber remaining batman interfaces _inside_ of orig_hash_lock */
541
604
        rcu_read_lock();
542
 
        list_for_each_entry_rcu(batman_if_tmp, &if_list, list) {
543
 
                if (batman_if_tmp->if_status == IF_NOT_IN_USE)
544
 
                        continue;
545
 
 
546
 
                if (batman_if == batman_if_tmp)
547
 
                        continue;
548
 
 
549
 
                if (batman_if->soft_iface != batman_if_tmp->soft_iface)
550
 
                        continue;
551
 
 
552
 
                if (batman_if_tmp->if_num > batman_if->if_num)
553
 
                        batman_if_tmp->if_num--;
 
605
        list_for_each_entry_rcu(hard_iface_tmp, &hardif_list, list) {
 
606
                if (hard_iface_tmp->if_status == IF_NOT_IN_USE)
 
607
                        continue;
 
608
 
 
609
                if (hard_iface == hard_iface_tmp)
 
610
                        continue;
 
611
 
 
612
                if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
 
613
                        continue;
 
614
 
 
615
                if (hard_iface_tmp->if_num > hard_iface->if_num)
 
616
                        hard_iface_tmp->if_num--;
554
617
        }
555
618
        rcu_read_unlock();
556
619
 
557
 
        batman_if->if_num = -1;
558
 
        spin_unlock_bh(&bat_priv->orig_hash_lock);
 
620
        hard_iface->if_num = -1;
559
621
        return 0;
560
622
 
561
623
err:
562
 
        spin_unlock_bh(&bat_priv->orig_hash_lock);
 
624
        rcu_read_unlock();
563
625
        return -ENOMEM;
564
626
}