~ubuntu-branches/ubuntu/precise/linux-ti-omap4/precise

Viewing changes to net/netfilter/nf_queue.c

  • Committer: Bazaar Package Importer
  • Author(s): Paolo Pisati
  • Date: 2011-06-29 15:23:51 UTC
  • mfrom: (26.1.1 natty-proposed)
  • Revision ID: james.westby@ubuntu.com-20110629152351-xs96tm303d95rpbk
Tags: 3.0.0-1200.2
* Rebased against 3.0.0-6.7
* BSP from TI based on 3.0.0

@@ -27,14 +27,17 @@
 int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
 {
         int ret;
+        const struct nf_queue_handler *old;
 
         if (pf >= ARRAY_SIZE(queue_handler))
                 return -EINVAL;
 
         mutex_lock(&queue_handler_mutex);
-        if (queue_handler[pf] == qh)
+        old = rcu_dereference_protected(queue_handler[pf],
+                                        lockdep_is_held(&queue_handler_mutex));
+        if (old == qh)
                 ret = -EEXIST;
-        else if (queue_handler[pf])
+        else if (old)
                 ret = -EBUSY;
         else {
                 rcu_assign_pointer(queue_handler[pf], qh);
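
This hunk (and the two that follow) replaces plain reads of queue_handler[pf] with rcu_dereference_protected(), whose second argument, lockdep_is_held(&queue_handler_mutex), lets lockdep verify at runtime that the writer-side mutex really is held and silences sparse's __rcu address-space warnings. A minimal kernel-style sketch of the same pattern, with hypothetical names (my_ops, my_handler, my_mutex) — not the actual nf_queue code:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct my_ops;                                 /* hypothetical handler type */

static DEFINE_MUTEX(my_mutex);                 /* writer-side lock */
static const struct my_ops __rcu *my_handler;  /* read via RCU, written under my_mutex */

static int my_register(const struct my_ops *ops)
{
        const struct my_ops *old;
        int ret = 0;

        mutex_lock(&my_mutex);
        /* Holding my_mutex makes this read safe; lockdep can check the claim. */
        old = rcu_dereference_protected(my_handler,
                                        lockdep_is_held(&my_mutex));
        if (old == ops)
                ret = -EEXIST;
        else if (old)
                ret = -EBUSY;
        else
                rcu_assign_pointer(my_handler, ops);
        mutex_unlock(&my_mutex);
        return ret;
}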
@@ -49,11 +52,15 @@
 /* The caller must flush their queue before this */
 int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
 {
+        const struct nf_queue_handler *old;
+
         if (pf >= ARRAY_SIZE(queue_handler))
                 return -EINVAL;
 
         mutex_lock(&queue_handler_mutex);
-        if (queue_handler[pf] && queue_handler[pf] != qh) {
+        old = rcu_dereference_protected(queue_handler[pf],
+                                        lockdep_is_held(&queue_handler_mutex));
+        if (old && old != qh) {
                 mutex_unlock(&queue_handler_mutex);
                 return -EINVAL;
         }
@@ -73,7 +80,10 @@
 
         mutex_lock(&queue_handler_mutex);
         for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++)  {
-                if (queue_handler[pf] == qh)
+                if (rcu_dereference_protected(
+                                queue_handler[pf],
+                                lockdep_is_held(&queue_handler_mutex)
+                                ) == qh)
                         rcu_assign_pointer(queue_handler[pf], NULL);
         }
         mutex_unlock(&queue_handler_mutex);
@@ -115,7 +125,7 @@
                       int (*okfn)(struct sk_buff *),
                       unsigned int queuenum)
 {
-        int status;
+        int status = -ENOENT;
         struct nf_queue_entry *entry = NULL;
 #ifdef CONFIG_BRIDGE_NETFILTER
         struct net_device *physindev;
@@ -124,20 +134,24 @@
         const struct nf_afinfo *afinfo;
         const struct nf_queue_handler *qh;
 
-        /* QUEUE == DROP if noone is waiting, to be safe. */
+        /* QUEUE == DROP if no one is waiting, to be safe. */
         rcu_read_lock();
 
         qh = rcu_dereference(queue_handler[pf]);
-        if (!qh)
+        if (!qh) {
+                status = -ESRCH;
                 goto err_unlock;
+        }
 
         afinfo = nf_get_afinfo(pf);
         if (!afinfo)
                 goto err_unlock;
 
         entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
-        if (!entry)
+        if (!entry) {
+                status = -ENOMEM;
                 goto err_unlock;
+        }
 
         *entry = (struct nf_queue_entry) {
                 .skb    = skb,
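
This hunk threads a status variable through __nf_queue()'s failure paths so each exit reports a distinct errno: -ENOENT by default, -ESRCH when no queue handler is registered for the protocol family, and -ENOMEM when the entry allocation fails. A small self-contained sketch of the idiom, in userspace C with hypothetical names for brevity:

#include <errno.h>
#include <stdlib.h>

struct entry { int payload; };                 /* hypothetical resource */

static int handler_present(void) { return 1; } /* stand-in for the qh lookup */

int queue_packet(void)
{
        int status = -ENOENT;                  /* default failure reason */
        struct entry *e = NULL;

        if (!handler_present()) {
                status = -ESRCH;               /* refine: no handler */
                goto err;
        }

        e = malloc(sizeof(*e));
        if (!e) {
                status = -ENOMEM;              /* refine: allocation failed */
                goto err;
        }

        /* ... hand e off to the queue ... */
        return 0;                              /* 0 on success, -errno on failure */

err:
        free(e);                               /* free(NULL) is a no-op */
        return status;
}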
@@ -151,11 +165,9 @@
 
         /* If it's going away, ignore hook. */
         if (!try_module_get(entry->elem->owner)) {
-                rcu_read_unlock();
-                kfree(entry);
-                return 0;
+                status = -ECANCELED;
+                goto err_unlock;
         }
-
         /* Bump dev refs so they don't vanish while packet is out */
         if (indev)
                 dev_hold(indev);
@@ -182,14 +194,13 @@
                 goto err;
         }
 
-        return 1;
+        return 0;
 
 err_unlock:
         rcu_read_unlock();
 err:
-        kfree_skb(skb);
         kfree(entry);
-        return 1;
+        return status;
 }
 
 int nf_queue(struct sk_buff *skb,
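
Taken together, these hunks change __nf_queue()'s contract: it used to return 1 unconditionally (freeing the skb itself on error) and 0 only when the hook's module was going away; it now returns 0 on success or a negative errno, and leaves skb ownership with the caller on failure. A schematic caller-side contrast (arguments elided, not compilable as-is), mirroring the nf_reinject() hunk later in this diff:

/* Old contract: 1 meant the skb was taken care of (queued, or freed on
 * error); 0 meant "hook going away", so traversal continued. */
if (!__nf_queue(skb, ...))
        goto next_hook;

/* New contract: 0 on success, -errno on failure; the skb survives
 * failures, so the caller picks a policy per error code. */
err = __nf_queue(skb, ...);
if (err == -ECANCELED)
        goto next_hook;        /* hook being unregistered: skip it */
else if (err < 0)
        kfree_skb(skb);        /* caller decides the packet's fate */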
@@ -201,6 +212,8 @@
              unsigned int queuenum)
 {
         struct sk_buff *segs;
+        int err;
+        unsigned int queued;
 
         if (!skb_is_gso(skb))
                 return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
@@ -216,20 +229,35 @@
         }
 
         segs = skb_gso_segment(skb, 0);
-        kfree_skb(skb);
+        /* Does not use PTR_ERR to limit the number of error codes that can be
+         * returned by nf_queue.  For instance, callers rely on -ECANCELED to mean
+         * 'ignore this hook'.
+         */
         if (IS_ERR(segs))
-                return 1;
+                return -EINVAL;
 
+        queued = 0;
+        err = 0;
         do {
                 struct sk_buff *nskb = segs->next;
 
                 segs->next = NULL;
-                if (!__nf_queue(segs, elem, pf, hook, indev, outdev, okfn,
-                                queuenum))
+                if (err == 0)
+                        err = __nf_queue(segs, elem, pf, hook, indev,
+                                           outdev, okfn, queuenum);
+                if (err == 0)
+                        queued++;
+                else
                         kfree_skb(segs);
                 segs = nskb;
         } while (segs);
-        return 1;
+
+        /* also free orig skb if only some segments were queued */
+        if (unlikely(err && queued))
+                err = 0;
+        if (err == 0)
+                kfree_skb(skb);
+        return err;
 }
 
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
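
For GSO packets, nf_queue() segments the skb and queues each segment separately. The rewritten loop latches the first error, frees only the segments that were never queued, and deliberately reports success when at least one segment made it into the queue, since queued segments are no longer the caller's to report as failed. Note also that kfree_skb(skb) moved from right after skb_gso_segment() to the end: on total failure the caller now gets the original skb back and decides its fate. A compilable userspace sketch of the same loop shape, with hypothetical names:

#include <errno.h>
#include <stdlib.h>

struct seg { struct seg *next; };

static int enqueue_one(struct seg *s) { (void)s; return 0; } /* stub: always succeeds */
static void free_seg(struct seg *s) { free(s); }

/* Mirrors the rewritten nf_queue() loop over a singly linked chain. */
int enqueue_chain(struct seg *head)
{
        unsigned int queued = 0;
        int err = 0;

        while (head) {
                struct seg *next = head->next;

                head->next = NULL;
                if (err == 0)
                        err = enqueue_one(head); /* stop queueing after a failure */
                if (err == 0)
                        queued++;
                else
                        free_seg(head);          /* unqueued segments are dropped */
                head = next;
        }

        /* Partial success still counts as success: the queued segments
         * are already owned by the queue. */
        if (err && queued)
                err = 0;
        return err;
}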
@@ -237,6 +265,7 @@
         struct sk_buff *skb = entry->skb;
         struct list_head *elem = &entry->elem->list;
         const struct nf_afinfo *afinfo;
+        int err;
 
         rcu_read_lock();
 
@@ -270,10 +299,17 @@
                 local_bh_enable();
                 break;
         case NF_QUEUE:
-                if (!__nf_queue(skb, elem, entry->pf, entry->hook,
-                                entry->indev, entry->outdev, entry->okfn,
-                                verdict >> NF_VERDICT_BITS))
-                        goto next_hook;
+                err = __nf_queue(skb, elem, entry->pf, entry->hook,
+                                 entry->indev, entry->outdev, entry->okfn,
+                                 verdict >> NF_VERDICT_QBITS);
+                if (err < 0) {
+                        if (err == -ECANCELED)
+                                goto next_hook;
+                        if (err == -ESRCH &&
+                           (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
+                                goto next_hook;
+                        kfree_skb(skb);
+                }
                 break;
         case NF_STOLEN:
         default:
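
The reinject path now extracts the target queue number with NF_VERDICT_QBITS instead of NF_VERDICT_BITS, and honours the new NF_VERDICT_FLAG_QUEUE_BYPASS flag: if no userspace listener exists (-ESRCH) and the verdict carries the bypass flag, the packet continues to the next hook instead of being dropped. A small runnable sketch of how the 32-bit verdict word is packed; the macro values are assumed to match include/linux/netfilter.h of this kernel generation:

#include <stdint.h>
#include <stdio.h>

#define NF_QUEUE                        3           /* verdict code */
#define NF_VERDICT_MASK                 0x000000ff  /* bits 0..7: verdict */
#define NF_VERDICT_FLAG_QUEUE_BYPASS    0x00008000  /* bits 8..15: flags */
#define NF_VERDICT_QBITS                16          /* bits 16..31: queue number */

int main(void)
{
        /* Hypothetical verdict: queue 7, bypass allowed (as an iptables
         * NFQUEUE target with --queue-bypass might encode it). */
        uint32_t verdict = (7u << NF_VERDICT_QBITS) |
                           NF_VERDICT_FLAG_QUEUE_BYPASS | NF_QUEUE;

        printf("code=%u queue=%u bypass=%d\n",
               verdict & NF_VERDICT_MASK,
               verdict >> NF_VERDICT_QBITS,
               !!(verdict & NF_VERDICT_FLAG_QUEUE_BYPASS));
        return 0;
}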