/*
 * Copyright (c) 2007 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/types.h>
#include <linux/rbtree.h>
#include <asm-generic/bitops/le.h>

#include "rds.h"
/*
 * This file implements the receive side of the unconventional congestion
 * management in RDS.
 *
 * Messages waiting in the receive queue on the receiving socket are accounted
 * against the socket's SO_RCVBUF option value.  Only the payload bytes in the
 * message are accounted for.  If the number of bytes queued equals or exceeds
 * rcvbuf then the socket is congested.  All sends attempted to this socket's
 * address should block or return -EWOULDBLOCK.
 *
 * Applications are expected to be reasonably tuned such that this situation
 * very rarely occurs.  An application encountering this "back-pressure" is
 * considered a bug.
 *
 * This is implemented by having each node maintain bitmaps which indicate
 * which ports on bound addresses are congested.  As the bitmap changes it is
 * sent through all the connections which terminate in the local address of the
 * bitmap which changed.
 *
 * The bitmaps are allocated as connections are brought up.  This avoids
 * allocation in the interrupt handling path which queues messages on sockets.
 * The dense bitmaps let transports send the entire bitmap on any bitmap change
 * reasonably efficiently.  This is much easier to implement than some
 * finer-grained communication of per-port congestion.  The sender does a very
 * inexpensive bit test to check if the port it's about to send to is congested
 * or not.
 *
 * Interaction with poll is a tad tricky.  We want all processes stuck in
 * poll to wake up and check whether a congested destination became uncongested.
 * The really sad thing is we have no idea which destinations the application
 * wants to send to - we don't even know which rds_connections are involved.
 * So until we implement a more flexible rds poll interface, we have to make
 * do with this:
 * We maintain a global counter that is incremented each time a congestion map
 * update is received.  Each rds socket tracks this value, and if rds_poll
 * finds that the saved generation number is smaller than the global generation
 * number, it wakes up the process.
 */
static atomic_t rds_cong_generation = ATOMIC_INIT(0);

/*
 * Congestion monitoring
 */
static LIST_HEAD(rds_cong_monitor);
static DEFINE_RWLOCK(rds_cong_monitor_lock);

/*
 * Yes, a global lock.  It's used so infrequently that it's worth keeping it
 * global to simplify the locking.  It's only used in the following
 * circumstances:
 *
 *  - on connection buildup to associate a conn with its maps
 *  - on map changes to inform conns of a new map to send
 *
 * It's sadly ordered under the socket callback lock and the connection lock.
 * Receive paths can mark ports congested from interrupt context so the
 * lock masks interrupts.
 */
static DEFINE_SPINLOCK(rds_cong_lock);
static struct rb_root rds_cong_tree = RB_ROOT;

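/*
 * Walk the rb-tree of per-address congestion maps, keyed by address.  If a
 * map for "addr" already exists it is returned.  Otherwise, if "insert" is
 * non-NULL it is linked into the tree at the point where the search ended
 * and NULL is returned, telling the caller that its map was installed.
 */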
static struct rds_cong_map *rds_cong_tree_walk(__be32 addr,
					       struct rds_cong_map *insert)
{
	struct rb_node **p = &rds_cong_tree.rb_node;
	struct rb_node *parent = NULL;
	struct rds_cong_map *map;

	while (*p) {
		parent = *p;
		map = rb_entry(parent, struct rds_cong_map, m_rb_node);

		if (addr < map->m_addr)
			p = &(*p)->rb_left;
		else if (addr > map->m_addr)
			p = &(*p)->rb_right;
		else
			return map;
	}

	if (insert) {
		rb_link_node(&insert->m_rb_node, parent, p);
		rb_insert_color(&insert->m_rb_node, &rds_cong_tree);
	}

	return NULL;
}

/*
 * There is only ever one bitmap for any address.  Connections try to allocate
 * these bitmaps in the process of getting pointers to them.  The bitmaps are
 * only ever freed as the module is removed after all connections have been
 * freed.
 */
static struct rds_cong_map *rds_cong_from_addr(__be32 addr)
{
	struct rds_cong_map *map;
	struct rds_cong_map *ret = NULL;
	unsigned long zp;
	unsigned long i;
	unsigned long flags;

	map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL);
	if (map == NULL)
		return NULL;

	map->m_addr = addr;
	init_waitqueue_head(&map->m_waitq);
	INIT_LIST_HEAD(&map->m_conn_list);

	for (i = 0; i < RDS_CONG_MAP_PAGES; i++) {
		zp = get_zeroed_page(GFP_KERNEL);
		if (zp == 0)
			goto out;
		map->m_page_addrs[i] = zp;
	}

	spin_lock_irqsave(&rds_cong_lock, flags);
	ret = rds_cong_tree_walk(addr, map);
	spin_unlock_irqrestore(&rds_cong_lock, flags);

	if (ret == NULL) {
		ret = map;
		map = NULL;
	}

out:
	if (map) {
		for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
			free_page(map->m_page_addrs[i]);
		kfree(map);
	}

	rdsdebug("map %p for addr %x\n", ret, be32_to_cpu(addr));

	return ret;
}

/*
 * Put the conn on its local map's list.  This is called when the conn is
 * really added to the hash.  It's nested under the rds_conn_lock, sadly.
 */
void rds_cong_add_conn(struct rds_connection *conn)
{
	unsigned long flags;

	rdsdebug("conn %p now on map %p\n", conn, conn->c_lcong);
	spin_lock_irqsave(&rds_cong_lock, flags);
	list_add_tail(&conn->c_map_item, &conn->c_lcong->m_conn_list);
	spin_unlock_irqrestore(&rds_cong_lock, flags);
}

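/*
 * Take the conn back off its local map's list; the inverse of
 * rds_cong_add_conn().
 */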
void rds_cong_remove_conn(struct rds_connection *conn)
{
	unsigned long flags;

	rdsdebug("removing conn %p from map %p\n", conn, conn->c_lcong);
	spin_lock_irqsave(&rds_cong_lock, flags);
	list_del_init(&conn->c_map_item);
	spin_unlock_irqrestore(&rds_cong_lock, flags);
}

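/*
 * Look up (or allocate) the congestion maps for the connection's local and
 * foreign addresses.  Returns -ENOMEM if either map can't be obtained.
 */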
int rds_cong_get_maps(struct rds_connection *conn)
{
	conn->c_lcong = rds_cong_from_addr(conn->c_laddr);
	conn->c_fcong = rds_cong_from_addr(conn->c_faddr);

	if (conn->c_lcong == NULL || conn->c_fcong == NULL)
		return -ENOMEM;

	return 0;
}

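/*
 * A local map changed: ask every connection terminating in this map's
 * address to transmit the updated bitmap.  Each conn is flagged at most
 * once via c_map_queued before its send worker is kicked.
 */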
void rds_cong_queue_updates(struct rds_cong_map *map)
{
	struct rds_connection *conn;
	unsigned long flags;

	spin_lock_irqsave(&rds_cong_lock, flags);

	list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
		if (!test_and_set_bit(0, &conn->c_map_queued)) {
			rds_stats_inc(s_cong_update_queued);
			queue_delayed_work(rds_wq, &conn->c_send_w, 0);
		}
	}

	spin_unlock_irqrestore(&rds_cong_lock, flags);
}

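/*
 * Called when an updated congestion bitmap has been received for "map".
 * Bumps the global generation counter so pollers re-check their
 * destinations, wakes blocked senders, and notifies any sockets that asked
 * to monitor the ports set in "portmask".
 */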
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
{
	rdsdebug("waking map %p for %pI4\n",
		 map, &map->m_addr);
	rds_stats_inc(s_cong_update_received);
	atomic_inc(&rds_cong_generation);
	if (waitqueue_active(&map->m_waitq))
		wake_up(&map->m_waitq);
	if (waitqueue_active(&rds_poll_waitq))
		wake_up_all(&rds_poll_waitq);

	if (portmask && !list_empty(&rds_cong_monitor)) {
		unsigned long flags;
		struct rds_sock *rs;

		read_lock_irqsave(&rds_cong_monitor_lock, flags);
		list_for_each_entry(rs, &rds_cong_monitor, rs_cong_list) {
			spin_lock(&rs->rs_lock);
			rs->rs_cong_notify |= (rs->rs_cong_mask & portmask);
			rs->rs_cong_mask &= ~portmask;
			spin_unlock(&rs->rs_lock);
			if (rs->rs_cong_notify)
				rds_wake_sk_sleep(rs);
		}
		read_unlock_irqrestore(&rds_cong_monitor_lock, flags);
	}
}

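/*
 * Compare the caller's cached generation number against the global one.
 * Returns nonzero (and refreshes *recent) if a congestion map update has
 * arrived since the caller last looked; the poll path uses this to notice
 * that congestion state may have changed.
 */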
int rds_cong_updated_since(unsigned long *recent)
{
	unsigned long gen = atomic_read(&rds_cong_generation);

	if (likely(*recent == gen))
		return 0;

	*recent = gen;
	return 1;
}

/*
 * We're called under the locking that protects the socket's receive buffer
 * consumption.  This makes it a lot easier for the caller to only call us
 * when it knows that an existing set bit needs to be cleared, and vice versa.
 * We can't block and we need to deal with concurrent sockets working against
 * the same per-address map.
 */
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
{
	unsigned long i;
	unsigned long off;

	rdsdebug("setting congestion for %pI4:%u in map %p\n",
		 &map->m_addr, ntohs(port), map);

	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

	generic___set_le_bit(off, (void *)map->m_page_addrs[i]);
}

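/* Clear the congestion bit for "port"; counterpart to rds_cong_set_bit(). */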
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
{
	unsigned long i;
	unsigned long off;

	rdsdebug("clearing congestion for %pI4:%u in map %p\n",
		 &map->m_addr, ntohs(port), map);

	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

	generic___clear_le_bit(off, (void *)map->m_page_addrs[i]);
}

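/*
 * Nonzero if "port" is currently marked congested in "map".  The port
 * number indexes the map as one flat bit array spread across
 * RDS_CONG_MAP_PAGES pages of RDS_CONG_MAP_PAGE_BITS bits each.
 */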
static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
{
	unsigned long i;
	unsigned long off;

	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

	return generic_test_le_bit(off, (void *)map->m_page_addrs[i]);
}

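/*
 * Add a socket to the congestion monitor list so it receives per-port
 * notifications from rds_cong_map_updated().
 */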
void rds_cong_add_socket(struct rds_sock *rs)
{
	unsigned long flags;

	write_lock_irqsave(&rds_cong_monitor_lock, flags);
	if (list_empty(&rs->rs_cong_list))
		list_add(&rs->rs_cong_list, &rds_cong_monitor);
	write_unlock_irqrestore(&rds_cong_monitor_lock, flags);
}

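/*
 * Drop a socket from the monitor list and, since its bound port is going
 * away, clear that port's congestion bit and queue a map update if the bit
 * was set.
 */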
void rds_cong_remove_socket(struct rds_sock *rs)
{
	unsigned long flags;
	struct rds_cong_map *map;

	write_lock_irqsave(&rds_cong_monitor_lock, flags);
	list_del_init(&rs->rs_cong_list);
	write_unlock_irqrestore(&rds_cong_monitor_lock, flags);

	/* update congestion map for now-closed port */
	spin_lock_irqsave(&rds_cong_lock, flags);
	map = rds_cong_tree_walk(rs->rs_bound_addr, NULL);
	spin_unlock_irqrestore(&rds_cong_lock, flags);

	if (map && rds_cong_test_bit(map, rs->rs_bound_port)) {
		rds_cong_clear_bit(map, rs->rs_bound_port);
		rds_cong_queue_updates(map);
	}
}

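/*
 * Called before sending to "port".  Returns 0 if the port is uncongested.
 * Non-blocking senders get an error (after arming the socket's monitor
 * mask, if it asked for notification); blocking senders sleep on the map's
 * waitqueue until the congestion bit clears.
 */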
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock,
		  struct rds_sock *rs)
{
	if (!rds_cong_test_bit(map, port))
		return 0;
	if (nonblock) {
		if (rs && rs->rs_cong_monitor) {
			unsigned long flags;

			/* It would have been nice to have an atomic set_bit on
			 * a uint64_t. */
			spin_lock_irqsave(&rs->rs_lock, flags);
			rs->rs_cong_mask |= RDS_CONG_MONITOR_MASK(ntohs(port));
			spin_unlock_irqrestore(&rs->rs_lock, flags);

			/* Test again - a congestion update may have arrived in
			 * the meantime. */
			if (!rds_cong_test_bit(map, port))
				return 0;
		}
		rds_stats_inc(s_cong_send_error);
		return -ENOBUFS;
	}

	rds_stats_inc(s_cong_send_blocked);
	rdsdebug("waiting on map %p for port %u\n", map, be16_to_cpu(port));

	return wait_event_interruptible(map->m_waitq,
					!rds_cong_test_bit(map, port));
}

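/*
 * Module teardown: empty the rb-tree and free every map's bitmap pages.
 * Only safe once all connections (and their map references) are gone.
 */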
void rds_cong_exit(void)
{
	struct rb_node *node;
	struct rds_cong_map *map;
	unsigned long i;

	while ((node = rb_first(&rds_cong_tree))) {
		map = rb_entry(node, struct rds_cong_map, m_rb_node);
		rdsdebug("freeing map %p\n", map);
		rb_erase(&map->m_rb_node, &rds_cong_tree);
		for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
			free_page(map->m_page_addrs[i]);
		kfree(map);
	}
}

/*
 * Allocate an RDS message containing a congestion update.
 */
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn)
{
	struct rds_cong_map *map = conn->c_lcong;
	struct rds_message *rm;

	rm = rds_message_map_pages(map->m_page_addrs, RDS_CONG_MAP_BYTES);
	if (!IS_ERR(rm))
		rm->m_inc.i_hdr.h_flags = RDS_FLAG_CONG_BITMAP;

	return rm;
}