// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
10
#include "defs_GOOS_GOARCH.h"
11
#include "arch_GOARCH.h"
// Integrated network poller (platform-independent part).
// A particular implementation (epoll/kqueue) must define the following functions:
// void runtime·netpollinit(void);                     // to initialize the poller
// int32 runtime·netpollopen(int32 fd, PollDesc *pd);  // to arm edge-triggered notifications
//                                                     // and associate fd with pd.
// An implementation must call the following function to denote that the pd is ready.
// void runtime·netpollready(G **gpp, PollDesc *pd, int32 mode);
26
PollDesc* link; // in pollcache, protected by pollcache.Lock
27
Lock; // protectes the following fields
30
uintptr seq; // protects from stale timers and ready notifications
31
G* rg; // G waiting for read or READY (binary semaphore)
32
Timer rt; // read deadline timer (set if rt.fv != nil)
33
int64 rd; // read deadline
34
G* wg; // the same for writes
// PollDesc objects must be type-stable,
// because we can get ready notification from epoll/kqueue
// after the descriptor is closed/reused.
// Stale notifications are detected using seq variable,
// seq is incremented when deadlines are changed or descriptor is reused.
50
static void netpollblock(PollDesc*, int32);
51
static G* netpollunblock(PollDesc*, int32);
52
static void deadline(int64, Eface);
53
static void readDeadline(int64, Eface);
54
static void writeDeadline(int64, Eface);
55
static PollDesc* allocPollDesc(void);
56
static intgo checkerr(PollDesc *pd, int32 mode);
58
static FuncVal deadlineFn = {(void(*)(void))deadline};
59
static FuncVal readDeadlineFn = {(void(*)(void))readDeadline};
60
static FuncVal writeDeadlineFn = {(void(*)(void))writeDeadline};
62
func runtime_pollServerInit() {
63
runtime·netpollinit();
66
func runtime_pollOpen(fd int) (pd *PollDesc, errno int) {
69
if(pd->wg != nil && pd->wg != READY)
70
runtime·throw("runtime_pollOpen: blocked write on free descriptor");
71
if(pd->rg != nil && pd->rg != READY)
72
runtime·throw("runtime_pollOpen: blocked read on free descriptor");
82
errno = runtime·netpollopen(fd, pd);
85
func runtime_pollClose(pd *PollDesc) {
87
runtime·throw("runtime_pollClose: close w/o unblock");
88
if(pd->wg != nil && pd->wg != READY)
89
runtime·throw("runtime_pollClose: blocked write on closing descriptor");
90
if(pd->rg != nil && pd->rg != READY)
91
runtime·throw("runtime_pollClose: blocked read on closing descriptor");
92
runtime·netpollclose(pd->fd);
93
runtime·lock(&pollcache);
94
pd->link = pollcache.first;
96
runtime·unlock(&pollcache);
99
func runtime_pollReset(pd *PollDesc, mode int) (err int) {
101
err = checkerr(pd, mode);
112
func runtime_pollWait(pd *PollDesc, mode int) (err int) {
114
err = checkerr(pd, mode);
117
netpollblock(pd, mode);
118
err = checkerr(pd, mode);
123
func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
127
pd->seq++; // invalidate current timers
128
// Reset current timers.
130
runtime·deltimer(&pd->rt);
134
runtime·deltimer(&pd->wt);
138
if(d != 0 && d <= runtime·nanotime()) {
141
if(mode == 'r' || mode == 'r'+'w')
143
if(mode == 'w' || mode == 'r'+'w')
145
if(pd->rd > 0 && pd->rd == pd->wd) {
146
pd->rt.fv = &deadlineFn;
147
pd->rt.when = pd->rd;
148
// Copy current seq into the timer arg.
149
// Timer func will check the seq against current descriptor seq,
150
// if they differ the descriptor was reused or timers were reset.
151
pd->rt.arg.type = (Type*)pd->seq;
152
pd->rt.arg.data = pd;
153
runtime·addtimer(&pd->rt);
156
pd->rt.fv = &readDeadlineFn;
157
pd->rt.when = pd->rd;
158
pd->rt.arg.type = (Type*)pd->seq;
159
pd->rt.arg.data = pd;
160
runtime·addtimer(&pd->rt);
163
pd->wt.fv = &writeDeadlineFn;
164
pd->wt.when = pd->wd;
165
pd->wt.arg.type = (Type*)pd->seq;
166
pd->wt.arg.data = pd;
167
runtime·addtimer(&pd->wt);
174
func runtime_pollUnblock(pd *PollDesc) {
179
runtime·throw("runtime_pollUnblock: already closing");
182
rg = netpollunblock(pd, 'r');
183
wg = netpollunblock(pd, 'w');
185
runtime·deltimer(&pd->rt);
189
runtime·deltimer(&pd->wt);
199
// make pd ready, newly runnable goroutines (if any) are enqueued info gpp list
201
runtime·netpollready(G **gpp, PollDesc *pd, int32 mode)
207
if(mode == 'r' || mode == 'r'+'w')
208
rg = netpollunblock(pd, 'r');
209
if(mode == 'w' || mode == 'r'+'w')
210
wg = netpollunblock(pd, 'w');
213
rg->schedlink = *gpp;
217
wg->schedlink = *gpp;
223
checkerr(PollDesc *pd, int32 mode)
226
return 1; // errClosing
227
if((mode == 'r' && pd->rd < 0) || (mode == 'w' && pd->wd < 0))
228
return 2; // errTimeout
233
netpollblock(PollDesc *pd, int32 mode)
245
runtime·throw("epoll: double wait");
247
runtime·park(runtime·unlock, &pd->Lock, "IO wait");
252
netpollunblock(PollDesc *pd, int32 mode)
271
deadlineimpl(int64 now, Eface arg, bool read, bool write)
278
pd = (PollDesc*)arg.data;
279
// This is the seq when the timer was set.
280
// If it's stale, ignore the timer event.
281
seq = (uintptr)arg.type;
285
// The descriptor was reused or timers were reset.
290
if(pd->rd <= 0 || pd->rt.fv == nil)
291
runtime·throw("deadlineimpl: inconsistent read deadline");
294
rg = netpollunblock(pd, 'r');
297
if(pd->wd <= 0 || (pd->wt.fv == nil && !read))
298
runtime·throw("deadlineimpl: inconsistent write deadline");
301
wg = netpollunblock(pd, 'w');
311
deadline(int64 now, Eface arg)
313
deadlineimpl(now, arg, true, true);
317
readDeadline(int64 now, Eface arg)
319
deadlineimpl(now, arg, true, false);
323
writeDeadline(int64 now, Eface arg)
325
deadlineimpl(now, arg, false, true);
334
runtime·lock(&pollcache);
335
if(pollcache.first == nil) {
336
n = PageSize/sizeof(*pd);
339
// Must be in non-GC memory because can be referenced
340
// only from epoll/kqueue internals.
341
pd = runtime·SysAlloc(n*sizeof(*pd));
342
for(i = 0; i < n; i++) {
343
pd[i].link = pollcache.first;
344
pollcache.first = &pd[i];
347
pd = pollcache.first;
348
pollcache.first = pd->link;
349
runtime·unlock(&pollcache);