3
"$Id: pvmdmimd.c,v 1.1 2004/01/14 17:34:27 pvmsrc Exp $";
6
* PVM version 3.4: Parallel Virtual Machine System
7
* University of Tennessee, Knoxville TN.
8
* Oak Ridge National Laboratory, Oak Ridge TN.
9
* Emory University, Atlanta GA.
10
* Authors: J. J. Dongarra, G. E. Fagg, M. Fischer
11
* G. A. Geist, J. A. Kohl, R. J. Manchek, P. Mucci,
12
* P. M. Papadopoulos, S. L. Scott, and V. S. Sunderam
13
* (C) 1997 All Rights Reserved
17
* Permission to use, copy, modify, and distribute this software and
18
* its documentation for any purpose and without fee is hereby granted
19
* provided that the above copyright notice appear in all copies and
20
* that both the copyright notice and this permission notice appear in
21
* supporting documentation.
23
* Neither the Institutions (Emory University, Oak Ridge National
24
* Laboratory, and University of Tennessee) nor the Authors make any
25
* representations about the suitability of this software for any
26
* purpose. This software is provided ``as is'' without express or
29
* PVM version 3 was funded in part by the U.S. Department of Energy,
30
* the National Science Foundation and the State of Tennessee.
38
* void mpp_init(int argc, char **argv):
39
* Initialization. Create a table to keep track of active nodes.
40
* argc, argv: passed from main.
42
* int mpp_load( struct waitc_spawn *wxp )
44
* Load executable onto nodes; create new entries in task table,
45
* encode node number and process type into task IDs, etc.
47
* Construction of Task ID:
49
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
50
* +-+-+-----------------------+-+-----+--------------------------+
51
* |s|g| host index |n| prt | node # (16384) |
52
* +-+-+-----------------------+-+-----+--------------------------+
54
* The "n" bit is set for node task but clear for host task.
56
* flags: exec options;
57
* name: executable to be loaded;
58
* argv: command line argument for executable
59
* count: number of tasks to be created;
60
* tids: array to store new task IDs;
61
* ptid: parent task ID.
63
* mpp_new(int count, int ptid):
64
* Allocate a set of nodes. (called by mpp_load())
65
* count: number of nodes; ptid: parent task ID.
68
* Send all pending packets to nodes via native send. Node number
69
* and process type are extracted from task ID.
71
* int mpp_mcast(int src, struct pkt pp, int tids[], int ntask):
73
* src: source task ID;
75
* tids: list of destination task IDs;
79
* Probe for pending packets from nodes (non-blocking). Returns
80
* 1 if packets are detected, otherwise 0.
83
* Receive pending packets (from nodes) via native recv.
85
* struct task *mpp_find(int pid):
86
* Find a task in task table by its Unix pid.
88
* void mpp_free(struct task *tp):
89
* Remove node/process-type from active list.
93
Revision 1.1 2004/01/14 17:34:27 pvmsrc
97
Revision 1.4 2002/02/21 23:19:01 pvmsrc
98
Added new (not to be documented!) PVM_MAX_TASKS env var support.
99
- for Mahir Lokvancic <mahir@math.ufl.edu>.
100
- forcefully limits the number of tasks that can attach to a
101
pvmd, required on Solaris in rare circumstances when hard
102
FD_SETSIZE limit is reached, and all hell breaks loose...
103
- check return for task_new() call, can now produce NULL ptr,
104
indicating PvmOutOfRes...
107
Revision 1.3 2001/09/25 21:19:00 pvmsrc
108
Minor TMPNAMFUN()/tmpnam() cleanup.
109
- moved macro def to pvm3.h, renamed PVMTNPMAN().
110
- same for LEN_OF_TMP_NAM -> PVMTMPNAMLEN.
111
- mostly a huge waste of time, since *both* tmpnam() & mktemp()
112
produce the same "dangerous" warning message in Linux/gcc...
116
Revision 1.2 2000/02/17 21:10:16 pvmsrc
117
Cleaned up comments... mpp_load() args...
120
* Revision 1.1 1998/02/23 21:31:44 pvmsrc
121
* Copied from SP2MPI.
123
* Revision 1.10 1997/10/01 15:36:06 pvmsrc
124
* Removed unnecessary #include "fromlib.h" header.
125
* - all consts now included in pvm3.h...
126
* - header file eliminated.
129
* Revision 1.9 1997/08/27 20:18:58 pvmsrc
130
* Added blank args (0,0,0) in mpp_load() to make it call forkexec correctly.
131
* Protocol needs to be changed to allow taskers to access IBM poe directly.
134
* Revision 1.8 1997/07/09 13:54:58 pvmsrc
135
* Fixed Author Header.
137
* Revision 1.7 1997/06/02 13:48:38 pvmsrc
138
* Moved #include host.h above #include waitc.h.
139
* Removed old mesg.h include... gone baby.
141
* Revision 1.6 1997/05/06 20:14:36 pvmsrc
142
* Catch stdout/stderr correctly and redirect to outtid,tag,ctx
144
* Revision 1.5 1997/05/05 20:08:56 pvmsrc
145
* Pass outctx and trcctx to mpp tasks.
147
* Revision 1.4 1997/05/02 13:52:50 pvmsrc
148
* Start up MPI jobs correctly and get them configured.
150
* Revision 1.3 1997/03/25 15:52:21 pvmsrc
151
* PVM patches from the base 3.3.10 to 3.3.11 versions where applicable.
152
* Originals by Bob Manchek. Altered by Graham Fagg where required.
153
* -IP enabled over switch
154
* -RMPOOL env can be used instead of host list
156
* Revision 1.2 1997/01/28 19:30:57 pvmsrc
157
* New Copyright Notice & Authors.
159
* Revision 1.1 1996/09/23 23:15:09 pvmsrc
162
* Revision 1.3 1996/05/14 14:35:59 manchek
163
* inc'd changes from chulho@kgn.ibm.com
165
* Revision 1.2 1995/07/25 17:41:27 manchek
166
* mpp_output returns int
168
* Revision 1.1 1995/05/30 17:23:56 manchek
171
* Revision 1.3 1994/06/03 20:54:24 manchek
174
* Revision 1.2 1993/12/20 15:39:47 manchek
177
* Revision 1.1 1993/08/30 23:35:09 manchek
183
#include <sys/param.h>
184
#include <sys/types.h>
185
#include <sys/time.h>
186
#include <sys/socket.h>
187
#include <netinet/in.h>
188
#include <netinet/tcp.h>
189
#include <sys/stat.h>
197
#define CINDEX(s,c) strchr(s,c)
200
#define CINDEX(s,c) index(s,c)
204
#include <pvmproto.h>
208
#include "pvmalloc.h"
216
#define MPICOMM "/usr/bin/poe"
217
#define MPIOPT1 "-procs"
218
#define MPIOPT2 "-euilib"
219
#define MPIOPT3 "-hfile"
220
#define MPIOPT4 "-rmpool"
221
#define MPIOPARG2 "us" /* options: -procs # -euilib us */
222
#define MPIOPARG3 "ip" /* Enable IP over switch */
223
#define MPIARGC 7 /* number of command line arguments */
229
extern int pvmdebmask; /* from pvmd.c */
230
extern char **epaths; /* from pvmd.c */
231
extern int myhostpart; /* from pvmd.c */
232
extern int tidhmask; /* from pvmd.c */
233
extern int ourudpmtu; /* from pvmd.c */
234
extern struct htab *hosts; /* from pvmd.c */
235
extern struct task *locltasks; /* from task.c */
237
int tidtmask = TIDPTYPE; /* mask for ptype field of tids */
238
int tidnmask = TIDNODE; /* mask for node field of tids */
242
static int myndf = 0;
243
static struct nodeset *busynodes; /* active nodes; ordered by proc type */
244
static char pvmtxt[512]; /* scratch for error log */
245
static int ptypemask; /* mask; we use these bits of ptype in tids */
246
static char nodefile[PVMTMPNAMLEN]; /* tmp node file */
247
static char **nodelist = 0; /* default poe node list */
248
static int partsize = 0; /* number of nodes allocated */
249
static int hostfileused = TRUE; /* Check if MP_HOSTFILE used */
250
static char defaultpool[64]="1"; /* default MP_POOL if not set */
251
static char mpiadapter[]={MPIOPARG2}; /* default User-Space */
253
static int sp2pvminfo[SIZEHINFO];
263
char *hfn; /* host file name */
264
char nname[128]; /* node name */
268
if ((hfn = getenv("LOADLBATCH"))) {
269
if (strcmp(hfn, "yes") == 0) {
270
if ((hfn = getenv("LOADL_PROCESSOR_LIST"))) {
272
"LOADL_PROCESSOR_LIST=%s.\n",hfn);
273
pvmlogperror(pvmtxt);
275
for (hfn;*hfn!='\0';hfn++) {
281
"LOADL_PROCESSOR_LIST=%s - is not set\n",hfn);
282
pvmlogperror(pvmtxt);
286
sprintf(pvmtxt,"LOADLBATCH=%s - not set to yes\n",hfn);
287
pvmlogperror(pvmtxt);
290
} else if ((hfn = getenv("MP_PROCS"))) {
291
if ((partsize = atoi(hfn)) < 1) {
292
sprintf(pvmtxt,"MP_PROCS=%d must be >= to 1\n",partsize);
293
pvmlogperror(pvmtxt);
297
if ((hfn = getenv("MP_RMPOOL"))) {
300
"MP_RMPOOL=%d must be >= to 0\n",defaultpool);
301
pvmlogperror(pvmtxt);
304
strcpy(defaultpool,hfn);
306
} else if ((hfn = getenv("MP_HOSTFILE")))
308
if (!(hfp = fopen(hfn, "r"))) {
309
sprintf(pvmtxt, "sp2hostfile() fopen %s\n", hfn);
310
pvmlogperror(pvmtxt);
314
while (fscanf(hfp, "%s", nname) != EOF)
317
nodelist = TALLOC(partsize, char*, "nname");
319
for (i = 0; i < partsize; i++) {
320
fscanf(hfp, "%s", nname);
321
nodelist[i] = STRALLOC(nname);
326
pvmlogerror("mpp_init() no POE host file.\n");
327
pvmlogerror("mpp_init() MP_PROCS, MP_RMPOOL or MP_HOSTFILE must be set.\n");
330
if ((hfn = getenv("MP_EUILIB"))) {
331
if (strcmp(hfn, "ip") == 0) {
332
strcpy(mpiadapter,MPIOPARG3); /* IP over switch */
335
sprintf(pvmtxt, "%d nodes allocated.\n", partsize);
338
busynodes = TALLOC(1, struct nodeset, "nsets");
339
BZERO((char*)busynodes, sizeof(struct nodeset));
340
busynodes->n_link = busynodes;
341
busynodes->n_rlink = busynodes;
343
ptypemask = tidtmask >> (ffs(tidtmask) - 1);
347
/* create tmp poe host file from default */
349
sp2hostfile(first, count)
350
int first; /* first node in the set */
351
int count; /* number of nodes requested */
356
if (partsize < count) {
357
sprintf(pvmtxt, "sp2hostfile() need at least %d nodes\n", count+1);
358
pvmlogperror(pvmtxt);
361
(void)PVMTMPNAMFUN(nodefile);
362
if (!(tmpfp = fopen(nodefile, "w"))) {
363
sprintf(pvmtxt, "sp2hostfile() fopen %s", nodefile);
364
pvmlogperror(pvmtxt);
367
if (pvmdebmask & PDMNODE) {
368
sprintf(pvmtxt, "sp2hostfile() POE host file: %s\n", nodefile);
371
for (i = first; i < count + first; i++)
372
fprintf(tmpfp, "%s\n", nodelist[i]);
379
* find a set of free nodes from nodelist; assign ptype sequentially,
380
* only tasks spawned together get the same ptype
384
int count; /* number of nodes requested */
385
int ptid; /* parent's tid */
387
struct nodeset *sp, *newp, *sp2;
391
if (!(newp = TALLOC(1, struct nodeset, "nsets"))) {
392
pvmlogerror("mpp_new() can't get memory\n");
395
BZERO((char*)newp, sizeof(struct nodeset));
397
newp->n_size = count;
398
for (sp = busynodes->n_link; sp != busynodes; sp = sp->n_link) {
399
if (sp->n_first - last > count)
401
last = sp->n_first + sp->n_size - 1;
403
if (sp->n_link == busynodes && partsize - last > count)
406
if (ptype <= sp->n_ptype)
407
ptype = sp->n_ptype + 1;
409
if (sp == busynodes && partsize - last <= count) {
410
pvmlogerror("mpp_new() not enough nodes in partition\n");
412
return (struct nodeset *)0;
414
for (sp2 = busynodes->n_link; sp2 != busynodes; sp2 = sp2->n_link)
415
if ((sp2->n_ptype & ptypemask) == (ptype & ptypemask))
417
if (sp2 != busynodes || ptype == NPARTITIONS) {
418
for (ptype = 0; ptype < NPARTITIONS; ptype++) {
419
for (sp2 = busynodes->n_link; sp2 != busynodes; sp2 = sp2->n_link)
420
if ((sp2->n_ptype & ptypemask) == (ptype & ptypemask))
422
if (sp2 == busynodes)
425
if (ptype == NPARTITIONS) {
426
pvmlogerror("mpp_new() out of ptypes: too many spawns\n");
427
return (struct nodeset *)0;
432
if (pvmdebmask & PDMNODE) {
433
sprintf(pvmtxt, "mpp_new() %d nodes %d ... ptype=%d ptid=%x\n",
434
count, last+1, ptype, ptid);
437
newp->n_first = last + 1;
439
if (!sp2hostfile(newp->n_first, count)) {
441
return (struct nodeset *)0;
444
newp->n_ptype = ptype;
446
newp->n_alive = count - 1;
447
LISTPUTBEFORE(sp, newp, n_link, n_rlink);
454
* remove node/ptype from active list; if tid is the last to go, shutdown
455
* pvmhost's socket, but do not destroy the node set because pvmhost may
456
* not exit immediately. To avoid a race condition, let mpp_output()
471
ptype = TIDTOTYPE(tid);
472
tp->t_txq = 0; /* don't free pvmhost's txq */
473
for (sp = busynodes->n_link; sp != busynodes; sp = sp->n_link) {
474
if ((sp->n_ptype & ptypemask) == ptype) {
476
if (pvmdebmask & PDMNODE) {
477
sprintf(pvmtxt, "mpp_free() t%x type=%ld alive=%d\n",
478
tid, sp->n_ptype, sp->n_alive);
481
if (--sp->n_alive == 0) {
482
if (tp2 = task_find(sp->n_ptid)) {
483
tp2->t_flag |= TF_CLOSE;
484
if (tp2->t_sock != -1) {
486
wrk_fds_delete(tp2->t_sock, 3);
487
(void)close(tp2->t_sock);
490
shutdown(tp2->t_sock, 1);
491
/* close stdout after pvmhost dies */
492
tp2->t_out = tp->t_out;
496
LISTDELETE(sp, n_link, n_rlink);
500
tp->t_out = -1; /* don't free shared stdout if alive > 0 */
504
sprintf(pvmtxt, "mpp_free() t%x not active\n", tid);
510
/* load executable onto the given set of nodes */
513
struct waitc_spawn *wxp;
515
int flags = 0; /* exec options */
516
char *name; /* executable */
517
char **argv; /* arg list (argv[-1] must be there) */
518
int count; /* how many */
519
int *tids; /* array to store new tids */
520
int ptid; /* parent task ID */
521
int nenv; /* length of environment */
522
char **envp; /* environment strings */
523
int ptypepart; /* type field */
526
struct pkt *hosttxq; /* out-going queue of pvmhost */
529
char c[128]; /* buffer to store count, name.host */
533
char path[MAXPATHLEN];
536
int hostout; /* stdout of pvmhost */
537
struct hostd *hp = hosts->ht_hosts[hosts->ht_local];
538
int hostpid; /* Unix pid of pvmhost */
539
char htid[128]; /* buffer to store pvmhost tid */
542
static char *nullep[] = { "", 0 };
544
/* -- initialize some variables from the waitc_spawn struct -- */
546
name = wxp->w_argv[0];
548
count = wxp->w_veclen;
556
eplist = CINDEX(name, '/') ? nullep : epaths;
558
for (ep = eplist; *ep; ep++) {
559
/* search for file */
560
(void)strcpy(path, *ep);
562
(void)strcat(path, "/");
563
(void)strncat(path, name, sizeof(path) - strlen(path) - 1);
565
if (stat(path, &sb) == -1
566
|| ((sb.st_mode & S_IFMT) != S_IFREG)
567
|| !(sb.st_mode & S_IEXEC)) {
568
if (pvmdebmask & PDMTASK) {
569
sprintf(pvmtxt, "mpp_load() stat failed <%s>\n", path);
575
if (!(sp = mpp_new(count+1, ptid))) {
579
ptypepart = (sp->n_ptype << (ffs(tidtmask) - 1)) | TIDONNODE;
582
for (nargs = 0; argv[nargs]; nargs++);
585
/* ar[-1], poe, -procs, #, -euilib, us, -hfile fname */
586
nargs += MPIARGC + 1;
587
av = TALLOC(nargs + 1, char*, "argv");
588
av++; /* reserve room for debugger */
589
BZERO((char*)av, nargs * sizeof(char*));
594
av[--nargs] = nodefile;
595
av[--nargs] = MPIOPT3;
597
av[--nargs] = defaultpool;
598
av[--nargs] = MPIOPT4;
600
av[--nargs] = mpiadapter;
601
av[--nargs] = MPIOPT2;
602
sprintf(c, "%d", count+1);
604
av[--nargs] = MPIOPT1;
605
for (j = 2; j < nargs; j++)
606
av[j] = argv[j - 1]; /* poe name argv -procs # -euilib us */
608
if ((sock = mksock()) == -1) {
613
if (flags & PvmTaskDebug)
614
av++; /* pdbx name -procs # -euilib us */
615
/* if (err = forkexec(flags, av[0], av, 0, (char **)0, &tp))
617
if (err = forkexec(flags, av[0], av, 0, (char **)0, 0,
621
PVM_FREE(tp->t_a_out);
622
sprintf(c, "%s.host", name);
623
tp->t_a_out = STRALLOC(c);
624
sp->n_ptid = tp->t_tid; /* pvmhost's tid */
629
sprintf(htid, "PVMHTID=%d", tp->t_tid);
632
sp2pvminfo[0] = TDPROTOCOL;
633
sp2pvminfo[1] = myhostpart + ptypepart;
634
sp2pvminfo[2] = ptid;
635
sp2pvminfo[3] = MAXFRAGSIZE;
636
sp2pvminfo[4] = myndf;
637
sp2pvminfo[5] = partsize;
638
sp2pvminfo[6] = wxp->w_outtid;
639
sp2pvminfo[7] = wxp->w_outtag;
640
sp2pvminfo[8] = wxp->w_outctx;
641
sp2pvminfo[9] = wxp->w_trctid;
642
sp2pvminfo[10] = wxp->w_trctag;
643
sp2pvminfo[11] = wxp->w_trcctx;
647
if (sockconn(sock, tp, pvminfo) == -1) {
653
/* XXX task may not be on same host; can't do auth with tmp file */
654
tp->t_flag |= TF_CONN;
655
if (pvmdebmask & PDMTASK) {
656
sprintf(pvmtxt, "mpp_load() %d type=%d ptid=%x t%x...\n",
657
count, sp->n_ptype, ptid, myhostpart + ptypepart);
661
/* create new task structs */
663
for (j = 0; j < count; j++) {
664
if ((tp = task_new(myhostpart + ptypepart + j)) == NULL) {
668
tp->t_a_out = STRALLOC(name);
670
tp->t_flag |= TF_CONN; /* no need for the auth crap */
673
tp->t_txq = hosttxq; /* node tasks share pvmhost's txq */
674
tp->t_out = hostout; /* and stdout */
675
tp->t_pid = hostpid; /* pvm_kill should kill pvmhost */
676
tp->t_outtid = wxp->w_outtid; /* catch stdout/stderr */
677
tp->t_outtag = wxp->w_outtag;
678
tp->t_outctx = wxp->w_outctx;
682
if (pvmdebmask & PDMTASK) {
683
sprintf(pvmtxt, "mpp_load() didn't find <%s>\n", name);
689
for (j = 0; j < count; j++)
697
/* kill poe process */
705
char nname[128]; /* node name */
706
char comm[512]; /* command to issue */
707
char *hfn; /* host file name */
709
char *av[8]; /* for rsh args */
711
int pid = -1; /* pid of rsh */
714
if ((hfn = getenv("MP_HOSTFILE")) || stat(hfn = "host.list", &sb) != -1) {
715
if (fp = fopen(hfn, "r")) {
716
for (i = 0; i < node; i++)
717
fscanf(fp, "%s", nname);
719
if ((pid = fork()) == -1) {
720
pvmlogperror("sp2kill() fork");
725
av[ac++] = "/usr/bin/rsh";
727
av[ac++] = "poekill";
730
for (i = getdtablesize(); --i > 2; )
736
pvmlogperror("sp2kill() fopen");
739
pvmlogerror("sp2kill() no host file");
751
if (TIDISNODE(tp->t_tid)) {
752
if (signum == SIGTERM || signum == SIGKILL) {
753
/* sp2kill(tp->t_a_out, tp->t_tid & tidnmask); */
754
(void)kill(tp->t_pid, signum);
759
sprintf(pvmtxt,"mpp_kill() signal %d to node t%x ignored\n",
764
(void)kill(tp->t_pid, signum);
769
* Add pvmhost's socket to wfds if there are packets waiting to
770
* be sent to a related node task. Node tasks have no sockets;
771
* they share pvmhost's packet queue (txq). Pvmhost simply
772
* forwards any packets it receives to the appropriate node.
776
mpp_output(dummy1, dummy2)
780
struct nodeset *sp, *sp2;
784
for (sp = busynodes->n_link; sp != busynodes; sp = sp->n_link)
785
if ((tp = task_find(sp->n_ptid))) {
786
if (tp->t_txq->pk_link->pk_buf && tp->t_sock != -1)
787
wrk_fds_add(tp->t_sock, 2);
790
sprintf(pvmtxt, "mpp_output() pvmhost %d died!\n", sp->n_ptype);
792
/* clean up tasks it serves */
793
ptype = sp->n_ptype & ptypemask;
794
for (tp = locltasks->t_link; tp != locltasks; tp = tp->t_link)
795
if (TIDISNODE(tp->t_tid) && TIDTOTYPE(tp->t_tid) == ptype) {
798
task_cleanup(tp->t_link);
799
task_free(tp->t_link);
802
/* pvmhost has died, destroy the node set */
805
LISTDELETE(sp2, n_link, n_rlink);
812
/* replace tm_connect and tm_conn2 */
819
int pvminfo[SIZEHINFO]; /* host info */
820
int ptypepart; /* type field */
822
if (pvmdebmask & PDMNODE) {
823
sprintf(pvmtxt, "mpp_conn() pvmhost %x", tp2->t_tid);
826
tp2->t_sock = tp->t_sock;
827
tp2->t_sad = tp->t_sad;
828
tp2->t_salen = tp->t_salen;
829
tp2->t_flag |= TF_CONN;
831
for (sp = busynodes->n_link; sp != busynodes; sp = sp->n_link)
832
if (sp->n_ptid == tp2->t_tid)
834
if (sp == busynodes) {
835
pvmlogerror("mpp_conn() task is not pvmhost\n");
838
ptypepart = (sp->n_ptype << (ffs(tidtmask) - 1)) | TIDONNODE;
839
if (write(tp2->t_sock, sp2pvminfo, sizeof(sp2pvminfo))
840
!= sizeof(sp2pvminfo)) {
841
pvmlogperror("mpp_conn() write");
851
* Create socket to talk to pvmhost.
852
* Return socket descriptor if successful, -1 otherwise.
857
struct hostd *hp = hosts->ht_hosts[hosts->ht_local];
858
struct sockaddr_in sin;
864
if ((sock = socket(AF_INET, SOCK_STREAM, 0)) == -1) {
865
pvmlogperror("mksock() socket");
869
if (bind(sock, (struct sockaddr*)&sin, sizeof(sin)) == -1) {
870
pvmlogperror("mksock() bind");
875
if (getsockname(sock, (struct sockaddr*)&sin, &cc) == -1) {
876
pvmlogperror("mksock() getsockname");
880
if (listen(sock, 1) == -1) {
881
pvmlogperror("mksock() listen");
886
p = inadport_hex(&sin);
887
sprintf(buf, "PVMSOCK=%s", p);
896
* Wait for connect request from pvmhost and establish connection.
897
* Return 0 if successful, -1 otherwise.
898
* Close listening socket.
901
sockconn(sock, tp, hinfo)
902
int sock; /* listening post */
903
struct task *tp; /* pvm host */
904
int hinfo[]; /* host info to pass along */
908
if ((tp->t_sock = accept(sock, (struct sockaddr*)&tp->t_sad,
909
&tp->t_salen)) == -1) {
910
pvmlogperror("sockconn() accept");
913
if (pvmdebmask & (PDMPACKET|PDMTASK)) {
914
sprintf(pvmtxt, "sockconn() accept from %s sock %d\n",
915
inadport_decimal(&tp->t_sad), tp->t_sock);
921
if (setsockopt(tp->t_sock, IPPROTO_TCP, TCP_NODELAY,
922
(char*)&i, sizeof(int)) == -1)
923
pvmlogperror("sockconn() setsockopt");
926
if (write(tp->t_sock, hinfo, SIZEHINFO*sizeof(int))
927
!= SIZEHINFO*sizeof(int)) {
928
pvmlogperror("sockconn: write");
931
if ((i = fcntl(tp->t_sock, F_GETFL, 0)) == -1)
932
pvmlogperror("sockconn: fcntl");
935
(void)fcntl(tp->t_sock, F_SETFL, i);
937
wrk_fds_add(tp->t_sock, 1);