Revision 1.2 → Revision 1.4

Line 2640 / Line 2640

/* ==== pth_sched.c ==== */

#ifdef USE_EPOLL
#include <assert.h>
#include <sys/epoll.h>
#endif

intern pth_t pth_main;    /* the main thread                */
intern pth_t pth_sched;   /* the permanent scheduler thread */
intern pth_t pth_current; /* the currently running thread   */
Line 2971 / Line 2976

        */
        if (   pth_pqueue_elements(&pth_RQ) == 0
            && pth_pqueue_elements(&pth_NQ) == 0)
#ifdef USE_EPOLL
            pth_sched_eventmanager_epoll(&snapshot, FALSE /* wait */);
#else
            pth_sched_eventmanager(&snapshot, FALSE /* wait */);
#endif
        else
#ifdef USE_EPOLL
            pth_sched_eventmanager_epoll(&snapshot, TRUE /* poll */);
#else
            pth_sched_eventmanager(&snapshot, TRUE /* poll */);
#endif
    } /* NOTREACHED */

Line 3009 / Line 3022

    int rc;
    int sig;
    int n;
#ifdef USE_EPOLL
    int i;
    struct epoll_event epev, epevents[10];
    long int eptimeout;
    int epret;
#endif

    pth_debug2("pth_sched_eventmanager: enter in %s mode",
               dopoll ? "polling" : "waiting");
Line 3018 / Line 3037

    loop_repeat = FALSE;

    /* initialize fd sets */
#ifdef USE_EPOLL
    /* TODO: Figure out what numpipes is */
    if ((epfd = epoll_create(100)) == -1) {
        perror("epoll_create");
        exit(1);
    }
#else
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    FD_ZERO(&efds);
    fdmax = -1;
#endif

    /* initialize signal status */
    sigpending(&pth_sigpending);
Line 3059 / Line 3086

            /* Filedescriptor I/O */
            if (ev->ev_type == PTH_EVENT_FD) {
#ifdef USE_EPOLL
                /* filedescriptors are checked later all at once.
                   Here we only add them to the epoll interest set.
                   Note that epoll_data is a union, so only the event
                   pointer is stored; the fd itself is implicit in the
                   registration and recoverable via ev->ev_args.FD.fd */
                if (ev->ev_goal & PTH_UNTIL_FD_READABLE) {
                    epev.events = EPOLLIN;
                    epev.data.ptr = ev;
                    epret = epoll_ctl(epfd, EPOLL_CTL_ADD, ev->ev_args.FD.fd, &epev);
                    assert(epret == 0);
                }
                if (ev->ev_goal & PTH_UNTIL_FD_WRITEABLE) {
                    epev.events = EPOLLOUT;
                    epev.data.ptr = ev;
                    epret = epoll_ctl(epfd, EPOLL_CTL_ADD, ev->ev_args.FD.fd, &epev);
                    assert(epret == 0);
                }
                if (ev->ev_goal & PTH_UNTIL_FD_EXCEPTION) {
                    epev.events = EPOLLERR | EPOLLHUP;
                    epev.data.ptr = ev;
                    epret = epoll_ctl(epfd, EPOLL_CTL_ADD, ev->ev_args.FD.fd, &epev);
                    assert(epret == 0);
                }
#else
                /* filedescriptors are checked later all at once.
                   Here we only assemble them in the fd sets */
                if (ev->ev_goal & PTH_UNTIL_FD_READABLE)

Line 3069 / Line 3121

                    FD_SET(ev->ev_args.FD.fd, &efds);
                    if (fdmax < ev->ev_args.FD.fd)
                        fdmax = ev->ev_args.FD.fd;
#endif
            }
            /* Filedescriptor Set Select I/O */
            else if (ev->ev_type == PTH_EVENT_SELECT) {
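One caveat in the registration above: if an event waits for both readability and writability of the same descriptor, the second EPOLL_CTL_ADD fails with EEXIST and the assert aborts the process. A minimal sketch of a register helper that falls back to EPOLL_CTL_MOD; the name pth_epoll_register is hypothetical, and a single data.ptr per fd remains a limitation of this approach:

#include <errno.h>
#include <stdint.h>
#include <sys/epoll.h>

/* Sketch only: register interest bits for an event's fd, falling back
   to EPOLL_CTL_MOD when the fd is already in the interest set (a plain
   second EPOLL_CTL_ADD fails with EEXIST). */
static int pth_epoll_register(int epfd, int fd, uint32_t events, void *ev)
{
    struct epoll_event epev;
    epev.events = events;
    epev.data.ptr = ev;            /* epoll_data is a union: ptr only */
    if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &epev) == 0)
        return 0;
    if (errno == EEXIST)
        return epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &epev);
    return -1;
}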
Line 3185 / Line 3238

    if (dopoll) {
        /* do a polling with immediate timeout,
           i.e. check the fd sets only without blocking */
#ifdef USE_EPOLL
        eptimeout = 0;
#else
        pth_time_set(&delay, PTH_TIME_ZERO);
        pdelay = &delay;
#endif
    }
    else if (nexttimer_ev != NULL) {
        /* do a polling with a timeout set to the next timer,
           i.e. wait for the fd sets or the next timer */
        pth_time_set(&delay, &nexttimer_value);
        pth_time_sub(&delay, now);
#ifdef USE_EPOLL
        eptimeout = delay.tv_sec * 1000 + delay.tv_usec / 1000;
#else
        pdelay = &delay;
#endif
    }
    else {
        /* do a polling without a timeout,
           i.e. wait for the fd sets only with blocking */
#ifdef USE_EPOLL
        eptimeout = -1;
#else
        pdelay = NULL;
#endif
    }
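The millisecond conversion above (used again in pth_sched_eventmanager_epoll below) truncates: a delay of, say, 500us becomes a 0ms timeout, so epoll_wait() returns immediately and the scheduler busy-polls until the timer finally elapses. A sketch of a rounding-up alternative; the helper name pth_time_to_eptimeout is hypothetical:

#include <sys/time.h>

/* Hypothetical helper (not in the patch): convert a relative timeval
   into epoll_wait()'s millisecond timeout.  Rounding up sleeps at most
   1ms too long instead of degenerating into a busy poll. */
static long pth_time_to_eptimeout(const struct timeval *delay)
{
    return delay->tv_sec * 1000L + (delay->tv_usec + 999) / 1000;
}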
    }

    /* clear pipe and let select() wait for the read-part of the pipe */
    while (pth_sc(read)(pth_sigpipe[0], minibuf, sizeof(minibuf)) > 0) ;
#ifdef USE_EPOLL
    /* register the signal pipe with a NULL data pointer so the result
       loop below can tell it apart from real pth events (mixing
       data.fd and data.ptr is unsafe, since epoll_data is a union) */
    epev.events = EPOLLIN;
    epev.data.ptr = NULL;
    epret = epoll_ctl(epfd, EPOLL_CTL_ADD, pth_sigpipe[0], &epev);
    assert(epret == 0);
#else
    FD_SET(pth_sigpipe[0], &rfds);
    if (fdmax < pth_sigpipe[0])
        fdmax = pth_sigpipe[0];
#endif

    /* replace signal actions for signals we've to catch for events */
    for (sig = 1; sig < PTH_NSIG; sig++) {
Line 3226 / Line 3298

       WHEN THE SCHEDULER SLEEPS AT ALL, THEN HERE!! */
    rc = -1;
#ifdef USE_EPOLL
    /* epoll_wait() is always called: the "no fds assembled" shortcut
       cannot be used here, since fdmax is not maintained under epoll.
       Retry on signal interruption, as the select() path does. */
    while ((rc = epoll_wait(epfd, epevents, 10 /* TODO */, eptimeout)) < 0
           && errno == EINTR) ;
    if (rc < 0)
        perror("epoll_wait");
#else
    if (!(dopoll && fdmax == -1))
        while ((rc = pth_sc(select)(fdmax+1, &rfds, &wfds, &efds, pdelay)) < 0
               && errno == EINTR) ;
#endif

    /* restore signal mask and actions and handle signals */
    pth_sc(sigprocmask)(SIG_SETMASK, &oss, NULL);
Line 3251 / Line 3329

    }

    /* if the internal signal pipe was used, adjust the select() results */
#ifdef USE_EPOLL
    epret = epoll_ctl(epfd, EPOLL_CTL_DEL, pth_sigpipe[0], NULL);
    if (epret != 0)
        perror("epoll_ctl, EPOLL_CTL_DEL");
    assert(epret == 0);
#else
    if (!dopoll && rc > 0 && FD_ISSET(pth_sigpipe[0], &rfds)) {
        FD_CLR(pth_sigpipe[0], &rfds);
        rc--;

Line 3262 / Line 3346

        FD_ZERO(&wfds);
        FD_ZERO(&efds);
    }
#endif
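Note that the select() branch above decrements rc when the wakeup came only from the internal signal pipe, while the epoll branch merely deregisters the pipe and leaves rc alone. A hedged sketch of the equivalent adjustment for the epoll result array, relying on the NULL data pointer used for the pipe registration:

/* Hypothetical adjustment (not in the patch): discount signal-pipe
   wakeups from rc so it again counts only real fd events, mirroring
   the select() branch. */
{
    int i, nreal = 0;
    for (i = 0; i < rc; i++)
        if (epevents[i].data.ptr != NULL)   /* NULL tags the signal pipe */
            epevents[nreal++] = epevents[i];
    rc = nreal;
}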
    /* now comes the final cleanup loop where we've to do two jobs:
       first we've to do the late handling of the fd I/O events and
       additionally if a thread has one occurred event, we move it from
       the waiting queue to the ready queue */
#ifdef USE_EPOLL
    /* Mark all FD events as occurred if epoll says they happened.
       Entries with a NULL data pointer belong to the internal signal
       pipe, not to a pth event, and must be skipped.  Only the event
       pointer is known here, not the owning thread, so the debug
       message cannot name it; the thread is recovered in the queue
       walk below. */
    for (i = 0; i < rc; ++i) {
        if (epevents[i].data.ptr == NULL)
            continue;
        pth_debug1("pth_sched_eventmanager: [I/O] event occurred");
        ((struct pth_event_st *)epevents[i].data.ptr)->ev_status =
            PTH_STATUS_OCCURRED;
    }
#endif

    /* for all threads in the waiting queue... */
    t = pth_pqueue_head(&pth_WQ);
Line 3284 / Line 3379

            if (ev->ev_status == PTH_STATUS_PENDING) {
                /* Filedescriptor I/O */
                if (ev->ev_type == PTH_EVENT_FD) {
#ifndef USE_EPOLL
                    if (   (   ev->ev_goal & PTH_UNTIL_FD_READABLE
                            && FD_ISSET(ev->ev_args.FD.fd, &rfds))
                        || (   ev->ev_goal & PTH_UNTIL_FD_WRITEABLE

Line 3321 / Line 3417

                                   "[I/O] event failed for thread \"%s\"", t->name);
                        }
                    }
#endif /* ndef USE_EPOLL */
                }
                /* Filedescriptor Set I/O */
                else if (ev->ev_type == PTH_EVENT_SELECT) {
Line 3433 / Line 3530

        }
    }
#ifdef USE_EPOLL
    close(epfd);
#endif

    /* perhaps we have to internally loop... */
    if (loop_repeat) {
        pth_time_set(now, PTH_TIME_NOW);

Line 3442 / Line 3543

    pth_debug1("pth_sched_eventmanager: leaving");
    return;
}
/*
 * Another implementation of the event manager, this time built on top of
 * Linux's very fast epoll mechanism. This function is incomplete at the
 * moment (it handles file descriptor, timer, message port, mutex,
 * condition variable, thread and function events, but not signal or
 * fd-set select events), but it should be possible to add the missing
 * event types back in when I have some free time.
 */
#ifdef USE_EPOLL
intern void pth_sched_eventmanager_epoll(pth_time_t *now, int dopoll)
{
pth_t nexttimer_thread;
pth_event_t nexttimer_ev;
pth_time_t nexttimer_value;
pth_event_t evh;
pth_event_t ev;
pth_t t;
pth_t tlast;
int this_occurred;
int any_occurred;
struct timeval delay;
int loop_repeat;
int rc;
int n;
int i;
struct epoll_event epev, epevents[10];
long int eptimeout;
int epret;
pth_debug2("pth_sched_eventmanager_epoll: enter in %s mode",
dopoll ? "polling" : "waiting");
/* entry point for internal looping in event handling */
loop_entry:
loop_repeat = FALSE;
/* initialize next timer */
pth_time_set(&nexttimer_value, PTH_TIME_ZERO);
nexttimer_thread = NULL;
nexttimer_ev = NULL;
/* for all threads in the waiting queue... */
any_occurred = FALSE;
for (t = pth_pqueue_head(&pth_WQ); t != NULL;
t = pth_pqueue_walk(&pth_WQ, t, PTH_WALK_NEXT)) {
/* cancellation support */
if (t->cancelreq == TRUE)
any_occurred = TRUE;
/* ... and all their events... */
if (t->events == NULL)
continue;
/* ...check whether events occurred */
ev = evh = t->events;
do {
if (ev->ev_status == PTH_STATUS_PENDING) {
this_occurred = FALSE;
/*
* Filedescriptor I/O -- descriptors already added to interest
* set in pth_high.c so we don't have to do anything here
* anymore.
*/
if (ev->ev_type == PTH_EVENT_FD) {
/* noop */
}
/* Timer */
else if (ev->ev_type == PTH_EVENT_TIME) {
if (pth_time_cmp(&(ev->ev_args.TIME.tv), now) < 0)
this_occurred = TRUE;
else {
/* remember the timer which will be elapsed next */
if ((nexttimer_thread == NULL && nexttimer_ev == NULL) ||
pth_time_cmp(&(ev->ev_args.TIME.tv), &nexttimer_value) < 0) {
nexttimer_thread = t;
nexttimer_ev = ev;
pth_time_set(&nexttimer_value, &(ev->ev_args.TIME.tv));
}
}
}
/* Message Port Arrivals */
else if (ev->ev_type == PTH_EVENT_MSG) {
if (pth_ring_elements(&(ev->ev_args.MSG.mp->mp_queue)) > 0)
this_occurred = TRUE;
}
/* Mutex Release */
else if (ev->ev_type == PTH_EVENT_MUTEX) {
if (!(ev->ev_args.MUTEX.mutex->mx_state & PTH_MUTEX_LOCKED))
this_occurred = TRUE;
}
/* Condition Variable Signal */
else if (ev->ev_type == PTH_EVENT_COND) {
if (ev->ev_args.COND.cond->cn_state & PTH_COND_SIGNALED) {
if (ev->ev_args.COND.cond->cn_state & PTH_COND_BROADCAST)
this_occurred = TRUE;
else {
if (!(ev->ev_args.COND.cond->cn_state & PTH_COND_HANDLED)) {
ev->ev_args.COND.cond->cn_state |= PTH_COND_HANDLED;
this_occurred = TRUE;
}
}
}
}
/* Thread Termination */
else if (ev->ev_type == PTH_EVENT_TID) {
if ( ( ev->ev_args.TID.tid == NULL
&& pth_pqueue_elements(&pth_DQ) > 0)
|| ( ev->ev_args.TID.tid != NULL
&& ev->ev_args.TID.tid->state == ev->ev_goal))
this_occurred = TRUE;
}
/* Custom Event Function */
else if (ev->ev_type == PTH_EVENT_FUNC) {
if (ev->ev_args.FUNC.func(ev->ev_args.FUNC.arg))
this_occurred = TRUE;
else {
pth_time_t tv;
pth_time_set(&tv, now);
pth_time_add(&tv, &(ev->ev_args.FUNC.tv));
if ((nexttimer_thread == NULL && nexttimer_ev == NULL) ||
pth_time_cmp(&tv, &nexttimer_value) < 0) {
nexttimer_thread = t;
nexttimer_ev = ev;
pth_time_set(&nexttimer_value, &tv);
}
}
}
/* tag event if it has occurred */
if (this_occurred) {
pth_debug2("pth_sched_eventmanager_epoll: [non-I/O] event occurred for thread \"%s\"", t->name);
ev->ev_status = PTH_STATUS_OCCURRED;
any_occurred = TRUE;
}
}
} while ((ev = ev->ev_next) != evh);
}
if (any_occurred)
dopoll = TRUE;
/* now decide how to poll for fd I/O and timers */
if (dopoll) {
/* do a polling with immediate timeout,
i.e. check the fd sets only without blocking */
eptimeout = 0;
}
else if (nexttimer_ev != NULL) {
/* do a polling with a timeout set to the next timer,
i.e. wait for the fd sets or the next timer */
pth_time_set(&delay, &nexttimer_value);
pth_time_sub(&delay, now);
eptimeout = delay.tv_sec * 1000 + delay.tv_usec / 1000;
}
else {
/* do a polling without a timeout,
i.e. wait for the fd sets only with blocking */
eptimeout = -1;
}
/* now do the polling for filedescriptor I/O and timers
WHEN THE SCHEDULER SLEEPS AT ALL, THEN HERE!! */
    /* retry on signal interruption, as the select()-based manager does */
    while ((rc = epoll_wait(epfd, epevents, 10 /* TODO */, eptimeout)) < 0
           && errno == EINTR) ;
    if (rc < 0)
        perror("epoll_wait");
/* if the timer elapsed, handle it */
if (!dopoll && rc == 0 && nexttimer_ev != NULL) {
if (nexttimer_ev->ev_type == PTH_EVENT_FUNC) {
/* it was an implicit timer event for a function event,
so repeat the event handling for rechecking the function */
loop_repeat = TRUE;
}
else {
/* it was an explicit timer event, standing for its own */
pth_debug2("pth_sched_eventmanager_epoll: [timeout] event occurred for thread \"%s\"",
nexttimer_thread->name);
nexttimer_ev->ev_status = PTH_STATUS_OCCURRED;
}
}
/* now comes the final cleanup loop where we've to
do two jobs: first we've to do the late handling of the fd I/O events and
additionally if a thread has one occurred event, we move it from the
waiting queue to the ready queue */
    /* Mark all FD events as occurred if epoll says they happened.
       Only the event pointer is known at this point, not the owning
       thread, so the debug message cannot name the thread; it is
       recovered in the queue walk below. */
    for (i = 0; i < rc; ++i) {
        pth_debug1("pth_sched_eventmanager_epoll: [I/O] event occurred");
        ((struct pth_event_st *)epevents[i].data.ptr)->ev_status =
            PTH_STATUS_OCCURRED;
    }
/* for all threads in the waiting queue... */
t = pth_pqueue_head(&pth_WQ);
while (t != NULL) {
/* do the late handling of the fd I/O and signal
events in the waiting event ring */
any_occurred = FALSE;
if (t->events != NULL) {
ev = evh = t->events;
do {
if (ev->ev_status != PTH_STATUS_PENDING) {
/* Condition Variable Signal */
if (ev->ev_type == PTH_EVENT_COND) {
/* clean signal */
if (ev->ev_args.COND.cond->cn_state & PTH_COND_SIGNALED) {
ev->ev_args.COND.cond->cn_state &= ~(PTH_COND_SIGNALED);
ev->ev_args.COND.cond->cn_state &= ~(PTH_COND_BROADCAST);
ev->ev_args.COND.cond->cn_state &= ~(PTH_COND_HANDLED);
}
}
}
/* local to global mapping */
if (ev->ev_status != PTH_STATUS_PENDING)
any_occurred = TRUE;
} while ((ev = ev->ev_next) != evh);
}
/* cancellation support */
if (t->cancelreq == TRUE) {
pth_debug2("pth_sched_eventmanager_epoll: cancellation request pending for thread \"%s\"", t->name);
any_occurred = TRUE;
}
/* walk to next thread in waiting queue */
tlast = t;
t = pth_pqueue_walk(&pth_WQ, t, PTH_WALK_NEXT);
        /*
         * move last thread to ready queue if any events occurred for it.
         * we insert it with a slightly increased queue priority to give it
         * a better chance to immediately get scheduled, else the last
         * running thread might immediately get again the CPU which is
         * usually not what we want, because we often use pth_yield()
         * calls to give others a chance.
         */
if (any_occurred) {
pth_pqueue_delete(&pth_WQ, tlast);
tlast->state = PTH_STATE_READY;
pth_pqueue_insert(&pth_RQ, tlast->prio+1, tlast);
pth_debug2("pth_sched_eventmanager: thread \"%s\" moved from waiting "
"to ready queue", tlast->name);
}
}
/* perhaps we have to internally loop... */
if (loop_repeat) {
pth_time_set(now, PTH_TIME_NOW);
goto loop_entry;
}
pth_debug1("pth_sched_eventmanager_epoll: leaving");
return;
}
#endif /* def USE_EPOLL */
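The core trick this manager relies on, carrying an event pointer through the kernel in epoll_data.ptr and flipping its status when the descriptor fires, can be seen in isolation in the following self-contained sketch (generic C, not pth code; my_event is a stand-in for struct pth_event_st):

#include <stdio.h>
#include <unistd.h>
#include <sys/epoll.h>

struct my_event { int occurred; };          /* stand-in for pth_event_st */

int main(void)
{
    int pfd[2], epfd, n, i;
    struct my_event ev = { 0 };
    struct epoll_event epev, out[10];

    if (pipe(pfd) == -1 || (epfd = epoll_create(10)) == -1) {
        perror("setup");
        return 1;
    }
    epev.events = EPOLLIN;
    epev.data.ptr = &ev;                    /* tag the registration */
    if (epoll_ctl(epfd, EPOLL_CTL_ADD, pfd[0], &epev) == -1) {
        perror("epoll_ctl");
        return 1;
    }
    (void)write(pfd[1], "x", 1);            /* make the read end ready */
    n = epoll_wait(epfd, out, 10, -1);
    for (i = 0; i < n; i++)                 /* recover the tag, mark it */
        ((struct my_event *)out[i].data.ptr)->occurred = 1;
    printf("event occurred: %d\n", ev.occurred);
    return 0;
}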
intern void pth_sched_eventmanager_sighandler(int sig)
{
Line 4434 / Line 4795

/* ==== pth_lib.c ==== */

#ifdef USE_EPOLL
/* global epoll descriptor; must start out as -1, because pth_init()
   below closes it whenever it is not -1 (a zero-initialized global
   would make pth_init() close stdin on the first call) */
int epfd = -1;
#endif
/* return the hexadecimal Pth library version number */
long pth_version(void)
{

Line 4477 / Line 4840

    /* initialize syscall wrapping */
    pth_syscall_init();
#ifdef USE_EPOLL
    /* initialize the epoll file descriptor */
    if (epfd != -1)
        close(epfd);
    /* TODO: Figure out what the size hint should actually be here */
    if ((epfd = epoll_create(20)) == -1) {
        perror("epoll_create");
        exit(1);
    }
#endif

    /* initialize the scheduler */
    if (!pth_scheduler_init()) {
        pth_shield { pth_syscall_kill(); }
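On the size-hint TODO above: since Linux 2.6.8 the size argument to epoll_create() is ignored (it merely has to be positive), so any value works. On kernels 2.6.27 and later the call could be replaced entirely, as in this sketch:

#ifdef USE_EPOLL
    /* epoll_create1() takes no size hint and can set close-on-exec
       atomically, which also resolves the TODO above */
    if ((epfd = epoll_create1(EPOLL_CLOEXEC)) == -1) {
        perror("epoll_create1");
        exit(1);
    }
#endif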
Line 4548 / Line 4923

    pth_debug1("pth_kill: enter");
    pth_thread_cleanup(pth_main);
    pth_scheduler_kill();
#ifdef USE_EPOLL
    close(epfd);
    epfd = -1;
#endif
    pth_initialized = FALSE;
    pth_tcb_free(pth_sched);
    pth_tcb_free(pth_main);
Line 5740 / Line 6119

        if (ev_extra != NULL)
            pth_event_concat(ev, ev_extra, NULL);
    }

    /*
     * wait until accept has a chance; must put socket back in original
     * state before waiting so that other calls to pth_accept() won't
     * unexpectedly be non-blocking
     */
    pth_fdmode(s, fdmode);
    pth_wait(ev);
    if (pth_fdmode(s, PTH_FDMODE_NONBLOCK) == PTH_FDMODE_ERROR)
        return pth_error(-1, EBADF);

    /* check for the extra events */
    if (ev_extra != NULL) {
        pth_event_isolate(ev);
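The restore matters as soon as several threads accept on the same listening socket, because pth_fdmode() changes the shared underlying descriptor. A small hedged usage sketch (socket setup elided; without the restore above, the second thread could observe the socket in non-blocking mode and fail with EAGAIN unexpectedly):

/* Sketch: two pth threads accepting on one listening socket `ls` */
static void *acceptor(void *arg)
{
    int ls = *(int *)arg;
    for (;;) {
        int c = pth_accept(ls, NULL, NULL); /* yields while waiting */
        if (c >= 0)
            close(c);                       /* handle the connection here */
    }
    return NULL;
}

/* elsewhere, after pth_init():
   pth_spawn(PTH_ATTR_DEFAULT, acceptor, &ls);
   pth_spawn(PTH_ATTR_DEFAULT, acceptor, &ls); */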
Line 5811 / Line 6197

       let thread sleep until it is or the extra event occurs */
    if (n == 0) {
        ev = pth_event(PTH_EVENT_FD|PTH_UNTIL_FD_READABLE|PTH_MODE_STATIC, &ev_key, fd);
#ifdef USE_EPOLL
        /* Add this FD to the epoll interest set if we're using epoll.
           epoll_data is a union: only the event pointer is stored, the
           fd is implicit in the registration itself. */
        {
            struct epoll_event epev;
            int epret;
            epev.events = EPOLLIN | EPOLLERR | EPOLLHUP;
            epev.data.ptr = ev;
            epret = epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &epev);
            assert(epret == 0);
        }
#endif
        if (ev_extra != NULL)
            pth_event_concat(ev, ev_extra, NULL);
        n = pth_wait(ev);
Line 5886 / Line 6285

       let thread sleep until it is or event occurs */
    if (n < 1) {
        ev = pth_event(PTH_EVENT_FD|PTH_UNTIL_FD_WRITEABLE|PTH_MODE_STATIC, &ev_key, fd);
#ifdef USE_EPOLL
        /* Add this FD to the epoll interest set if we're using epoll.
           As above, only the event pointer goes into the epoll_data union. */
        {
            struct epoll_event epev;
            int epret;
            epev.events = EPOLLOUT | EPOLLERR | EPOLLHUP;
            epev.data.ptr = ev;
            epret = epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &epev);
            assert(epret == 0);
        }
#endif
        if (ev_extra != NULL)
            pth_event_concat(ev, ev_extra, NULL);
        pth_wait(ev);
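With the long-lived global epfd used by pth_sched_eventmanager_epoll(), the descriptors registered in these two wait paths stay in the interest set after pth_wait() returns, and their data.ptr can dangle once the static event is recycled. A hedged sketch of the symmetric cleanup, placed right after the wait:

#ifdef USE_EPOLL
        /* Hypothetical cleanup (not in the patch): drop the fd from the
           interest set once the wait is over, so a recycled event
           structure can never be reached through epoll again.  ENOENT
           is tolerated in case the fd was closed meanwhile. */
        {
            int epret = epoll_ctl(epfd, EPOLL_CTL_DEL, fd, NULL);
            assert(epret == 0 || errno == ENOENT);
        }
#endif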
Line 7288 / Line 7700

}