events 16 ext/-test-/wait_for_single_fd/wait_for_single_fd.c rc = rb_wait_for_single_fd(NUM2INT(fd), NUM2INT(events), tvp);
events 64 ext/io/wait/wait.c int i = rb_wait_for_single_fd(fptr->fd, events, tv);
events 68 ext/io/wait/wait.c return (i & events);
events 360 ext/psych/yaml/api.c if (!QUEUE_INIT(emitter, emitter->events, INITIAL_QUEUE_SIZE))
events 374 ext/psych/yaml/api.c QUEUE_DEL(emitter, emitter->events);
events 393 ext/psych/yaml/api.c while (!QUEUE_EMPTY(emitter, emitter->events)) {
events 394 ext/psych/yaml/api.c yaml_event_delete(&DEQUEUE(emitter, emitter->events));
events 396 ext/psych/yaml/api.c QUEUE_DEL(emitter, emitter->events);
events 283 ext/psych/yaml/emitter.c if (!ENQUEUE(emitter, emitter->events, *event)) {
events 289 ext/psych/yaml/emitter.c if (!yaml_emitter_analyze_event(emitter, emitter->events.head))
events 291 ext/psych/yaml/emitter.c if (!yaml_emitter_state_machine(emitter, emitter->events.head))
events 293 ext/psych/yaml/emitter.c yaml_event_delete(&DEQUEUE(emitter, emitter->events));
events 315 ext/psych/yaml/emitter.c if (QUEUE_EMPTY(emitter, emitter->events))
events 318 ext/psych/yaml/emitter.c switch (emitter->events.head->type) {
events 332 ext/psych/yaml/emitter.c if (emitter->events.tail - emitter->events.head > accumulate)
events 335 ext/psych/yaml/emitter.c for (event = emitter->events.head; event != emitter->events.tail; event ++) {
events 1102 ext/psych/yaml/emitter.c if (emitter->events.tail - emitter->events.head < 2)
events 1105 ext/psych/yaml/emitter.c return (emitter->events.head[0].type == YAML_SEQUENCE_START_EVENT
events 1106 ext/psych/yaml/emitter.c && emitter->events.head[1].type == YAML_SEQUENCE_END_EVENT);
events 1116 ext/psych/yaml/emitter.c if (emitter->events.tail - emitter->events.head < 2)
events 1119 ext/psych/yaml/emitter.c return (emitter->events.head[0].type == YAML_MAPPING_START_EVENT
events 1120 ext/psych/yaml/emitter.c && emitter->events.head[1].type == YAML_MAPPING_END_EVENT);
events 1130 ext/psych/yaml/emitter.c yaml_event_t *event = emitter->events.head;
events 1638 ext/psych/yaml/yaml.h } events;
events 1025 ext/win32ole/win32ole_event.c VALUE events = rb_ivar_get(obj, id_events);
events 1026 ext/win32ole/win32ole_event.c if (NIL_P(events) || !RB_TYPE_P(events, T_ARRAY)) {
events 1027 ext/win32ole/win32ole_event.c events = rb_ary_new();
events 1028 ext/win32ole/win32ole_event.c rb_ivar_set(obj, id_events, events);
events 1030 ext/win32ole/win32ole_event.c ole_delete_event(events, event);
events 1031 ext/win32ole/win32ole_event.c rb_ary_push(events, data);
events 1131 ext/win32ole/win32ole_event.c VALUE events;
events 1142 ext/win32ole/win32ole_event.c events = rb_ivar_get(self, id_events);
events 1143 ext/win32ole/win32ole_event.c if (NIL_P(events)) {
events 1146 ext/win32ole/win32ole_event.c ole_delete_event(events, event);
events 55 include/ruby/debug.h void rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data);
events 59 include/ruby/debug.h void rb_thread_add_event_hook(VALUE thval, rb_event_hook_func_t func, rb_event_flag_t events, VALUE data);
events 65 include/ruby/debug.h VALUE rb_tracepoint_new(VALUE target_thread_not_supported_yet, rb_event_flag_t events, void (*func)(VALUE, void *), void *data);
events 98 include/ruby/debug.h void rb_add_event_hook2(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flag);
events 99 include/ruby/debug.h void rb_thread_add_event_hook2(VALUE thval, rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flag);
events 33 include/ruby/io.h # define reqevents events
events 148 include/ruby/io.h int rb_wait_for_single_fd(int fd, int events, struct timeval *tv);
events 2107 include/ruby/ruby.h void rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data);
events 736 include/ruby/win32.h int rb_w32_wait_events_blocking(HANDLE *events, int num, DWORD timeout);
events 10099 io.c fds.events = events;
events 2231 iseq.c rb_event_flag_t events = current_events & RUBY_EVENT_SPECIFIED_LINE;
events 2237 iseq.c cont = (*func)(line, &events, data);
events 2238 iseq.c if (current_events != events) {
events 2241 iseq.c (VALUE)(current_events | (events & RUBY_EVENT_SPECIFIED_LINE));
events 3713 thread.c int events = read ? RB_WAITFD_IN : RB_WAITFD_OUT;
events 3721 thread.c result = rb_wait_for_single_fd(fd, events, NULL);
events 3851 thread.c fds.events = (short)events;
events 3947 thread.c args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
events 3948 thread.c args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
events 3949 thread.c args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
events 1445 thread_pthread.c pollfds[0].events = POLLIN;
events 1447 thread_pthread.c pollfds[1].events = POLLIN;
events 180 thread_win32.c HANDLE *targets = events;
events 185 thread_win32.c events, count, timeout, th);
events 195 thread_win32.c memcpy(targets, events, sizeof(HANDLE) * count);
events 227 thread_win32.c return w32_wait_events(events, num, timeout, ruby_thread_from_native());
events 235 thread_win32.c BLOCKING_REGION(ret = rb_w32_wait_events_blocking(events, num, timeout),
events 479 vm_core.h rb_event_flag_t events;
events 1197 vm_core.h if ((th->event_hooks.events | th->vm->event_hooks.events) & flag) {
events 34 vm_trace.c rb_event_flag_t events;
events 68 vm_trace.c if (events & ((rb_event_flag_t)1 << i)) {
events 84 vm_trace.c if (events & ((rb_event_flag_t)1 << i)) {
events 108 vm_trace.c if ((events & RUBY_INTERNAL_EVENT_MASK) && (events & ~RUBY_INTERNAL_EVENT_MASK)) {
events 114 vm_trace.c hook->events = events;
events 125 vm_trace.c recalc_add_ruby_vm_event_flags(hook->events);
events 126 vm_trace.c list->events |= hook->events;
events 132 vm_trace.c rb_event_hook_t *hook = alloc_event_hook(func, events, data, hook_flags);
events 139 vm_trace.c rb_threadptr_add_event_hook(thval2thread_t(thval), func, events, data, RUBY_EVENT_HOOK_FLAG_SAFE);
events 145 vm_trace.c rb_event_hook_t *hook = alloc_event_hook(func, events, data, RUBY_EVENT_HOOK_FLAG_SAFE);
events 152 vm_trace.c rb_threadptr_add_event_hook(thval2thread_t(thval), func, events, data, hook_flags);
events 158 vm_trace.c rb_event_hook_t *hook = alloc_event_hook(func, events, data, hook_flags);
events 232 vm_trace.c list->events = 0;
events 238 vm_trace.c recalc_remove_ruby_vm_event_flags(hook->events);
events 242 vm_trace.c list->events |= hook->events; /* update active events */
events 254 vm_trace.c if (!(hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) && (trace_arg->event & hook->events)) {
events 274 vm_trace.c return (list->events & trace_arg->event) != 0;
events 646 vm_trace.c rb_event_flag_t events;
events 1017 vm_trace.c rb_thread_add_event_hook2(tp->target_th->self, (rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
events 1021 vm_trace.c rb_add_event_hook2((rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
events 1173 vm_trace.c tp->events = events;
events 1219 vm_trace.c return tracepoint_new(rb_cTracePoint, target_th, events, func, data, Qundef);
events 1272 vm_trace.c rb_event_flag_t events = 0;
events 1277 vm_trace.c events |= symbol2event_flag(argv[i]);
events 1281 vm_trace.c events = RUBY_EVENT_TRACEPOINT_ALL;
events 1288 vm_trace.c return tracepoint_new(self, 0, events, 0, 0, rb_block_proc());
events 4390 win32/win32.c HANDLE events[MAXCHILDNUM];
events 4396 win32/win32.c events[count++] = child->hProcess;
events 4403 win32/win32.c ret = rb_w32_wait_events_blocking(events, count, timeout);
events 4413 win32/win32.c cause = FindChildSlotByHandle(events[ret]);
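
The two public C-API uses of `events` that dominate the listing are an fd-readiness bitmask passed to rb_wait_for_single_fd() (include/ruby/io.h:148, thread.c, ext/io/wait) and an rb_event_flag_t mask consumed by the event-hook/TracePoint machinery in vm_trace.c. Below is a minimal sketch of the first use, relying only on the declaration shown at include/ruby/io.h:148 and the RB_WAITFD_* flags used in thread.c above; the module name WaitDemo, the function wait_readable, and the 2.5 second timeout are made up for illustration, not taken from the tree.

    /* Hypothetical extension: block until fd is readable or the timeout expires. */
    #include <sys/time.h>
    #include <ruby.h>
    #include <ruby/io.h>

    static VALUE
    wait_readable(VALUE self, VALUE fd)
    {
        struct timeval tv = { 2, 500000 };   /* illustrative 2.5 s timeout */
        int events = RB_WAITFD_IN;           /* same bitmask thread.c builds above */
        int ready  = rb_wait_for_single_fd(NUM2INT(fd), events, &tv);

        if (ready < 0) rb_sys_fail("rb_wait_for_single_fd");
        /* ext/io/wait/wait.c masks the result the same way: (i & events) */
        return (ready & events) ? Qtrue : Qfalse;   /* Qfalse means timeout */
    }

    void
    Init_wait_demo(void)
    {
        VALUE mWaitDemo = rb_define_module("WaitDemo");
        rb_define_module_function(mWaitDemo, "wait_readable", wait_readable, 1);
    }

For the rb_event_flag_t form, a second sketch built only on the rb_tracepoint_new() prototype listed at include/ruby/debug.h:65 and the standard RUBY_EVENT_CALL / RUBY_EVENT_RETURN flags; the hook names and the fprintf body are illustrative assumptions.

    #include <stdio.h>
    #include <ruby.h>
    #include <ruby/debug.h>

    static void
    call_return_hook(VALUE tpval, void *data)
    {
        rb_trace_arg_t *trace_arg = rb_tracearg_from_tracepoint(tpval);
        VALUE path = rb_tracearg_path(trace_arg);

        fprintf(stderr, "event at %s:%d\n",
                NIL_P(path) ? "?" : RSTRING_PTR(path),
                NUM2INT(rb_tracearg_lineno(trace_arg)));
    }

    static VALUE
    install_call_return_hook(VALUE self)
    {
        /* first argument fills the unused "target_thread_not_supported_yet" slot */
        rb_event_flag_t events = RUBY_EVENT_CALL | RUBY_EVENT_RETURN;
        VALUE tp = rb_tracepoint_new(Qnil, events, call_return_hook, NULL);

        rb_tracepoint_enable(tp);
        return tp;
    }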