You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

4899 lines
120 KiB

11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
11 years ago
11 years ago
10 years ago
11 years ago
  1. /*
  2. * libev event processing core, watcher management
  3. *
  4. * Copyright (c) 2007,2008,2009,2010,2011,2012,2013 Marc Alexander Lehmann <libev@schmorp.de>
  5. * All rights reserved.
  6. *
  7. * Redistribution and use in source and binary forms, with or without modifica-
  8. * tion, are permitted provided that the following conditions are met:
  9. *
  10. * 1. Redistributions of source code must retain the above copyright notice,
  11. * this list of conditions and the following disclaimer.
  12. *
  13. * 2. Redistributions in binary form must reproduce the above copyright
  14. * notice, this list of conditions and the following disclaimer in the
  15. * documentation and/or other materials provided with the distribution.
  16. *
  17. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
  18. * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
  19. * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
  20. * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
  21. * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  22. * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  23. * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  24. * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
  25. * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
  26. * OF THE POSSIBILITY OF SUCH DAMAGE.
  27. *
  28. * Alternatively, the contents of this file may be used under the terms of
  29. * the GNU General Public License ("GPL") version 2 or any later version,
  30. * in which case the provisions of the GPL are applicable instead of
  31. * the above. If you wish to allow the use of your version of this file
  32. * only under the terms of the GPL and not to allow others to use your
  33. * version of this file under the BSD license, indicate your decision
  34. * by deleting the provisions above and replace them with the notice
  35. * and other provisions required by the GPL. If you do not delete the
  36. * provisions above, a recipient may use your version of this file under
  37. * either the BSD or the GPL.
  38. */
  39. /* this big block deduces configuration from config.h */
  40. #ifndef EV_STANDALONE
  41. # ifdef EV_CONFIG_H
  42. # include EV_CONFIG_H
  43. # else
  44. # include "config.h"
  45. # endif
  46. # if HAVE_FLOOR
  47. # ifndef EV_USE_FLOOR
  48. # define EV_USE_FLOOR 1
  49. # endif
  50. # endif
  51. # if HAVE_CLOCK_SYSCALL
  52. # ifndef EV_USE_CLOCK_SYSCALL
  53. # define EV_USE_CLOCK_SYSCALL 1
  54. # ifndef EV_USE_REALTIME
  55. # define EV_USE_REALTIME 0
  56. # endif
  57. # ifndef EV_USE_MONOTONIC
  58. # define EV_USE_MONOTONIC 1
  59. # endif
  60. # endif
  61. # elif !defined EV_USE_CLOCK_SYSCALL
  62. # define EV_USE_CLOCK_SYSCALL 0
  63. # endif
  64. # if HAVE_CLOCK_GETTIME
  65. # ifndef EV_USE_MONOTONIC
  66. # define EV_USE_MONOTONIC 1
  67. # endif
  68. # ifndef EV_USE_REALTIME
  69. # define EV_USE_REALTIME 0
  70. # endif
  71. # else
  72. # ifndef EV_USE_MONOTONIC
  73. # define EV_USE_MONOTONIC 0
  74. # endif
  75. # ifndef EV_USE_REALTIME
  76. # define EV_USE_REALTIME 0
  77. # endif
  78. # endif
  79. # if HAVE_NANOSLEEP
  80. # ifndef EV_USE_NANOSLEEP
  81. # define EV_USE_NANOSLEEP EV_FEATURE_OS
  82. # endif
  83. # else
  84. # undef EV_USE_NANOSLEEP
  85. # define EV_USE_NANOSLEEP 0
  86. # endif
  87. # if HAVE_SELECT && HAVE_SYS_SELECT_H
  88. # ifndef EV_USE_SELECT
  89. # define EV_USE_SELECT EV_FEATURE_BACKENDS
  90. # endif
  91. # else
  92. # undef EV_USE_SELECT
  93. # define EV_USE_SELECT 0
  94. # endif
  95. # if HAVE_POLL && HAVE_POLL_H
  96. # ifndef EV_USE_POLL
  97. # define EV_USE_POLL EV_FEATURE_BACKENDS
  98. # endif
  99. # else
  100. # undef EV_USE_POLL
  101. # define EV_USE_POLL 0
  102. # endif
  103. # if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H
  104. # ifndef EV_USE_EPOLL
  105. # define EV_USE_EPOLL EV_FEATURE_BACKENDS
  106. # endif
  107. # else
  108. # undef EV_USE_EPOLL
  109. # define EV_USE_EPOLL 0
  110. # endif
  111. # if HAVE_KQUEUE && HAVE_SYS_EVENT_H
  112. # ifndef EV_USE_KQUEUE
  113. # define EV_USE_KQUEUE EV_FEATURE_BACKENDS
  114. # endif
  115. # else
  116. # undef EV_USE_KQUEUE
  117. # define EV_USE_KQUEUE 0
  118. # endif
  119. # if HAVE_PORT_H && HAVE_PORT_CREATE
  120. # ifndef EV_USE_PORT
  121. # define EV_USE_PORT EV_FEATURE_BACKENDS
  122. # endif
  123. # else
  124. # undef EV_USE_PORT
  125. # define EV_USE_PORT 0
  126. # endif
  127. # if HAVE_INOTIFY_INIT && HAVE_SYS_INOTIFY_H
  128. # ifndef EV_USE_INOTIFY
  129. # define EV_USE_INOTIFY EV_FEATURE_OS
  130. # endif
  131. # else
  132. # undef EV_USE_INOTIFY
  133. # define EV_USE_INOTIFY 0
  134. # endif
  135. # if HAVE_SIGNALFD && HAVE_SYS_SIGNALFD_H
  136. # ifndef EV_USE_SIGNALFD
  137. # define EV_USE_SIGNALFD EV_FEATURE_OS
  138. # endif
  139. # else
  140. # undef EV_USE_SIGNALFD
  141. # define EV_USE_SIGNALFD 0
  142. # endif
  143. # if HAVE_EVENTFD
  144. # ifndef EV_USE_EVENTFD
  145. # define EV_USE_EVENTFD EV_FEATURE_OS
  146. # endif
  147. # else
  148. # undef EV_USE_EVENTFD
  149. # define EV_USE_EVENTFD 0
  150. # endif
  151. #endif
  152. #include <stdlib.h>
  153. #include <string.h>
  154. #include <fcntl.h>
  155. #include <stddef.h>
  156. #include <stdio.h>
  157. #include <assert.h>
  158. #include <errno.h>
  159. #include <sys/types.h>
  160. #include <time.h>
  161. #include <limits.h>
  162. #include <signal.h>
  163. #ifdef EV_H
  164. # include EV_H
  165. #else
  166. # include "ev.h"
  167. #endif
  168. #if EV_NO_THREADS
  169. # undef EV_NO_SMP
  170. # define EV_NO_SMP 1
  171. # undef ECB_NO_THREADS
  172. # define ECB_NO_THREADS 1
  173. #endif
  174. #if EV_NO_SMP
  175. # undef EV_NO_SMP
  176. # define ECB_NO_SMP 1
  177. #endif
  178. #ifndef _WIN32
  179. # include <sys/time.h>
  180. # include <sys/wait.h>
  181. # include <unistd.h>
  182. #else
  183. # include <io.h>
  184. # define WIN32_LEAN_AND_MEAN
  185. # include <winsock2.h>
  186. # include <windows.h>
  187. # ifndef EV_SELECT_IS_WINSOCKET
  188. # define EV_SELECT_IS_WINSOCKET 1
  189. # endif
  190. # undef EV_AVOID_STDIO
  191. #endif
  192. /* OS X, in its infinite idiocy, actually HARDCODES
  193. * a limit of 1024 into their select. Where people have brains,
  194. * OS X engineers apparently have a vacuum. Or maybe they were
  195. * ordered to have a vacuum, or they do anything for money.
  196. * This might help. Or not.
  197. */
  198. #define _DARWIN_UNLIMITED_SELECT 1
  199. /* this block tries to deduce configuration from header-defined symbols and defaults */
  200. /* try to deduce the maximum number of signals on this platform */
  201. #if defined EV_NSIG
  202. /* use what's provided */
  203. #elif defined NSIG
  204. # define EV_NSIG (NSIG)
  205. #elif defined _NSIG
  206. # define EV_NSIG (_NSIG)
  207. #elif defined SIGMAX
  208. # define EV_NSIG (SIGMAX+1)
  209. #elif defined SIG_MAX
  210. # define EV_NSIG (SIG_MAX+1)
  211. #elif defined _SIG_MAX
  212. # define EV_NSIG (_SIG_MAX+1)
  213. #elif defined MAXSIG
  214. # define EV_NSIG (MAXSIG+1)
  215. #elif defined MAX_SIG
  216. # define EV_NSIG (MAX_SIG+1)
  217. #elif defined SIGARRAYSIZE
  218. # define EV_NSIG (SIGARRAYSIZE) /* Assume ary[SIGARRAYSIZE] */
  219. #elif defined _sys_nsig
  220. # define EV_NSIG (_sys_nsig) /* Solaris 2.5 */
  221. #else
  222. # define EV_NSIG (8 * sizeof (sigset_t) + 1)
  223. #endif
  224. #ifndef EV_USE_FLOOR
  225. # define EV_USE_FLOOR 0
  226. #endif
  227. #ifndef EV_USE_CLOCK_SYSCALL
  228. # if __linux && __GLIBC__ == 2 && __GLIBC_MINOR__ < 17
  229. # define EV_USE_CLOCK_SYSCALL EV_FEATURE_OS
  230. # else
  231. # define EV_USE_CLOCK_SYSCALL 0
  232. # endif
  233. #endif
  234. #if !(_POSIX_TIMERS > 0)
  235. # ifndef EV_USE_MONOTONIC
  236. # define EV_USE_MONOTONIC 0
  237. # endif
  238. # ifndef EV_USE_REALTIME
  239. # define EV_USE_REALTIME 0
  240. # endif
  241. #endif
  242. #ifndef EV_USE_MONOTONIC
  243. # if defined _POSIX_MONOTONIC_CLOCK && _POSIX_MONOTONIC_CLOCK >= 0
  244. # define EV_USE_MONOTONIC EV_FEATURE_OS
  245. # else
  246. # define EV_USE_MONOTONIC 0
  247. # endif
  248. #endif
  249. #ifndef EV_USE_REALTIME
  250. # define EV_USE_REALTIME !EV_USE_CLOCK_SYSCALL
  251. #endif
  252. #ifndef EV_USE_NANOSLEEP
  253. # if _POSIX_C_SOURCE >= 199309L
  254. # define EV_USE_NANOSLEEP EV_FEATURE_OS
  255. # else
  256. # define EV_USE_NANOSLEEP 0
  257. # endif
  258. #endif
  259. #ifndef EV_USE_SELECT
  260. # define EV_USE_SELECT EV_FEATURE_BACKENDS
  261. #endif
  262. #ifndef EV_USE_POLL
  263. # ifdef _WIN32
  264. # define EV_USE_POLL 0
  265. # else
  266. # define EV_USE_POLL EV_FEATURE_BACKENDS
  267. # endif
  268. #endif
  269. #ifndef EV_USE_EPOLL
  270. # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
  271. # define EV_USE_EPOLL EV_FEATURE_BACKENDS
  272. # else
  273. # define EV_USE_EPOLL 0
  274. # endif
  275. #endif
  276. #ifndef EV_USE_KQUEUE
  277. # define EV_USE_KQUEUE 0
  278. #endif
  279. #ifndef EV_USE_PORT
  280. # define EV_USE_PORT 0
  281. #endif
  282. #ifndef EV_USE_INOTIFY
  283. # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
  284. # define EV_USE_INOTIFY EV_FEATURE_OS
  285. # else
  286. # define EV_USE_INOTIFY 0
  287. # endif
  288. #endif
  289. #ifndef EV_PID_HASHSIZE
  290. # define EV_PID_HASHSIZE EV_FEATURE_DATA ? 16 : 1
  291. #endif
  292. #ifndef EV_INOTIFY_HASHSIZE
  293. # define EV_INOTIFY_HASHSIZE EV_FEATURE_DATA ? 16 : 1
  294. #endif
  295. #ifndef EV_USE_EVENTFD
  296. # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
  297. # define EV_USE_EVENTFD EV_FEATURE_OS
  298. # else
  299. # define EV_USE_EVENTFD 0
  300. # endif
  301. #endif
  302. #ifndef EV_USE_SIGNALFD
  303. # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
  304. # define EV_USE_SIGNALFD EV_FEATURE_OS
  305. # else
  306. # define EV_USE_SIGNALFD 0
  307. # endif
  308. #endif
  309. #if 0 /* debugging */
  310. # define EV_VERIFY 3
  311. # define EV_USE_4HEAP 1
  312. # define EV_HEAP_CACHE_AT 1
  313. #endif
  314. #ifndef EV_VERIFY
  315. # define EV_VERIFY (EV_FEATURE_API ? 1 : 0)
  316. #endif
  317. #ifndef EV_USE_4HEAP
  318. # define EV_USE_4HEAP EV_FEATURE_DATA
  319. #endif
  320. #ifndef EV_HEAP_CACHE_AT
  321. # define EV_HEAP_CACHE_AT EV_FEATURE_DATA
  322. #endif
  323. #ifdef ANDROID
  324. /* supposedly, android doesn't typedef fd_mask */
  325. # undef EV_USE_SELECT
  326. # define EV_USE_SELECT 0
  327. /* supposedly, we need to include syscall.h, not sys/syscall.h, so just disable */
  328. # undef EV_USE_CLOCK_SYSCALL
  329. # define EV_USE_CLOCK_SYSCALL 0
  330. #endif
  331. /* aix's poll.h seems to cause lots of trouble */
  332. #ifdef _AIX
  333. /* AIX has a completely broken poll.h header */
  334. # undef EV_USE_POLL
  335. # define EV_USE_POLL 0
  336. #endif
  337. /* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */
  338. /* which makes programs even slower. might work on other unices, too. */
  339. #if EV_USE_CLOCK_SYSCALL
  340. # include <sys/syscall.h>
  341. # ifdef SYS_clock_gettime
  342. # define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts))
  343. # undef EV_USE_MONOTONIC
  344. # define EV_USE_MONOTONIC 1
  345. # else
  346. # undef EV_USE_CLOCK_SYSCALL
  347. # define EV_USE_CLOCK_SYSCALL 0
  348. # endif
  349. #endif
  350. /* this block fixes any misconfiguration where we know we run into trouble otherwise */
  351. #ifndef CLOCK_MONOTONIC
  352. # undef EV_USE_MONOTONIC
  353. # define EV_USE_MONOTONIC 0
  354. #endif
  355. #ifndef CLOCK_REALTIME
  356. # undef EV_USE_REALTIME
  357. # define EV_USE_REALTIME 0
  358. #endif
  359. #if !EV_STAT_ENABLE
  360. # undef EV_USE_INOTIFY
  361. # define EV_USE_INOTIFY 0
  362. #endif
  363. #if !EV_USE_NANOSLEEP
  364. /* hp-ux has it in sys/time.h, which we unconditionally include above */
  365. # if !defined _WIN32 && !defined __hpux
  366. # include <sys/select.h>
  367. # endif
  368. #endif
  369. #if EV_USE_INOTIFY
  370. # include <sys/statfs.h>
  371. # include <sys/inotify.h>
  372. /* some very old inotify.h headers don't have IN_DONT_FOLLOW */
  373. # ifndef IN_DONT_FOLLOW
  374. # undef EV_USE_INOTIFY
  375. # define EV_USE_INOTIFY 0
  376. # endif
  377. #endif
  378. #if EV_USE_EVENTFD
  379. /* our minimum requirement is glibc 2.7 which has the stub, but not the header */
  380. # include <stdint.h>
  381. # ifndef EFD_NONBLOCK
  382. # define EFD_NONBLOCK O_NONBLOCK
  383. # endif
  384. # ifndef EFD_CLOEXEC
  385. # ifdef O_CLOEXEC
  386. # define EFD_CLOEXEC O_CLOEXEC
  387. # else
  388. # define EFD_CLOEXEC 02000000
  389. # endif
  390. # endif
  391. EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
  392. #endif
  393. #if EV_USE_SIGNALFD
  394. /* our minimum requirement is glibc 2.7 which has the stub, but not the header */
  395. # include <stdint.h>
  396. # ifndef SFD_NONBLOCK
  397. # define SFD_NONBLOCK O_NONBLOCK
  398. # endif
  399. # ifndef SFD_CLOEXEC
  400. # ifdef O_CLOEXEC
  401. # define SFD_CLOEXEC O_CLOEXEC
  402. # else
  403. # define SFD_CLOEXEC 02000000
  404. # endif
  405. # endif
  406. EV_CPP (extern "C") int signalfd (int fd, const sigset_t *mask, int flags);
  407. struct signalfd_siginfo
  408. {
  409. uint32_t ssi_signo;
  410. char pad[128 - sizeof (uint32_t)];
  411. };
  412. #endif
  413. /**/
  414. #if EV_VERIFY >= 3
  415. # define EV_FREQUENT_CHECK ev_verify (EV_A)
  416. #else
  417. # define EV_FREQUENT_CHECK do { } while (0)
  418. #endif
  419. /*
  420. * This is used to work around floating point rounding problems.
  421. * This value is good at least till the year 4000.
  422. */
  423. #define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */
  424. /*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */
  425. #define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
  426. #define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
  427. #define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
  428. #define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
  429. /* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
  430. /* ECB.H BEGIN */
  431. /*
  432. * libecb - http://software.schmorp.de/pkg/libecb
  433. *
  434. * Copyright (©) 2009-2014 Marc Alexander Lehmann <libecb@schmorp.de>
  435. * Copyright (©) 2011 Emanuele Giaquinta
  436. * All rights reserved.
  437. *
  438. * Redistribution and use in source and binary forms, with or without modifica-
  439. * tion, are permitted provided that the following conditions are met:
  440. *
  441. * 1. Redistributions of source code must retain the above copyright notice,
  442. * this list of conditions and the following disclaimer.
  443. *
  444. * 2. Redistributions in binary form must reproduce the above copyright
  445. * notice, this list of conditions and the following disclaimer in the
  446. * documentation and/or other materials provided with the distribution.
  447. *
  448. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
  449. * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
  450. * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
  451. * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
  452. * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  453. * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  454. * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  455. * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
  456. * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
  457. * OF THE POSSIBILITY OF SUCH DAMAGE.
  458. *
  459. * Alternatively, the contents of this file may be used under the terms of
  460. * the GNU General Public License ("GPL") version 2 or any later version,
  461. * in which case the provisions of the GPL are applicable instead of
  462. * the above. If you wish to allow the use of your version of this file
  463. * only under the terms of the GPL and not to allow others to use your
  464. * version of this file under the BSD license, indicate your decision
  465. * by deleting the provisions above and replace them with the notice
  466. * and other provisions required by the GPL. If you do not delete the
  467. * provisions above, a recipient may use your version of this file under
  468. * either the BSD or the GPL.
  469. */
  470. #ifndef ECB_H
  471. #define ECB_H
  472. /* 16 bits major, 16 bits minor */
  473. #define ECB_VERSION 0x00010003
  474. #ifdef _WIN32
  475. typedef signed char int8_t;
  476. typedef unsigned char uint8_t;
  477. typedef signed short int16_t;
  478. typedef unsigned short uint16_t;
  479. typedef signed int int32_t;
  480. typedef unsigned int uint32_t;
  481. #if __GNUC__
  482. typedef signed long long int64_t;
  483. typedef unsigned long long uint64_t;
  484. #else /* _MSC_VER || __BORLANDC__ */
  485. typedef signed __int64 int64_t;
  486. typedef unsigned __int64 uint64_t;
  487. #endif
  488. #ifdef _WIN64
  489. #define ECB_PTRSIZE 8
  490. typedef uint64_t uintptr_t;
  491. typedef int64_t intptr_t;
  492. #else
  493. #define ECB_PTRSIZE 4
  494. typedef uint32_t uintptr_t;
  495. typedef int32_t intptr_t;
  496. #endif
  497. #else
  498. #include <inttypes.h>
  499. #if UINTMAX_MAX > 0xffffffffU
  500. #define ECB_PTRSIZE 8
  501. #else
  502. #define ECB_PTRSIZE 4
  503. #endif
  504. #endif
  505. /* work around x32 idiocy by defining proper macros */
  506. #if __amd64 || __x86_64 || _M_AMD64 || _M_X64
  507. #if _ILP32
  508. #define ECB_AMD64_X32 1
  509. #else
  510. #define ECB_AMD64 1
  511. #endif
  512. #endif
  513. /* many compilers define _GNUC_ to some versions but then only implement
  514. * what their idiot authors think are the "more important" extensions,
  515. * causing enormous grief in return for some better fake benchmark numbers.
  516. * or so.
  517. * we try to detect these and simply assume they are not gcc - if they have
  518. * an issue with that they should have done it right in the first place.
  519. */
  520. #ifndef ECB_GCC_VERSION
  521. #if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__
  522. #define ECB_GCC_VERSION(major,minor) 0
  523. #else
  524. #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
  525. #endif
  526. #endif
  527. #define ECB_CPP (__cplusplus+0)
  528. #define ECB_CPP11 (__cplusplus >= 201103L)
  529. #if ECB_CPP
  530. #define ECB_C 0
  531. #define ECB_STDC_VERSION 0
  532. #else
  533. #define ECB_C 1
  534. #define ECB_STDC_VERSION __STDC_VERSION__
  535. #endif
  536. #define ECB_C99 (ECB_STDC_VERSION >= 199901L)
  537. #define ECB_C11 (ECB_STDC_VERSION >= 201112L)
  538. #if ECB_CPP
  539. #define ECB_EXTERN_C extern "C"
  540. #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
  541. #define ECB_EXTERN_C_END }
  542. #else
  543. #define ECB_EXTERN_C extern
  544. #define ECB_EXTERN_C_BEG
  545. #define ECB_EXTERN_C_END
  546. #endif
  547. /*****************************************************************************/
  548. /* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
  549. /* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */
  550. #if ECB_NO_THREADS
  551. #define ECB_NO_SMP 1
  552. #endif
  553. #if ECB_NO_SMP
  554. #define ECB_MEMORY_FENCE do { } while (0)
  555. #endif
  556. #ifndef ECB_MEMORY_FENCE
  557. #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
  558. #if __i386 || __i386__
  559. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
  560. #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
  561. #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
  562. #elif __amd64 || __amd64__ || __x86_64 || __x86_64__
  563. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
  564. #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
  565. #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
  566. #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
  567. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
  568. #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
  569. || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__
  570. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
  571. #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
  572. || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__
  573. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
  574. #elif __aarch64__
  575. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory")
  576. #elif (__sparc || __sparc__) && !__sparcv8
  577. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
  578. #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
  579. #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
  580. #elif defined __s390__ || defined __s390x__
  581. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
  582. #elif defined __mips__
  583. /* GNU/Linux emulates sync on mips1 architectures, so we force its use */
  584. /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
  585. #define ECB_MEMORY_FENCE __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
  586. #elif defined __alpha__
  587. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory")
  588. #elif defined __hppa__
  589. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
  590. #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
  591. #elif defined __ia64__
  592. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory")
  593. #elif defined __m68k__
  594. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
  595. #elif defined __m88k__
  596. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory")
  597. #elif defined __sh__
  598. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
  599. #endif
  600. #endif
  601. #endif
  602. #ifndef ECB_MEMORY_FENCE
  603. #if ECB_GCC_VERSION(4,7)
  604. /* see comment below (stdatomic.h) about the C11 memory model. */
  605. #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
  606. #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
  607. #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
  608. /* The __has_feature syntax from clang is so misdesigned that we cannot use it
  609. * without risking compile time errors with other compilers. We *could*
  610. * define our own ecb_clang_has_feature, but I just can't be bothered to work
  611. * around this shit time and again.
  612. * #elif defined __clang && __has_feature (cxx_atomic)
  613. * // see comment below (stdatomic.h) about the C11 memory model.
  614. * #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
  615. * #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
  616. * #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
  617. */
  618. #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
  619. #define ECB_MEMORY_FENCE __sync_synchronize ()
  620. #elif _MSC_VER >= 1500 /* VC++ 2008 */
  621. /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
  622. #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
  623. #define ECB_MEMORY_FENCE _ReadWriteBarrier (); MemoryBarrier()
  624. #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */
  625. #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier()
  626. #elif _MSC_VER >= 1400 /* VC++ 2005 */
  627. #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
  628. #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
  629. #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
  630. #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
  631. #elif defined _WIN32
  632. #include <WinNT.h>
  633. #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
  634. #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
  635. #include <mbarrier.h>
  636. #define ECB_MEMORY_FENCE __machine_rw_barrier ()
  637. #define ECB_MEMORY_FENCE_ACQUIRE __machine_r_barrier ()
  638. #define ECB_MEMORY_FENCE_RELEASE __machine_w_barrier ()
  639. #elif __xlC__
  640. #define ECB_MEMORY_FENCE __sync ()
  641. #endif
  642. #endif
  643. #ifndef ECB_MEMORY_FENCE
  644. #if ECB_C11 && !defined __STDC_NO_ATOMICS__
  645. /* we assume that these memory fences work on all variables/all memory accesses, */
  646. /* not just C11 atomics and atomic accesses */
  647. #include <stdatomic.h>
  648. /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
  649. /* any fence other than seq_cst, which isn't very efficient for us. */
  650. /* Why that is, we don't know - either the C11 memory model is quite useless */
  651. /* for most usages, or gcc and clang have a bug */
  652. /* I *currently* lean towards the latter, and inefficiently implement */
  653. /* all three of ecb's fences as a seq_cst fence */
  654. /* Update, gcc-4.8 generates mfence for all c++ fences, but nothing */
  655. /* for all __atomic_thread_fence's except seq_cst */
  656. #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
  657. #endif
  658. #endif
  659. #ifndef ECB_MEMORY_FENCE
  660. #if !ECB_AVOID_PTHREADS
  661. /*
  662. * if you get undefined symbol references to pthread_mutex_lock,
  663. * or failure to find pthread.h, then you should implement
  664. * the ECB_MEMORY_FENCE operations for your cpu/compiler
  665. * OR provide pthread.h and link against the posix thread library
  666. * of your system.
  667. */
  668. #include <pthread.h>
  669. #define ECB_NEEDS_PTHREADS 1
  670. #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1
  671. static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
  672. #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
  673. #endif
  674. #endif
  675. #if !defined ECB_MEMORY_FENCE_ACQUIRE && defined ECB_MEMORY_FENCE
  676. #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
  677. #endif
  678. #if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
  679. #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
  680. #endif
  681. /*****************************************************************************/
  682. #if __cplusplus
  683. #define ecb_inline static inline
  684. #elif ECB_GCC_VERSION(2,5)
  685. #define ecb_inline static __inline__
  686. #elif ECB_C99
  687. #define ecb_inline static inline
  688. #else
  689. #define ecb_inline static
  690. #endif
  691. #if ECB_GCC_VERSION(3,3)
  692. #define ecb_restrict __restrict__
  693. #elif ECB_C99
  694. #define ecb_restrict restrict
  695. #else
  696. #define ecb_restrict
  697. #endif
  698. typedef int ecb_bool;
  699. #define ECB_CONCAT_(a, b) a ## b
  700. #define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)
  701. #define ECB_STRINGIFY_(a) # a
  702. #define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)
  703. #define ecb_function_ ecb_inline
  704. #if ECB_GCC_VERSION(3,1)
  705. #define ecb_attribute(attrlist) __attribute__(attrlist)
  706. #define ecb_is_constant(expr) __builtin_constant_p (expr)
  707. #define ecb_expect(expr,value) __builtin_expect ((expr),(value))
  708. #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
  709. #else
  710. #define ecb_attribute(attrlist)
  711. /* possible C11 impl for integral types
  712. typedef struct ecb_is_constant_struct ecb_is_constant_struct;
  713. #define ecb_is_constant(expr) _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr)), ecb_is_constant_struct *: 0, default: 1)) */
  714. #define ecb_is_constant(expr) 0
  715. #define ecb_expect(expr,value) (expr)
  716. #define ecb_prefetch(addr,rw,locality)
  717. #endif
/* no emulation for ecb_decltype */
#if ECB_GCC_VERSION(4,5)
#define ecb_decltype(x) __decltype(x)
#elif ECB_GCC_VERSION(3,0)
#define ecb_decltype(x) __typeof(x)
#endif

/* function/variable attribute shortcuts - expand to compiler attributes */
/* where supported, and to nothing (or an msvc equivalent) elsewhere */
#if _MSC_VER >= 1300
#define ecb_deprecated __declspec(deprecated)
#else
#define ecb_deprecated ecb_attribute ((__deprecated__))
#endif

#define ecb_noinline ecb_attribute ((__noinline__))
#define ecb_unused ecb_attribute ((__unused__))
#define ecb_const ecb_attribute ((__const__))
#define ecb_pure ecb_attribute ((__pure__))

/* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx __declspec(noreturn) */
#if ECB_C11
#define ecb_noreturn _Noreturn
#else
#define ecb_noreturn ecb_attribute ((__noreturn__))
#endif

#if ECB_GCC_VERSION(4,3)
#define ecb_artificial ecb_attribute ((__artificial__))
#define ecb_hot ecb_attribute ((__hot__))
#define ecb_cold ecb_attribute ((__cold__))
#else
#define ecb_artificial
#define ecb_hot
#define ecb_cold
#endif

/* put around conditional expressions if you are very sure that the */
/* expression is mostly true or mostly false. note that these return */
/* booleans, not the expression. */
#define ecb_expect_false(expr) ecb_expect (!!(expr), 0)
#define ecb_expect_true(expr) ecb_expect (!!(expr), 1)
/* for compatibility to the rest of the world */
#define ecb_likely(expr) ecb_expect_true (expr)
#define ecb_unlikely(expr) ecb_expect_false (expr)

/* count trailing zero bits and count # of one bits */
#if ECB_GCC_VERSION(3,4)
/* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */
#define ecb_ld32(x) (__builtin_clz (x) ^ 31)
#define ecb_ld64(x) (__builtin_clzll (x) ^ 63)
#define ecb_ctz32(x) __builtin_ctz (x)
#define ecb_ctz64(x) __builtin_ctzll (x)
#define ecb_popcount32(x) __builtin_popcount (x)
/* no popcountll */
#else
ecb_function_ int ecb_ctz32 (uint32_t x) ecb_const;
/* count trailing zero bits in x; x must be non-zero, result is 0..31 */
ecb_function_ int
ecb_ctz32 (uint32_t x)
{
  int r = 0;

  x &= ~x + 1; /* this isolates the lowest bit */

#if ECB_branchless_on_i386
  /* branchless variant - each mask test contributes one bit of the result */
  r += !!(x & 0xaaaaaaaa) << 0;
  r += !!(x & 0xcccccccc) << 1;
  r += !!(x & 0xf0f0f0f0) << 2;
  r += !!(x & 0xff00ff00) << 3;
  r += !!(x & 0xffff0000) << 4;
#else
  /* binary search for the position of the (single) isolated bit */
  if (x & 0xaaaaaaaa) r +=  1;
  if (x & 0xcccccccc) r +=  2;
  if (x & 0xf0f0f0f0) r +=  4;
  if (x & 0xff00ff00) r +=  8;
  if (x & 0xffff0000) r += 16;
#endif

  return r;
}

ecb_function_ int ecb_ctz64 (uint64_t x) ecb_const;
/* 64 bit count-trailing-zeroes, reduced to the 32 bit case */
ecb_function_ int
ecb_ctz64 (uint64_t x)
{
  /* if the low half is all zero, count in the high half instead */
  int shift = x & 0xffffffffU ? 0 : 32;
  return ecb_ctz32 (x >> shift) + shift;
}
  794. ecb_function_ int ecb_popcount32 (uint32_t x) ecb_const;
  795. ecb_function_ int
  796. ecb_popcount32 (uint32_t x)
  797. {
  798. x -= (x >> 1) & 0x55555555;
  799. x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
  800. x = ((x >> 4) + x) & 0x0f0f0f0f;
  801. x *= 0x01010101;
  802. return x >> 24;
  803. }
  804. ecb_function_ int ecb_ld32 (uint32_t x) ecb_const;
  805. ecb_function_ int ecb_ld32 (uint32_t x)
  806. {
  807. int r = 0;
  808. if (x >> 16) { x >>= 16; r += 16; }
  809. if (x >> 8) { x >>= 8; r += 8; }
  810. if (x >> 4) { x >>= 4; r += 4; }
  811. if (x >> 2) { x >>= 2; r += 2; }
  812. if (x >> 1) { r += 1; }
  813. return r;
  814. }
  815. ecb_function_ int ecb_ld64 (uint64_t x) ecb_const;
  816. ecb_function_ int ecb_ld64 (uint64_t x)
  817. {
  818. int r = 0;
  819. if (x >> 32) { x >>= 32; r += 32; }
  820. return r + ecb_ld32 (x);
  821. }
  822. #endif
ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) ecb_const;
/* true iff x is a power of two - note that this also returns true for 0 */
ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) ecb_const;
ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); }

ecb_function_ uint8_t ecb_bitrev8 (uint8_t x) ecb_const;
/* reverse the bit order of a byte via multiply/mask (bit twiddling hacks) */
ecb_function_ uint8_t ecb_bitrev8 (uint8_t x)
{
  return ( (x * 0x0802U & 0x22110U)
         | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
}

ecb_function_ uint16_t ecb_bitrev16 (uint16_t x) ecb_const;
/* reverse the bit order of a 16 bit word by swapping ever-larger groups */
ecb_function_ uint16_t ecb_bitrev16 (uint16_t x)
{
  x = ((x >> 1) & 0x5555) | ((x & 0x5555) << 1); /* swap adjacent bits */
  x = ((x >> 2) & 0x3333) | ((x & 0x3333) << 2); /* swap bit pairs */
  x = ((x >> 4) & 0x0f0f) | ((x & 0x0f0f) << 4); /* swap nibbles */
  x = ( x >> 8              ) | ( x        << 8); /* swap bytes */

  return x;
}

ecb_function_ uint32_t ecb_bitrev32 (uint32_t x) ecb_const;
/* reverse the bit order of a 32 bit word, same scheme as ecb_bitrev16 */
ecb_function_ uint32_t ecb_bitrev32 (uint32_t x)
{
  x = ((x >>  1) & 0x55555555) | ((x & 0x55555555) <<  1);
  x = ((x >>  2) & 0x33333333) | ((x & 0x33333333) <<  2);
  x = ((x >>  4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) <<  4);
  x = ((x >>  8) & 0x00ff00ff) | ((x & 0x00ff00ff) <<  8);
  x = ( x >> 16              ) | ( x               << 16);

  return x;
}
  852. /* popcount64 is only available on 64 bit cpus as gcc builtin */
  853. /* so for this version we are lazy */
  854. ecb_function_ int ecb_popcount64 (uint64_t x) ecb_const;
  855. ecb_function_ int
  856. ecb_popcount64 (uint64_t x)
  857. {
  858. return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);
  859. }
  860. ecb_inline uint8_t ecb_rotl8 (uint8_t x, unsigned int count) ecb_const;
  861. ecb_inline uint8_t ecb_rotr8 (uint8_t x, unsigned int count) ecb_const;
  862. ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) ecb_const;
  863. ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) ecb_const;
  864. ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) ecb_const;
  865. ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) ecb_const;
  866. ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) ecb_const;
  867. ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) ecb_const;
  868. ecb_inline uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); }
  869. ecb_inline uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); }
  870. ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); }
  871. ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); }
  872. ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); }
  873. ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); }
  874. ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); }
  875. ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); }
#if ECB_GCC_VERSION(4,3)
/* gcc has byteswap builtins (no 16 bit variant, hence the 32 bit shift) */
#define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
#define ecb_bswap32(x) __builtin_bswap32 (x)
#define ecb_bswap64(x) __builtin_bswap64 (x)
#else
/* fallback byteswaps, built on the rotate helpers */
ecb_function_ uint16_t ecb_bswap16 (uint16_t x) ecb_const;
ecb_function_ uint16_t
ecb_bswap16 (uint16_t x)
{
  return ecb_rotl16 (x, 8); /* swapping two bytes is an 8 bit rotate */
}

ecb_function_ uint32_t ecb_bswap32 (uint32_t x) ecb_const;
ecb_function_ uint32_t
ecb_bswap32 (uint32_t x)
{
  /* swap the 16 bit halves, byteswapping each half */
  return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16);
}

ecb_function_ uint64_t ecb_bswap64 (uint64_t x) ecb_const;
ecb_function_ uint64_t
ecb_bswap64 (uint64_t x)
{
  /* swap the 32 bit halves, byteswapping each half */
  return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
}
#endif

#if ECB_GCC_VERSION(4,5)
#define ecb_unreachable() __builtin_unreachable ()
#else
/* this seems to work fine, but gcc always emits a warning for it :/ */
ecb_inline void ecb_unreachable (void) ecb_noreturn;
ecb_inline void ecb_unreachable (void) { }
#endif

/* try to tell the compiler that some condition is definitely true */
#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const;
/* returns 0x44 on little endian and 0x11 on big endian hosts */
ecb_inline unsigned char
ecb_byteorder_helper (void)
{
  /* the union code still generates code under pressure in gcc, */
  /* but less than using pointers, and always seems to */
  /* successfully return a constant. */
  /* the reason why we have this horrible preprocessor mess */
  /* is to avoid it in all cases, at least on common architectures */
  /* or when using a recent enough gcc version (>= 4.6) */
#if __i386 || __i386__ || _M_X86 || __amd64 || __amd64__ || _M_X64
  return 0x44; /* x86 is always little endian */
#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  return 0x44;
#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  return 0x11;
#else
  /* runtime detection: look at the first byte of a known 32 bit value */
  union
  {
    uint32_t i;
    uint8_t c;
  } u = { 0x11223344 };
  return u.c;
#endif
}

ecb_inline ecb_bool ecb_big_endian (void) ecb_const;
ecb_inline ecb_bool ecb_big_endian (void) { return ecb_byteorder_helper () == 0x11; }
ecb_inline ecb_bool ecb_little_endian (void) ecb_const;
ecb_inline ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44; }
/* ecb_mod: modulo operation that, for positive n, always yields a */
/* non-negative result, unlike C's % for negative m */
#if ECB_GCC_VERSION(3,0) || ECB_C99
/* c99 guarantees truncating division, so correcting the % bias is enough */
#define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
#else
#define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
#endif

/* integer division rounding down (rd) or up (ru), also for negative values */
#if __cplusplus
template<typename T>
static inline T ecb_div_rd (T val, T div)
{
  return val < 0 ? - ((-val + div - 1) / div) : (val          ) / div;
}
template<typename T>
static inline T ecb_div_ru (T val, T div)
{
  return val < 0 ? - ((-val          ) / div) : (val + div - 1) / div;
}
#else
#define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val)            ) / (div))
#define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val)            ) / (div)) : ((val) + (div) - 1) / (div))
#endif

/* number of elements in a statically-sized array */
#if ecb_cplusplus_does_not_suck
/* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */
template<typename T, int N>
static inline int ecb_array_length (const T (&arr)[N])
{
  return N;
}
#else
#define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
#endif
/*******************************************************************************/
/* floating point stuff, can be disabled by defining ECB_NO_LIBM */

/* basically, everything uses "ieee pure-endian" floating point numbers */
/* the only noteworthy exception is ancient armle, which uses order 43218765 */
#if 0 \
    || __i386 || __i386__ \
    || __amd64 || __amd64__ || __x86_64 || __x86_64__ \
    || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
    || defined __s390__ || defined __s390x__ \
    || defined __mips__ \
    || defined __alpha__ \
    || defined __hppa__ \
    || defined __ia64__ \
    || defined __m68k__ \
    || defined __m88k__ \
    || defined __sh__ \
    || defined _M_IX86 || defined _M_AMD64 || defined _M_IA64 \
    || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
    || defined __aarch64__
#define ECB_STDFP 1 /* architecture known to store floats in ieee format in memory */
#include <string.h> /* for memcpy */
#else
#define ECB_STDFP 0 /* unknown format - fall back to slow conversion code */
#endif

#ifndef ECB_NO_LIBM

#include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */

/* only the oldest of old doesn't have this one. solaris. */
#ifdef INFINITY
#define ECB_INFINITY INFINITY
#else
#define ECB_INFINITY HUGE_VAL
#endif

#ifdef NAN
#define ECB_NAN NAN
#else
#define ECB_NAN ECB_INFINITY
#endif
  1005. /* converts an ieee half/binary16 to a float */
  1006. ecb_function_ float ecb_binary16_to_float (uint16_t x) ecb_const;
  1007. ecb_function_ float
  1008. ecb_binary16_to_float (uint16_t x)
  1009. {
  1010. int e = (x >> 10) & 0x1f;
  1011. int m = x & 0x3ff;
  1012. float r;
  1013. if (!e ) r = ldexpf (m , -24);
  1014. else if (e != 31) r = ldexpf (m + 0x400, e - 25);
  1015. else if (m ) r = ECB_NAN;
  1016. else r = ECB_INFINITY;
  1017. return x & 0x8000 ? -r : r;
  1018. }
/* convert a float to ieee single/binary32 */
ecb_function_ uint32_t ecb_float_to_binary32 (float x) ecb_const;
ecb_function_ uint32_t
ecb_float_to_binary32 (float x)
{
  uint32_t r;

#if ECB_STDFP
  /* the native in-memory representation already is binary32 */
  memcpy (&r, &x, 4);
#else
  /* slow emulation, works for anything but -0 */
  uint32_t m;
  int e;

  if (x == 0e0f                    ) return 0x00000000U;
  if (x > +3.40282346638528860e+38f) return 0x7f800000U; /* out of range: +inf */
  if (x < -3.40282346638528860e+38f) return 0xff800000U; /* out of range: -inf */
  if (x != x                       ) return 0x7fbfffffU; /* nan */

  m = frexpf (x, &e) * 0x1000000U; /* extract a 24 bit significand */

  r = m & 0x80000000U; /* sign bit (set via unsigned wraparound for x < 0) */

  if (r)
    m = -m;

  if (e <= -126) /* subnormal (or zero): denormalise the significand */
    {
      m &= 0xffffffU;
      m >>= (-125 - e);
      e = -126;
    }

  r |= (e + 126) << 23; /* biased exponent */
  r |= m & 0x7fffffU;   /* mantissa without the implied leading bit */
#endif

  return r;
}
/* converts an ieee single/binary32 to a float */
ecb_function_ float ecb_binary32_to_float (uint32_t x) ecb_const;
ecb_function_ float
ecb_binary32_to_float (uint32_t x)
{
  float r;

#if ECB_STDFP
  /* the native in-memory representation already is binary32 */
  memcpy (&r, &x, 4);
#else
  /* emulation, only works for normals and subnormals and +0 */
  int neg = x >> 31;         /* sign bit */
  int e = (x >> 23) & 0xffU; /* biased exponent */

  x &= 0x7fffffU; /* keep only the mantissa bits */

  if (e)
    x |= 0x800000U; /* normal: add the implied leading bit */
  else
    e = 1;          /* subnormal: same scale as e == 1, no implied bit */

  /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
  r = ldexpf (x * (0.5f / 0x800000U), e - 126);

  r = neg ? -r : r;
#endif

  return r;
}
  1073. /* convert a double to ieee double/binary64 */
  1074. ecb_function_ uint64_t ecb_double_to_binary64 (double x) ecb_const;
  1075. ecb_function_ uint64_t
  1076. ecb_double_to_binary64 (double x)
  1077. {
  1078. uint64_t r;
  1079. #if ECB_STDFP
  1080. memcpy (&r, &x, 8);
  1081. #else
  1082. /* slow emulation, works for anything but -0 */
  1083. uint64_t m;
  1084. int e;
  1085. if (x == 0e0 ) return 0x0000000000000000U;
  1086. if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;
  1087. if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;
  1088. if (x != x ) return 0X7ff7ffffffffffffU;
  1089. m = frexp (x, &e) * 0x20000000000000U;
  1090. r = m & 0x8000000000000000;;
  1091. if (r)
  1092. m = -m;
  1093. if (e <= -1022)
  1094. {
  1095. m &= 0x1fffffffffffffU;
  1096. m >>= (-1021 - e);
  1097. e = -1022;
  1098. }
  1099. r |= ((uint64_t)(e + 1022)) << 52;
  1100. r |= m & 0xfffffffffffffU;
  1101. #endif
  1102. return r;
  1103. }
/* converts an ieee double/binary64 to a double */
ecb_function_ double ecb_binary64_to_double (uint64_t x) ecb_const;
ecb_function_ double
ecb_binary64_to_double (uint64_t x)
{
  double r;

#if ECB_STDFP
  /* the native in-memory representation already is binary64 */
  memcpy (&r, &x, 8);
#else
  /* emulation, only works for normals and subnormals and +0 */
  int neg = x >> 63;          /* sign bit */
  int e = (x >> 52) & 0x7ffU; /* biased exponent */

  x &= 0xfffffffffffffU; /* keep only the mantissa bits */

  if (e)
    x |= 0x10000000000000U; /* normal: add the implied leading bit */
  else
    e = 1;                  /* subnormal: same scale as e == 1, no implied bit */

  /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
  r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);

  r = neg ? -r : r;
#endif

  return r;
}

#endif
  1128. #endif
  1129. /* ECB.H END */
#if ECB_MEMORY_FENCE_NEEDS_PTHREADS
/* if your architecture doesn't need memory fences, e.g. because it is
 * single-cpu/core, or if you use libev in a project that doesn't use libev
 * from multiple threads, then you can define ECB_AVOID_PTHREADS when compiling
 * libev, in which cases the memory fences become nops.
 * alternatively, you can remove this #error and link against libpthread,
 * which will then provide the memory fences.
 */
# error "memory fences not defined for your architecture, please report"
#endif

#ifndef ECB_MEMORY_FENCE
/* last-resort fallback: no fences at all (safe for single-threaded use only) */
# define ECB_MEMORY_FENCE do { } while (0)
# define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
# define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
#endif

/* short local aliases for the ecb helpers used throughout this file */
#define expect_false(cond) ecb_expect_false (cond)
#define expect_true(cond)  ecb_expect_true  (cond)
#define noinline           ecb_noinline

#define inline_size        ecb_inline

#if EV_FEATURE_CODE
# define inline_speed      ecb_inline
#else
# define inline_speed      static noinline
#endif

/* number of distinct watcher priorities, and mapping to a 0-based index */
#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)

#if EV_MINPRI == EV_MAXPRI
# define ABSPRI(w) (((W)w), 0)
#else
# define ABSPRI(w) (((W)w)->priority - EV_MINPRI)
#endif

#define EMPTY       /* required for microsofts broken pseudo-c compiler */
#define EMPTY2(a,b) /* used to suppress some warnings */

/* generic watcher pointer types: plain, list-linked and time-based */
typedef ev_watcher *W;
typedef ev_watcher_list *WL;
typedef ev_watcher_time *WT;

#define ev_active(w) ((W)(w))->active
#define ev_at(w) ((WT)(w))->at

#if EV_USE_REALTIME
/* sig_atomic_t is used to avoid per-thread variables or locking but still */
/* giving it a reasonably high chance of working on typical architectures */
static EV_ATOMIC_T have_realtime; /* did clock_gettime (CLOCK_REALTIME) work? */
#endif

#if EV_USE_MONOTONIC
static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */
#endif

/* win32 fd/handle mapping, overridable when embedding */
#ifndef EV_FD_TO_WIN32_HANDLE
# define EV_FD_TO_WIN32_HANDLE(fd) _get_osfhandle (fd)
#endif
#ifndef EV_WIN32_HANDLE_TO_FD
# define EV_WIN32_HANDLE_TO_FD(handle) _open_osfhandle (handle, 0)
#endif
#ifndef EV_WIN32_CLOSE_FD
# define EV_WIN32_CLOSE_FD(fd) close (fd)
#endif

#ifdef _WIN32
# include "ev_win32.c"
#endif
/*****************************************************************************/

/* define a suitable floor function (only used by periodics atm) */

#if EV_USE_FLOOR
# include <math.h>
# define ev_floor(v) floor (v)
#else

#include <float.h>

/* a floor() replacement function, should be independent of ev_tstamp type */
static ev_tstamp noinline
ev_floor (ev_tstamp v)
{
  /* the choice of shift factor is not terribly important */
#if FLT_RADIX != 2 /* assume FLT_RADIX == 10 */
  const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.;
#else
  const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;
#endif

  /* argument too large for an unsigned long? */
  if (expect_false (v >= shift))
    {
      ev_tstamp f;

      if (v == v - 1.)
        return v; /* very large number, assumed to already be integral */

      /* split off a multiple of shift and recurse on both parts */
      f = shift * ev_floor (v * (1. / shift));
      return f + ev_floor (v - f);
    }

  /* special treatment for negative args? */
  if (expect_false (v < 0.))
    {
      ev_tstamp f = -ev_floor (-v);

      /* round towards -inf unless v was already integral */
      return f - (f == v ? 0 : 1);
    }

  /* fits into an unsigned long */
  return (unsigned long)v;
}

#endif
  1223. /*****************************************************************************/
  1224. #ifdef __linux
  1225. # include <sys/utsname.h>
  1226. #endif
  1227. static unsigned int noinline ecb_cold
  1228. ev_linux_version (void)
  1229. {
  1230. #ifdef __linux
  1231. unsigned int v = 0;
  1232. struct utsname buf;
  1233. int i;
  1234. char *p = buf.release;
  1235. if (uname (&buf))
  1236. return 0;
  1237. for (i = 3+1; --i; )
  1238. {
  1239. unsigned int c = 0;
  1240. for (;;)
  1241. {
  1242. if (*p >= '0' && *p <= '9')
  1243. c = c * 10 + *p++ - '0';
  1244. else
  1245. {
  1246. p += *p == '.';
  1247. break;
  1248. }
  1249. }
  1250. v = (v << 8) | c;
  1251. }
  1252. return v;
  1253. #else
  1254. return 0;
  1255. #endif
  1256. }
/*****************************************************************************/

#if EV_AVOID_STDIO
/* minimal stderr output for builds that must not pull in stdio */
static void noinline ecb_cold
ev_printerr (const char *msg)
{
  write (STDERR_FILENO, msg, strlen (msg));
}
#endif

/* user-settable callback for fatal system errors; 0 selects the default */
/* behaviour of printing the error and aborting (see ev_syserr) */
static void (*syserr_cb)(const char *msg) EV_THROW;

/* install cb as the handler called on fatal libev/system errors */
void ecb_cold
ev_set_syserr_cb (void (*cb)(const char *msg) EV_THROW) EV_THROW
{
  syserr_cb = cb;
}
/* report a fatal system error - delegate to the user callback if one is */
/* installed, otherwise print the message (plus strerror) and abort */
static void noinline ecb_cold
ev_syserr (const char *msg)
{
  if (!msg)
    msg = "(libev) system error";

  if (syserr_cb)
    syserr_cb (msg);
  else
    {
#if EV_AVOID_STDIO
      ev_printerr (msg);
      ev_printerr (": ");
      ev_printerr (strerror (errno));
      ev_printerr ("\n");
#else
      perror (msg);
#endif
      abort ();
    }
}
  1291. static void *
  1292. ev_realloc_emul (void *ptr, long size) EV_THROW
  1293. {
  1294. /* some systems, notably openbsd and darwin, fail to properly
  1295. * implement realloc (x, 0) (as required by both ansi c-89 and
  1296. * the single unix specification, so work around them here.
  1297. * recently, also (at least) fedora and debian started breaking it,
  1298. * despite documenting it otherwise.
  1299. */
  1300. if (size)
  1301. return realloc (ptr, size);
  1302. free (ptr);
  1303. return 0;
  1304. }
/* the active allocator - a realloc-style function, see ev_set_allocator */
static void *(*alloc)(void *ptr, long size) EV_THROW = ev_realloc_emul;

/* install a custom allocator used for all libev allocations */
void ecb_cold
ev_set_allocator (void *(*cb)(void *ptr, long size) EV_THROW) EV_THROW
{
  alloc = cb;
}

/* allocate/resize/free memory via the configured allocator, */
/* aborting with a diagnostic when an allocation fails */
inline_speed void *
ev_realloc (void *ptr, long size)
{
  ptr = alloc (ptr, size);

  if (!ptr && size)
    {
#if EV_AVOID_STDIO
      ev_printerr ("(libev) memory allocation failed, aborting.\n");
#else
      fprintf (stderr, "(libev) cannot allocate %ld bytes, aborting.", size);
#endif
      abort ();
    }

  return ptr;
}
/* convenience wrappers around ev_realloc */
#define ev_malloc(size) ev_realloc (0, (size))
#define ev_free(ptr) ev_realloc ((ptr), 0)

/*****************************************************************************/

/* set in reify when reification needed */
#define EV_ANFD_REIFY 1

/* file descriptor info structure */
typedef struct
{
  WL head; /* list of watchers attached to this fd */
  unsigned char events; /* the events watched for */
  unsigned char reify;  /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
  unsigned char emask;  /* the epoll backend stores the actual kernel mask in here */
  unsigned char unused;
#if EV_USE_EPOLL
  unsigned int egen;    /* generation counter to counter epoll bugs */
#endif
#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
  SOCKET handle;        /* the underlying winsock handle for this fd */
#endif
#if EV_USE_IOCP
  OVERLAPPED or, ow;    /* overlapped structures (read/write) */
#endif
} ANFD;

/* stores the pending event set for a given watcher */
typedef struct
{
  W w;
  int events; /* the pending event set for the given watcher */
} ANPENDING;

#if EV_USE_INOTIFY
/* hash table entry per inotify-id */
typedef struct
{
  WL head; /* watcher list hashed onto this slot */
} ANFS;
#endif

/* Heap Entry */
#if EV_HEAP_CACHE_AT
/* a heap element that caches the timestamp next to the watcher pointer, */
/* avoiding a dereference when comparing heap elements */
typedef struct {
  ev_tstamp at;
  WT w;
} ANHE;

#define ANHE_w(he)        (he).w     /* access watcher, read-write */
#define ANHE_at(he)       (he).at    /* access cached at, read-only */
#define ANHE_at_cache(he) (he).at = (he).w->at /* update at from watcher */
#else
/* a heap element is just the watcher pointer itself */
typedef WT ANHE;

#define ANHE_w(he)        (he)
#define ANHE_at(he)       (he)->at
#define ANHE_at_cache(he)
#endif
#if EV_MULTIPLICITY

/* the event loop structure - all per-loop state lives here, */
/* with the members generated from ev_vars.h */
struct ev_loop
{
  ev_tstamp ev_rt_now;
#define ev_rt_now ((loop)->ev_rt_now)
#define VAR(name,decl) decl;
#include "ev_vars.h"
#undef VAR
};
#include "ev_wrap.h"

static struct ev_loop default_loop_struct;
EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */

#else

/* single-loop mode: all loop state becomes static file-scope variables */
EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a definition despite extern */
#define VAR(name,decl) static decl;
#include "ev_vars.h"
#undef VAR

static int ev_default_loop_ptr;

#endif

/* hooks for loop release/acquire and pending-invocation, only with EV_FEATURE_API */
#if EV_FEATURE_API
# define EV_RELEASE_CB if (expect_false (release_cb)) release_cb (EV_A)
# define EV_ACQUIRE_CB if (expect_false (acquire_cb)) acquire_cb (EV_A)
# define EV_INVOKE_PENDING invoke_cb (EV_A)
#else
# define EV_RELEASE_CB (void)0
# define EV_ACQUIRE_CB (void)0
# define EV_INVOKE_PENDING ev_invoke_pending (EV_A)
#endif

#define EVBREAK_RECURSE 0x80
  1408. /*****************************************************************************/
#ifndef EV_HAVE_EV_TIME
/* return the current wall-clock time, preferring clock_gettime */
/* (CLOCK_REALTIME) and falling back to gettimeofday */
ev_tstamp
ev_time (void) EV_THROW
{
#if EV_USE_REALTIME
  if (expect_true (have_realtime))
    {
      struct timespec ts;
      clock_gettime (CLOCK_REALTIME, &ts);
      return ts.tv_sec + ts.tv_nsec * 1e-9;
    }
#endif

  struct timeval tv;
  gettimeofday (&tv, 0);
  return tv.tv_sec + tv.tv_usec * 1e-6;
}
#endif

/* return a monotonic timestamp if available, wall-clock time otherwise */
inline_size ev_tstamp
get_clock (void)
{
#if EV_USE_MONOTONIC
  if (expect_true (have_monotonic))
    {
      struct timespec ts;
      clock_gettime (CLOCK_MONOTONIC, &ts);
      return ts.tv_sec + ts.tv_nsec * 1e-9;
    }
#endif

  return ev_time ();
}

#if EV_MULTIPLICITY
/* return the loop's cached notion of the current time */
ev_tstamp
ev_now (EV_P) EV_THROW
{
  return ev_rt_now;
}
#endif

/* sleep for the given amount of time (in seconds); no-op for delay <= 0 */
void
ev_sleep (ev_tstamp delay) EV_THROW
{
  if (delay > 0.)
    {
#if EV_USE_NANOSLEEP
      struct timespec ts;

      EV_TS_SET (ts, delay);
      nanosleep (&ts, 0);
#elif defined _WIN32
      Sleep ((unsigned long)(delay * 1e3)); /* Sleep takes milliseconds */
#else
      struct timeval tv;

      /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */
      /* something not guaranteed by newer posix versions, but guaranteed */
      /* by older ones */
      EV_TV_SET (tv, delay);
      select (0, 0, 0, 0, &tv);
#endif
    }
}
  1467. /*****************************************************************************/
#define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */

/* find a suitable new size for the given array, */
/* hopefully by rounding to a nice-to-malloc size */
inline_size int
array_nextsize (int elem, int cur, int cnt)
{
  int ncur = cur + 1;

  /* at least double the current size until cnt elements fit */
  do
    ncur <<= 1;
  while (cnt > ncur);

  /* if size is large, round to MALLOC_ROUND - 4 * longs to accommodate malloc overhead */
  if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4)
    {
      ncur *= elem;
      ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1);
      ncur = ncur - sizeof (void *) * 4;
      ncur /= elem;
    }

  return ncur;
}

/* (re)allocate base to hold at least cnt elements, updating *cur to the new size */
static void * noinline ecb_cold
array_realloc (int elem, void *base, int *cur, int cnt)
{
  *cur = array_nextsize (elem, *cur, cnt);
  return ev_realloc (base, elem * *cur);
}

#define array_init_zero(base,count)	\
  memset ((void *)(base), 0, sizeof (*(base)) * (count))

/* grow the array if cnt exceeds its current size cur, */
/* calling init on the newly-added elements */
#define array_needsize(type,base,cur,cnt,init)			\
  if (expect_false ((cnt) > (cur)))				\
    {								\
      int ecb_unused ocur_ = (cur);				\
      (base) = (type *)array_realloc				\
         (sizeof (type), (base), &(cur), (cnt));		\
      init ((base) + (ocur_), (cur) - ocur_);			\
    }

#if 0
#define array_slim(type,stem)					\
  if (stem ## max < array_roundsize (stem ## cnt >> 2))		\
    {								\
      stem ## max = array_roundsize (stem ## cnt >> 1);		\
      base = (type *)ev_realloc (base, sizeof (type) * (stem ## max));\
      fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\
    }
#endif

/* free an array and reset its count/max/pointer; idx selects e.g. the [pri] variants */
#define array_free(stem, idx) \
  ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0; stem ## s idx = 0
/*****************************************************************************/

/* dummy callback for pending events */
static void noinline
pendingcb (EV_P_ ev_prepare *w, int revents)
{
}

/* queue an event for the given watcher; if the watcher is already */
/* pending, only its pending event set is extended */
void noinline
ev_feed_event (EV_P_ void *w, int revents) EV_THROW
{
  W w_ = (W)w;
  int pri = ABSPRI (w_);

  if (expect_false (w_->pending))
    pendings [pri][w_->pending - 1].events |= revents; /* already queued - merge event bits */
  else
    {
      w_->pending = ++pendingcnt [pri]; /* 1-based index into pendings [pri] */
      array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, EMPTY2);
      pendings [pri][w_->pending - 1].w      = w_;
      pendings [pri][w_->pending - 1].events = revents;
    }

  pendingpri = NUMPRI - 1; /* restart the pending scan at the highest priority */
}

/* stash a watcher so a batch of events can later be fed in reverse order */
inline_speed void
feed_reverse (EV_P_ W w)
{
  array_needsize (W, rfeeds, rfeedmax, rfeedcnt + 1, EMPTY2);
  rfeeds [rfeedcnt++] = w;
}

/* feed revents to all watchers stashed via feed_reverse, last-stashed first */
/* (the do/while assumes at least one watcher was stashed) */
inline_size void
feed_reverse_done (EV_P_ int revents)
{
  do
    ev_feed_event (EV_A_ rfeeds [--rfeedcnt], revents);
  while (rfeedcnt);
}
  1550. inline_speed void
  1551. queue_events (EV_P_ W *events, int eventcnt, int type)
  1552. {
  1553. int i;
  1554. for (i = 0; i < eventcnt; ++i)
  1555. ev_feed_event (EV_A_ events [i], type);
  1556. }
/*****************************************************************************/

/* feed revents to every io watcher on fd that is interested in them, */
/* without checking the reify flag */
inline_speed void
fd_event_nocheck (EV_P_ int fd, int revents)
{
  ANFD *anfd = anfds + fd;
  ev_io *w;

  for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
    {
      int ev = w->events & revents; /* only the events this watcher asked for */

      if (ev)
        ev_feed_event (EV_A_ (W)w, ev);
    }
}

/* do not submit kernel events for fds that have reify set */
/* because that means they changed while we were polling for new events */
inline_speed void
fd_event (EV_P_ int fd, int revents)
{
  ANFD *anfd = anfds + fd;

  if (expect_true (!anfd->reify))
    fd_event_nocheck (EV_A_ fd, revents);
}

/* public entry point to feed fd events into the loop; validates the fd range */
void
ev_feed_fd_event (EV_P_ int fd, int revents) EV_THROW
{
  if (fd >= 0 && fd < anfdmax)
    fd_event_nocheck (EV_A_ fd, revents);
}

/* make sure the external fd watch events are in-sync */
/* with the kernel/libev internal state */
inline_size void
fd_reify (EV_P)
{
  int i;

#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
  /* on these backends the fd wraps a socket handle - detect when the */
  /* underlying handle changed while the fd number stayed the same */
  for (i = 0; i < fdchangecnt; ++i)
    {
      int fd = fdchanges [i];
      ANFD *anfd = anfds + fd;

      if (anfd->reify & EV__IOFDSET && anfd->head)
        {
          SOCKET handle = EV_FD_TO_WIN32_HANDLE (fd);

          if (handle != anfd->handle)
            {
              unsigned long arg;

              assert (("libev: only socket fds supported in this configuration", ioctlsocket (handle, FIONREAD, &arg) == 0));

              /* handle changed, but fd didn't - we need to do it in two steps */
              backend_modify (EV_A_ fd, anfd->events, 0);
              anfd->events = 0;
              anfd->handle = handle;
            }
        }
    }
#endif

  for (i = 0; i < fdchangecnt; ++i)
    {
      int fd = fdchanges [i];
      ANFD *anfd = anfds + fd;
      ev_io *w;

      unsigned char o_events = anfd->events;
      unsigned char o_reify  = anfd->reify;

      anfd->reify = 0;

      /*if (expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */
        {
          /* recompute the event mask as the union of all watchers on this fd */
          anfd->events = 0;

          for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
            anfd->events |= (unsigned char)w->events;

          if (o_events != anfd->events)
            o_reify = EV__IOFDSET; /* actually |= */
        }

      /* only bother the backend when the effective mask changed */
      if (o_reify & EV__IOFDSET)
        backend_modify (EV_A_ fd, o_events, anfd->events);
    }

  fdchangecnt = 0; /* all queued changes processed */
}
  1632. /* something about the given fd changed */
  1633. inline_size void
  1634. fd_change (EV_P_ int fd, int flags)
  1635. {
  1636. unsigned char reify = anfds [fd].reify;
  1637. anfds [fd].reify |= flags;
  1638. if (expect_true (!reify))
  1639. {
  1640. ++fdchangecnt;
  1641. array_needsize (int, fdchanges, fdchangemax, fdchangecnt, EMPTY2);
  1642. fdchanges [fdchangecnt - 1] = fd;
  1643. }
  1644. }
  1645. /* the given fd is invalid/unusable, so make sure it doesn't hurt us anymore */
inline_speed void ecb_cold
fd_kill (EV_P_ int fd)
{
  ev_io *w;

  /* stop every watcher on this fd and notify it of the error; */
  /* ev_io_stop unlinks w from the anfd list, so the head changes each iteration */
  while ((w = (ev_io *)anfds [fd].head))
    {
      ev_io_stop (EV_A_ w);
      ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE);
    }
}
  1656. /* check whether the given fd is actually valid, for error recovery */
  1657. inline_size int ecb_cold
  1658. fd_valid (int fd)
  1659. {
  1660. #ifdef _WIN32
  1661. return EV_FD_TO_WIN32_HANDLE (fd) != -1;
  1662. #else
  1663. return fcntl (fd, F_GETFD) != -1;
  1664. #endif
  1665. }
  1666. /* called on EBADF to verify fds */
static void noinline ecb_cold
fd_ebadf (EV_P)
{
  int fd;

  /* scan all fds we watch and kill those the OS reports as bad; */
  /* errno is checked so a transient fcntl failure is not mistaken for EBADF */
  for (fd = 0; fd < anfdmax; ++fd)
    if (anfds [fd].events)
      if (!fd_valid (fd) && errno == EBADF)
        fd_kill (EV_A_ fd);
}
  1676. /* called on ENOMEM in select/poll to kill some fds and retry */
  1677. static void noinline ecb_cold
  1678. fd_enomem (EV_P)
  1679. {
  1680. int fd;
  1681. for (fd = anfdmax; fd--; )
  1682. if (anfds [fd].events)
  1683. {
  1684. fd_kill (EV_A_ fd);
  1685. break;
  1686. }
  1687. }
  1688. /* usually called after fork if backend needs to re-arm all fds from scratch */
static void noinline
fd_rearm_all (EV_P)
{
  int fd;

  /* forget the kernel-side state of every active fd and queue it for */
  /* re-registration, so the (post-fork) backend starts from scratch */
  for (fd = 0; fd < anfdmax; ++fd)
    if (anfds [fd].events)
      {
        anfds [fd].events = 0;
        anfds [fd].emask  = 0;
        fd_change (EV_A_ fd, EV__IOFDSET | EV_ANFD_REIFY);
      }
}
  1701. /* used to prepare libev internal fd's */
  1702. /* this is not fork-safe */
inline_speed void
fd_intern (int fd)
{
#ifdef _WIN32
  /* on windows we can only make the socket non-blocking */
  unsigned long arg = 1;
  ioctlsocket (EV_FD_TO_WIN32_HANDLE (fd), FIONBIO, &arg);
#else
  /* close-on-exec + non-blocking for libev-internal fds */
  fcntl (fd, F_SETFD, FD_CLOEXEC);
  fcntl (fd, F_SETFL, O_NONBLOCK);
#endif
}
  1714. /*****************************************************************************/
  1715. /*
  1716. * the heap functions want a real array index. array index 0 is guaranteed to not
  1717. * be in-use at any time. the first heap entry is at array [HEAP0]. DHEAP gives
  1718. * the branching factor of the d-tree.
  1719. */
  1720. /*
  1721. * at the moment we allow libev the luxury of two heaps,
  1722. * a small-code-size 2-heap one and a ~1.5kb larger 4-heap
  1723. * which is more cache-efficient.
  1724. * the difference is about 5% with 50000+ watchers.
  1725. */
  1726. #if EV_USE_4HEAP
  1727. #define DHEAP 4
  1728. #define HEAP0 (DHEAP - 1) /* index of first element in heap */
  1729. #define HPARENT(k) ((((k) - HEAP0 - 1) / DHEAP) + HEAP0)
  1730. #define UPHEAP_DONE(p,k) ((p) == (k))
  1731. /* away from the root */
inline_speed void
downheap (ANHE *heap, int N, int k)
{
  ANHE he = heap [k];
  ANHE *E = heap + N + HEAP0; /* one past the last heap element */

  for (;;)
    {
      ev_tstamp minat;
      ANHE *minpos;
      ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1; /* first of the DHEAP children */

      /* find minimum child */
      if (expect_true (pos + DHEAP - 1 < E))
        {
          /* fast path: all DHEAP children exist, no bounds checks needed */
          /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
          if ( ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
          if ( ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
          if ( ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
        }
      else if (pos < E)
        {
          /* slow path: partial last node, bounds-check each child */
          /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
          if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
          if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
          if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
        }
      else
        break; /* no children - he lands here */

      if (ANHE_at (he) <= minat)
        break; /* heap condition satisfied */

      /* pull the smallest child up and descend into its slot */
      heap [k] = *minpos;
      ev_active (ANHE_w (*minpos)) = k; /* keep the watcher's cached heap index in sync */

      k = minpos - heap;
    }

  heap [k] = he;
  ev_active (ANHE_w (he)) = k;
}
  1768. #else /* 4HEAP */
  1769. #define HEAP0 1
  1770. #define HPARENT(k) ((k) >> 1)
  1771. #define UPHEAP_DONE(p,k) (!(p))
  1772. /* away from the root */
inline_speed void
downheap (ANHE *heap, int N, int k)
{
  ANHE he = heap [k];

  for (;;)
    {
      int c = k << 1; /* left child of k in the 1-based 2-heap */

      if (c >= N + HEAP0)
        break; /* no children - he lands here */

      /* descend into the smaller child (right one only if it exists) */
      c += c + 1 < N + HEAP0 && ANHE_at (heap [c]) > ANHE_at (heap [c + 1])
           ? 1 : 0;

      if (ANHE_at (he) <= ANHE_at (heap [c]))
        break; /* heap condition satisfied */

      heap [k] = heap [c];
      ev_active (ANHE_w (heap [k])) = k; /* keep cached heap index in sync */

      k = c;
    }

  heap [k] = he;
  ev_active (ANHE_w (he)) = k;
}
  1793. #endif
  1794. /* towards the root */
inline_speed void
upheap (ANHE *heap, int k)
{
  ANHE he = heap [k];

  /* move he towards the root while it is smaller than its parent; */
  /* UPHEAP_DONE detects the root for both the 2-heap and the 4-heap layout */
  for (;;)
    {
      int p = HPARENT (k);

      if (UPHEAP_DONE (p, k) || ANHE_at (heap [p]) <= ANHE_at (he))
        break;

      heap [k] = heap [p];
      ev_active (ANHE_w (heap [k])) = k; /* keep cached heap index in sync */
      k = p;
    }

  heap [k] = he;
  ev_active (ANHE_w (he)) = k;
}
  1811. /* move an element suitably so it is in a correct place */
  1812. inline_size void
  1813. adjustheap (ANHE *heap, int N, int k)
  1814. {
  1815. if (k > HEAP0 && ANHE_at (heap [k]) <= ANHE_at (heap [HPARENT (k)]))
  1816. upheap (heap, k);
  1817. else
  1818. downheap (heap, N, k);
  1819. }
  1820. /* rebuild the heap: this function is used only once and executed rarely */
  1821. inline_size void
  1822. reheap (ANHE *heap, int N)
  1823. {
  1824. int i;
  1825. /* we don't use floyds algorithm, upheap is simpler and is more cache-efficient */
  1826. /* also, this is easy to implement and correct for both 2-heaps and 4-heaps */
  1827. for (i = 0; i < N; ++i)
  1828. upheap (heap, i + HEAP0);
  1829. }
  1830. /*****************************************************************************/
/* associate signal watchers to a signal */
typedef struct
{
  EV_ATOMIC_T pending; /* set from the signal handler, cleared when dispatched */
#if EV_MULTIPLICITY
  EV_P; /* the loop responsible for this signal (accessed as .loop below) */
#endif
  WL head; /* list of ev_signal watchers for this signal */
} ANSIG;

/* one slot per signal number, indexed by signum - 1 */
static ANSIG signals [EV_NSIG - 1];
  1841. /*****************************************************************************/
  1842. #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
/* lazily create the signal/async self-wakeup mechanism: an eventfd when */
/* available, a pipe otherwise; idempotent - does nothing once active */
static void noinline ecb_cold
evpipe_init (EV_P)
{
  if (!ev_is_active (&pipe_w))
    {
      int fds [2];

# if EV_USE_EVENTFD
      fds [0] = -1; /* eventfd needs only one fd; [0] stays unused */
      fds [1] = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC);
      if (fds [1] < 0 && errno == EINVAL)
        fds [1] = eventfd (0, 0); /* older kernels reject the flags argument */

      if (fds [1] < 0)
# endif
        {
          /* eventfd unavailable/failed - fall back to a pipe */
          while (pipe (fds))
            ev_syserr ("(libev) error creating signal/async pipe");

          fd_intern (fds [0]);
        }

      evpipe [0] = fds [0];

      if (evpipe [1] < 0)
        evpipe [1] = fds [1]; /* first call, set write fd */
      else
        {
          /* on subsequent calls, do not change evpipe [1] */
          /* so that evpipe_write can always rely on its value. */
          /* this branch does not do anything sensible on windows, */
          /* so must not be executed on windows */

          dup2 (fds [1], evpipe [1]);
          close (fds [1]);
        }

      fd_intern (evpipe [1]);

      /* with eventfd, evpipe [0] is -1 and we read/write the same fd */
      ev_io_set (&pipe_w, evpipe [0] < 0 ? evpipe [1] : evpipe [0], EV_READ);
      ev_io_start (EV_A_ &pipe_w);
      ev_unref (EV_A); /* watcher should not keep loop alive */
    }
}
/* wake up the event loop from another thread/signal context by setting */
/* *flag and, if the loop is (possibly) blocked in the kernel, writing a */
/* byte to the self-pipe; the fence/flag dance avoids redundant writes */
inline_speed void
evpipe_write (EV_P_ EV_ATOMIC_T *flag)
{
  ECB_MEMORY_FENCE; /* push out the write before this function was called, acquire flag */

  if (expect_true (*flag))
    return; /* a wakeup for this flag is already in flight */

  *flag = 1;
  ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */

  pipe_write_skipped = 1;

  ECB_MEMORY_FENCE; /* make sure pipe_write_skipped is visible before we check pipe_write_wanted */

  if (pipe_write_wanted)
    {
      int old_errno;

      pipe_write_skipped = 0; /* just an optimisation, no fence needed */

      ECB_MEMORY_FENCE_RELEASE;

      old_errno = errno; /* save errno because write will clobber it */

#if EV_USE_EVENTFD
      if (evpipe [0] < 0)
        {
          /* eventfd in use - add 1 to its counter */
          uint64_t counter = 1;
          write (evpipe [1], &counter, sizeof (uint64_t));
        }
      else
#endif
        {
#ifdef _WIN32
          WSABUF buf;
          DWORD sent;
          buf.buf = &buf; /* the byte written is arbitrary - only the wakeup matters */
          buf.len = 1;
          WSASend (EV_FD_TO_WIN32_HANDLE (evpipe [1]), &buf, 1, &sent, 0, 0, 0);
#else
          write (evpipe [1], &(evpipe [1]), 1); /* content irrelevant, one byte suffices */
#endif
        }

      errno = old_errno;
    }
}
  1917. /* called whenever the libev signal pipe */
  1918. /* got some events (signal, async) */
/* called whenever the libev signal pipe */
/* got some events (signal, async) */
static void
pipecb (EV_P_ ev_io *iow, int revents)
{
  int i;

  if (revents & EV_READ)
    {
      /* drain the wakeup fd so the loop does not spin */
#if EV_USE_EVENTFD
      if (evpipe [0] < 0)
        {
          /* eventfd: reading returns and resets the 64-bit counter */
          uint64_t counter;
          read (evpipe [1], &counter, sizeof (uint64_t));
        }
      else
#endif
        {
          char dummy[4];
#ifdef _WIN32
          WSABUF buf;
          DWORD recvd;
          DWORD flags = 0;
          buf.buf = dummy;
          buf.len = sizeof (dummy);
          WSARecv (EV_FD_TO_WIN32_HANDLE (evpipe [0]), &buf, 1, &recvd, &flags, 0, 0);
#else
          read (evpipe [0], &dummy, sizeof (dummy));
#endif
        }
    }

  pipe_write_skipped = 0;

  ECB_MEMORY_FENCE; /* push out skipped, acquire flags */

#if EV_SIGNAL_ENABLE
  if (sig_pending)
    {
      sig_pending = 0;

      ECB_MEMORY_FENCE;

      /* dispatch every signal whose pending flag was set by ev_feed_signal */
      for (i = EV_NSIG - 1; i--; )
        if (expect_false (signals [i].pending))
          ev_feed_signal_event (EV_A_ i + 1);
    }
#endif

#if EV_ASYNC_ENABLE
  if (async_pending)
    {
      async_pending = 0;

      ECB_MEMORY_FENCE;

      /* queue an EV_ASYNC event for every async watcher that was sent */
      for (i = asynccnt; i--; )
        if (asyncs [i]->sent)
          {
            asyncs [i]->sent = 0;

            ECB_MEMORY_FENCE_RELEASE;

            ev_feed_event (EV_A_ asyncs [i], EV_ASYNC);
          }
    }
#endif
}
  1974. /*****************************************************************************/
/* simulate the arrival of a signal; safe to call from a signal handler, */
/* as it only sets flags and pokes the self-pipe */
void
ev_feed_signal (int signum) EV_THROW
{
#if EV_MULTIPLICITY
  EV_P;
  ECB_MEMORY_FENCE_ACQUIRE;
  EV_A = signals [signum - 1].loop;

  if (!EV_A)
    return; /* nobody registered this signal with any loop */
#endif

  signals [signum - 1].pending = 1;
  evpipe_write (EV_A_ &sig_pending);
}
/* the handler installed for signals libev watches; does only */
/* async-signal-safe work and defers the rest to the event loop */
static void
ev_sighandler (int signum)
{
#ifdef _WIN32
  signal (signum, ev_sighandler); /* win32 resets the handler on delivery - re-arm it */
#endif

  ev_feed_signal (signum);
}
/* feed EV_SIGNAL to all watchers registered for signum on this loop; */
/* out-of-range or foreign-loop signals are silently ignored */
void noinline
ev_feed_signal_event (EV_P_ int signum) EV_THROW
{
  WL w;

  if (expect_false (signum <= 0 || signum >= EV_NSIG))
    return;

  --signum; /* signals [] is indexed by signum - 1 */

#if EV_MULTIPLICITY
  /* it is permissible to try to feed a signal to the wrong loop */
  /* or, likely more useful, feeding a signal nobody is waiting for */

  if (expect_false (signals [signum].loop != EV_A))
    return;
#endif

  signals [signum].pending = 0;
  ECB_MEMORY_FENCE_RELEASE;

  for (w = signals [signum].head; w; w = w->next)
    ev_feed_event (EV_A_ (W)w, EV_SIGNAL);
}
  2014. #if EV_USE_SIGNALFD
/* io callback for the signalfd: read queued siginfo records and */
/* dispatch each as a signal event */
static void
sigfdcb (EV_P_ ev_io *iow, int revents)
{
  struct signalfd_siginfo si[2], *sip; /* these structs are big */

  for (;;)
    {
      ssize_t res = read (sigfd, si, sizeof (si));

      /* not ISO-C, as res might be -1, but works with SuS */
      for (sip = si; (char *)sip < (char *)si + res; ++sip)
        ev_feed_signal_event (EV_A_ sip->ssi_signo);

      if (res < (ssize_t)sizeof (si))
        break; /* short read (or error) - queue drained */
    }
}
  2029. #endif
  2030. #endif
  2031. /*****************************************************************************/
  2032. #if EV_CHILD_ENABLE
  2033. static WL childs [EV_PID_HASHSIZE];
  2034. static ev_signal childev;
  2035. #ifndef WIFCONTINUED
  2036. # define WIFCONTINUED(status) 0
  2037. #endif
  2038. /* handle a single child status event */
inline_speed void
child_reap (EV_P_ int chain, int pid, int status)
{
  ev_child *w;
  /* stopped/continued children only interest watchers with the trace flag set */
  int traced = WIFSTOPPED (status) || WIFCONTINUED (status);

  /* walk the hash chain for this pid; w->pid == 0 means "any child" */
  for (w = (ev_child *)childs [chain & ((EV_PID_HASHSIZE) - 1)]; w; w = (ev_child *)((WL)w)->next)
    {
      if ((w->pid == pid || !w->pid)
          && (!traced || (w->flags & 1)))
        {
          ev_set_priority (w, EV_MAXPRI); /* need to do it *now*, this *must* be the same prio as the signal watcher itself */
          w->rpid = pid;
          w->rstatus = status;
          ev_feed_event (EV_A_ (W)w, EV_CHILD);
        }
    }
}
  2056. #ifndef WCONTINUED
  2057. # define WCONTINUED 0
  2058. #endif
  2059. /* called on sigchld etc., calls waitpid */
/* called on sigchld etc., calls waitpid */
static void
childcb (EV_P_ ev_signal *sw, int revents)
{
  int pid, status;

  /* some systems define WCONTINUED but then fail to support it (linux 2.4) */
  if (0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED)))
    if (!WCONTINUED
        || errno != EINVAL
        || 0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED)))
      return;

  /* make sure we are called again until all children have been reaped */
  /* we need to do it this way so that the callback gets called before we continue */
  ev_feed_event (EV_A_ (W)sw, EV_SIGNAL);

  child_reap (EV_A_ pid, pid, status);
  if ((EV_PID_HASHSIZE) > 1)
    child_reap (EV_A_ 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */
}
  2077. #endif
  2078. /*****************************************************************************/
  2079. #if EV_USE_IOCP
  2080. # include "ev_iocp.c"
  2081. #endif
  2082. #if EV_USE_PORT
  2083. # include "ev_port.c"
  2084. #endif
  2085. #if EV_USE_KQUEUE
  2086. # include "ev_kqueue.c"
  2087. #endif
  2088. #if EV_USE_EPOLL
  2089. # include "ev_epoll.c"
  2090. #endif
  2091. #if EV_USE_POLL
  2092. # include "ev_poll.c"
  2093. #endif
  2094. #if EV_USE_SELECT
  2095. # include "ev_select.c"
  2096. #endif
/* compile-time ABI major version of this libev build */
int ecb_cold
ev_version_major (void) EV_THROW
{
  return EV_VERSION_MAJOR;
}
/* compile-time ABI minor version of this libev build */
int ecb_cold
ev_version_minor (void) EV_THROW
{
  return EV_VERSION_MINOR;
}
  2107. /* return true if we are running with elevated privileges and should ignore env variables */
  2108. int inline_size ecb_cold
  2109. enable_secure (void)
  2110. {
  2111. #ifdef _WIN32
  2112. return 0;
  2113. #else
  2114. return getuid () != geteuid ()
  2115. || getgid () != getegid ();
  2116. #endif
  2117. }
  2118. unsigned int ecb_cold
  2119. ev_supported_backends (void) EV_THROW
  2120. {
  2121. unsigned int flags = 0;
  2122. if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
  2123. if (EV_USE_KQUEUE) flags |= EVBACKEND_KQUEUE;
  2124. if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
  2125. if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
  2126. if (EV_USE_SELECT) flags |= EVBACKEND_SELECT;
  2127. return flags;
  2128. }
/* the supported backends minus those known to misbehave on this platform */
unsigned int ecb_cold
ev_recommended_backends (void) EV_THROW
{
  unsigned int flags = ev_supported_backends ();

#ifndef __NetBSD__
#ifndef __FreeBSD__
  /* kqueue is borked on everything but netbsd apparently */
  /* it usually doesn't work correctly on anything but sockets and pipes */
  flags &= ~EVBACKEND_KQUEUE;
#endif
#endif

#ifdef __APPLE__
  /* only select works correctly on that "unix-certified" platform */
  flags &= ~EVBACKEND_KQUEUE; /* horribly broken, even for sockets */
  flags &= ~EVBACKEND_POLL;   /* poll is based on kqueue from 10.5 onwards */
#endif

#ifdef __FreeBSD__
  flags &= ~EVBACKEND_POLL; /* poll return value is unusable (http://forums.freebsd.org/archive/index.php/t-10270.html) */
#endif

  return flags;
}
  2150. unsigned int ecb_cold
  2151. ev_embeddable_backends (void) EV_THROW
  2152. {
  2153. int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT;
  2154. /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
  2155. if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
  2156. flags &= ~EVBACKEND_EPOLL;
  2157. return flags;
  2158. }
/* return the EVBACKEND_* flag of the backend this loop is using */
unsigned int
ev_backend (EV_P) EV_THROW
{
  return backend;
}
  2164. #if EV_FEATURE_API
/* current value of the loop's iteration counter */
unsigned int
ev_iteration (EV_P) EV_THROW
{
  return loop_count;
}
/* current recursion depth of ev_run invocations on this loop */
unsigned int
ev_depth (EV_P) EV_THROW
{
  return loop_depth;
}
/* set the time the loop may spend collecting io events per iteration */
void
ev_set_io_collect_interval (EV_P_ ev_tstamp interval) EV_THROW
{
  io_blocktime = interval;
}
/* set the time the loop may spend collecting timeouts per iteration */
void
ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval) EV_THROW
{
  timeout_blocktime = interval;
}
/* attach opaque user data to the loop; ownership stays with the caller */
void
ev_set_userdata (EV_P_ void *data) EV_THROW
{
  userdata = data;
}
/* retrieve the user data previously set with ev_set_userdata */
void *
ev_userdata (EV_P) EV_THROW
{
  return userdata;
}
/* override the callback used to invoke pending watchers */
void
ev_set_invoke_pending_cb (EV_P_ ev_loop_callback invoke_pending_cb) EV_THROW
{
  invoke_cb = invoke_pending_cb;
}
/* set the callbacks invoked around blocking the loop (for lock handover) */
void
ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_THROW, void (*acquire)(EV_P) EV_THROW) EV_THROW
{
  release_cb = release;
  acquire_cb = acquire;
}
  2206. #endif
  2207. /* initialise a loop structure, must be zero-initialised */
/* initialise a loop structure, must be zero-initialised */
static void noinline ecb_cold
loop_init (EV_P_ unsigned int flags) EV_THROW
{
  if (!backend) /* backend != 0 means the loop is already initialised */
    {
      origflags = flags;

#if EV_USE_REALTIME
      /* probe once whether CLOCK_REALTIME is usable */
      if (!have_realtime)
        {
          struct timespec ts;

          if (!clock_gettime (CLOCK_REALTIME, &ts))
            have_realtime = 1;
        }
#endif

#if EV_USE_MONOTONIC
      /* probe once whether CLOCK_MONOTONIC is usable */
      if (!have_monotonic)
        {
          struct timespec ts;

          if (!clock_gettime (CLOCK_MONOTONIC, &ts))
            have_monotonic = 1;
        }
#endif

      /* pid check not overridable via env */
#ifndef _WIN32
      if (flags & EVFLAG_FORKCHECK)
        curpid = getpid ();
#endif

      /* honour LIBEV_FLAGS unless forbidden or running setuid/setgid */
      if (!(flags & EVFLAG_NOENV)
          && !enable_secure ()
          && getenv ("LIBEV_FLAGS"))
        flags = atoi (getenv ("LIBEV_FLAGS"));

      /* establish the realtime/monotonic clock relationship */
      ev_rt_now   = ev_time ();
      mn_now      = get_clock ();
      now_floor   = mn_now;
      rtmn_diff   = ev_rt_now - mn_now;
#if EV_FEATURE_API
      invoke_cb   = ev_invoke_pending;
#endif

      io_blocktime      = 0.;
      timeout_blocktime = 0.;
      backend           = 0;
      backend_fd        = -1;
      sig_pending       = 0;
#if EV_ASYNC_ENABLE
      async_pending     = 0;
#endif
      pipe_write_skipped = 0;
      pipe_write_wanted  = 0;
      evpipe [0] = -1;
      evpipe [1] = -1;
#if EV_USE_INOTIFY
      fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2; /* -2: not yet initialised */
#endif
#if EV_USE_SIGNALFD
      sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1; /* -2: create on demand */
#endif

      if (!(flags & EVBACKEND_MASK))
        flags |= ev_recommended_backends ();

      /* try the requested backends in order of preference */
#if EV_USE_IOCP
      if (!backend && (flags & EVBACKEND_IOCP  )) backend = iocp_init   (EV_A_ flags);
#endif
#if EV_USE_PORT
      if (!backend && (flags & EVBACKEND_PORT  )) backend = port_init   (EV_A_ flags);
#endif
#if EV_USE_KQUEUE
      if (!backend && (flags & EVBACKEND_KQUEUE)) backend = kqueue_init (EV_A_ flags);
#endif
#if EV_USE_EPOLL
      if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init  (EV_A_ flags);
#endif
#if EV_USE_POLL
      if (!backend && (flags & EVBACKEND_POLL  )) backend = poll_init   (EV_A_ flags);
#endif
#if EV_USE_SELECT
      if (!backend && (flags & EVBACKEND_SELECT)) backend = select_init (EV_A_ flags);
#endif

      ev_prepare_init (&pending_w, pendingcb);

#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
      ev_init (&pipe_w, pipecb);
      ev_set_priority (&pipe_w, EV_MAXPRI);
#endif
    }
}
  2291. /* free up a loop structure */
/* free up a loop structure */
void ecb_cold
ev_loop_destroy (EV_P)
{
  int i;

#if EV_MULTIPLICITY
  /* mimic free (0) */
  if (!EV_A)
    return;
#endif

#if EV_CLEANUP_ENABLE
  /* queue cleanup watchers (and execute them) */
  if (expect_false (cleanupcnt))
    {
      queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP);
      EV_INVOKE_PENDING;
    }
#endif

#if EV_CHILD_ENABLE
  /* the default loop owns the global SIGCHLD watcher - take it down */
  if (ev_is_default_loop (EV_A) && ev_is_active (&childev))
    {
      ev_ref (EV_A); /* child watcher */
      ev_signal_stop (EV_A_ &childev);
    }
#endif

  if (ev_is_active (&pipe_w))
    {
      /*ev_ref (EV_A);*/
      /*ev_io_stop (EV_A_ &pipe_w);*/

      if (evpipe [0] >= 0) EV_WIN32_CLOSE_FD (evpipe [0]);
      if (evpipe [1] >= 0) EV_WIN32_CLOSE_FD (evpipe [1]);
    }

#if EV_USE_SIGNALFD
  if (ev_is_active (&sigfd_w))
    close (sigfd);
#endif

#if EV_USE_INOTIFY
  if (fs_fd >= 0)
    close (fs_fd);
#endif

  if (backend_fd >= 0)
    close (backend_fd);

  /* tear down whichever backend is in use */
#if EV_USE_IOCP
  if (backend == EVBACKEND_IOCP  ) iocp_destroy   (EV_A);
#endif
#if EV_USE_PORT
  if (backend == EVBACKEND_PORT  ) port_destroy   (EV_A);
#endif
#if EV_USE_KQUEUE
  if (backend == EVBACKEND_KQUEUE) kqueue_destroy (EV_A);
#endif
#if EV_USE_EPOLL
  if (backend == EVBACKEND_EPOLL ) epoll_destroy  (EV_A);
#endif
#if EV_USE_POLL
  if (backend == EVBACKEND_POLL  ) poll_destroy   (EV_A);
#endif
#if EV_USE_SELECT
  if (backend == EVBACKEND_SELECT) select_destroy (EV_A);
#endif

  /* free the per-priority arrays */
  for (i = NUMPRI; i--; )
    {
      array_free (pending, [i]);
#if EV_IDLE_ENABLE
      array_free (idle, [i]);
#endif
    }

  ev_free (anfds); anfds = 0; anfdmax = 0;

  /* have to use the microsoft-never-gets-it-right macro */
  array_free (rfeed, EMPTY);
  array_free (fdchange, EMPTY);
  array_free (timer, EMPTY);
#if EV_PERIODIC_ENABLE
  array_free (periodic, EMPTY);
#endif
#if EV_FORK_ENABLE
  array_free (fork, EMPTY);
#endif
#if EV_CLEANUP_ENABLE
  array_free (cleanup, EMPTY);
#endif
  array_free (prepare, EMPTY);
  array_free (check, EMPTY);
#if EV_ASYNC_ENABLE
  array_free (async, EMPTY);
#endif

  backend = 0; /* mark the loop as uninitialised */

#if EV_MULTIPLICITY
  if (ev_is_default_loop (EV_A))
#endif
    ev_default_loop_ptr = 0;
#if EV_MULTIPLICITY
  else
    ev_free (EV_A); /* only dynamically allocated loops are freed */
#endif
}
  2387. #if EV_USE_INOTIFY
  2388. inline_size void infy_fork (EV_P);
  2389. #endif
/* bring the loop back into a usable state in the child after fork */
inline_size void
loop_fork (EV_P)
{
  /* give the backend a chance to recreate kernel state (fds don't survive fork) */
#if EV_USE_PORT
  if (backend == EVBACKEND_PORT  ) port_fork   (EV_A);
#endif
#if EV_USE_KQUEUE
  if (backend == EVBACKEND_KQUEUE) kqueue_fork (EV_A);
#endif
#if EV_USE_EPOLL
  if (backend == EVBACKEND_EPOLL ) epoll_fork  (EV_A);
#endif
#if EV_USE_INOTIFY
  infy_fork (EV_A);
#endif

#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
  if (ev_is_active (&pipe_w))
    {
      /* pipe_write_wanted must be false now, so modifying fd vars should be safe */

      ev_ref (EV_A);
      ev_io_stop (EV_A_ &pipe_w);

      if (evpipe [0] >= 0)
        EV_WIN32_CLOSE_FD (evpipe [0]);

      evpipe_init (EV_A); /* recreate the wakeup pipe/eventfd */
      /* iterate over everything, in case we missed something before */
      ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
    }
#endif

  postfork = 0;
}
  2420. #if EV_MULTIPLICITY
/* allocate and initialise a new, non-default event loop; */
/* returns 0 if no requested backend could be initialised */
struct ev_loop * ecb_cold
ev_loop_new (unsigned int flags) EV_THROW
{
  EV_P = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop));

  memset (EV_A, 0, sizeof (struct ev_loop)); /* loop_init requires zeroed memory */
  loop_init (EV_A_ flags);

  if (ev_backend (EV_A))
    return EV_A;

  ev_free (EV_A); /* no backend - undo the allocation */
  return 0;
}
  2432. #endif /* multiplicity */
  2433. #if EV_VERIFY
/* check the basic invariants of a single watcher (EV_VERIFY only) */
static void noinline ecb_cold
verify_watcher (EV_P_ W w)
{
  assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI));

  if (w->pending)
    assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w));
}
/* check the structural invariants of a timer/periodic heap (EV_VERIFY only) */
static void noinline ecb_cold
verify_heap (EV_P_ ANHE *heap, int N)
{
  int i;

  for (i = HEAP0; i < N + HEAP0; ++i)
    {
      assert (("libev: active index mismatch in heap", ev_active (ANHE_w (heap [i])) == i));
      assert (("libev: heap condition violated", i == HEAP0 || ANHE_at (heap [HPARENT (i)]) <= ANHE_at (heap [i])));
      assert (("libev: heap at cache mismatch", ANHE_at (heap [i]) == ev_at (ANHE_w (heap [i]))));

      verify_watcher (EV_A_ (W)ANHE_w (heap [i]));
    }
}
/* check a flat watcher array (idle/prepare/check/...) (EV_VERIFY only) */
static void noinline ecb_cold
array_verify (EV_P_ W *ws, int cnt)
{
  while (cnt--)
    {
      /* active stores index + 1, hence the off-by-one */
      assert (("libev: active index mismatch", ev_active (ws [cnt]) == cnt + 1));
      verify_watcher (EV_A_ ws [cnt]);
    }
}
  2462. #endif
  2463. #if EV_FEATURE_API
/* exhaustively verify all internal loop data structures; */
/* compiles to a no-op body unless EV_VERIFY is enabled */
void ecb_cold
ev_verify (EV_P) EV_THROW
{
#if EV_VERIFY
  int i;
  WL w, w2;

  assert (activecnt >= -1);

  assert (fdchangemax >= fdchangecnt);
  for (i = 0; i < fdchangecnt; ++i)
    assert (("libev: negative fd in fdchanges", fdchanges [i] >= 0));

  assert (anfdmax >= 0);
  for (i = 0; i < anfdmax; ++i)
    {
      int j = 0;

      /* w2 advances at half speed - classic cycle detection */
      for (w = w2 = anfds [i].head; w; w = w->next)
        {
          verify_watcher (EV_A_ (W)w);

          if (j++ & 1)
            {
              assert (("libev: io watcher list contains a loop", w != w2));
              w2 = w2->next;
            }

          assert (("libev: inactive fd watcher on anfd list", ev_active (w) == 1));
          assert (("libev: fd mismatch between watcher and anfd", ((ev_io *)w)->fd == i));
        }
    }

  assert (timermax >= timercnt);
  verify_heap (EV_A_ timers, timercnt);

#if EV_PERIODIC_ENABLE
  assert (periodicmax >= periodiccnt);
  verify_heap (EV_A_ periodics, periodiccnt);
#endif

  for (i = NUMPRI; i--; )
    {
      assert (pendingmax [i] >= pendingcnt [i]);
#if EV_IDLE_ENABLE
      assert (idleall >= 0);
      assert (idlemax [i] >= idlecnt [i]);
      array_verify (EV_A_ (W *)idles [i], idlecnt [i]);
#endif
    }

#if EV_FORK_ENABLE
  assert (forkmax >= forkcnt);
  array_verify (EV_A_ (W *)forks, forkcnt);
#endif

#if EV_CLEANUP_ENABLE
  assert (cleanupmax >= cleanupcnt);
  array_verify (EV_A_ (W *)cleanups, cleanupcnt);
#endif

#if EV_ASYNC_ENABLE
  assert (asyncmax >= asynccnt);
  array_verify (EV_A_ (W *)asyncs, asynccnt);
#endif

#if EV_PREPARE_ENABLE
  assert (preparemax >= preparecnt);
  array_verify (EV_A_ (W *)prepares, preparecnt);
#endif

#if EV_CHECK_ENABLE
  assert (checkmax >= checkcnt);
  array_verify (EV_A_ (W *)checks, checkcnt);
#endif

  /* intentionally dead sketch of child/signal verification - never compiled */
# if 0
#if EV_CHILD_ENABLE
  for (w = (ev_child *)childs [chain & ((EV_PID_HASHSIZE) - 1)]; w; w = (ev_child *)((WL)w)->next)
  for (signum = EV_NSIG; signum--; ) if (signals [signum].pending)
#endif
# endif
#endif
}
  2533. #endif
/* return (and lazily initialise) the default loop; without multiplicity */
/* the return value degenerates to a boolean success flag */
#if EV_MULTIPLICITY
struct ev_loop * ecb_cold
#else
int
#endif
ev_default_loop (unsigned int flags) EV_THROW
{
  if (!ev_default_loop_ptr)
    {
#if EV_MULTIPLICITY
      EV_P = ev_default_loop_ptr = &default_loop_struct;
#else
      ev_default_loop_ptr = 1;
#endif

      loop_init (EV_A_ flags);

      if (ev_backend (EV_A))
        {
          /* only the default loop reaps children */
#if EV_CHILD_ENABLE
          ev_signal_init (&childev, childcb, SIGCHLD);
          ev_set_priority (&childev, EV_MAXPRI);
          ev_signal_start (EV_A_ &childev);
          ev_unref (EV_A); /* child watcher should not keep loop alive */
#endif
        }
      else
        ev_default_loop_ptr = 0; /* initialisation failed - allow a retry */
    }

  return ev_default_loop_ptr;
}
/* flag the loop for reinitialisation; the actual work happens */
/* lazily at the start of the next iteration (see loop_fork) */
void
ev_loop_fork (EV_P) EV_THROW
{
  postfork = 1;
}
  2568. /*****************************************************************************/
/* invoke a watcher's callback directly with the given revents */
void
ev_invoke (EV_P_ void *w, int revents)
{
  EV_CB_INVOKE ((W)w, revents);
}
  2574. unsigned int
  2575. ev_pending_count (EV_P) EV_THROW
  2576. {
  2577. int pri;
  2578. unsigned int count = 0;
  2579. for (pri = NUMPRI; pri--; )
  2580. count += pendingcnt [pri];
  2581. return count;
  2582. }
/* invoke all pending watchers, highest priority first; a callback may */
/* queue new events, which is why both loops re-test their conditions */
void noinline
ev_invoke_pending (EV_P)
{
  pendingpri = NUMPRI;

  while (pendingpri) /* pendingpri possibly gets modified in the inner loop */
    {
      --pendingpri;

      /* LIFO within a priority band: take from the end of the array */
      while (pendingcnt [pendingpri])
        {
          ANPENDING *p = pendings [pendingpri] + --pendingcnt [pendingpri];

          p->w->pending = 0; /* clear before invoking, so the callback may re-queue */
          EV_CB_INVOKE (p->w, p->events);
          EV_FREQUENT_CHECK;
        }
    }
}
  2599. #if EV_IDLE_ENABLE
  2600. /* make idle watchers pending. this handles the "call-idle */
  2601. /* only when higher priorities are idle" logic */
inline_size void
idle_reify (EV_P)
{
  if (expect_false (idleall))
    {
      int pri;

      /* scan priorities from high to low; stop at the first band that has */
      /* pending events - idle watchers below it must not fire */
      for (pri = NUMPRI; pri--; )
        {
          if (pendingcnt [pri])
            break;

          if (idlecnt [pri])
            {
              queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE);
              break;
            }
        }
    }
}
  2620. #endif
  2621. /* make timers pending */
/* make timers pending */
inline_size void
timers_reify (EV_P)
{
  EV_FREQUENT_CHECK;

  if (timercnt && ANHE_at (timers [HEAP0]) < mn_now)
    {
      /* repeatedly pop the earliest timer while it has expired */
      do
        {
          ev_timer *w = (ev_timer *)ANHE_w (timers [HEAP0]);

          /*assert (("libev: inactive timer on timer heap detected", ev_is_active (w)));*/

          /* first reschedule or stop timer */
          if (w->repeat)
            {
              ev_at (w) += w->repeat;
              if (ev_at (w) < mn_now)
                ev_at (w) = mn_now; /* never schedule into the past */

              assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.));

              ANHE_at_cache (timers [HEAP0]);
              downheap (timers, timercnt, HEAP0);
            }
          else
            ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */

          EV_FREQUENT_CHECK;
          /* collect in reverse order; feed_reverse_done flips it so */
          /* watchers fire in expiry order */
          feed_reverse (EV_A_ (W)w);
        }
      while (timercnt && ANHE_at (timers [HEAP0]) < mn_now);

      feed_reverse_done (EV_A_ EV_TIMER);
    }
}
#if EV_PERIODIC_ENABLE

/* compute the next trigger time of an interval periodic: the smallest
 * offset + k*interval (integer k) that lies strictly in the future */
static void noinline
periodic_recalc (EV_P_ ev_periodic *w)
{
  /* clamp tiny intervals so the division below stays well-behaved */
  ev_tstamp interval = w->interval > MIN_INTERVAL ? w->interval : MIN_INTERVAL;
  ev_tstamp at = w->offset + interval * ev_floor ((ev_rt_now - w->offset) / interval);

  /* the above almost always errs on the low side */
  while (at <= ev_rt_now)
    {
      ev_tstamp nat = at + w->interval;

      /* when resolution fails us, we use ev_rt_now */
      /* (nat == at means the addition was swallowed by fp rounding) */
      if (expect_false (nat == at))
        {
          at = ev_rt_now;
          break;
        }

      at = nat;
    }

  ev_at (w) = at;
}
/* make periodics pending: queue every periodic watcher whose cached
 * trigger time lies before the current wall-clock time */
inline_size void
periodics_reify (EV_P)
{
  EV_FREQUENT_CHECK;

  while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now)
    {
      do
        {
          ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]);

          /*assert (("libev: inactive timer on periodic heap detected", ev_is_active (w)));*/

          /* first reschedule or stop timer */
          if (w->reschedule_cb)
            {
              /* user callback decides the next trigger time */
              ev_at (w) = w->reschedule_cb (w, ev_rt_now);

              assert (("libev: ev_periodic reschedule callback returned time in the past", ev_at (w) >= ev_rt_now));

              ANHE_at_cache (periodics [HEAP0]);
              downheap (periodics, periodiccnt, HEAP0);
            }
          else if (w->interval)
            {
              /* interval-based: realign to offset + k*interval */
              periodic_recalc (EV_A_ w);
              ANHE_at_cache (periodics [HEAP0]);
              downheap (periodics, periodiccnt, HEAP0);
            }
          else
            ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */

          EV_FREQUENT_CHECK;
          feed_reverse (EV_A_ (W)w); /* collect; queued in one batch below */
        }
      while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now);

      feed_reverse_done (EV_A_ EV_PERIODIC);
    }
}
/* simply recalculate all periodics */
/* TODO: maybe ensure that at least one event happens when jumping forward? */
static void noinline ecb_cold
periodics_reschedule (EV_P)
{
  int i;

  /* adjust periodics after time jump: recompute each trigger time,
   * then rebuild the heap in one pass instead of per-element sifting */
  for (i = HEAP0; i < periodiccnt + HEAP0; ++i)
    {
      ev_periodic *w = (ev_periodic *)ANHE_w (periodics [i]);

      if (w->reschedule_cb)
        ev_at (w) = w->reschedule_cb (w, ev_rt_now);
      else if (w->interval)
        periodic_recalc (EV_A_ w);

      ANHE_at_cache (periodics [i]);
    }

  reheap (periodics, periodiccnt);
}
#endif
/* adjust all timers by a given offset */
static void noinline ecb_cold
timers_reschedule (EV_P_ ev_tstamp adjust)
{
  int i;

  for (i = 0; i < timercnt; ++i)
    {
      ANHE *he = timers + i + HEAP0;
      ANHE_w (*he)->at += adjust;
      /* heap order is preserved: every entry shifts by the same amount,
       * so only the cached deadline needs refreshing */
      ANHE_at_cache (*he);
    }
}
/* fetch new monotonic and realtime times from the kernel */
/* also detect if there was a timejump, and act accordingly */
inline_speed void
time_update (EV_P_ ev_tstamp max_block)
{
#if EV_USE_MONOTONIC
  if (expect_true (have_monotonic))
    {
      int i;
      ev_tstamp odiff = rtmn_diff;

      mn_now = get_clock ();

      /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
      /* interpolate in the meantime */
      if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
        {
          ev_rt_now = rtmn_diff + mn_now;
          return;
        }

      now_floor = mn_now;
      ev_rt_now = ev_time ();

      /* loop a few times, before making important decisions.
       * on the choice of "4": one iteration isn't enough,
       * in case we get preempted during the calls to
       * ev_time and get_clock. a second call is almost guaranteed
       * to succeed in that case, though. and looping a few more times
       * doesn't hurt either as we only do this on time-jumps or
       * in the unlikely event of having been preempted here.
       */
      for (i = 4; --i; )
        {
          ev_tstamp diff;
          rtmn_diff = ev_rt_now - mn_now;

          diff = odiff - rtmn_diff;

          /* if the realtime-vs-monotonic offset is stable, no jump happened */
          if (expect_true ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP))
            return; /* all is well */

          ev_rt_now = ev_time ();
          mn_now    = get_clock ();
          now_floor = mn_now;
        }

      /* still here: the realtime clock jumped; only periodics care,
       * as relative timers run off the (non-jumping) monotonic clock */
      /* no timer adjustment, as the monotonic clock doesn't jump */
      /* timers_reschedule (EV_A_ rtmn_diff - odiff) */
# if EV_PERIODIC_ENABLE
      periodics_reschedule (EV_A);
# endif
    }
  else
#endif
    {
      ev_rt_now = ev_time ();

      /* heuristic jump detection without a monotonic clock: time went
       * backwards, or advanced by more than the maximum expected block */
      if (expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP))
        {
          /* adjust timers. this is easy, as the offset is the same for all of them */
          timers_reschedule (EV_A_ ev_rt_now - mn_now);
#if EV_PERIODIC_ENABLE
          periodics_reschedule (EV_A);
#endif
        }

      mn_now = ev_rt_now;
    }
}
/* the main event loop: gather events, block if allowed, invoke callbacks,
 * repeat until ev_break is called or no watchers keep the loop referenced.
 * returns the remaining activecnt. */
int
ev_run (EV_P_ int flags)
{
#if EV_FEATURE_API
  ++loop_depth;
#endif

  assert (("libev: ev_loop recursion during release detected", loop_done != EVBREAK_RECURSE));

  loop_done = EVBREAK_CANCEL;

  EV_INVOKE_PENDING; /* in case we recurse, ensure ordering stays nice and clean */

  do
    {
#if EV_VERIFY >= 2
      ev_verify (EV_A);
#endif

#ifndef _WIN32
      if (expect_false (curpid)) /* penalise the forking check even more */
        if (expect_false (getpid () != curpid))
          {
            curpid = getpid ();
            postfork = 1;
          }
#endif

#if EV_FORK_ENABLE
      /* we might have forked, so queue fork handlers */
      if (expect_false (postfork))
        if (forkcnt)
          {
            queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
            EV_INVOKE_PENDING;
          }
#endif

#if EV_PREPARE_ENABLE
      /* queue prepare watchers (and execute them) */
      if (expect_false (preparecnt))
        {
          queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
          EV_INVOKE_PENDING;
        }
#endif

      if (expect_false (loop_done))
        break;

      /* we might have forked, so reify kernel state if necessary */
      if (expect_false (postfork))
        loop_fork (EV_A);

      /* update fd-related kernel structures */
      fd_reify (EV_A);

      /* calculate blocking time */
      {
        ev_tstamp waittime  = 0.;
        ev_tstamp sleeptime = 0.;

        /* remember old timestamp for io_blocktime calculation */
        ev_tstamp prev_mn_now = mn_now;

        /* update time to cancel out callback processing overhead */
        time_update (EV_A_ 1e100);

        /* from now on, we want a pipe-wake-up */
        pipe_write_wanted = 1;

        ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */

        /* only compute a real wait time when blocking is actually allowed */
        if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
          {
            waittime = MAX_BLOCKTIME;

            /* cap by the earliest timer deadline (monotonic clock) */
            if (timercnt)
              {
                ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now;
                if (waittime > to) waittime = to;
              }

#if EV_PERIODIC_ENABLE
            /* cap by the earliest periodic deadline (wall clock) */
            if (periodiccnt)
              {
                ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now;
                if (waittime > to) waittime = to;
              }
#endif

            /* don't let timeouts decrease the waittime below timeout_blocktime */
            if (expect_false (waittime < timeout_blocktime))
              waittime = timeout_blocktime;

            /* at this point, we NEED to wait, so we have to ensure */
            /* to pass a minimum nonzero value to the backend */
            if (expect_false (waittime < backend_mintime))
              waittime = backend_mintime;

            /* extra check because io_blocktime is commonly 0 */
            if (expect_false (io_blocktime))
              {
                sleeptime = io_blocktime - (mn_now - prev_mn_now);

                if (sleeptime > waittime - backend_mintime)
                  sleeptime = waittime - backend_mintime;

                if (expect_true (sleeptime > 0.))
                  {
                    ev_sleep (sleeptime);
                    waittime -= sleeptime;
                  }
              }
          }

#if EV_FEATURE_API
        ++loop_count;
#endif

        assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */
        backend_poll (EV_A_ waittime);
        assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */

        pipe_write_wanted = 0; /* just an optimisation, no fence needed */

        ECB_MEMORY_FENCE_ACQUIRE;
        if (pipe_write_skipped)
          {
            /* a wakeup was elided while we were not blocking; deliver it now */
            assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w)));
            ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
          }

        /* update ev_rt_now, do magic */
        time_update (EV_A_ waittime + sleeptime);
      }

      /* queue pending timers and reschedule them */
      timers_reify (EV_A); /* relative timers called last */

#if EV_PERIODIC_ENABLE
      periodics_reify (EV_A); /* absolute timers called first */
#endif

#if EV_IDLE_ENABLE
      /* queue idle watchers unless other events are pending */
      idle_reify (EV_A);
#endif

#if EV_CHECK_ENABLE
      /* queue check watchers, to be executed first */
      if (expect_false (checkcnt))
        queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
#endif

      EV_INVOKE_PENDING;
    }
  while (expect_true (
    activecnt
    && !loop_done
    && !(flags & (EVRUN_ONCE | EVRUN_NOWAIT))
  ));

  /* a "break one" only terminates this ev_run invocation */
  if (loop_done == EVBREAK_ONE)
    loop_done = EVBREAK_CANCEL;

#if EV_FEATURE_API
  --loop_depth;
#endif

  return activecnt;
}
/* request loop termination; "how" is an EVBREAK_* code checked by ev_run */
void
ev_break (EV_P_ int how) EV_THROW
{
  loop_done = how;
}
/* add one reference keeping the loop alive (pairs with ev_unref) */
void
ev_ref (EV_P) EV_THROW
{
  ++activecnt;
}
/* drop one loop reference; ev_run exits when activecnt reaches zero */
void
ev_unref (EV_P) EV_THROW
{
  --activecnt;
}
/* force a refresh of ev_rt_now/mn_now from the kernel clocks */
void
ev_now_update (EV_P) EV_THROW
{
  time_update (EV_A_ 1e100);
}
/* record the current time before a suspension; ev_resume uses the
 * saved mn_now to shift timers by the suspended duration */
void
ev_suspend (EV_P) EV_THROW
{
  ev_now_update (EV_A);
}
/* resume after ev_suspend: shift all relative timers forward by the
 * amount of monotonic time that passed while suspended */
void
ev_resume (EV_P) EV_THROW
{
  ev_tstamp mn_prev = mn_now;

  ev_now_update (EV_A);
  timers_reschedule (EV_A_ mn_now - mn_prev);
#if EV_PERIODIC_ENABLE
  /* TODO: really do this? */
  periodics_reschedule (EV_A);
#endif
}
/*****************************************************************************/

/* singly-linked list management, used when the expected list length is short */

/* push elem onto the front of the list */
inline_size void
wlist_add (WL *head, WL elem)
{
  elem->next = *head;
  *head = elem;
}
  2976. inline_size void
  2977. wlist_del (WL *head, WL elem)
  2978. {
  2979. while (*head)
  2980. {
  2981. if (expect_true (*head == elem))
  2982. {
  2983. *head = elem->next;
  2984. break;
  2985. }
  2986. head = &(*head)->next;
  2987. }
  2988. }
  2989. /* internal, faster, version of ev_clear_pending */
  2990. inline_speed void
  2991. clear_pending (EV_P_ W w)
  2992. {
  2993. if (w->pending)
  2994. {
  2995. pendings [ABSPRI (w)][w->pending - 1].w = (W)&pending_w;
  2996. w->pending = 0;
  2997. }
  2998. }
  2999. int
  3000. ev_clear_pending (EV_P_ void *w) EV_THROW
  3001. {
  3002. W w_ = (W)w;
  3003. int pending = w_->pending;
  3004. if (expect_true (pending))
  3005. {
  3006. ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1;
  3007. p->w = (W)&pending_w;
  3008. w_->pending = 0;
  3009. return p->events;
  3010. }
  3011. else
  3012. return 0;
  3013. }
  3014. inline_size void
  3015. pri_adjust (EV_P_ W w)
  3016. {
  3017. int pri = ev_priority (w);
  3018. pri = pri < EV_MINPRI ? EV_MINPRI : pri;
  3019. pri = pri > EV_MAXPRI ? EV_MAXPRI : pri;
  3020. ev_set_priority (w, pri);
  3021. }
/* common watcher activation: clamp the priority, record the active
 * index/slot, and add a loop reference to keep ev_run alive */
inline_speed void
ev_start (EV_P_ W w, int active)
{
  pri_adjust (EV_A_ w);
  w->active = active;
  ev_ref (EV_A);
}
/* common watcher deactivation: drop the loop reference and mark inactive */
inline_size void
ev_stop (EV_P_ W w)
{
  ev_unref (EV_A);
  w->active = 0;
}
/*****************************************************************************/

/* activate an io watcher: register it in the per-fd watcher list and
 * request that the kernel state for that fd be re-examined */
void noinline
ev_io_start (EV_P_ ev_io *w) EV_THROW
{
  int fd = w->fd;

  if (expect_false (ev_is_active (w)))
    return;

  assert (("libev: ev_io_start called with negative fd", fd >= 0));
  assert (("libev: ev_io_start called with illegal event mask", !(w->events & ~(EV__IOFDSET | EV_READ | EV_WRITE))));

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, 1);
  /* grow the per-fd table as needed and hook this watcher into fd's list */
  array_needsize (ANFD, anfds, anfdmax, fd + 1, array_init_zero);
  wlist_add (&anfds[fd].head, (WL)w);

  /* common bug, apparently */
  assert (("libev: ev_io_start called with corrupted watcher", ((WL)w)->next != (WL)w));

  /* note: "&" binds tighter than "|" — forwards (w->events & EV__IOFDSET) | EV_ANFD_REIFY */
  fd_change (EV_A_ fd, w->events & EV__IOFDSET | EV_ANFD_REIFY);
  w->events &= ~EV__IOFDSET;

  EV_FREQUENT_CHECK;
}
/* deactivate an io watcher: cancel any pending event, unlink it from
 * the per-fd list and schedule a kernel-state refresh for the fd */
void noinline
ev_io_stop (EV_P_ ev_io *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));

  EV_FREQUENT_CHECK;

  wlist_del (&anfds[w->fd].head, (WL)w);
  ev_stop (EV_A_ (W)w);

  fd_change (EV_A_ w->fd, EV_ANFD_REIFY);

  EV_FREQUENT_CHECK;
}
/* activate a relative timer: convert its "after" value to an absolute
 * monotonic deadline and insert it into the timer heap */
void noinline
ev_timer_start (EV_P_ ev_timer *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  /* ev_at holds the relative timeout until start; make it absolute */
  ev_at (w) += mn_now;

  assert (("libev: ev_timer_start called with negative timer repeat value", w->repeat >= 0.));

  EV_FREQUENT_CHECK;

  ++timercnt;
  /* the watcher's "active" value doubles as its heap index */
  ev_start (EV_A_ (W)w, timercnt + HEAP0 - 1);
  array_needsize (ANHE, timers, timermax, ev_active (w) + 1, EMPTY2);
  ANHE_w (timers [ev_active (w)]) = (WT)w;
  ANHE_at_cache (timers [ev_active (w)]);
  upheap (timers, ev_active (w));

  EV_FREQUENT_CHECK;

  /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/
}
/* deactivate a timer: remove it from the heap and convert its deadline
 * back to a relative value so a later restart works as expected */
void noinline
ev_timer_stop (EV_P_ ev_timer *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    assert (("libev: internal timer heap corruption", ANHE_w (timers [active]) == (WT)w));

    --timercnt;

    /* unless it was the last heap element, move the last element into
     * the vacated slot and restore heap order */
    if (expect_true (active < timercnt + HEAP0))
      {
        timers [active] = timers [timercnt + HEAP0];
        adjustheap (timers, timercnt, active);
      }
  }

  ev_at (w) -= mn_now; /* back to relative time */

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
/* restart a timer as if its repeat interval just elapsed: an active
 * repeating timer is re-armed in place, an active non-repeating timer is
 * stopped, and an inactive repeating timer is started fresh */
void noinline
ev_timer_again (EV_P_ ev_timer *w) EV_THROW
{
  EV_FREQUENT_CHECK;

  clear_pending (EV_A_ (W)w);

  if (ev_is_active (w))
    {
      if (w->repeat)
        {
          /* push the deadline to now + repeat and fix up the heap */
          ev_at (w) = mn_now + w->repeat;
          ANHE_at_cache (timers [ev_active (w)]);
          adjustheap (timers, timercnt, ev_active (w));
        }
      else
        ev_timer_stop (EV_A_ w);
    }
  else if (w->repeat)
    {
      /* ev_timer_start will add mn_now, making this absolute */
      ev_at (w) = w->repeat;
      ev_timer_start (EV_A_ w);
    }

  EV_FREQUENT_CHECK;
}
  3128. ev_tstamp
  3129. ev_timer_remaining (EV_P_ ev_timer *w) EV_THROW
  3130. {
  3131. return ev_at (w) - (ev_is_active (w) ? mn_now : 0.);
  3132. }
#if EV_PERIODIC_ENABLE
/* activate a periodic watcher: compute its first absolute trigger time
 * (via callback, interval alignment, or plain offset) and heap-insert it */
void noinline
ev_periodic_start (EV_P_ ev_periodic *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  if (w->reschedule_cb)
    ev_at (w) = w->reschedule_cb (w, ev_rt_now);
  else if (w->interval)
    {
      assert (("libev: ev_periodic_start called with negative interval value", w->interval >= 0.));
      periodic_recalc (EV_A_ w);
    }
  else
    ev_at (w) = w->offset; /* one-shot at an absolute time */

  EV_FREQUENT_CHECK;

  ++periodiccnt;
  /* the watcher's "active" value doubles as its heap index */
  ev_start (EV_A_ (W)w, periodiccnt + HEAP0 - 1);
  array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1, EMPTY2);
  ANHE_w (periodics [ev_active (w)]) = (WT)w;
  ANHE_at_cache (periodics [ev_active (w)]);
  upheap (periodics, ev_active (w));

  EV_FREQUENT_CHECK;

  /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/
}
/* deactivate a periodic watcher and remove it from the periodic heap */
void noinline
ev_periodic_stop (EV_P_ ev_periodic *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    assert (("libev: internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w));

    --periodiccnt;

    /* fill the vacated slot with the last heap element, then re-heapify */
    if (expect_true (active < periodiccnt + HEAP0))
      {
        periodics [active] = periodics [periodiccnt + HEAP0];
        adjustheap (periodics, periodiccnt, active);
      }
  }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
/* re-arm a periodic watcher by a full stop/start cycle, which recomputes
 * its trigger time from the current settings */
void noinline
ev_periodic_again (EV_P_ ev_periodic *w) EV_THROW
{
  /* TODO: use adjustheap and recalculation */
  ev_periodic_stop (EV_A_ w);
  ev_periodic_start (EV_A_ w);
}
#endif
  3186. #ifndef SA_RESTART
  3187. # define SA_RESTART 0
  3188. #endif
#if EV_SIGNAL_ENABLE

/* activate a signal watcher: bind the signal to this loop, set up the
 * delivery mechanism (signalfd where available, otherwise a classic
 * signal handler writing to the wakeup pipe) and register the watcher */
void noinline
ev_signal_start (EV_P_ ev_signal *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG));

#if EV_MULTIPLICITY
  assert (("libev: a signal must not be attached to two different loops",
           !signals [w->signum - 1].loop || signals [w->signum - 1].loop == loop));

  signals [w->signum - 1].loop = EV_A;
  ECB_MEMORY_FENCE_RELEASE; /* publish the loop binding before the handler can fire */
#endif

  EV_FREQUENT_CHECK;

#if EV_USE_SIGNALFD
  /* sigfd == -2 means "not tried yet": lazily create the signalfd */
  if (sigfd == -2)
    {
      sigfd = signalfd (-1, &sigfd_set, SFD_NONBLOCK | SFD_CLOEXEC);
      if (sigfd < 0 && errno == EINVAL)
        sigfd = signalfd (-1, &sigfd_set, 0); /* retry without flags */

      if (sigfd >= 0)
        {
          fd_intern (sigfd); /* doing it twice will not hurt */

          sigemptyset (&sigfd_set);

          /* watch the signalfd itself with an internal io watcher */
          ev_io_init (&sigfd_w, sigfdcb, sigfd, EV_READ);
          ev_set_priority (&sigfd_w, EV_MAXPRI);
          ev_io_start (EV_A_ &sigfd_w);
          ev_unref (EV_A); /* signalfd watcher should not keep loop alive */
        }
    }

  if (sigfd >= 0)
    {
      /* TODO: check .head */
      /* route this signal through the signalfd: block normal delivery */
      sigaddset (&sigfd_set, w->signum);
      sigprocmask (SIG_BLOCK, &sigfd_set, 0);

      signalfd (sigfd, &sigfd_set, 0);
    }
#endif

  ev_start (EV_A_ (W)w, 1);
  wlist_add (&signals [w->signum - 1].head, (WL)w);

  /* only the first watcher for a signal installs the handler */
  if (!((WL)w)->next)
# if EV_USE_SIGNALFD
    if (sigfd < 0) /*TODO*/
# endif
      {
# ifdef _WIN32
        evpipe_init (EV_A);

        signal (w->signum, ev_sighandler);
# else
        struct sigaction sa;

        evpipe_init (EV_A);

        sa.sa_handler = ev_sighandler;
        sigfillset (&sa.sa_mask);
        sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */
        sigaction (w->signum, &sa, 0);

        if (origflags & EVFLAG_NOSIGMASK)
          {
            /* caller asked us not to alter the mask globally, but this
             * signal must be deliverable to be caught */
            sigemptyset (&sa.sa_mask);
            sigaddset (&sa.sa_mask, w->signum);
            sigprocmask (SIG_UNBLOCK, &sa.sa_mask, 0);
          }
#endif
      }

  EV_FREQUENT_CHECK;
}
/* deactivate a signal watcher; when the last watcher for a signal goes
 * away, detach the loop and restore the default signal disposition */
void noinline
ev_signal_stop (EV_P_ ev_signal *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  wlist_del (&signals [w->signum - 1].head, (WL)w);
  ev_stop (EV_A_ (W)w);

  if (!signals [w->signum - 1].head)
    {
#if EV_MULTIPLICITY
      signals [w->signum - 1].loop = 0; /* unattach from signal */
#endif
#if EV_USE_SIGNALFD
      if (sigfd >= 0)
        {
          /* stop routing through the signalfd and unblock the signal */
          sigset_t ss;

          sigemptyset (&ss);
          sigaddset (&ss, w->signum);
          sigdelset (&sigfd_set, w->signum);

          signalfd (sigfd, &sigfd_set, 0);
          sigprocmask (SIG_UNBLOCK, &ss, 0);
        }
      else
#endif
        signal (w->signum, SIG_DFL);
    }

  EV_FREQUENT_CHECK;
}
#endif
#if EV_CHILD_ENABLE

/* activate a child watcher by hashing it into the pid bucket list;
 * child watchers only work on the default loop (SIGCHLD is global) */
void
ev_child_start (EV_P_ ev_child *w) EV_THROW
{
#if EV_MULTIPLICITY
  assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
#endif
  if (expect_false (ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, 1);
  wlist_add (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w);

  EV_FREQUENT_CHECK;
}
/* deactivate a child watcher and unlink it from its pid hash bucket */
void
ev_child_stop (EV_P_ ev_child *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  wlist_del (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w);
  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
#endif
  3311. #if EV_STAT_ENABLE
  3312. # ifdef _WIN32
  3313. # undef lstat
  3314. # define lstat(a,b) _stati64 (a,b)
  3315. # endif
  3316. #define DEF_STAT_INTERVAL 5.0074891
  3317. #define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */
  3318. #define MIN_STAT_INTERVAL 0.1074891
  3319. static void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents);
  3320. #if EV_USE_INOTIFY
  3321. /* the * 2 is to allow for alignment padding, which for some reason is >> 8 */
  3322. # define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX)
/* register an inotify watch for a stat watcher's path, choosing a
 * fallback stat-polling interval depending on how reliable inotify is
 * for the underlying filesystem */
static void noinline
infy_add (EV_P_ ev_stat *w)
{
  w->wd = inotify_add_watch (fs_fd, w->path,
                             IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY
                             | IN_CREATE | IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO
                             | IN_DONT_FOLLOW | IN_MASK_ADD);

  if (w->wd >= 0)
    {
      struct statfs sfs;

      /* now local changes will be tracked by inotify, but remote changes won't */
      /* unless the filesystem is known to be local, we therefore still poll */
      /* also do poll on <2.6.25, but with normal frequency */

      if (!fs_2625)
        w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;
      else if (!statfs (w->path, &sfs)
               && (sfs.f_type == 0x1373 /* devfs */
                   || sfs.f_type == 0x4006 /* fat */
                   || sfs.f_type == 0x4d44 /* msdos */
                   || sfs.f_type == 0xEF53 /* ext2/3 */
                   || sfs.f_type == 0x72b6 /* jffs2 */
                   || sfs.f_type == 0x858458f6 /* ramfs */
                   || sfs.f_type == 0x5346544e /* ntfs */
                   || sfs.f_type == 0x3153464a /* jfs */
                   || sfs.f_type == 0x9123683e /* btrfs */
                   || sfs.f_type == 0x52654973 /* reiser3 */
                   || sfs.f_type == 0x01021994 /* tmpfs */
                   || sfs.f_type == 0x58465342 /* xfs */))
        w->timer.repeat = 0.; /* filesystem is local, kernel new enough */
      else
        w->timer.repeat = w->interval ? w->interval : NFS_STAT_INTERVAL; /* remote, use reduced frequency */
    }
  else
    {
      /* can't use inotify, continue to stat */
      w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;

      /* if path is not there, monitor some parent directory for speedup hints */
      /* note that exceeding the hardcoded path limit is not a correctness issue, */
      /* but an efficiency issue only */
      if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096)
        {
          char path [4096];
          strcpy (path, w->path);

          /* walk up the path one component at a time until a watchable
           * ancestor is found (or we hit the root) */
          do
            {
              int mask = IN_MASK_ADD | IN_DELETE_SELF | IN_MOVE_SELF
                       | (errno == EACCES ? IN_ATTRIB : IN_CREATE | IN_MOVED_TO);

              char *pend = strrchr (path, '/');

              if (!pend || pend == path)
                break;

              *pend = 0;
              w->wd = inotify_add_watch (fs_fd, path, mask);
            }
          while (w->wd < 0 && (errno == ENOENT || errno == EACCES));
        }
    }

  if (w->wd >= 0)
    wlist_add (&fs_hash [w->wd & ((EV_INOTIFY_HASHSIZE) - 1)].head, (WL)w);

  /* now re-arm timer, if required */
  /* the ref/unref dance keeps an already-unref'ed stat timer from
   * gaining a loop reference through ev_timer_again */
  if (ev_is_active (&w->timer)) ev_ref (EV_A);
  ev_timer_again (EV_A_ &w->timer);
  if (ev_is_active (&w->timer)) ev_unref (EV_A);
}
/* remove a stat watcher's inotify registration, if any */
static void noinline
infy_del (EV_P_ ev_stat *w)
{
  int slot;
  int wd = w->wd;

  if (wd < 0)
    return;

  w->wd = -2; /* mark as "no watch descriptor" */
  slot = wd & ((EV_INOTIFY_HASHSIZE) - 1);
  wlist_del (&fs_hash [slot].head, (WL)w);

  /* remove this watcher, if others are watching it, they will rearm */
  inotify_rm_watch (fs_fd, wd);
}
/* dispatch an inotify event to all stat watchers registered for the
 * given watch descriptor; slot < 0 (queue overflow) fans out to every
 * hash slot, wd == -1 matches every watcher */
static void noinline
infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev)
{
  if (slot < 0)
    /* overflow, need to check for all hash slots */
    for (slot = 0; slot < (EV_INOTIFY_HASHSIZE); ++slot)
      infy_wd (EV_A_ slot, wd, ev);
  else
    {
      WL w_;

      for (w_ = fs_hash [slot & ((EV_INOTIFY_HASHSIZE) - 1)].head; w_; )
        {
          ev_stat *w = (ev_stat *)w_;
          w_ = w_->next; /* lets us remove this watcher and all before it */

          if (w->wd == wd || wd == -1)
            {
              /* the kernel dropped the watch: re-register before stat'ing */
              if (ev->mask & (IN_IGNORED | IN_UNMOUNT | IN_DELETE_SELF))
                {
                  wlist_del (&fs_hash [slot & ((EV_INOTIFY_HASHSIZE) - 1)].head, (WL)w);
                  w->wd = -1;
                  infy_add (EV_A_ w); /* re-add, no matter what */
                }

              /* run the stat comparison and feed EV_STAT if changed */
              stat_timer_cb (EV_A_ &w->timer, 0);
            }
        }
    }
}
/* io callback for the inotify fd: drain available events and dispatch
 * each one to the watchers registered for its watch descriptor */
static void
infy_cb (EV_P_ ev_io *w, int revents)
{
  char buf [EV_INOTIFY_BUFSIZE];
  int ofs;
  int len = read (fs_fd, buf, sizeof (buf)); /* on error len < 0 and the loop is skipped */

  for (ofs = 0; ofs < len; )
    {
      struct inotify_event *ev = (struct inotify_event *)(buf + ofs);
      infy_wd (EV_A_ ev->wd, ev->wd, ev);
      /* events are variable-length: header plus ev->len name bytes */
      ofs += sizeof (struct inotify_event) + ev->len;
    }
}
  3439. inline_size void ecb_cold
  3440. ev_check_2625 (EV_P)
  3441. {
  3442. /* kernels < 2.6.25 are borked
  3443. * http://www.ussg.indiana.edu/hypermail/linux/kernel/0711.3/1208.html
  3444. */
  3445. if (ev_linux_version () < 0x020619)
  3446. return;
  3447. fs_2625 = 1;
  3448. }
/* create a new inotify fd, preferring the flag-taking inotify_init1 */
inline_size int
infy_newfd (void)
{
#if defined IN_CLOEXEC && defined IN_NONBLOCK
  int fd = inotify_init1 (IN_CLOEXEC | IN_NONBLOCK);
  if (fd >= 0)
    return fd;
#endif
  /* fallback for systems without inotify_init1 flags */
  return inotify_init ();
}
/* lazily initialise inotify support for this loop (fs_fd == -2 means
 * "not tried yet"); on success, start an internal io watcher on the fd */
inline_size void
infy_init (EV_P)
{
  if (fs_fd != -2)
    return;

  fs_fd = -1; /* assume failure until proven otherwise */

  ev_check_2625 (EV_A);

  fs_fd = infy_newfd ();

  if (fs_fd >= 0)
    {
      fd_intern (fs_fd);
      ev_io_init (&fs_w, infy_cb, fs_fd, EV_READ);
      ev_set_priority (&fs_w, EV_MAXPRI);
      ev_io_start (EV_A_ &fs_w);
      ev_unref (EV_A); /* internal watcher must not keep the loop alive */
    }
}
/* re-create inotify state after a fork: the old fd is useless in the
 * child, so open a fresh one and re-register every stat watcher */
inline_size void
infy_fork (EV_P)
{
  int slot;

  if (fs_fd < 0)
    return;

  ev_ref (EV_A); /* balance the ev_unref done at init time */
  ev_io_stop (EV_A_ &fs_w);
  close (fs_fd);
  fs_fd = infy_newfd ();

  if (fs_fd >= 0)
    {
      fd_intern (fs_fd);
      ev_io_set (&fs_w, fs_fd, EV_READ);
      ev_io_start (EV_A_ &fs_w);
      ev_unref (EV_A);
    }

  for (slot = 0; slot < (EV_INOTIFY_HASHSIZE); ++slot)
    {
      WL w_ = fs_hash [slot].head;
      fs_hash [slot].head = 0; /* detach the chain; infy_add re-hashes each watcher */

      while (w_)
        {
          ev_stat *w = (ev_stat *)w_;
          w_ = w_->next; /* lets us add this watcher */

          w->wd = -1;

          if (fs_fd >= 0)
            infy_add (EV_A_ w); /* re-add, no matter what */
          else
            {
              /* inotify unavailable now: fall back to pure stat polling */
              w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;
              if (ev_is_active (&w->timer)) ev_ref (EV_A);
              ev_timer_again (EV_A_ &w->timer);
              if (ev_is_active (&w->timer)) ev_unref (EV_A);
            }
        }
    }
}
#endif
  3515. #ifdef _WIN32
  3516. # define EV_LSTAT(p,b) _stati64 (p, b)
  3517. #else
  3518. # define EV_LSTAT(p,b) lstat (p, b)
  3519. #endif
  3520. void
  3521. ev_stat_stat (EV_P_ ev_stat *w) EV_THROW
  3522. {
  3523. if (lstat (w->path, &w->attr) < 0)
  3524. w->attr.st_nlink = 0;
  3525. else if (!w->attr.st_nlink)
  3526. w->attr.st_nlink = 1;
  3527. }
/* periodic (or inotify-triggered) re-stat of a watched path: compare the
 * fresh attributes against the previous ones field by field and feed an
 * EV_STAT event when anything changed */
static void noinline
stat_timer_cb (EV_P_ ev_timer *w_, int revents)
{
  /* recover the enclosing ev_stat from its embedded timer member */
  ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer));

  ev_statdata prev = w->attr;
  ev_stat_stat (EV_A_ w);

  /* memcmp doesn't work on netbsd, they.... do stuff to their struct stat */
  if (
    prev.st_dev      != w->attr.st_dev
    || prev.st_ino   != w->attr.st_ino
    || prev.st_mode  != w->attr.st_mode
    || prev.st_nlink != w->attr.st_nlink
    || prev.st_uid   != w->attr.st_uid
    || prev.st_gid   != w->attr.st_gid
    || prev.st_rdev  != w->attr.st_rdev
    || prev.st_size  != w->attr.st_size
    || prev.st_atime != w->attr.st_atime
    || prev.st_mtime != w->attr.st_mtime
    || prev.st_ctime != w->attr.st_ctime
  ) {
      /* we only update w->prev on actual differences */
      /* in case we test more often than invoke the callback, */
      /* to ensure that prev is always different to attr */
      w->prev = prev;

#if EV_USE_INOTIFY
      if (fs_fd >= 0)
        {
          /* the path may now be a different inode: re-register the watch */
          infy_del (EV_A_ w);
          infy_add (EV_A_ w);
          ev_stat_stat (EV_A_ w); /* avoid race... */
        }
#endif

      ev_feed_event (EV_A_ w, EV_STAT);
    }
}
/* activate a stat watcher: take an initial snapshot, set up the polling
 * timer, and prefer inotify over polling where available */
void
ev_stat_start (EV_P_ ev_stat *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  ev_stat_stat (EV_A_ w); /* baseline attributes for later comparison */

  /* enforce a sane lower bound on user-supplied polling intervals */
  if (w->interval < MIN_STAT_INTERVAL && w->interval)
    w->interval = MIN_STAT_INTERVAL;

  ev_timer_init (&w->timer, stat_timer_cb, 0., w->interval ? w->interval : DEF_STAT_INTERVAL);
  ev_set_priority (&w->timer, ev_priority (w));

#if EV_USE_INOTIFY
  infy_init (EV_A);

  if (fs_fd >= 0)
    infy_add (EV_A_ w);
  else
#endif
    {
      /* no inotify: rely on the timer, but don't let it hold the loop */
      ev_timer_again (EV_A_ &w->timer);
      ev_unref (EV_A);
    }

  ev_start (EV_A_ (W)w, 1);

  EV_FREQUENT_CHECK;
}
/* deactivate a stat watcher: drop its inotify registration (if any) and
 * its polling timer, restoring the loop reference the timer consumed */
void
ev_stat_stop (EV_P_ ev_stat *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

#if EV_USE_INOTIFY
  infy_del (EV_A_ w);
#endif

  if (ev_is_active (&w->timer))
    {
      ev_ref (EV_A); /* balance the unref done when the timer was armed */
      ev_timer_stop (EV_A_ &w->timer);
    }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
#endif
#if EV_IDLE_ENABLE
/* activate an idle watcher: append it to the per-priority idle array
 * and bump the global idle count used by idle_reify/ev_run */
void
ev_idle_start (EV_P_ ev_idle *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  pri_adjust (EV_A_ (W)w); /* priority selects the array, so clamp it first */

  EV_FREQUENT_CHECK;

  {
    /* the watcher's "active" value is its 1-based index in the array */
    int active = ++idlecnt [ABSPRI (w)];

    ++idleall;
    ev_start (EV_A_ (W)w, active);

    array_needsize (ev_idle *, idles [ABSPRI (w)], idlemax [ABSPRI (w)], active, EMPTY2);
    idles [ABSPRI (w)][active - 1] = w;
  }

  EV_FREQUENT_CHECK;
}
/* Stop an idle watcher: swap-with-last removal from its priority array,
 * then fix up the moved watcher's stored active index. */
void
ev_idle_stop (EV_P_ ev_idle *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    /* move the last watcher of this priority into the vacated slot */
    idles [ABSPRI (w)][active - 1] = idles [ABSPRI (w)][--idlecnt [ABSPRI (w)]];
    ev_active (idles [ABSPRI (w)][active - 1]) = active;

    ev_stop (EV_A_ (W)w);
    --idleall;
  }

  EV_FREQUENT_CHECK;
}
  3638. #endif
  3639. #if EV_PREPARE_ENABLE
  3640. void
  3641. ev_prepare_start (EV_P_ ev_prepare *w) EV_THROW
  3642. {
  3643. if (expect_false (ev_is_active (w)))
  3644. return;
  3645. EV_FREQUENT_CHECK;
  3646. ev_start (EV_A_ (W)w, ++preparecnt);
  3647. array_needsize (ev_prepare *, prepares, preparemax, preparecnt, EMPTY2);
  3648. prepares [preparecnt - 1] = w;
  3649. EV_FREQUENT_CHECK;
  3650. }
  3651. void
  3652. ev_prepare_stop (EV_P_ ev_prepare *w) EV_THROW
  3653. {
  3654. clear_pending (EV_A_ (W)w);
  3655. if (expect_false (!ev_is_active (w)))
  3656. return;
  3657. EV_FREQUENT_CHECK;
  3658. {
  3659. int active = ev_active (w);
  3660. prepares [active - 1] = prepares [--preparecnt];
  3661. ev_active (prepares [active - 1]) = active;
  3662. }
  3663. ev_stop (EV_A_ (W)w);
  3664. EV_FREQUENT_CHECK;
  3665. }
  3666. #endif
  3667. #if EV_CHECK_ENABLE
  3668. void
  3669. ev_check_start (EV_P_ ev_check *w) EV_THROW
  3670. {
  3671. if (expect_false (ev_is_active (w)))
  3672. return;
  3673. EV_FREQUENT_CHECK;
  3674. ev_start (EV_A_ (W)w, ++checkcnt);
  3675. array_needsize (ev_check *, checks, checkmax, checkcnt, EMPTY2);
  3676. checks [checkcnt - 1] = w;
  3677. EV_FREQUENT_CHECK;
  3678. }
  3679. void
  3680. ev_check_stop (EV_P_ ev_check *w) EV_THROW
  3681. {
  3682. clear_pending (EV_A_ (W)w);
  3683. if (expect_false (!ev_is_active (w)))
  3684. return;
  3685. EV_FREQUENT_CHECK;
  3686. {
  3687. int active = ev_active (w);
  3688. checks [active - 1] = checks [--checkcnt];
  3689. ev_active (checks [active - 1]) = active;
  3690. }
  3691. ev_stop (EV_A_ (W)w);
  3692. EV_FREQUENT_CHECK;
  3693. }
  3694. #endif
  3695. #if EV_EMBED_ENABLE
/* Run one non-blocking iteration of the embedded loop to drain its
 * pending events. */
void noinline
ev_embed_sweep (EV_P_ ev_embed *w) EV_THROW
{
  ev_run (w->other, EVRUN_NOWAIT);
}
/* I/O callback for the embedded loop's backend fd: if the user supplied a
 * callback on the embed watcher, notify it with EV_EMBED; otherwise sweep
 * the embedded loop ourselves. */
static void
embed_io_cb (EV_P_ ev_io *io, int revents)
{
  /* recover the enclosing ev_embed from its io member */
  ev_embed *w = (ev_embed *)(((char *)io) - offsetof (ev_embed, io));

  if (ev_cb (w))
    ev_feed_event (EV_A_ (W)w, EV_EMBED);
  else
    ev_run (w->other, EVRUN_NOWAIT);
}
/* Prepare callback: before the outer loop blocks, flush any pending fd
 * changes of the embedded loop so its backend fd reflects reality. */
static void
embed_prepare_cb (EV_P_ ev_prepare *prepare, int revents)
{
  /* recover the enclosing ev_embed from its prepare member */
  ev_embed *w = (ev_embed *)(((char *)prepare) - offsetof (ev_embed, prepare));

  {
    /* shadow the loop variable: the code below operates on the embedded loop */
    EV_P = w->other;

    while (fdchangecnt)
      {
        fd_reify (EV_A);
        ev_run (EV_A_ EVRUN_NOWAIT);
      }
  }
}
/* Fork callback: after a fork, stop embedding, let the embedded loop
 * re-create its kernel state, drain it once, then re-start embedding. */
static void
embed_fork_cb (EV_P_ ev_fork *fork_w, int revents)
{
  /* recover the enclosing ev_embed from its fork member */
  ev_embed *w = (ev_embed *)(((char *)fork_w) - offsetof (ev_embed, fork));

  ev_embed_stop (EV_A_ w);

  {
    /* shadow the loop variable: operate on the embedded loop */
    EV_P = w->other;

    ev_loop_fork (EV_A);
    ev_run (EV_A_ EVRUN_NOWAIT);
  }

  ev_embed_start (EV_A_ w);
}
#if 0
/* currently unused: idle callback that would stop its own idle watcher */
static void
embed_idle_cb (EV_P_ ev_idle *idle, int revents)
{
  ev_idle_stop (EV_A_ idle);
}
#endif
/* Start an embed watcher: register an io watcher on the embedded loop's
 * backend fd plus internal prepare and fork watchers in the outer loop. */
void
ev_embed_start (EV_P_ ev_embed *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  {
    /* shadow the loop variable: inspect the embedded loop's backend */
    EV_P = w->other;
    assert (("libev: loop to be embedded is not embeddable", backend & ev_embeddable_backends ()));
    ev_io_init (&w->io, embed_io_cb, backend_fd, EV_READ);
  }

  EV_FREQUENT_CHECK;

  ev_set_priority (&w->io, ev_priority (w));
  ev_io_start (EV_A_ &w->io);

  ev_prepare_init (&w->prepare, embed_prepare_cb);
  /* run after all user prepare watchers */
  ev_set_priority (&w->prepare, EV_MINPRI);
  ev_prepare_start (EV_A_ &w->prepare);

  ev_fork_init (&w->fork, embed_fork_cb);
  ev_fork_start (EV_A_ &w->fork);

  /*ev_idle_init (&w->idle, embed_idle_cb);*/

  ev_start (EV_A_ (W)w, 1);

  EV_FREQUENT_CHECK;
}
/* Stop an embed watcher and all three of its internal helper watchers. */
void
ev_embed_stop (EV_P_ ev_embed *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  ev_io_stop      (EV_A_ &w->io);
  ev_prepare_stop (EV_A_ &w->prepare);
  ev_fork_stop    (EV_A_ &w->fork);

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
  3777. #endif
  3778. #if EV_FORK_ENABLE
  3779. void
  3780. ev_fork_start (EV_P_ ev_fork *w) EV_THROW
  3781. {
  3782. if (expect_false (ev_is_active (w)))
  3783. return;
  3784. EV_FREQUENT_CHECK;
  3785. ev_start (EV_A_ (W)w, ++forkcnt);
  3786. array_needsize (ev_fork *, forks, forkmax, forkcnt, EMPTY2);
  3787. forks [forkcnt - 1] = w;
  3788. EV_FREQUENT_CHECK;
  3789. }
  3790. void
  3791. ev_fork_stop (EV_P_ ev_fork *w) EV_THROW
  3792. {
  3793. clear_pending (EV_A_ (W)w);
  3794. if (expect_false (!ev_is_active (w)))
  3795. return;
  3796. EV_FREQUENT_CHECK;
  3797. {
  3798. int active = ev_active (w);
  3799. forks [active - 1] = forks [--forkcnt];
  3800. ev_active (forks [active - 1]) = active;
  3801. }
  3802. ev_stop (EV_A_ (W)w);
  3803. EV_FREQUENT_CHECK;
  3804. }
  3805. #endif
  3806. #if EV_CLEANUP_ENABLE
  3807. void
  3808. ev_cleanup_start (EV_P_ ev_cleanup *w) EV_THROW
  3809. {
  3810. if (expect_false (ev_is_active (w)))
  3811. return;
  3812. EV_FREQUENT_CHECK;
  3813. ev_start (EV_A_ (W)w, ++cleanupcnt);
  3814. array_needsize (ev_cleanup *, cleanups, cleanupmax, cleanupcnt, EMPTY2);
  3815. cleanups [cleanupcnt - 1] = w;
  3816. /* cleanup watchers should never keep a refcount on the loop */
  3817. ev_unref (EV_A);
  3818. EV_FREQUENT_CHECK;
  3819. }
  3820. void
  3821. ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_THROW
  3822. {
  3823. clear_pending (EV_A_ (W)w);
  3824. if (expect_false (!ev_is_active (w)))
  3825. return;
  3826. EV_FREQUENT_CHECK;
  3827. ev_ref (EV_A);
  3828. {
  3829. int active = ev_active (w);
  3830. cleanups [active - 1] = cleanups [--cleanupcnt];
  3831. ev_active (cleanups [active - 1]) = active;
  3832. }
  3833. ev_stop (EV_A_ (W)w);
  3834. EV_FREQUENT_CHECK;
  3835. }
  3836. #endif
  3837. #if EV_ASYNC_ENABLE
  3838. void
  3839. ev_async_start (EV_P_ ev_async *w) EV_THROW
  3840. {
  3841. if (expect_false (ev_is_active (w)))
  3842. return;
  3843. w->sent = 0;
  3844. evpipe_init (EV_A);
  3845. EV_FREQUENT_CHECK;
  3846. ev_start (EV_A_ (W)w, ++asynccnt);
  3847. array_needsize (ev_async *, asyncs, asyncmax, asynccnt, EMPTY2);
  3848. asyncs [asynccnt - 1] = w;
  3849. EV_FREQUENT_CHECK;
  3850. }
  3851. void
  3852. ev_async_stop (EV_P_ ev_async *w) EV_THROW
  3853. {
  3854. clear_pending (EV_A_ (W)w);
  3855. if (expect_false (!ev_is_active (w)))
  3856. return;
  3857. EV_FREQUENT_CHECK;
  3858. {
  3859. int active = ev_active (w);
  3860. asyncs [active - 1] = asyncs [--asynccnt];
  3861. ev_active (asyncs [active - 1]) = active;
  3862. }
  3863. ev_stop (EV_A_ (W)w);
  3864. EV_FREQUENT_CHECK;
  3865. }
/* Signal an async watcher, possibly from another thread: mark it as sent,
 * then wake the loop through the event pipe.  The flag must be set before
 * the pipe write so the loop thread observes it when woken. */
void
ev_async_send (EV_P_ ev_async *w) EV_THROW
{
  w->sent = 1;
  evpipe_write (EV_A_ &async_pending);
}
  3872. #endif
  3873. /*****************************************************************************/
/* Combined one-shot state for ev_once: an io watcher, a timeout timer,
 * and the user callback to invoke when either fires. */
struct ev_once
{
  ev_io io;                          /* optional fd watcher */
  ev_timer to;                       /* optional timeout watcher */
  void (*cb)(int revents, void *arg); /* user callback */
  void *arg;                          /* user data passed to cb */
};
/* Common completion path for ev_once: stop both watchers, free the state,
 * and only then invoke the user callback (cb/arg are copied out first
 * because the callback may re-enter ev_once or the state is already freed). */
static void
once_cb (EV_P_ struct ev_once *once, int revents)
{
  void (*cb)(int revents, void *arg) = once->cb;
  void *arg = once->arg;

  ev_io_stop    (EV_A_ &once->io);
  ev_timer_stop (EV_A_ &once->to);
  ev_free (once);

  cb (revents, arg);
}
/* io trampoline for ev_once: recover the ev_once from its io member and
 * merge in any pending timer events before completing. */
static void
once_cb_io (EV_P_ ev_io *w, int revents)
{
  struct ev_once *once = (struct ev_once *)(((char *)w) - offsetof (struct ev_once, io));

  once_cb (EV_A_ once, revents | ev_clear_pending (EV_A_ &once->to));
}
/* timer trampoline for ev_once: recover the ev_once from its to member and
 * merge in any pending io events before completing. */
static void
once_cb_to (EV_P_ ev_timer *w, int revents)
{
  struct ev_once *once = (struct ev_once *)(((char *)w) - offsetof (struct ev_once, to));

  once_cb (EV_A_ once, revents | ev_clear_pending (EV_A_ &once->io));
}
/* One-shot convenience: watch fd for events and/or wait for timeout, then
 * call cb exactly once with the triggering revents.  A negative fd skips
 * the io watcher; a negative timeout skips the timer.  On allocation
 * failure cb is invoked immediately with EV_ERROR set. */
void
ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) EV_THROW
{
  struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once));

  if (expect_false (!once))
    {
      /* report failure through the callback rather than returning an error */
      cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMER, arg);
      return;
    }

  once->cb  = cb;
  once->arg = arg;

  ev_init (&once->io, once_cb_io);
  if (fd >= 0)
    {
      ev_io_set (&once->io, fd, events);
      ev_io_start (EV_A_ &once->io);
    }

  ev_init (&once->to, once_cb_to);
  if (timeout >= 0.)
    {
      ev_timer_set (&once->to, timeout, 0.);
      ev_timer_start (EV_A_ &once->to);
    }
}
  3927. /*****************************************************************************/
  3928. #if EV_WALK_ENABLE
/* Walk all active watchers of the requested types, invoking cb for each.
 * Internal watchers (the event pipe, inotify, embed helpers) are skipped
 * or reported under their owning composite watcher's type (EV_EMBED,
 * EV_STAT).  cb must not start or stop watchers other than the one
 * passed to it while the walk is in progress. */
void ecb_cold
ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) EV_THROW
{
  int i, j;
  ev_watcher_list *wl, *wn;

  if (types & (EV_IO | EV_EMBED))
    for (i = 0; i < anfdmax; ++i)
      for (wl = anfds [i].head; wl; )
        {
          /* save next first: cb may stop (unlink) the current watcher */
          wn = wl->next;

#if EV_EMBED_ENABLE
          /* embed io helpers are reported as their enclosing ev_embed */
          if (ev_cb ((ev_io *)wl) == embed_io_cb)
            {
              if (types & EV_EMBED)
                cb (EV_A_ EV_EMBED, ((char *)wl) - offsetof (struct ev_embed, io));
            }
          else
#endif
#if EV_USE_INOTIFY
          /* the internal inotify watcher is never reported */
          if (ev_cb ((ev_io *)wl) == infy_cb)
            ;
          else
#endif
          /* the internal wakeup pipe watcher is never reported */
          if ((ev_io *)wl != &pipe_w)
            if (types & EV_IO)
              cb (EV_A_ EV_IO, wl);

          wl = wn;
        }

  if (types & (EV_TIMER | EV_STAT))
    for (i = timercnt + HEAP0; i-- > HEAP0; )
#if EV_STAT_ENABLE
      /*TODO: timer is not always active*/
      /* stat poll timers are reported as their enclosing ev_stat */
      if (ev_cb ((ev_timer *)ANHE_w (timers [i])) == stat_timer_cb)
        {
          if (types & EV_STAT)
            cb (EV_A_ EV_STAT, ((char *)ANHE_w (timers [i])) - offsetof (struct ev_stat, timer));
        }
      else
#endif
      if (types & EV_TIMER)
        cb (EV_A_ EV_TIMER, ANHE_w (timers [i]));

#if EV_PERIODIC_ENABLE
  if (types & EV_PERIODIC)
    for (i = periodiccnt + HEAP0; i-- > HEAP0; )
      cb (EV_A_ EV_PERIODIC, ANHE_w (periodics [i]));
#endif

#if EV_IDLE_ENABLE
  if (types & EV_IDLE)
    for (j = NUMPRI; j--; )
      for (i = idlecnt [j]; i--; )
        cb (EV_A_ EV_IDLE, idles [j][i]);
#endif

#if EV_FORK_ENABLE
  if (types & EV_FORK)
    for (i = forkcnt; i--; )
      /* embed fork helpers are internal and not reported */
      if (ev_cb (forks [i]) != embed_fork_cb)
        cb (EV_A_ EV_FORK, forks [i]);
#endif

#if EV_ASYNC_ENABLE
  if (types & EV_ASYNC)
    for (i = asynccnt; i--; )
      cb (EV_A_ EV_ASYNC, asyncs [i]);
#endif

#if EV_PREPARE_ENABLE
  if (types & EV_PREPARE)
    for (i = preparecnt; i--; )
# if EV_EMBED_ENABLE
      /* embed prepare helpers are internal and not reported */
      if (ev_cb (prepares [i]) != embed_prepare_cb)
# endif
        cb (EV_A_ EV_PREPARE, prepares [i]);
#endif

#if EV_CHECK_ENABLE
  if (types & EV_CHECK)
    for (i = checkcnt; i--; )
      cb (EV_A_ EV_CHECK, checks [i]);
#endif

#if EV_SIGNAL_ENABLE
  if (types & EV_SIGNAL)
    for (i = 0; i < EV_NSIG - 1; ++i)
      for (wl = signals [i].head; wl; )
        {
          wn = wl->next;
          cb (EV_A_ EV_SIGNAL, wl);
          wl = wn;
        }
#endif

#if EV_CHILD_ENABLE
  if (types & EV_CHILD)
    for (i = (EV_PID_HASHSIZE); i--; )
      for (wl = childs [i]; wl; )
        {
          wn = wl->next;
          cb (EV_A_ EV_CHILD, wl);
          wl = wn;
        }
#endif
  /* EV_STAT  0x00001000 - stat data changed */
  /* EV_EMBED 0x00010000 - embedded event loop needs sweep */
}
  4028. #endif
  4029. #if EV_MULTIPLICITY
  4030. #include "ev_wrap.h"
  4031. #endif