You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

4897 lines
120 KiB

11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
10 years ago
11 years ago
  1. /*
  2. * libev event processing core, watcher management
  3. *
  4. * Copyright (c) 2007,2008,2009,2010,2011,2012,2013 Marc Alexander Lehmann <libev@schmorp.de>
  5. * All rights reserved.
  6. *
  7. * Redistribution and use in source and binary forms, with or without modifica-
  8. * tion, are permitted provided that the following conditions are met:
  9. *
  10. * 1. Redistributions of source code must retain the above copyright notice,
  11. * this list of conditions and the following disclaimer.
  12. *
  13. * 2. Redistributions in binary form must reproduce the above copyright
  14. * notice, this list of conditions and the following disclaimer in the
  15. * documentation and/or other materials provided with the distribution.
  16. *
  17. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
  18. * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
  19. * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
  20. * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
  21. * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  22. * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  23. * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  24. * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
  25. * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
  26. * OF THE POSSIBILITY OF SUCH DAMAGE.
  27. *
  28. * Alternatively, the contents of this file may be used under the terms of
  29. * the GNU General Public License ("GPL") version 2 or any later version,
  30. * in which case the provisions of the GPL are applicable instead of
  31. * the above. If you wish to allow the use of your version of this file
  32. * only under the terms of the GPL and not to allow others to use your
  33. * version of this file under the BSD license, indicate your decision
  34. * by deleting the provisions above and replace them with the notice
  35. * and other provisions required by the GPL. If you do not delete the
  36. * provisions above, a recipient may use your version of this file under
  37. * either the BSD or the GPL.
  38. */
  39. /* this big block deduces configuration from config.h */
  40. #ifndef EV_STANDALONE
  41. # ifdef EV_CONFIG_H
  42. # include EV_CONFIG_H
  43. # else
  44. # include "config.h"
  45. # endif
  46. # if HAVE_FLOOR
  47. # ifndef EV_USE_FLOOR
  48. # define EV_USE_FLOOR 1
  49. # endif
  50. # endif
  51. # if HAVE_CLOCK_SYSCALL
  52. # ifndef EV_USE_CLOCK_SYSCALL
  53. # define EV_USE_CLOCK_SYSCALL 1
  54. # ifndef EV_USE_REALTIME
  55. # define EV_USE_REALTIME 0
  56. # endif
  57. # ifndef EV_USE_MONOTONIC
  58. # define EV_USE_MONOTONIC 1
  59. # endif
  60. # endif
  61. # elif !defined EV_USE_CLOCK_SYSCALL
  62. # define EV_USE_CLOCK_SYSCALL 0
  63. # endif
  64. # if HAVE_CLOCK_GETTIME
  65. # ifndef EV_USE_MONOTONIC
  66. # define EV_USE_MONOTONIC 1
  67. # endif
  68. # ifndef EV_USE_REALTIME
  69. # define EV_USE_REALTIME 0
  70. # endif
  71. # else
  72. # ifndef EV_USE_MONOTONIC
  73. # define EV_USE_MONOTONIC 0
  74. # endif
  75. # ifndef EV_USE_REALTIME
  76. # define EV_USE_REALTIME 0
  77. # endif
  78. # endif
  79. # if HAVE_NANOSLEEP
  80. # ifndef EV_USE_NANOSLEEP
  81. # define EV_USE_NANOSLEEP EV_FEATURE_OS
  82. # endif
  83. # else
  84. # undef EV_USE_NANOSLEEP
  85. # define EV_USE_NANOSLEEP 0
  86. # endif
  87. # if HAVE_SELECT && HAVE_SYS_SELECT_H
  88. # ifndef EV_USE_SELECT
  89. # define EV_USE_SELECT EV_FEATURE_BACKENDS
  90. # endif
  91. # else
  92. # undef EV_USE_SELECT
  93. # define EV_USE_SELECT 0
  94. # endif
  95. # if HAVE_POLL && HAVE_POLL_H
  96. # ifndef EV_USE_POLL
  97. # define EV_USE_POLL EV_FEATURE_BACKENDS
  98. # endif
  99. # else
  100. # undef EV_USE_POLL
  101. # define EV_USE_POLL 0
  102. # endif
  103. # if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H
  104. # ifndef EV_USE_EPOLL
  105. # define EV_USE_EPOLL EV_FEATURE_BACKENDS
  106. # endif
  107. # else
  108. # undef EV_USE_EPOLL
  109. # define EV_USE_EPOLL 0
  110. # endif
  111. # if HAVE_KQUEUE && HAVE_SYS_EVENT_H
  112. # ifndef EV_USE_KQUEUE
  113. # define EV_USE_KQUEUE EV_FEATURE_BACKENDS
  114. # endif
  115. # else
  116. # undef EV_USE_KQUEUE
  117. # define EV_USE_KQUEUE 0
  118. # endif
  119. # if HAVE_PORT_H && HAVE_PORT_CREATE
  120. # ifndef EV_USE_PORT
  121. # define EV_USE_PORT EV_FEATURE_BACKENDS
  122. # endif
  123. # else
  124. # undef EV_USE_PORT
  125. # define EV_USE_PORT 0
  126. # endif
  127. # if HAVE_INOTIFY_INIT && HAVE_SYS_INOTIFY_H
  128. # ifndef EV_USE_INOTIFY
  129. # define EV_USE_INOTIFY EV_FEATURE_OS
  130. # endif
  131. # else
  132. # undef EV_USE_INOTIFY
  133. # define EV_USE_INOTIFY 0
  134. # endif
  135. # if HAVE_SIGNALFD && HAVE_SYS_SIGNALFD_H
  136. # ifndef EV_USE_SIGNALFD
  137. # define EV_USE_SIGNALFD EV_FEATURE_OS
  138. # endif
  139. # else
  140. # undef EV_USE_SIGNALFD
  141. # define EV_USE_SIGNALFD 0
  142. # endif
  143. # if HAVE_EVENTFD
  144. # ifndef EV_USE_EVENTFD
  145. # define EV_USE_EVENTFD EV_FEATURE_OS
  146. # endif
  147. # else
  148. # undef EV_USE_EVENTFD
  149. # define EV_USE_EVENTFD 0
  150. # endif
  151. #endif
  152. #include <stdlib.h>
  153. #include <string.h>
  154. #include <fcntl.h>
  155. #include <stddef.h>
  156. #include <stdio.h>
  157. #include <assert.h>
  158. #include <errno.h>
  159. #include <sys/types.h>
  160. #include <time.h>
  161. #include <limits.h>
  162. #include <signal.h>
  163. #ifdef EV_H
  164. # include EV_H
  165. #else
  166. # include "ev.h"
  167. #endif
  168. #if EV_NO_THREADS
  169. # undef EV_NO_SMP
  170. # define EV_NO_SMP 1
  171. # undef ECB_NO_THREADS
  172. # define ECB_NO_THREADS 1
  173. #endif
  174. #if EV_NO_SMP
  175. # undef EV_NO_SMP
  176. # define ECB_NO_SMP 1
  177. #endif
  178. #ifndef _WIN32
  179. # include <sys/time.h>
  180. # include <sys/wait.h>
  181. # include <unistd.h>
  182. #else
  183. # include <io.h>
  184. # define WIN32_LEAN_AND_MEAN
  185. # include <winsock2.h>
  186. # include <windows.h>
  187. # ifndef EV_SELECT_IS_WINSOCKET
  188. # define EV_SELECT_IS_WINSOCKET 1
  189. # endif
  190. # undef EV_AVOID_STDIO
  191. #endif
  192. /* OS X, in its infinite idiocy, actually HARDCODES
  193. * a limit of 1024 into their select. Where people have brains,
  194. * OS X engineers apparently have a vacuum. Or maybe they were
  195. * ordered to have a vacuum, or they do anything for money.
  196. * This might help. Or not.
  197. */
  198. #define _DARWIN_UNLIMITED_SELECT 1
  199. /* this block tries to deduce configuration from header-defined symbols and defaults */
  200. /* try to deduce the maximum number of signals on this platform */
  201. #if defined EV_NSIG
  202. /* use what's provided */
  203. #elif defined NSIG
  204. # define EV_NSIG (NSIG)
  205. #elif defined _NSIG
  206. # define EV_NSIG (_NSIG)
  207. #elif defined SIGMAX
  208. # define EV_NSIG (SIGMAX+1)
  209. #elif defined SIG_MAX
  210. # define EV_NSIG (SIG_MAX+1)
  211. #elif defined _SIG_MAX
  212. # define EV_NSIG (_SIG_MAX+1)
  213. #elif defined MAXSIG
  214. # define EV_NSIG (MAXSIG+1)
  215. #elif defined MAX_SIG
  216. # define EV_NSIG (MAX_SIG+1)
  217. #elif defined SIGARRAYSIZE
  218. # define EV_NSIG (SIGARRAYSIZE) /* Assume ary[SIGARRAYSIZE] */
  219. #elif defined _sys_nsig
  220. # define EV_NSIG (_sys_nsig) /* Solaris 2.5 */
  221. #else
  222. # define EV_NSIG (8 * sizeof (sigset_t) + 1)
  223. #endif
  224. #ifndef EV_USE_FLOOR
  225. # define EV_USE_FLOOR 0
  226. #endif
  227. #ifndef EV_USE_CLOCK_SYSCALL
  228. # if __linux && __GLIBC__ == 2 && __GLIBC_MINOR__ < 17
  229. # define EV_USE_CLOCK_SYSCALL EV_FEATURE_OS
  230. # else
  231. # define EV_USE_CLOCK_SYSCALL 0
  232. # endif
  233. #endif
  234. #if !(_POSIX_TIMERS > 0)
  235. # ifndef EV_USE_MONOTONIC
  236. # define EV_USE_MONOTONIC 0
  237. # endif
  238. # ifndef EV_USE_REALTIME
  239. # define EV_USE_REALTIME 0
  240. # endif
  241. #endif
  242. #ifndef EV_USE_MONOTONIC
  243. # if defined _POSIX_MONOTONIC_CLOCK && _POSIX_MONOTONIC_CLOCK >= 0
  244. # define EV_USE_MONOTONIC EV_FEATURE_OS
  245. # else
  246. # define EV_USE_MONOTONIC 0
  247. # endif
  248. #endif
  249. #ifndef EV_USE_REALTIME
  250. # define EV_USE_REALTIME !EV_USE_CLOCK_SYSCALL
  251. #endif
  252. #ifndef EV_USE_NANOSLEEP
  253. # if _POSIX_C_SOURCE >= 199309L
  254. # define EV_USE_NANOSLEEP EV_FEATURE_OS
  255. # else
  256. # define EV_USE_NANOSLEEP 0
  257. # endif
  258. #endif
  259. #ifndef EV_USE_SELECT
  260. # define EV_USE_SELECT EV_FEATURE_BACKENDS
  261. #endif
  262. #ifndef EV_USE_POLL
  263. # ifdef _WIN32
  264. # define EV_USE_POLL 0
  265. # else
  266. # define EV_USE_POLL EV_FEATURE_BACKENDS
  267. # endif
  268. #endif
  269. #ifndef EV_USE_EPOLL
  270. # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
  271. # define EV_USE_EPOLL EV_FEATURE_BACKENDS
  272. # else
  273. # define EV_USE_EPOLL 0
  274. # endif
  275. #endif
  276. #ifndef EV_USE_KQUEUE
  277. # define EV_USE_KQUEUE 0
  278. #endif
  279. #ifndef EV_USE_PORT
  280. # define EV_USE_PORT 0
  281. #endif
  282. #ifndef EV_USE_INOTIFY
  283. # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
  284. # define EV_USE_INOTIFY EV_FEATURE_OS
  285. # else
  286. # define EV_USE_INOTIFY 0
  287. # endif
  288. #endif
  289. #ifndef EV_PID_HASHSIZE
  290. # define EV_PID_HASHSIZE EV_FEATURE_DATA ? 16 : 1
  291. #endif
  292. #ifndef EV_INOTIFY_HASHSIZE
  293. # define EV_INOTIFY_HASHSIZE EV_FEATURE_DATA ? 16 : 1
  294. #endif
  295. #ifndef EV_USE_EVENTFD
  296. # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
  297. # define EV_USE_EVENTFD EV_FEATURE_OS
  298. # else
  299. # define EV_USE_EVENTFD 0
  300. # endif
  301. #endif
  302. #ifndef EV_USE_SIGNALFD
  303. # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
  304. # define EV_USE_SIGNALFD EV_FEATURE_OS
  305. # else
  306. # define EV_USE_SIGNALFD 0
  307. # endif
  308. #endif
  309. #if 0 /* debugging */
  310. # define EV_VERIFY 3
  311. # define EV_USE_4HEAP 1
  312. # define EV_HEAP_CACHE_AT 1
  313. #endif
  314. #ifndef EV_VERIFY
  315. # define EV_VERIFY (EV_FEATURE_API ? 1 : 0)
  316. #endif
  317. #ifndef EV_USE_4HEAP
  318. # define EV_USE_4HEAP EV_FEATURE_DATA
  319. #endif
  320. #ifndef EV_HEAP_CACHE_AT
  321. # define EV_HEAP_CACHE_AT EV_FEATURE_DATA
  322. #endif
  323. #ifdef ANDROID
  324. /* supposedly, android doesn't typedef fd_mask */
  325. # undef EV_USE_SELECT
  326. # define EV_USE_SELECT 0
  327. /* supposedly, we need to include syscall.h, not sys/syscall.h, so just disable */
  328. # undef EV_USE_CLOCK_SYSCALL
  329. # define EV_USE_CLOCK_SYSCALL 0
  330. #endif
  331. /* aix's poll.h seems to cause lots of trouble */
  332. #ifdef _AIX
  333. /* AIX has a completely broken poll.h header */
  334. # undef EV_USE_POLL
  335. # define EV_USE_POLL 0
  336. #endif
  337. /* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */
  338. /* which makes programs even slower. might work on other unices, too. */
  339. #if EV_USE_CLOCK_SYSCALL
  340. # include <sys/syscall.h>
  341. # ifdef SYS_clock_gettime
  342. # define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts))
  343. # undef EV_USE_MONOTONIC
  344. # define EV_USE_MONOTONIC 1
  345. # else
  346. # undef EV_USE_CLOCK_SYSCALL
  347. # define EV_USE_CLOCK_SYSCALL 0
  348. # endif
  349. #endif
  350. /* this block fixes any misconfiguration where we know we run into trouble otherwise */
  351. #ifndef CLOCK_MONOTONIC
  352. # undef EV_USE_MONOTONIC
  353. # define EV_USE_MONOTONIC 0
  354. #endif
  355. #ifndef CLOCK_REALTIME
  356. # undef EV_USE_REALTIME
  357. # define EV_USE_REALTIME 0
  358. #endif
  359. #if !EV_STAT_ENABLE
  360. # undef EV_USE_INOTIFY
  361. # define EV_USE_INOTIFY 0
  362. #endif
  363. #if !EV_USE_NANOSLEEP
  364. /* hp-ux has it in sys/time.h, which we unconditionally include above */
  365. # if !defined _WIN32 && !defined __hpux
  366. # include <sys/select.h>
  367. # endif
  368. #endif
  369. #if EV_USE_INOTIFY
  370. # include <sys/statfs.h>
  371. # include <sys/inotify.h>
  372. /* some very old inotify.h headers don't have IN_DONT_FOLLOW */
  373. # ifndef IN_DONT_FOLLOW
  374. # undef EV_USE_INOTIFY
  375. # define EV_USE_INOTIFY 0
  376. # endif
  377. #endif
  378. #if EV_USE_EVENTFD
  379. /* our minimum requirement is glibc 2.7 which has the stub, but not the header */
  380. # include <stdint.h>
  381. # ifndef EFD_NONBLOCK
  382. # define EFD_NONBLOCK O_NONBLOCK
  383. # endif
  384. # ifndef EFD_CLOEXEC
  385. # ifdef O_CLOEXEC
  386. # define EFD_CLOEXEC O_CLOEXEC
  387. # else
  388. # define EFD_CLOEXEC 02000000
  389. # endif
  390. # endif
  391. EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
  392. #endif
  393. #if EV_USE_SIGNALFD
  394. /* our minimum requirement is glibc 2.7 which has the stub, but not the header */
  395. # include <stdint.h>
  396. # ifndef SFD_NONBLOCK
  397. # define SFD_NONBLOCK O_NONBLOCK
  398. # endif
  399. # ifndef SFD_CLOEXEC
  400. # ifdef O_CLOEXEC
  401. # define SFD_CLOEXEC O_CLOEXEC
  402. # else
  403. # define SFD_CLOEXEC 02000000
  404. # endif
  405. # endif
  406. EV_CPP (extern "C") int signalfd (int fd, const sigset_t *mask, int flags);
/* minimal stand-in for the kernel's struct signalfd_siginfo:
 * only the signal number is declared explicitly; the rest is padding
 * so the struct matches the kernel's fixed 128-byte record size
 * (presumably so reads from the signalfd return whole records -
 * NOTE(review): confirm against <sys/signalfd.h>) */
struct signalfd_siginfo
{
uint32_t ssi_signo;                  /* the delivered signal number */
char pad[128 - sizeof (uint32_t)];   /* pad total size to 128 bytes */
};
  412. #endif
  413. /**/
  414. #if EV_VERIFY >= 3
  415. # define EV_FREQUENT_CHECK ev_verify (EV_A)
  416. #else
  417. # define EV_FREQUENT_CHECK do { } while (0)
  418. #endif
  419. /*
  420. * This is used to work around floating point rounding problems.
  421. * This value is good at least till the year 4000.
  422. */
  423. #define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */
  424. /*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */
  425. #define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
  426. #define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
  427. #define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
  428. #define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
  429. /* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
  430. /* ECB.H BEGIN */
  431. /*
  432. * libecb - http://software.schmorp.de/pkg/libecb
  433. *
  434. * Copyright (©) 2009-2014 Marc Alexander Lehmann <libecb@schmorp.de>
  435. * Copyright (©) 2011 Emanuele Giaquinta
  436. * All rights reserved.
  437. *
  438. * Redistribution and use in source and binary forms, with or without modifica-
  439. * tion, are permitted provided that the following conditions are met:
  440. *
  441. * 1. Redistributions of source code must retain the above copyright notice,
  442. * this list of conditions and the following disclaimer.
  443. *
  444. * 2. Redistributions in binary form must reproduce the above copyright
  445. * notice, this list of conditions and the following disclaimer in the
  446. * documentation and/or other materials provided with the distribution.
  447. *
  448. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
  449. * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
  450. * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
  451. * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
  452. * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  453. * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  454. * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  455. * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
  456. * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
  457. * OF THE POSSIBILITY OF SUCH DAMAGE.
  458. *
  459. * Alternatively, the contents of this file may be used under the terms of
  460. * the GNU General Public License ("GPL") version 2 or any later version,
  461. * in which case the provisions of the GPL are applicable instead of
  462. * the above. If you wish to allow the use of your version of this file
  463. * only under the terms of the GPL and not to allow others to use your
  464. * version of this file under the BSD license, indicate your decision
  465. * by deleting the provisions above and replace them with the notice
  466. * and other provisions required by the GPL. If you do not delete the
  467. * provisions above, a recipient may use your version of this file under
  468. * either the BSD or the GPL.
  469. */
  470. #ifndef ECB_H
  471. #define ECB_H
  472. /* 16 bits major, 16 bits minor */
  473. #define ECB_VERSION 0x00010003
  474. #ifdef _WIN32
  475. typedef signed char int8_t;
  476. typedef unsigned char uint8_t;
  477. typedef signed short int16_t;
  478. typedef unsigned short uint16_t;
  479. typedef signed int int32_t;
  480. typedef unsigned int uint32_t;
  481. #if __GNUC__
  482. typedef signed long long int64_t;
  483. typedef unsigned long long uint64_t;
  484. #else /* _MSC_VER || __BORLANDC__ */
  485. typedef signed __int64 int64_t;
  486. typedef unsigned __int64 uint64_t;
  487. #endif
  488. #ifdef _WIN64
  489. #define ECB_PTRSIZE 8
  490. typedef uint64_t uintptr_t;
  491. typedef int64_t intptr_t;
  492. #else
  493. #define ECB_PTRSIZE 4
  494. typedef uint32_t uintptr_t;
  495. typedef int32_t intptr_t;
  496. #endif
  497. #else
  498. #include <inttypes.h>
  499. #if UINTMAX_MAX > 0xffffffffU
  500. #define ECB_PTRSIZE 8
  501. #else
  502. #define ECB_PTRSIZE 4
  503. #endif
  504. #endif
  505. /* work around x32 idiocy by defining proper macros */
  506. #if __amd64 || __x86_64 || _M_AMD64 || _M_X64
  507. #if _ILP32
  508. #define ECB_AMD64_X32 1
  509. #else
  510. #define ECB_AMD64 1
  511. #endif
  512. #endif
  513. /* many compilers define _GNUC_ to some versions but then only implement
  514. * what their idiot authors think are the "more important" extensions,
  515. * causing enormous grief in return for some better fake benchmark numbers.
  516. * or so.
  517. * we try to detect these and simply assume they are not gcc - if they have
  518. * an issue with that they should have done it right in the first place.
  519. */
  520. #ifndef ECB_GCC_VERSION
  521. #if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__
  522. #define ECB_GCC_VERSION(major,minor) 0
  523. #else
  524. #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
  525. #endif
  526. #endif
  527. #define ECB_CPP (__cplusplus+0)
  528. #define ECB_CPP11 (__cplusplus >= 201103L)
  529. #if ECB_CPP
  530. #define ECB_C 0
  531. #define ECB_STDC_VERSION 0
  532. #else
  533. #define ECB_C 1
  534. #define ECB_STDC_VERSION __STDC_VERSION__
  535. #endif
  536. #define ECB_C99 (ECB_STDC_VERSION >= 199901L)
  537. #define ECB_C11 (ECB_STDC_VERSION >= 201112L)
  538. #if ECB_CPP
  539. #define ECB_EXTERN_C extern "C"
  540. #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
  541. #define ECB_EXTERN_C_END }
  542. #else
  543. #define ECB_EXTERN_C extern
  544. #define ECB_EXTERN_C_BEG
  545. #define ECB_EXTERN_C_END
  546. #endif
  547. /*****************************************************************************/
  548. /* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
  549. /* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */
  550. #if ECB_NO_THREADS
  551. #define ECB_NO_SMP 1
  552. #endif
  553. #if ECB_NO_SMP
  554. #define ECB_MEMORY_FENCE do { } while (0)
  555. #endif
  556. #ifndef ECB_MEMORY_FENCE
  557. #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
  558. #if __i386 || __i386__
  559. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
  560. #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
  561. #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
  562. #elif __amd64 || __amd64__ || __x86_64 || __x86_64__
  563. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
  564. #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
  565. #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
  566. #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
  567. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
  568. #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
  569. || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__
  570. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
  571. #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
  572. || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__
  573. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
  574. #elif __aarch64__
  575. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory")
  576. #elif (__sparc || __sparc__) && !__sparcv8
  577. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
  578. #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
  579. #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
  580. #elif defined __s390__ || defined __s390x__
  581. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
  582. #elif defined __mips__
  583. /* GNU/Linux emulates sync on mips1 architectures, so we force its use */
  584. /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
  585. #define ECB_MEMORY_FENCE __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
  586. #elif defined __alpha__
  587. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory")
  588. #elif defined __hppa__
  589. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
  590. #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
  591. #elif defined __ia64__
  592. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory")
  593. #elif defined __m68k__
  594. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
  595. #elif defined __m88k__
  596. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory")
  597. #elif defined __sh__
  598. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
  599. #endif
  600. #endif
  601. #endif
  602. #ifndef ECB_MEMORY_FENCE
  603. #if ECB_GCC_VERSION(4,7)
  604. /* see comment below (stdatomic.h) about the C11 memory model. */
  605. #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
  606. #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
  607. #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
  608. /* The __has_feature syntax from clang is so misdesigned that we cannot use it
  609. * without risking compile time errors with other compilers. We *could*
  610. * define our own ecb_clang_has_feature, but I just can't be bothered to work
  611. * around this shit time and again.
  612. * #elif defined __clang && __has_feature (cxx_atomic)
  613. * // see comment below (stdatomic.h) about the C11 memory model.
  614. * #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
  615. * #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
  616. * #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
  617. */
  618. #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
  619. #define ECB_MEMORY_FENCE __sync_synchronize ()
  620. #elif _MSC_VER >= 1500 /* VC++ 2008 */
  621. /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
  622. #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
  623. #define ECB_MEMORY_FENCE _ReadWriteBarrier (); MemoryBarrier()
  624. #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */
  625. #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier()
  626. #elif _MSC_VER >= 1400 /* VC++ 2005 */
  627. #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
  628. #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
  629. #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
  630. #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
  631. #elif defined _WIN32
  632. #include <WinNT.h>
  633. #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
  634. #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
  635. #include <mbarrier.h>
  636. #define ECB_MEMORY_FENCE __machine_rw_barrier ()
  637. #define ECB_MEMORY_FENCE_ACQUIRE __machine_r_barrier ()
  638. #define ECB_MEMORY_FENCE_RELEASE __machine_w_barrier ()
  639. #elif __xlC__
  640. #define ECB_MEMORY_FENCE __sync ()
  641. #endif
  642. #endif
  643. #ifndef ECB_MEMORY_FENCE
  644. #if ECB_C11 && !defined __STDC_NO_ATOMICS__
  645. /* we assume that these memory fences work on all variables/all memory accesses, */
  646. /* not just C11 atomics and atomic accesses */
  647. #include <stdatomic.h>
  648. /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
  649. /* any fence other than seq_cst, which isn't very efficient for us. */
  650. /* Why that is, we don't know - either the C11 memory model is quite useless */
  651. /* for most usages, or gcc and clang have a bug */
  652. /* I *currently* lean towards the latter, and inefficiently implement */
  653. /* all three of ecb's fences as a seq_cst fence */
  654. /* Update, gcc-4.8 generates mfence for all c++ fences, but nothing */
  655. /* for all __atomic_thread_fence's except seq_cst */
  656. #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
  657. #endif
  658. #endif
  659. #ifndef ECB_MEMORY_FENCE
  660. #if !ECB_AVOID_PTHREADS
  661. /*
  662. * if you get undefined symbol references to pthread_mutex_lock,
  663. * or failure to find pthread.h, then you should implement
  664. * the ECB_MEMORY_FENCE operations for your cpu/compiler
  665. * OR provide pthread.h and link against the posix thread library
  666. * of your system.
  667. */
  668. #include <pthread.h>
  669. #define ECB_NEEDS_PTHREADS 1
  670. #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1
  671. static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
  672. #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
  673. #endif
  674. #endif
  675. #if !defined ECB_MEMORY_FENCE_ACQUIRE && defined ECB_MEMORY_FENCE
  676. #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
  677. #endif
  678. #if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
  679. #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
  680. #endif
  681. /*****************************************************************************/
  682. #if __cplusplus
  683. #define ecb_inline static inline
  684. #elif ECB_GCC_VERSION(2,5)
  685. #define ecb_inline static __inline__
  686. #elif ECB_C99
  687. #define ecb_inline static inline
  688. #else
  689. #define ecb_inline static
  690. #endif
  691. #if ECB_GCC_VERSION(3,3)
  692. #define ecb_restrict __restrict__
  693. #elif ECB_C99
  694. #define ecb_restrict restrict
  695. #else
  696. #define ecb_restrict
  697. #endif
  698. typedef int ecb_bool;
  699. #define ECB_CONCAT_(a, b) a ## b
  700. #define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)
  701. #define ECB_STRINGIFY_(a) # a
  702. #define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)
  703. #define ecb_function_ ecb_inline
  704. #if ECB_GCC_VERSION(3,1)
  705. #define ecb_attribute(attrlist) __attribute__(attrlist)
  706. #define ecb_is_constant(expr) __builtin_constant_p (expr)
  707. #define ecb_expect(expr,value) __builtin_expect ((expr),(value))
  708. #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
  709. #else
  710. #define ecb_attribute(attrlist)
  711. /* possible C11 impl for integral types
  712. typedef struct ecb_is_constant_struct ecb_is_constant_struct;
  713. #define ecb_is_constant(expr) _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr)), ecb_is_constant_struct *: 0, default: 1)) */
  714. #define ecb_is_constant(expr) 0
  715. #define ecb_expect(expr,value) (expr)
  716. #define ecb_prefetch(addr,rw,locality)
  717. #endif
  718. /* no emulation for ecb_decltype */
  719. #if ECB_GCC_VERSION(4,5)
  720. #define ecb_decltype(x) __decltype(x)
  721. #elif ECB_GCC_VERSION(3,0)
  722. #define ecb_decltype(x) __typeof(x)
  723. #endif
  724. #if _MSC_VER >= 1300
  725. #define ecb_deprecated __declspec(deprecated)
  726. #else
  727. #define ecb_deprecated ecb_attribute ((__deprecated__))
  728. #endif
  729. #define ecb_noinline ecb_attribute ((__noinline__))
  730. #define ecb_unused ecb_attribute ((__unused__))
  731. #define ecb_const ecb_attribute ((__const__))
  732. #define ecb_pure ecb_attribute ((__pure__))
  733. /* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx __declspec(noreturn) */
  734. #if ECB_C11
  735. #define ecb_noreturn _Noreturn
  736. #else
  737. #define ecb_noreturn ecb_attribute ((__noreturn__))
  738. #endif
  739. #if ECB_GCC_VERSION(4,3)
  740. #define ecb_artificial ecb_attribute ((__artificial__))
  741. #define ecb_hot ecb_attribute ((__hot__))
  742. #define ecb_cold ecb_attribute ((__cold__))
  743. #else
  744. #define ecb_artificial
  745. #define ecb_hot
  746. #define ecb_cold
  747. #endif
  748. /* put around conditional expressions if you are very sure that the */
  749. /* expression is mostly true or mostly false. note that these return */
  750. /* booleans, not the expression. */
  751. #define ecb_expect_false(expr) ecb_expect (!!(expr), 0)
  752. #define ecb_expect_true(expr) ecb_expect (!!(expr), 1)
  753. /* for compatibility to the rest of the world */
  754. #define ecb_likely(expr) ecb_expect_true (expr)
  755. #define ecb_unlikely(expr) ecb_expect_false (expr)
  756. /* count trailing zero bits and count # of one bits */
/* count trailing zero bits and count # of one bits */
#if ECB_GCC_VERSION(3,4)
  /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */
  #define ecb_ld32(x)       (__builtin_clz   (x) ^ 31)
  #define ecb_ld64(x)       (__builtin_clzll (x) ^ 63)
  #define ecb_ctz32(x)      __builtin_ctz      (x)
  #define ecb_ctz64(x)      __builtin_ctzll    (x)
  #define ecb_popcount32(x) __builtin_popcount (x)
  /* no popcountll */
#else
  ecb_function_ int ecb_ctz32 (uint32_t x) ecb_const;
  ecb_function_ int
  ecb_ctz32 (uint32_t x)
  {
    int r = 0;

    x &= ~x + 1; /* this isolates the lowest bit */

    /* x now has at most one bit set; test its position against fixed */
    /* masks - each test contributes one bit of the trailing-zero count */
#if ECB_branchless_on_i386
    r += !!(x & 0xaaaaaaaa) << 0;
    r += !!(x & 0xcccccccc) << 1;
    r += !!(x & 0xf0f0f0f0) << 2;
    r += !!(x & 0xff00ff00) << 3;
    r += !!(x & 0xffff0000) << 4;
#else
    if (x & 0xaaaaaaaa) r +=  1;
    if (x & 0xcccccccc) r +=  2;
    if (x & 0xf0f0f0f0) r +=  4;
    if (x & 0xff00ff00) r +=  8;
    if (x & 0xffff0000) r += 16;
#endif

    return r;
  }
  787. ecb_function_ int ecb_ctz64 (uint64_t x) ecb_const;
  788. ecb_function_ int
  789. ecb_ctz64 (uint64_t x)
  790. {
  791. int shift = x & 0xffffffffU ? 0 : 32;
  792. return ecb_ctz32 (x >> shift) + shift;
  793. }
  ecb_function_ int ecb_popcount32 (uint32_t x) ecb_const;
  ecb_function_ int
  ecb_popcount32 (uint32_t x)
  {
    /* classic SWAR popcount: sum bit pairs, then nibbles, then bytes; */
    /* the multiply adds all byte counts into the topmost byte */
    x -= (x >> 1) & 0x55555555;
    x  = ((x >> 2) & 0x33333333) + (x & 0x33333333);
    x  = ((x >> 4) + x) & 0x0f0f0f0f;
    x *= 0x01010101;

    return x >> 24;
  }
  804. ecb_function_ int ecb_ld32 (uint32_t x) ecb_const;
  805. ecb_function_ int ecb_ld32 (uint32_t x)
  806. {
  807. int r = 0;
  808. if (x >> 16) { x >>= 16; r += 16; }
  809. if (x >> 8) { x >>= 8; r += 8; }
  810. if (x >> 4) { x >>= 4; r += 4; }
  811. if (x >> 2) { x >>= 2; r += 2; }
  812. if (x >> 1) { r += 1; }
  813. return r;
  814. }
  815. ecb_function_ int ecb_ld64 (uint64_t x) ecb_const;
  816. ecb_function_ int ecb_ld64 (uint64_t x)
  817. {
  818. int r = 0;
  819. if (x >> 32) { x >>= 32; r += 32; }
  820. return r + ecb_ld32 (x);
  821. }
  822. #endif
/* power-of-two test; note that 0 also tests true (!(0 & -1)) */
ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) ecb_const;
ecb_function_ ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) ecb_const;
ecb_function_ ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); }

/* reverse the bits of a byte with two multiply/mask steps */
ecb_function_ uint8_t  ecb_bitrev8  (uint8_t  x) ecb_const;
ecb_function_ uint8_t  ecb_bitrev8  (uint8_t  x)
{
  return (  (x * 0x0802U & 0x22110U)
          | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
}
  833. ecb_function_ uint16_t ecb_bitrev16 (uint16_t x) ecb_const;
  834. ecb_function_ uint16_t ecb_bitrev16 (uint16_t x)
  835. {
  836. x = ((x >> 1) & 0x5555) | ((x & 0x5555) << 1);
  837. x = ((x >> 2) & 0x3333) | ((x & 0x3333) << 2);
  838. x = ((x >> 4) & 0x0f0f) | ((x & 0x0f0f) << 4);
  839. x = ( x >> 8 ) | ( x << 8);
  840. return x;
  841. }
ecb_function_ uint32_t ecb_bitrev32 (uint32_t x) ecb_const;
ecb_function_ uint32_t ecb_bitrev32 (uint32_t x)
{
  /* swap adjacent bits, then pairs, nibbles, bytes and finally the halves */
  x = ((x >>  1) & 0x55555555) | ((x & 0x55555555) <<  1);
  x = ((x >>  2) & 0x33333333) | ((x & 0x33333333) <<  2);
  x = ((x >>  4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) <<  4);
  x = ((x >>  8) & 0x00ff00ff) | ((x & 0x00ff00ff) <<  8);
  x = ( x >> 16              ) | ( x               << 16);

  return x;
}
  852. /* popcount64 is only available on 64 bit cpus as gcc builtin */
  853. /* so for this version we are lazy */
  854. ecb_function_ int ecb_popcount64 (uint64_t x) ecb_const;
  855. ecb_function_ int
  856. ecb_popcount64 (uint64_t x)
  857. {
  858. return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);
  859. }
  860. ecb_inline uint8_t ecb_rotl8 (uint8_t x, unsigned int count) ecb_const;
  861. ecb_inline uint8_t ecb_rotr8 (uint8_t x, unsigned int count) ecb_const;
  862. ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) ecb_const;
  863. ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) ecb_const;
  864. ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) ecb_const;
  865. ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) ecb_const;
  866. ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) ecb_const;
  867. ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) ecb_const;
  868. ecb_inline uint8_t ecb_rotl8 (uint8_t x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); }
  869. ecb_inline uint8_t ecb_rotr8 (uint8_t x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); }
  870. ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); }
  871. ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); }
  872. ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); }
  873. ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); }
  874. ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); }
  875. ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); }
/* byte swapping: use the gcc builtins where available, otherwise */
/* synthesise from rotates and recursive halving */
#if ECB_GCC_VERSION(4,3)
  #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
  #define ecb_bswap32(x) __builtin_bswap32 (x)
  #define ecb_bswap64(x) __builtin_bswap64 (x)
#else
  ecb_function_ uint16_t ecb_bswap16 (uint16_t x) ecb_const;
  ecb_function_ uint16_t
  ecb_bswap16 (uint16_t x)
  {
    /* rotating a 16 bit value by 8 swaps its two bytes */
    return ecb_rotl16 (x, 8);
  }

  ecb_function_ uint32_t ecb_bswap32 (uint32_t x) ecb_const;
  ecb_function_ uint32_t
  ecb_bswap32 (uint32_t x)
  {
    /* swap the two 16 bit halves, byteswapping each half */
    return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16);
  }

  ecb_function_ uint64_t ecb_bswap64 (uint64_t x) ecb_const;
  ecb_function_ uint64_t
  ecb_bswap64 (uint64_t x)
  {
    /* swap the two 32 bit halves, byteswapping each half */
    return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
  }
#endif

#if ECB_GCC_VERSION(4,5)
  #define ecb_unreachable() __builtin_unreachable ()
#else
  /* this seems to work fine, but gcc always emits a warning for it :/ */
  ecb_inline void ecb_unreachable (void) ecb_noreturn;
  ecb_inline void ecb_unreachable (void) { }
#endif

/* try to tell the compiler that some condition is definitely true */
#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0

/* returns 0x44 on little endian, 0x11 on big endian machines */
ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const;
ecb_inline unsigned char
ecb_byteorder_helper (void)
{
  /* the union code still generates code under pressure in gcc, */
  /* but less than using pointers, and always seems to */
  /* successfully return a constant. */
  /* the reason why we have this horrible preprocessor mess */
  /* is to avoid it in all cases, at least on common architectures */
  /* or when using a recent enough gcc version (>= 4.6) */
#if __i386 || __i386__ || _M_X86 || __amd64 || __amd64__ || _M_X64
  return 0x44;
#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  return 0x44;
#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  return 0x11;
#else
  /* generic fallback: look at the first byte of a known 32 bit pattern */
  union
  {
    uint32_t i;
    uint8_t c;
  } u = { 0x11223344 };
  return u.c;
#endif
}

ecb_inline ecb_bool ecb_big_endian    (void) ecb_const;
ecb_inline ecb_bool ecb_big_endian    (void) { return ecb_byteorder_helper () == 0x11; }
ecb_inline ecb_bool ecb_little_endian (void) ecb_const;
ecb_inline ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44; }
/* mathematically correct (always non-negative) modulus - C99 guarantees */
/* truncating division, so m % n carries the sign of m and we add n back */
#if ECB_GCC_VERSION(3,0) || ECB_C99
  #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
#else
  #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
#endif

/* integer division rounding down (rd) resp. up (ru), also for negative values */
#if __cplusplus
  template<typename T>
  static inline T ecb_div_rd (T val, T div)
  {
    return val < 0 ? - ((-val + div - 1) / div) : (val          ) / div;
  }
  template<typename T>
  static inline T ecb_div_ru (T val, T div)
  {
    return val < 0 ? - ((-val            ) / div) : (val + div - 1) / div;
  }
#else
  #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val)            ) / (div))
  #define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val)            ) / (div)) : ((val) + (div) - 1) / (div))
#endif

#if ecb_cplusplus_does_not_suck
  /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */
  template<typename T, int N>
  static inline int ecb_array_length (const T (&arr)[N])
  {
    return N;
  }
#else
  /* note: only valid for true arrays, not for pointers */
  #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
#endif

/*******************************************************************************/
/* floating point stuff, can be disabled by defining ECB_NO_LIBM */

/* basically, everything uses "ieee pure-endian" floating point numbers */
/* the only noteworthy exception is ancient armle, which uses order 43218765 */
#if 0 \
    || __i386 || __i386__ \
    || __amd64 || __amd64__ || __x86_64 || __x86_64__ \
    || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
    || defined __s390__ || defined __s390x__ \
    || defined __mips__ \
    || defined __alpha__ \
    || defined __hppa__ \
    || defined __ia64__ \
    || defined __m68k__ \
    || defined __m88k__ \
    || defined __sh__ \
    || defined _M_IX86 || defined _M_AMD64 || defined _M_IA64 \
    || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
    || defined __aarch64__
  #define ECB_STDFP 1
  #include <string.h> /* for memcpy */
#else
  #define ECB_STDFP 0
#endif

#ifndef ECB_NO_LIBM

  #include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */

  /* only the oldest of old doesn't have this one. solaris. */
  #ifdef INFINITY
    #define ECB_INFINITY INFINITY
  #else
    #define ECB_INFINITY HUGE_VAL
  #endif

  #ifdef NAN
    #define ECB_NAN NAN
  #else
    #define ECB_NAN ECB_INFINITY
  #endif
  1005. /* converts an ieee half/binary16 to a float */
  1006. ecb_function_ float ecb_binary16_to_float (uint16_t x) ecb_const;
  1007. ecb_function_ float
  1008. ecb_binary16_to_float (uint16_t x)
  1009. {
  1010. int e = (x >> 10) & 0x1f;
  1011. int m = x & 0x3ff;
  1012. float r;
  1013. if (!e ) r = ldexpf (m , -24);
  1014. else if (e != 31) r = ldexpf (m + 0x400, e - 25);
  1015. else if (m ) r = ECB_NAN;
  1016. else r = ECB_INFINITY;
  1017. return x & 0x8000 ? -r : r;
  1018. }
/* convert a float to ieee single/binary32 */
ecb_function_ uint32_t ecb_float_to_binary32 (float x) ecb_const;
ecb_function_ uint32_t
ecb_float_to_binary32 (float x)
{
  uint32_t r;

#if ECB_STDFP
  /* native ieee representation - a bit copy does the job */
  memcpy (&r, &x, 4);
#else
  /* slow emulation, works for anything but -0 */
  uint32_t m;
  int e;

  if (x == 0e0f                    ) return 0x00000000U; /* zero  */
  if (x > +3.40282346638528860e+38f) return 0x7f800000U; /* +inf  */
  if (x < -3.40282346638528860e+38f) return 0xff800000U; /* -inf  */
  if (x != x                       ) return 0x7fbfffffU; /* nan   */

  /* scale the frexpf mantissa to a 24 bit integer; the sign is */
  /* recovered from bit 31 just below */
  m = frexpf (x, &e) * 0x1000000U;

  r = m & 0x80000000U;

  if (r)
    m = -m;

  if (e <= -126)
    {
      /* subnormal: drop the implied bit, shift mantissa into place, clamp exponent */
      m &= 0xffffffU;
      m >>= (-125 - e);
      e = -126;
    }

  r |= (e + 126) << 23; /* biased exponent */
  r |= m & 0x7fffffU;   /* mantissa without the implied leading bit */
#endif

  return r;
}

/* converts an ieee single/binary32 to a float */
ecb_function_ float ecb_binary32_to_float (uint32_t x) ecb_const;
ecb_function_ float
ecb_binary32_to_float (uint32_t x)
{
  float r;

#if ECB_STDFP
  memcpy (&r, &x, 4);
#else
  /* emulation, only works for normals and subnormals and +0 */
  int neg = x >> 31;
  int e = (x >> 23) & 0xffU;

  x &= 0x7fffffU;

  if (e)
    x |= 0x800000U; /* normal: restore the implied leading mantissa bit */
  else
    e = 1;          /* subnormal: same scale as the smallest normal */

  /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
  r = ldexpf (x * (0.5f / 0x800000U), e - 126);

  r = neg ? -r : r;
#endif

  return r;
}
  1073. /* convert a double to ieee double/binary64 */
  1074. ecb_function_ uint64_t ecb_double_to_binary64 (double x) ecb_const;
  1075. ecb_function_ uint64_t
  1076. ecb_double_to_binary64 (double x)
  1077. {
  1078. uint64_t r;
  1079. #if ECB_STDFP
  1080. memcpy (&r, &x, 8);
  1081. #else
  1082. /* slow emulation, works for anything but -0 */
  1083. uint64_t m;
  1084. int e;
  1085. if (x == 0e0 ) return 0x0000000000000000U;
  1086. if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;
  1087. if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;
  1088. if (x != x ) return 0X7ff7ffffffffffffU;
  1089. m = frexp (x, &e) * 0x20000000000000U;
  1090. r = m & 0x8000000000000000;;
  1091. if (r)
  1092. m = -m;
  1093. if (e <= -1022)
  1094. {
  1095. m &= 0x1fffffffffffffU;
  1096. m >>= (-1021 - e);
  1097. e = -1022;
  1098. }
  1099. r |= ((uint64_t)(e + 1022)) << 52;
  1100. r |= m & 0xfffffffffffffU;
  1101. #endif
  1102. return r;
  1103. }
/* converts an ieee double/binary64 to a double */
ecb_function_ double ecb_binary64_to_double (uint64_t x) ecb_const;
ecb_function_ double
ecb_binary64_to_double (uint64_t x)
{
  double r;

#if ECB_STDFP
  memcpy (&r, &x, 8);
#else
  /* emulation, only works for normals and subnormals and +0 */
  int neg = x >> 63;
  int e = (x >> 52) & 0x7ffU;

  x &= 0xfffffffffffffU;

  if (e)
    x |= 0x10000000000000U; /* normal: restore the implied leading mantissa bit */
  else
    e = 1;                  /* subnormal: same scale as the smallest normal */

  /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
  r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);

  r = neg ? -r : r;
#endif

  return r;
}
#endif
#endif
/* ECB.H END */

#if ECB_MEMORY_FENCE_NEEDS_PTHREADS
/* if your architecture doesn't need memory fences, e.g. because it is
 * single-cpu/core, or if you use libev in a project that doesn't use libev
 * from multiple threads, then you can define ECB_AVOID_PTHREADS when compiling
 * libev, in which cases the memory fences become nops.
 * alternatively, you can remove this #error and link against libpthread,
 * which will then provide the memory fences.
 */
# error "memory fences not defined for your architecture, please report"
#endif

/* fall back to no-op fences when none are needed/defined */
#ifndef ECB_MEMORY_FENCE
# define ECB_MEMORY_FENCE         do { } while (0)
# define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
# define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
#endif

/* short libev-local aliases for the ecb helpers */
#define expect_false(cond) ecb_expect_false (cond)
#define expect_true(cond)  ecb_expect_true  (cond)
#define noinline           ecb_noinline

#define inline_size        ecb_inline

#if EV_FEATURE_CODE
# define inline_speed      ecb_inline
#else
# define inline_speed      static noinline
#endif

/* number of priorities, and mapping of a watcher to its 0-based priority slot */
#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)

#if EV_MINPRI == EV_MAXPRI
# define ABSPRI(w) (((W)w), 0)
#else
# define ABSPRI(w) (((W)w)->priority - EV_MINPRI)
#endif

#define EMPTY       /* required for microsofts broken pseudo-c compiler */
#define EMPTY2(a,b) /* used to suppress some warnings */

/* shorthands for the generic watcher base types */
typedef ev_watcher *W;
typedef ev_watcher_list *WL;
typedef ev_watcher_time *WT;

#define ev_active(w) ((W)(w))->active
#define ev_at(w)     ((WT)(w))->at

#if EV_USE_REALTIME
/* sig_atomic_t is used to avoid per-thread variables or locking but still */
/* giving it a reasonably high chance of working on typical architectures */
static EV_ATOMIC_T have_realtime; /* did clock_gettime (CLOCK_REALTIME) work? */
#endif

#if EV_USE_MONOTONIC
static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */
#endif

/* win32 fd/handle translation - overridable by the embedding program */
#ifndef EV_FD_TO_WIN32_HANDLE
# define EV_FD_TO_WIN32_HANDLE(fd) _get_osfhandle (fd)
#endif
#ifndef EV_WIN32_HANDLE_TO_FD
# define EV_WIN32_HANDLE_TO_FD(handle) _open_osfhandle (handle, 0)
#endif
#ifndef EV_WIN32_CLOSE_FD
# define EV_WIN32_CLOSE_FD(fd) close (fd)
#endif

#ifdef _WIN32
# include "ev_win32.c"
#endif
  1187. /*****************************************************************************/
/* define a suitable floor function (only used by periodics atm) */
#if EV_USE_FLOOR
# include <math.h>
# define ev_floor(v) floor (v)
#else
#include <float.h>
/* a floor() replacement function, should be independent of ev_tstamp type */
static ev_tstamp noinline
ev_floor (ev_tstamp v)
{
  /* the choice of shift factor is not terribly important */
#if FLT_RADIX != 2 /* assume FLT_RADIX == 10 */
  const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.;
#else
  const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;
#endif

  /* argument too large for an unsigned long? split off the high part */
  if (expect_false (v >= shift))
    {
      ev_tstamp f;

      if (v == v - 1.)
        return v; /* very large number, no fractional part representable */

      /* floor the scaled-down high part, then recurse on the remainder */
      f = shift * ev_floor (v * (1. / shift));
      return f + ev_floor (v - f);
    }

  /* special treatment for negative args? */
  if (expect_false (v < 0.))
    {
      /* floor(-x) == -ceil(x): adjust by one unless v is integral */
      ev_tstamp f = -ev_floor (-v);

      return f - (f == v ? 0 : 1);
    }

  /* fits into an unsigned long - integer conversion truncates towards zero, */
  /* which equals floor for non-negative values */
  return (unsigned long)v;
}
#endif
  1223. /*****************************************************************************/
  1224. #ifdef __linux
  1225. # include <sys/utsname.h>
  1226. #endif
  1227. static unsigned int noinline ecb_cold
  1228. ev_linux_version (void)
  1229. {
  1230. #ifdef __linux
  1231. unsigned int v = 0;
  1232. struct utsname buf;
  1233. int i;
  1234. char *p = buf.release;
  1235. if (uname (&buf))
  1236. return 0;
  1237. for (i = 3+1; --i; )
  1238. {
  1239. unsigned int c = 0;
  1240. for (;;)
  1241. {
  1242. if (*p >= '0' && *p <= '9')
  1243. c = c * 10 + *p++ - '0';
  1244. else
  1245. {
  1246. p += *p == '.';
  1247. break;
  1248. }
  1249. }
  1250. v = (v << 8) | c;
  1251. }
  1252. return v;
  1253. #else
  1254. return 0;
  1255. #endif
  1256. }
  1257. /*****************************************************************************/
#if EV_AVOID_STDIO
/* write a message to stderr without going through stdio */
static void noinline ecb_cold
ev_printerr (const char *msg)
{
  write (STDERR_FILENO, msg, strlen (msg));
}
#endif

/* user-settable hook invoked on fatal system errors instead of printing + abort */
static void (*syserr_cb)(const char *msg) EV_THROW;

void ecb_cold
ev_set_syserr_cb (void (*cb)(const char *msg) EV_THROW) EV_THROW
{
  syserr_cb = cb;
}

/* report a fatal system error: call the user callback if one is set, */
/* otherwise print msg plus strerror(errno) and abort the process */
static void noinline ecb_cold
ev_syserr (const char *msg)
{
  if (!msg)
    msg = "(libev) system error";

  if (syserr_cb)
    syserr_cb (msg);
  else
    {
#if EV_AVOID_STDIO
      ev_printerr (msg);
      ev_printerr (": ");
      ev_printerr (strerror (errno));
      ev_printerr ("\n");
#else
      perror (msg);
#endif
      abort ();
    }
}
  1291. static void *
  1292. ev_realloc_emul (void *ptr, long size) EV_THROW
  1293. {
  1294. /* some systems, notably openbsd and darwin, fail to properly
  1295. * implement realloc (x, 0) (as required by both ansi c-89 and
  1296. * the single unix specification, so work around them here.
  1297. * recently, also (at least) fedora and debian started breaking it,
  1298. * despite documenting it otherwise.
  1299. */
  1300. if (size)
  1301. return realloc (ptr, size);
  1302. free (ptr);
  1303. return 0;
  1304. }
/* the allocator in use - defaults to the realloc emulation above */
static void *(*alloc)(void *ptr, long size) EV_THROW = ev_realloc_emul;

void ecb_cold
ev_set_allocator (void *(*cb)(void *ptr, long size) EV_THROW) EV_THROW
{
  alloc = cb;
}

/* allocate/resize/free through the configured allocator; */
/* aborts the process when memory cannot be obtained */
inline_speed void *
ev_realloc (void *ptr, long size)
{
  ptr = alloc (ptr, size);

  if (!ptr && size)
    {
#if EV_AVOID_STDIO
      ev_printerr ("(libev) memory allocation failed, aborting.\n");
#else
      fprintf (stderr, "(libev) cannot allocate %ld bytes, aborting.", size);
#endif
      abort ();
    }

  return ptr;
}

#define ev_malloc(size) ev_realloc (0, (size))
#define ev_free(ptr)    ev_realloc ((ptr), 0)
  1328. /*****************************************************************************/
/* set in reify when reification needed */
#define EV_ANFD_REIFY 1

/* file descriptor info structure */
typedef struct
{
  WL head;              /* linked list of watchers for this fd */
  unsigned char events; /* the events watched for */
  unsigned char reify;  /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
  unsigned char emask;  /* the epoll backend stores the actual kernel mask in here */
  unsigned char unused;
#if EV_USE_EPOLL
  unsigned int egen;    /* generation counter to counter epoll bugs */
#endif
#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
  SOCKET handle;
#endif
#if EV_USE_IOCP
  OVERLAPPED or, ow;
#endif
} ANFD;

/* stores the pending event set for a given watcher */
typedef struct
{
  W w;
  int events; /* the pending event set for the given watcher */
} ANPENDING;

#if EV_USE_INOTIFY
/* hash table entry per inotify-id */
typedef struct
{
  WL head;
} ANFS;
#endif

/* Heap Entry */
#if EV_HEAP_CACHE_AT
  /* a heap element with the timestamp cached next to the watcher pointer */
  typedef struct {
    ev_tstamp at;
    WT w;
  } ANHE;

  #define ANHE_w(he)        (he).w     /* access watcher, read-write */
  #define ANHE_at(he)       (he).at    /* access cached at, read-only */
  #define ANHE_at_cache(he) (he).at = (he).w->at /* update at from watcher */
#else
  /* a heap element is just the watcher pointer */
  typedef WT ANHE;

  #define ANHE_w(he)        (he)
  #define ANHE_at(he)       (he)->at
  #define ANHE_at_cache(he)
#endif

#if EV_MULTIPLICITY
  /* per-loop state, with the member list generated from ev_vars.h */
  struct ev_loop
  {
    ev_tstamp ev_rt_now;
    #define ev_rt_now ((loop)->ev_rt_now)
    #define VAR(name,decl) decl;
      #include "ev_vars.h"
    #undef VAR
  };
  #include "ev_wrap.h"

  static struct ev_loop default_loop_struct;
  EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */

#else
  /* single-loop mode: the loop variables become file-level statics */
  EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a definition despite extern */
  #define VAR(name,decl) static decl;
    #include "ev_vars.h"
  #undef VAR

  static int ev_default_loop_ptr;
#endif

#if EV_FEATURE_API
# define EV_RELEASE_CB if (expect_false (release_cb)) release_cb (EV_A)
# define EV_ACQUIRE_CB if (expect_false (acquire_cb)) acquire_cb (EV_A)
# define EV_INVOKE_PENDING invoke_cb (EV_A)
#else
# define EV_RELEASE_CB (void)0
# define EV_ACQUIRE_CB (void)0
# define EV_INVOKE_PENDING ev_invoke_pending (EV_A)
#endif

/* NOTE(review): flag used by the break/run machinery outside this chunk */
#define EVBREAK_RECURSE 0x80
  1408. /*****************************************************************************/
#ifndef EV_HAVE_EV_TIME
/* wall-clock time - prefers clock_gettime (CLOCK_REALTIME) if it proved */
/* usable at init time, otherwise falls back to gettimeofday */
ev_tstamp
ev_time (void) EV_THROW
{
#if EV_USE_REALTIME
  if (expect_true (have_realtime))
    {
      struct timespec ts;
      clock_gettime (CLOCK_REALTIME, &ts);
      return ts.tv_sec + ts.tv_nsec * 1e-9;
    }
#endif

  /* NOTE(review): with EV_USE_REALTIME enabled this declaration follows a
   * statement - fine for C99/gnu89, not strict C89 */
  struct timeval tv;
  gettimeofday (&tv, 0);
  return tv.tv_sec + tv.tv_usec * 1e-6;
}
#endif

/* monotonic clock if it proved usable at init time, otherwise wall clock */
inline_size ev_tstamp
get_clock (void)
{
#if EV_USE_MONOTONIC
  if (expect_true (have_monotonic))
    {
      struct timespec ts;
      clock_gettime (CLOCK_MONOTONIC, &ts);
      return ts.tv_sec + ts.tv_nsec * 1e-9;
    }
#endif

  return ev_time ();
}

#if EV_MULTIPLICITY
/* return the loop's cached notion of "now" */
ev_tstamp
ev_now (EV_P) EV_THROW
{
  return ev_rt_now;
}
#endif

/* sleep for the given interval; non-positive delays return immediately */
void
ev_sleep (ev_tstamp delay) EV_THROW
{
  if (delay > 0.)
    {
#if EV_USE_NANOSLEEP
      struct timespec ts;

      EV_TS_SET (ts, delay);
      nanosleep (&ts, 0);
#elif defined _WIN32
      Sleep ((unsigned long)(delay * 1e3));
#else
      struct timeval tv;

      /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */
      /* something not guaranteed by newer posix versions, but guaranteed */
      /* by older ones */
      EV_TV_SET (tv, delay);
      select (0, 0, 0, 0, &tv);
#endif
    }
}
  1467. /*****************************************************************************/
#define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */

/* find a suitable new size for the given array, */
/* hopefully by rounding to a nice-to-malloc size */
inline_size int
array_nextsize (int elem, int cur, int cnt)
{
  int ncur = cur + 1;

  /* at least double the current size until cnt fits */
  do
    ncur <<= 1;
  while (cnt > ncur);

  /* if size is large, round to MALLOC_ROUND - 4 * longs to accommodate malloc overhead */
  if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4)
    {
      /* work in bytes: round up to the allocation chunk, then subtract */
      /* the reserved overhead and convert back to an element count */
      ncur *= elem;
      ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1);
      ncur = ncur - sizeof (void *) * 4;
      ncur /= elem;
    }

  return ncur;
}
  1488. static void * noinline ecb_cold
  1489. array_realloc (int elem, void *base, int *cur, int cnt)
  1490. {
  1491. *cur = array_nextsize (elem, *cur, cnt);
  1492. return ev_realloc (base, elem * *cur);
  1493. }
/* zero-fill count freshly (re)allocated elements starting at base */
#define array_init_zero(base,count)	\
  memset ((void *)(base), 0, sizeof (*(base)) * (count))

/* grow the array (base/cur) to hold at least cnt elements, running */
/* init over the newly added element range */
#define array_needsize(type,base,cur,cnt,init)			\
  if (expect_false ((cnt) > (cur)))				\
    {								\
      int ecb_unused ocur_ = (cur);				\
      (base) = (type *)array_realloc				\
         (sizeof (type), (base), &(cur), (cnt));		\
      init ((base) + (ocur_), (cur) - ocur_);			\
    }

#if 0
/* currently unused, shrink an array */
#define array_slim(type,stem)					\
  if (stem ## max < array_roundsize (stem ## cnt >> 2))		\
    {								\
      stem ## max = array_roundsize (stem ## cnt >> 1);		\
      base = (type *)ev_realloc (base, sizeof (type) * (stem ## max));\
      fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\
    }
#endif

/* free an array (stem ## s idx) and reset its count/max bookkeeping */
#define array_free(stem, idx) \
  ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0; stem ## s idx = 0
  1515. /*****************************************************************************/
  1516. /* dummy callback for pending events */
  1517. static void noinline
  1518. pendingcb (EV_P_ ev_prepare *w, int revents)
  1519. {
  1520. }
  1521. void noinline
  1522. ev_feed_event (EV_P_ void *w, int revents) EV_THROW
  1523. {
  1524. W w_ = (W)w;
  1525. int pri = ABSPRI (w_);
  1526. if (expect_false (w_->pending))
  1527. pendings [pri][w_->pending - 1].events |= revents;
  1528. else
  1529. {
  1530. w_->pending = ++pendingcnt [pri];
  1531. array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, EMPTY2);
  1532. pendings [pri][w_->pending - 1].w = w_;
  1533. pendings [pri][w_->pending - 1].events = revents;
  1534. }
  1535. pendingpri = NUMPRI - 1;
  1536. }
  1537. inline_speed void
  1538. feed_reverse (EV_P_ W w)
  1539. {
  1540. array_needsize (W, rfeeds, rfeedmax, rfeedcnt + 1, EMPTY2);
  1541. rfeeds [rfeedcnt++] = w;
  1542. }
  1543. inline_size void
  1544. feed_reverse_done (EV_P_ int revents)
  1545. {
  1546. do
  1547. ev_feed_event (EV_A_ rfeeds [--rfeedcnt], revents);
  1548. while (rfeedcnt);
  1549. }
  1550. inline_speed void
  1551. queue_events (EV_P_ W *events, int eventcnt, int type)
  1552. {
  1553. int i;
  1554. for (i = 0; i < eventcnt; ++i)
  1555. ev_feed_event (EV_A_ events [i], type);
  1556. }
  1557. /*****************************************************************************/
/* feed revents to every io watcher on fd whose event mask overlaps it */
inline_speed void
fd_event_nocheck (EV_P_ int fd, int revents)
{
  ANFD *anfd = anfds + fd;
  ev_io *w;

  for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
    {
      int ev = w->events & revents;

      if (ev)
        ev_feed_event (EV_A_ (W)w, ev);
    }
}

/* do not submit kernel events for fds that have reify set */
/* because that means they changed while we were polling for new events */
inline_speed void
fd_event (EV_P_ int fd, int revents)
{
  ANFD *anfd = anfds + fd;

  if (expect_true (!anfd->reify))
    fd_event_nocheck (EV_A_ fd, revents);
}

/* public variant of the above, with bounds checking on fd */
void
ev_feed_fd_event (EV_P_ int fd, int revents) EV_THROW
{
  if (fd >= 0 && fd < anfdmax)
    fd_event_nocheck (EV_A_ fd, revents);
}
/* make sure the external fd watch events are in-sync */
/* with the kernel/libev internal state */
inline_size void
fd_reify (EV_P)
{
  int i;

#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
  /* first pass (windows only): detect fds whose underlying socket */
  /* handle changed and de-register the stale handle from the backend */
  for (i = 0; i < fdchangecnt; ++i)
    {
      int fd = fdchanges [i];
      ANFD *anfd = anfds + fd;

      if (anfd->reify & EV__IOFDSET && anfd->head)
        {
          SOCKET handle = EV_FD_TO_WIN32_HANDLE (fd);

          if (handle != anfd->handle)
            {
              unsigned long arg;

              assert (("libev: only socket fds supported in this configuration", ioctlsocket (handle, FIONREAD, &arg) == 0));

              /* handle changed, but fd didn't - we need to do it in two steps */
              backend_modify (EV_A_ fd, anfd->events, 0);
              anfd->events = 0;
              anfd->handle = handle;
            }
        }
    }
#endif

  /* second pass: recompute the wanted event mask for each changed fd */
  /* and inform the backend when it differs from the previous mask */
  for (i = 0; i < fdchangecnt; ++i)
    {
      int fd = fdchanges [i];
      ANFD *anfd = anfds + fd;
      ev_io *w;

      unsigned char o_events = anfd->events;
      unsigned char o_reify  = anfd->reify;

      anfd->reify = 0;

      /*if (expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */
        {
          anfd->events = 0;

          /* or together the masks of all watchers on this fd */
          for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
            anfd->events |= (unsigned char)w->events;

          if (o_events != anfd->events)
            o_reify = EV__IOFDSET; /* actually |= */
        }

      if (o_reify & EV__IOFDSET)
        backend_modify (EV_A_ fd, o_events, anfd->events);
    }

  fdchangecnt = 0;
}
  1632. /* something about the given fd changed */
  1633. inline_size void
  1634. fd_change (EV_P_ int fd, int flags)
  1635. {
  1636. unsigned char reify = anfds [fd].reify;
  1637. anfds [fd].reify |= flags;
  1638. if (expect_true (!reify))
  1639. {
  1640. ++fdchangecnt;
  1641. array_needsize (int, fdchanges, fdchangemax, fdchangecnt, EMPTY2);
  1642. fdchanges [fdchangecnt - 1] = fd;
  1643. }
  1644. }
/* the given fd is invalid/unusable, so make sure it doesn't hurt us anymore */
inline_speed void ecb_cold
fd_kill (EV_P_ int fd)
{
  ev_io *w;

  /* stop every watcher registered on this fd and notify it of the error;
   * ev_io_stop unlinks w from anfds [fd].head, so this loop terminates */
  while ((w = (ev_io *)anfds [fd].head))
    {
      ev_io_stop (EV_A_ w);
      ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE);
    }
}
/* check whether the given fd is actually valid, for error recovery */
/* returns nonzero iff the fd refers to an open descriptor */
inline_size int ecb_cold
fd_valid (int fd)
{
#ifdef _WIN32
  return EV_FD_TO_WIN32_HANDLE (fd) != -1;
#else
  /* F_GETFD is the cheapest query that fails with EBADF on a closed fd */
  return fcntl (fd, F_GETFD) != -1;
#endif
}
  1666. /* called on EBADF to verify fds */
  1667. static void noinline ecb_cold
  1668. fd_ebadf (EV_P)
  1669. {
  1670. int fd;
  1671. for (fd = 0; fd < anfdmax; ++fd)
  1672. if (anfds [fd].events)
  1673. if (!fd_valid (fd) && errno == EBADF)
  1674. fd_kill (EV_A_ fd);
  1675. }
  1676. /* called on ENOMEM in select/poll to kill some fds and retry */
  1677. static void noinline ecb_cold
  1678. fd_enomem (EV_P)
  1679. {
  1680. int fd;
  1681. for (fd = anfdmax; fd--; )
  1682. if (anfds [fd].events)
  1683. {
  1684. fd_kill (EV_A_ fd);
  1685. break;
  1686. }
  1687. }
/* usually called after fork if backend needs to re-arm all fds from scratch */
static void noinline
fd_rearm_all (EV_P)
{
  int fd;

  for (fd = 0; fd < anfdmax; ++fd)
    if (anfds [fd].events)
      {
        /* forget the cached kernel-side state and queue a full re-registration */
        anfds [fd].events = 0;
        anfds [fd].emask = 0;
        fd_change (EV_A_ fd, EV__IOFDSET | EV_ANFD_REIFY);
      }
}
/* used to prepare libev internal fd's */
/* this is not fork-safe */
/* makes the fd close-on-exec and non-blocking; errors are ignored */
inline_speed void
fd_intern (int fd)
{
#ifdef _WIN32
  unsigned long arg = 1;
  ioctlsocket (EV_FD_TO_WIN32_HANDLE (fd), FIONBIO, &arg);
#else
  fcntl (fd, F_SETFD, FD_CLOEXEC);
  fcntl (fd, F_SETFL, O_NONBLOCK);
#endif
}
  1714. /*****************************************************************************/
  1715. /*
  1716. * the heap functions want a real array index. array index 0 is guaranteed to not
  1717. * be in-use at any time. the first heap entry is at array [HEAP0]. DHEAP gives
  1718. * the branching factor of the d-tree.
  1719. */
  1720. /*
  1721. * at the moment we allow libev the luxury of two heaps,
  1722. * a small-code-size 2-heap one and a ~1.5kb larger 4-heap
  1723. * which is more cache-efficient.
  1724. * the difference is about 5% with 50000+ watchers.
  1725. */
  1726. #if EV_USE_4HEAP
  1727. #define DHEAP 4
  1728. #define HEAP0 (DHEAP - 1) /* index of first element in heap */
  1729. #define HPARENT(k) ((((k) - HEAP0 - 1) / DHEAP) + HEAP0)
  1730. #define UPHEAP_DONE(p,k) ((p) == (k))
/* away from the root */
/* 4-heap variant: sift heap [k] down until the min-heap property holds again.
 * N is the number of elements, indices are in [HEAP0, N + HEAP0). */
inline_speed void
downheap (ANHE *heap, int N, int k)
{
  ANHE he = heap [k];           /* the element being sifted */
  ANHE *E = heap + N + HEAP0;   /* one past the last element */

  for (;;)
    {
      ev_tstamp minat;
      ANHE *minpos;
      ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1; /* first of up to DHEAP children */

      /* find minimum child */
      if (expect_true (pos + DHEAP - 1 < E))
        {
          /* fast path: all four children exist, compare unconditionally */
          /* fast path */                               (minpos = pos + 0), (minat = ANHE_at (*minpos));
          if (               ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
          if (               ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
          if (               ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
        }
      else if (pos < E)
        {
          /* slow path: partial child set, bounds-check each comparison */
          /* slow path */                               (minpos = pos + 0), (minat = ANHE_at (*minpos));
          if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
          if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
          if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
        }
      else
        break; /* k is a leaf */

      if (ANHE_at (he) <= minat)
        break; /* heap property restored */

      /* move the smallest child up and continue below it */
      heap [k] = *minpos;
      ev_active (ANHE_w (*minpos)) = k; /* keep the watcher's cached heap index in sync */

      k = minpos - heap;
    }

  heap [k] = he;
  ev_active (ANHE_w (he)) = k;
}
  1768. #else /* 4HEAP */
  1769. #define HEAP0 1
  1770. #define HPARENT(k) ((k) >> 1)
  1771. #define UPHEAP_DONE(p,k) (!(p))
/* away from the root */
/* binary-heap variant: sift heap [k] down until the min-heap property holds */
inline_speed void
downheap (ANHE *heap, int N, int k)
{
  ANHE he = heap [k]; /* the element being sifted */

  for (;;)
    {
      int c = k << 1; /* left child (HEAP0 == 1 here) */

      if (c >= N + HEAP0)
        break; /* k is a leaf */

      /* pick the smaller of the two children, if a right child exists */
      c += c + 1 < N + HEAP0 && ANHE_at (heap [c]) > ANHE_at (heap [c + 1])
           ? 1 : 0;

      if (ANHE_at (he) <= ANHE_at (heap [c]))
        break; /* heap property restored */

      heap [k] = heap [c];
      ev_active (ANHE_w (heap [k])) = k; /* keep cached heap index in sync */

      k = c;
    }

  heap [k] = he;
  ev_active (ANHE_w (he)) = k;
}
  1793. #endif
/* towards the root */
/* sift heap [k] up until its parent is not larger (works for 2- and 4-heaps
 * via the HPARENT/UPHEAP_DONE macros) */
inline_speed void
upheap (ANHE *heap, int k)
{
  ANHE he = heap [k]; /* the element being sifted */

  for (;;)
    {
      int p = HPARENT (k);

      if (UPHEAP_DONE (p, k) || ANHE_at (heap [p]) <= ANHE_at (he))
        break; /* reached the root or the parent is not larger */

      heap [k] = heap [p];
      ev_active (ANHE_w (heap [k])) = k; /* keep cached heap index in sync */
      k = p;
    }

  heap [k] = he;
  ev_active (ANHE_w (he)) = k;
}
  1811. /* move an element suitably so it is in a correct place */
  1812. inline_size void
  1813. adjustheap (ANHE *heap, int N, int k)
  1814. {
  1815. if (k > HEAP0 && ANHE_at (heap [k]) <= ANHE_at (heap [HPARENT (k)]))
  1816. upheap (heap, k);
  1817. else
  1818. downheap (heap, N, k);
  1819. }
  1820. /* rebuild the heap: this function is used only once and executed rarely */
  1821. inline_size void
  1822. reheap (ANHE *heap, int N)
  1823. {
  1824. int i;
  1825. /* we don't use floyds algorithm, upheap is simpler and is more cache-efficient */
  1826. /* also, this is easy to implement and correct for both 2-heaps and 4-heaps */
  1827. for (i = 0; i < N; ++i)
  1828. upheap (heap, i + HEAP0);
  1829. }
  1830. /*****************************************************************************/
/* associate signal watchers to a signal */
typedef struct
{
  EV_ATOMIC_T pending; /* set asynchronously by ev_feed_signal, cleared when dispatched */
#if EV_MULTIPLICITY
  EV_P;                /* the loop this signal is routed to (declares the loop member) */
#endif
  WL head;             /* list of ev_signal watchers for this signal */
} ANSIG;

/* one slot per signal, indexed by signum - 1 */
static ANSIG signals [EV_NSIG - 1];
  1841. /*****************************************************************************/
  1842. #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
/* lazily create the self-pipe (or eventfd) used to wake the loop from
 * signal handlers and other threads; also called again after fork */
static void noinline ecb_cold
evpipe_init (EV_P)
{
  if (!ev_is_active (&pipe_w))
    {
      int fds [2];

# if EV_USE_EVENTFD
      /* prefer a single eventfd over a pipe; fds [0] == -1 marks eventfd mode */
      fds [0] = -1;
      fds [1] = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC);
      if (fds [1] < 0 && errno == EINVAL)
        fds [1] = eventfd (0, 0); /* pre-2.6.27 kernels lack the flags argument */

      if (fds [1] < 0)
# endif
        {
          while (pipe (fds))
            ev_syserr ("(libev) error creating signal/async pipe");

          fd_intern (fds [0]);
        }

      evpipe [0] = fds [0];

      if (evpipe [1] < 0)
        evpipe [1] = fds [1]; /* first call, set write fd */
      else
        {
          /* on subsequent calls, do not change evpipe [1] */
          /* so that evpipe_write can always rely on its value. */
          /* this branch does not do anything sensible on windows, */
          /* so must not be executed on windows */

          dup2 (fds [1], evpipe [1]);
          close (fds [1]);
        }

      fd_intern (evpipe [1]);

      /* in eventfd mode evpipe [0] is -1, so watch the eventfd itself */
      ev_io_set (&pipe_w, evpipe [0] < 0 ? evpipe [1] : evpipe [0], EV_READ);
      ev_io_start (EV_A_ &pipe_w);
      ev_unref (EV_A); /* watcher should not keep loop alive */
    }
}
/* wake the loop by writing to the self-pipe/eventfd, unless *flag says a
 * wakeup is already in flight. called from signal handlers and other
 * threads, hence the careful fence/flag dance - do not reorder. */
inline_speed void
evpipe_write (EV_P_ EV_ATOMIC_T *flag)
{
  ECB_MEMORY_FENCE; /* push out the write before this function was called, acquire flag */

  if (expect_true (*flag))
    return; /* a wakeup is already pending */

  *flag = 1;
  ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */

  pipe_write_skipped = 1;

  ECB_MEMORY_FENCE; /* make sure pipe_write_skipped is visible before we check pipe_write_wanted */

  if (pipe_write_wanted)
    {
      int old_errno;

      pipe_write_skipped = 0;
      ECB_MEMORY_FENCE_RELEASE;

      old_errno = errno; /* save errno because write will clobber it */

#if EV_USE_EVENTFD
      if (evpipe [0] < 0) /* eventfd mode */
        {
          uint64_t counter = 1;
          write (evpipe [1], &counter, sizeof (uint64_t));
        }
      else
#endif
        {
#ifdef _WIN32
          WSABUF buf;
          DWORD sent;
          buf.buf = &buf;
          buf.len = 1;
          WSASend (EV_FD_TO_WIN32_HANDLE (evpipe [1]), &buf, 1, &sent, 0, 0, 0);
#else
          /* the byte written is arbitrary; the address of evpipe [1] is handy */
          write (evpipe [1], &(evpipe [1]), 1);
#endif
        }

      errno = old_errno;
    }
}
/* called whenever the libev signal pipe */
/* got some events (signal, async) */
static void
pipecb (EV_P_ ev_io *iow, int revents)
{
  int i;

  if (revents & EV_READ)
    {
      /* drain the wakeup fd so it does not stay readable */
#if EV_USE_EVENTFD
      if (evpipe [0] < 0) /* eventfd mode */
        {
          uint64_t counter;
          read (evpipe [1], &counter, sizeof (uint64_t));
        }
      else
#endif
        {
          char dummy[4];
#ifdef _WIN32
          WSABUF buf;
          DWORD recvd;
          DWORD flags = 0;
          buf.buf = dummy;
          buf.len = sizeof (dummy);
          WSARecv (EV_FD_TO_WIN32_HANDLE (evpipe [0]), &buf, 1, &recvd, &flags, 0, 0);
#else
          read (evpipe [0], &dummy, sizeof (dummy));
#endif
        }
    }

  pipe_write_skipped = 0;

  ECB_MEMORY_FENCE; /* push out skipped, acquire flags */

#if EV_SIGNAL_ENABLE
  if (sig_pending)
    {
      sig_pending = 0;

      ECB_MEMORY_FENCE;

      /* dispatch every signal marked pending by the signal handler */
      for (i = EV_NSIG - 1; i--; )
        if (expect_false (signals [i].pending))
          ev_feed_signal_event (EV_A_ i + 1);
    }
#endif

#if EV_ASYNC_ENABLE
  if (async_pending)
    {
      async_pending = 0;

      ECB_MEMORY_FENCE;

      /* queue every async watcher whose sent flag was raised */
      for (i = asynccnt; i--; )
        if (asyncs [i]->sent)
          {
            asyncs [i]->sent = 0;
            ECB_MEMORY_FENCE_RELEASE;
            ev_feed_event (EV_A_ asyncs [i], EV_ASYNC);
          }
    }
#endif
}
  1974. /*****************************************************************************/
/* simulate the reception of a signal; async-signal-safe, may be called
 * from signal handlers and other threads */
void
ev_feed_signal (int signum) EV_THROW
{
#if EV_MULTIPLICITY
  EV_P;
  ECB_MEMORY_FENCE_ACQUIRE;
  EV_A = signals [signum - 1].loop;

  if (!EV_A)
    return; /* no loop is watching this signal */
#endif

  signals [signum - 1].pending = 1;
  evpipe_write (EV_A_ &sig_pending); /* wake the loop */
}
/* the handler installed for watched signals; forwards to the loop
 * via the self-pipe mechanism */
static void
ev_sighandler (int signum)
{
#ifdef _WIN32
  /* windows resets the disposition after delivery, so re-arm first */
  signal (signum, ev_sighandler);
#endif

  ev_feed_signal (signum);
}
/* queue EV_SIGNAL events for all watchers of the given signal on this loop;
 * silently ignores out-of-range signums and signals routed to another loop */
void noinline
ev_feed_signal_event (EV_P_ int signum) EV_THROW
{
  WL w;

  if (expect_false (signum <= 0 || signum >= EV_NSIG))
    return;

  --signum; /* signals [] is indexed by signum - 1 */

#if EV_MULTIPLICITY
  /* it is permissible to try to feed a signal to the wrong loop */
  /* or, likely more useful, feeding a signal nobody is waiting for */

  if (expect_false (signals [signum].loop != EV_A))
    return;
#endif

  signals [signum].pending = 0;
  ECB_MEMORY_FENCE_RELEASE;

  for (w = signals [signum].head; w; w = w->next)
    ev_feed_event (EV_A_ (W)w, EV_SIGNAL);
}
  2014. #if EV_USE_SIGNALFD
/* callback for the signalfd watcher: drain the fd and dispatch each
 * received signal */
static void
sigfdcb (EV_P_ ev_io *iow, int revents)
{
  struct signalfd_siginfo si[2], *sip; /* these structs are big */

  for (;;)
    {
      ssize_t res = read (sigfd, si, sizeof (si));

      /* not ISO-C, as res might be -1, but works with SuS */
      for (sip = si; (char *)sip < (char *)si + res; ++sip)
        ev_feed_signal_event (EV_A_ sip->ssi_signo);

      if (res < (ssize_t)sizeof (si))
        break; /* short read: nothing more buffered */
    }
}
  2029. #endif
  2030. #endif
  2031. /*****************************************************************************/
  2032. #if EV_CHILD_ENABLE
  2033. static WL childs [EV_PID_HASHSIZE];
  2034. static ev_signal childev;
  2035. #ifndef WIFCONTINUED
  2036. # define WIFCONTINUED(status) 0
  2037. #endif
/* handle a single child status event */
/* chain selects the hash bucket to scan; pid/status come from waitpid */
inline_speed void
child_reap (EV_P_ int chain, int pid, int status)
{
  ev_child *w;
  int traced = WIFSTOPPED (status) || WIFCONTINUED (status); /* stop/continue, not exit */

  for (w = (ev_child *)childs [chain & ((EV_PID_HASHSIZE) - 1)]; w; w = (ev_child *)((WL)w)->next)
    {
      /* a watcher with pid 0 matches any child; traced events only go to
       * watchers that opted in via flags & 1 */
      if ((w->pid == pid || !w->pid)
          && (!traced || (w->flags & 1)))
        {
          ev_set_priority (w, EV_MAXPRI); /* need to do it *now*, this *must* be the same prio as the signal watcher itself */
          w->rpid    = pid;
          w->rstatus = status;
          ev_feed_event (EV_A_ (W)w, EV_CHILD);
        }
    }
}
  2056. #ifndef WCONTINUED
  2057. # define WCONTINUED 0
  2058. #endif
/* called on sigchld etc., calls waitpid */
static void
childcb (EV_P_ ev_signal *sw, int revents)
{
  int pid, status;

  /* some systems define WCONTINUED but then fail to support it (linux 2.4) */
  /* on EINVAL, retry without WCONTINUED; bail if no child was reaped */
  if (0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED)))
    if (!WCONTINUED
        || errno != EINVAL
        || 0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED)))
      return;

  /* make sure we are called again until all children have been reaped */
  /* we need to do it this way so that the callback gets called before we continue */
  ev_feed_event (EV_A_ (W)sw, EV_SIGNAL);

  child_reap (EV_A_ pid, pid, status);
  if ((EV_PID_HASHSIZE) > 1)
    child_reap (EV_A_ 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */
}
  2077. #endif
  2078. /*****************************************************************************/
  2079. #if EV_USE_IOCP
  2080. # include "ev_iocp.c"
  2081. #endif
  2082. #if EV_USE_PORT
  2083. # include "ev_port.c"
  2084. #endif
  2085. #if EV_USE_KQUEUE
  2086. # include "ev_kqueue.c"
  2087. #endif
  2088. #if EV_USE_EPOLL
  2089. # include "ev_epoll.c"
  2090. #endif
  2091. #if EV_USE_POLL
  2092. # include "ev_poll.c"
  2093. #endif
  2094. #if EV_USE_SELECT
  2095. # include "ev_select.c"
  2096. #endif
/* major version of the libev ABI this library was compiled with */
int ecb_cold
ev_version_major (void) EV_THROW
{
  return EV_VERSION_MAJOR;
}
/* minor version of the libev ABI this library was compiled with */
int ecb_cold
ev_version_minor (void) EV_THROW
{
  return EV_VERSION_MINOR;
}
  2107. /* return true if we are running with elevated privileges and should ignore env variables */
  2108. int inline_size ecb_cold
  2109. enable_secure (void)
  2110. {
  2111. #ifdef _WIN32
  2112. return 0;
  2113. #else
  2114. return getuid () != geteuid ()
  2115. || getgid () != getegid ();
  2116. #endif
  2117. }
  2118. unsigned int ecb_cold
  2119. ev_supported_backends (void) EV_THROW
  2120. {
  2121. unsigned int flags = 0;
  2122. if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
  2123. if (EV_USE_KQUEUE) flags |= EVBACKEND_KQUEUE;
  2124. if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
  2125. if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
  2126. if (EV_USE_SELECT) flags |= EVBACKEND_SELECT;
  2127. return flags;
  2128. }
/* the supported backends minus those known to misbehave on this platform */
unsigned int ecb_cold
ev_recommended_backends (void) EV_THROW
{
  unsigned int flags = ev_supported_backends ();

#ifndef __NetBSD__
  /* kqueue is borked on everything but netbsd apparently */
  /* it usually doesn't work correctly on anything but sockets and pipes */
  flags &= ~EVBACKEND_KQUEUE;
#endif
#ifdef __APPLE__
  /* only select works correctly on that "unix-certified" platform */
  flags &= ~EVBACKEND_KQUEUE; /* horribly broken, even for sockets */
  flags &= ~EVBACKEND_POLL;   /* poll is based on kqueue from 10.5 onwards */
#endif
#ifdef __FreeBSD__
  flags &= ~EVBACKEND_POLL;   /* poll return value is unusable (http://forums.freebsd.org/archive/index.php/t-10270.html) */
#endif

  return flags;
}
/* backends that can be embedded into another loop via ev_embed */
unsigned int ecb_cold
ev_embeddable_backends (void) EV_THROW
{
  int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT;

  /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
  /* NOTE(review): the cutoff actually used below is 2.6.32 (0x020620),
   * presumably chosen conservatively - confirm against upstream changelog */
  if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
    flags &= ~EVBACKEND_EPOLL;

  return flags;
}
/* the EVBACKEND_* flag of the backend actually in use by this loop */
unsigned int
ev_backend (EV_P) EV_THROW
{
  return backend;
}
  2162. #if EV_FEATURE_API
/* number of loop iterations done so far */
unsigned int
ev_iteration (EV_P) EV_THROW
{
  return loop_count;
}
/* current recursion depth of ev_run invocations */
unsigned int
ev_depth (EV_P) EV_THROW
{
  return loop_depth;
}
/* set the minimum time the loop blocks to collect more io events in one sweep */
void
ev_set_io_collect_interval (EV_P_ ev_tstamp interval) EV_THROW
{
  io_blocktime = interval;
}
/* set the minimum time the loop blocks to collect more timeouts in one sweep */
void
ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval) EV_THROW
{
  timeout_blocktime = interval;
}
/* attach opaque user data to the loop */
void
ev_set_userdata (EV_P_ void *data) EV_THROW
{
  userdata = data;
}
/* retrieve the user data previously set with ev_set_userdata */
void *
ev_userdata (EV_P) EV_THROW
{
  return userdata;
}
/* override the callback used to invoke pending watchers */
void
ev_set_invoke_pending_cb (EV_P_ ev_loop_callback invoke_pending_cb) EV_THROW
{
  invoke_cb = invoke_pending_cb;
}
/* set the callbacks invoked when the loop releases/reacquires itself */
void
ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_THROW, void (*acquire)(EV_P) EV_THROW) EV_THROW
{
  release_cb = release;
  acquire_cb = acquire;
}
  2204. #endif
/* initialise a loop structure, must be zero-initialised */
/* detects available clocks, reads LIBEV_FLAGS (unless secure or EVFLAG_NOENV),
 * resets all loop state, then tries the requested backends in preference order */
static void noinline ecb_cold
loop_init (EV_P_ unsigned int flags) EV_THROW
{
  if (!backend) /* a nonzero backend means the loop is already initialised */
    {
      origflags = flags;

#if EV_USE_REALTIME
      /* probe once whether CLOCK_REALTIME actually works here */
      if (!have_realtime)
        {
          struct timespec ts;

          if (!clock_gettime (CLOCK_REALTIME, &ts))
            have_realtime = 1;
        }
#endif

#if EV_USE_MONOTONIC
      /* probe once whether CLOCK_MONOTONIC actually works here */
      if (!have_monotonic)
        {
          struct timespec ts;

          if (!clock_gettime (CLOCK_MONOTONIC, &ts))
            have_monotonic = 1;
        }
#endif

      /* pid check not overridable via env */
#ifndef _WIN32
      if (flags & EVFLAG_FORKCHECK)
        curpid = getpid ();
#endif

      /* allow the environment to override flags, unless forbidden or insecure */
      if (!(flags & EVFLAG_NOENV)
          && !enable_secure ()
          && getenv ("LIBEV_FLAGS"))
        flags = atoi (getenv ("LIBEV_FLAGS"));

      ev_rt_now         = ev_time ();
      mn_now            = get_clock ();
      now_floor         = mn_now;
      rtmn_diff         = ev_rt_now - mn_now; /* offset between wall clock and monotonic clock */
#if EV_FEATURE_API
      invoke_cb         = ev_invoke_pending;
#endif

      io_blocktime       = 0.;
      timeout_blocktime  = 0.;
      backend            = 0;
      backend_fd         = -1;
      sig_pending        = 0;
#if EV_ASYNC_ENABLE
      async_pending      = 0;
#endif
      pipe_write_skipped = 0;
      pipe_write_wanted  = 0;
      evpipe [0]         = -1;
      evpipe [1]         = -1;
#if EV_USE_INOTIFY
      fs_fd              = flags & EVFLAG_NOINOTIFY ? -1 : -2; /* -2 == not yet opened */
#endif
#if EV_USE_SIGNALFD
      sigfd              = flags & EVFLAG_SIGNALFD  ? -2 : -1; /* -2 == open on demand */
#endif

      if (!(flags & EVBACKEND_MASK))
        flags |= ev_recommended_backends ();

      /* try backends in order of preference; the first that initialises wins */
#if EV_USE_IOCP
      if (!backend && (flags & EVBACKEND_IOCP  )) backend = iocp_init   (EV_A_ flags);
#endif
#if EV_USE_PORT
      if (!backend && (flags & EVBACKEND_PORT  )) backend = port_init   (EV_A_ flags);
#endif
#if EV_USE_KQUEUE
      if (!backend && (flags & EVBACKEND_KQUEUE)) backend = kqueue_init (EV_A_ flags);
#endif
#if EV_USE_EPOLL
      if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init  (EV_A_ flags);
#endif
#if EV_USE_POLL
      if (!backend && (flags & EVBACKEND_POLL  )) backend = poll_init   (EV_A_ flags);
#endif
#if EV_USE_SELECT
      if (!backend && (flags & EVBACKEND_SELECT)) backend = select_init (EV_A_ flags);
#endif

      ev_prepare_init (&pending_w, pendingcb);

#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
      ev_init (&pipe_w, pipecb);
      ev_set_priority (&pipe_w, EV_MAXPRI); /* must run before everything else */
#endif
    }
}
/* free up a loop structure */
/* runs cleanup watchers, closes all loop-owned fds, destroys the backend,
 * and frees every internal array; a NULL loop is a no-op */
void ecb_cold
ev_loop_destroy (EV_P)
{
  int i;

#if EV_MULTIPLICITY
  /* mimic free (0) */
  if (!EV_A)
    return;
#endif

#if EV_CLEANUP_ENABLE
  /* queue cleanup watchers (and execute them) */
  if (expect_false (cleanupcnt))
    {
      queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP);
      EV_INVOKE_PENDING;
    }
#endif

#if EV_CHILD_ENABLE
  if (ev_is_default_loop (EV_A) && ev_is_active (&childev))
    {
      ev_ref (EV_A); /* child watcher */
      ev_signal_stop (EV_A_ &childev);
    }
#endif

  if (ev_is_active (&pipe_w))
    {
      /*ev_ref (EV_A);*/
      /*ev_io_stop (EV_A_ &pipe_w);*/

      if (evpipe [0] >= 0) EV_WIN32_CLOSE_FD (evpipe [0]);
      if (evpipe [1] >= 0) EV_WIN32_CLOSE_FD (evpipe [1]);
    }

#if EV_USE_SIGNALFD
  if (ev_is_active (&sigfd_w))
    close (sigfd);
#endif

#if EV_USE_INOTIFY
  if (fs_fd >= 0)
    close (fs_fd);
#endif

  if (backend_fd >= 0)
    close (backend_fd);

  /* tear down whichever backend this loop was using */
#if EV_USE_IOCP
  if (backend == EVBACKEND_IOCP  ) iocp_destroy   (EV_A);
#endif
#if EV_USE_PORT
  if (backend == EVBACKEND_PORT  ) port_destroy   (EV_A);
#endif
#if EV_USE_KQUEUE
  if (backend == EVBACKEND_KQUEUE) kqueue_destroy (EV_A);
#endif
#if EV_USE_EPOLL
  if (backend == EVBACKEND_EPOLL ) epoll_destroy  (EV_A);
#endif
#if EV_USE_POLL
  if (backend == EVBACKEND_POLL  ) poll_destroy   (EV_A);
#endif
#if EV_USE_SELECT
  if (backend == EVBACKEND_SELECT) select_destroy (EV_A);
#endif

  /* per-priority arrays */
  for (i = NUMPRI; i--; )
    {
      array_free (pending, [i]);
#if EV_IDLE_ENABLE
      array_free (idle, [i]);
#endif
    }

  ev_free (anfds); anfds = 0; anfdmax = 0;

  /* have to use the microsoft-never-gets-it-right macro */
  array_free (rfeed, EMPTY);
  array_free (fdchange, EMPTY);
  array_free (timer, EMPTY);
#if EV_PERIODIC_ENABLE
  array_free (periodic, EMPTY);
#endif
#if EV_FORK_ENABLE
  array_free (fork, EMPTY);
#endif
#if EV_CLEANUP_ENABLE
  array_free (cleanup, EMPTY);
#endif
  array_free (prepare, EMPTY);
  array_free (check, EMPTY);
#if EV_ASYNC_ENABLE
  array_free (async, EMPTY);
#endif

  backend = 0; /* marks the loop as uninitialised */

#if EV_MULTIPLICITY
  if (ev_is_default_loop (EV_A))
#endif
    ev_default_loop_ptr = 0;
#if EV_MULTIPLICITY
  else
    ev_free (EV_A); /* only heap-allocated (non-default) loops are freed */
#endif
}
  2385. #if EV_USE_INOTIFY
  2386. inline_size void infy_fork (EV_P);
  2387. #endif
/* bring the loop back into a usable state in the child after fork:
 * let the backend re-arm, re-create the signal/async pipe, and make sure
 * nothing that happened around the fork is lost */
inline_size void
loop_fork (EV_P)
{
#if EV_USE_PORT
  if (backend == EVBACKEND_PORT  ) port_fork   (EV_A);
#endif
#if EV_USE_KQUEUE
  if (backend == EVBACKEND_KQUEUE) kqueue_fork (EV_A);
#endif
#if EV_USE_EPOLL
  if (backend == EVBACKEND_EPOLL ) epoll_fork  (EV_A);
#endif
#if EV_USE_INOTIFY
  infy_fork (EV_A);
#endif

#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
  if (ev_is_active (&pipe_w))
    {
      /* pipe_write_wanted must be false now, so modifying fd vars should be safe */

      ev_ref (EV_A);
      ev_io_stop (EV_A_ &pipe_w);

      if (evpipe [0] >= 0)
        EV_WIN32_CLOSE_FD (evpipe [0]);

      evpipe_init (EV_A); /* re-creates the read end; keeps evpipe [1] stable */
      /* iterate over everything, in case we missed something before */
      ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
    }
#endif

  postfork = 0;
}
  2418. #if EV_MULTIPLICITY
  2419. struct ev_loop * ecb_cold
  2420. ev_loop_new (unsigned int flags) EV_THROW
  2421. {
  2422. EV_P = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop));
  2423. memset (EV_A, 0, sizeof (struct ev_loop));
  2424. loop_init (EV_A_ flags);
  2425. if (ev_backend (EV_A))
  2426. return EV_A;
  2427. ev_free (EV_A);
  2428. return 0;
  2429. }
  2430. #endif /* multiplicity */
  2431. #if EV_VERIFY
/* consistency checks on a single watcher: sane priority, and if pending,
 * the pending queue really references it */
static void noinline ecb_cold
verify_watcher (EV_P_ W w)
{
  assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI));

  if (w->pending)
    assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w));
}
/* verify heap invariants: cached indices, parent ordering, cached timestamps */
static void noinline ecb_cold
verify_heap (EV_P_ ANHE *heap, int N)
{
  int i;

  for (i = HEAP0; i < N + HEAP0; ++i)
    {
      assert (("libev: active index mismatch in heap", ev_active (ANHE_w (heap [i])) == i));
      assert (("libev: heap condition violated", i == HEAP0 || ANHE_at (heap [HPARENT (i)]) <= ANHE_at (heap [i])));
      assert (("libev: heap at cache mismatch", ANHE_at (heap [i]) == ev_at (ANHE_w (heap [i]))));

      verify_watcher (EV_A_ (W)ANHE_w (heap [i]));
    }
}
/* verify a flat watcher array: each watcher's active index is its slot + 1 */
static void noinline ecb_cold
array_verify (EV_P_ W *ws, int cnt)
{
  while (cnt--)
    {
      assert (("libev: active index mismatch", ev_active (ws [cnt]) == cnt + 1));
      verify_watcher (EV_A_ ws [cnt]);
    }
}
  2460. #endif
  2461. #if EV_FEATURE_API
void ecb_cold
ev_verify (EV_P) EV_THROW
{
  /* exhaustively check the loop's internal data structures; compiled to a
   * no-op unless EV_VERIFY is enabled */
#if EV_VERIFY
  int i;
  WL w, w2;

  assert (activecnt >= -1);

  assert (fdchangemax >= fdchangecnt);
  for (i = 0; i < fdchangecnt; ++i)
    assert (("libev: negative fd in fdchanges", fdchanges [i] >= 0));

  assert (anfdmax >= 0);
  for (i = 0; i < anfdmax; ++i)
    {
      int j = 0;

      for (w = w2 = anfds [i].head; w; w = w->next)
        {
          verify_watcher (EV_A_ (W)w);

          /* brent's cycle detection: advance w2 every other step */
          if (j++ & 1)
            {
              assert (("libev: io watcher list contains a loop", w != w2));
              w2 = w2->next;
            }

          assert (("libev: inactive fd watcher on anfd list", ev_active (w) == 1));
          assert (("libev: fd mismatch between watcher and anfd", ((ev_io *)w)->fd == i));
        }
    }

  assert (timermax >= timercnt);
  verify_heap (EV_A_ timers, timercnt);

#if EV_PERIODIC_ENABLE
  assert (periodicmax >= periodiccnt);
  verify_heap (EV_A_ periodics, periodiccnt);
#endif

  for (i = NUMPRI; i--; )
    {
      assert (pendingmax [i] >= pendingcnt [i]);
#if EV_IDLE_ENABLE
      assert (idleall >= 0);
      assert (idlemax [i] >= idlecnt [i]);
      array_verify (EV_A_ (W *)idles [i], idlecnt [i]);
#endif
    }

#if EV_FORK_ENABLE
  assert (forkmax >= forkcnt);
  array_verify (EV_A_ (W *)forks, forkcnt);
#endif

#if EV_CLEANUP_ENABLE
  assert (cleanupmax >= cleanupcnt);
  array_verify (EV_A_ (W *)cleanups, cleanupcnt);
#endif

#if EV_ASYNC_ENABLE
  assert (asyncmax >= asynccnt);
  array_verify (EV_A_ (W *)asyncs, asynccnt);
#endif

#if EV_PREPARE_ENABLE
  assert (preparemax >= preparecnt);
  array_verify (EV_A_ (W *)prepares, preparecnt);
#endif

#if EV_CHECK_ENABLE
  assert (checkmax >= checkcnt);
  array_verify (EV_A_ (W *)checks, checkcnt);
#endif

/* dead code kept from upstream; references variables not declared here */
# if 0
#if EV_CHILD_ENABLE
  for (w = (ev_child *)childs [chain & ((EV_PID_HASHSIZE) - 1)]; w; w = (ev_child *)((WL)w)->next)
  for (signum = EV_NSIG; signum--; ) if (signals [signum].pending)
#endif
# endif
#endif
}
  2531. #endif
#if EV_MULTIPLICITY
struct ev_loop * ecb_cold
#else
int
#endif
/* initialise (on first call) and return the default loop; also installs the
 * SIGCHLD/child watcher machinery. returns 0 if no backend could be set up. */
ev_default_loop (unsigned int flags) EV_THROW
{
  if (!ev_default_loop_ptr)
    {
#if EV_MULTIPLICITY
      EV_P = ev_default_loop_ptr = &default_loop_struct;
#else
      ev_default_loop_ptr = 1;
#endif

      loop_init (EV_A_ flags);

      if (ev_backend (EV_A))
        {
#if EV_CHILD_ENABLE
          ev_signal_init (&childev, childcb, SIGCHLD);
          ev_set_priority (&childev, EV_MAXPRI); /* same prio as child_reap assumes */
          ev_signal_start (EV_A_ &childev);
          ev_unref (EV_A); /* child watcher should not keep loop alive */
#endif
        }
      else
        ev_default_loop_ptr = 0; /* initialisation failed, allow retry */
    }

  return ev_default_loop_ptr;
}
/* flag the loop for re-initialisation; the actual work happens lazily
 * in loop_fork on the next iteration */
void
ev_loop_fork (EV_P) EV_THROW
{
  postfork = 1;
}
  2566. /*****************************************************************************/
/* invoke a single watcher callback with the given revents */
void
ev_invoke (EV_P_ void *w, int revents)
{
  EV_CB_INVOKE ((W)w, revents);
}
  2572. unsigned int
  2573. ev_pending_count (EV_P) EV_THROW
  2574. {
  2575. int pri;
  2576. unsigned int count = 0;
  2577. for (pri = NUMPRI; pri--; )
  2578. count += pendingcnt [pri];
  2579. return count;
  2580. }
/* invoke all pending watchers, highest priority first; a callback may make
 * new watchers pending, so pendingcnt/pendingpri are re-read every step */
void noinline
ev_invoke_pending (EV_P)
{
  pendingpri = NUMPRI;

  while (pendingpri) /* pendingpri possibly gets modified in the inner loop */
    {
      --pendingpri;

      while (pendingcnt [pendingpri])
        {
          /* pop the last pending entry at this priority */
          ANPENDING *p = pendings [pendingpri] + --pendingcnt [pendingpri];

          p->w->pending = 0; /* clear before invoking, so the cb can re-arm */
          EV_CB_INVOKE (p->w, p->events);
          EV_FREQUENT_CHECK;
        }
    }
}
  2597. #if EV_IDLE_ENABLE
  2598. /* make idle watchers pending. this handles the "call-idle */
  2599. /* only when higher priorities are idle" logic */
inline_size void
idle_reify (EV_P)
{
  if (expect_false (idleall))
    {
      int pri;

      /* scan priorities from high to low; queue idle watchers only at the
       * highest priority that has no other pending watchers */
      for (pri = NUMPRI; pri--; )
        {
          if (pendingcnt [pri])
            break; /* something higher-or-equal is pending - stay busy */

          if (idlecnt [pri])
            {
              queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE);
              break;
            }
        }
    }
}
  2618. #endif
/* make timers pending */
/* pops every expired timer off the heap (rescheduling repeating ones),
 * collecting them via feed_reverse so callbacks fire in insertion order */
inline_size void
timers_reify (EV_P)
{
  EV_FREQUENT_CHECK;

  if (timercnt && ANHE_at (timers [HEAP0]) < mn_now)
    {
      do
        {
          ev_timer *w = (ev_timer *)ANHE_w (timers [HEAP0]);

          /*assert (("libev: inactive timer on timer heap detected", ev_is_active (w)));*/

          /* first reschedule or stop timer */
          if (w->repeat)
            {
              ev_at (w) += w->repeat;
              /* never schedule into the past, even after long stalls */
              if (ev_at (w) < mn_now)
                ev_at (w) = mn_now;

              assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.));

              ANHE_at_cache (timers [HEAP0]);
              downheap (timers, timercnt, HEAP0);
            }
          else
            ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */

          EV_FREQUENT_CHECK;
          feed_reverse (EV_A_ (W)w); /* collect now, feed in-order later */
        }
      while (timercnt && ANHE_at (timers [HEAP0]) < mn_now);

      feed_reverse_done (EV_A_ EV_TIMER);
    }
}
  2649. #if EV_PERIODIC_ENABLE
/* compute the next absolute expiry time for an interval-based periodic:
 * the smallest offset + N*interval that lies strictly after ev_rt_now */
static void noinline
periodic_recalc (EV_P_ ev_periodic *w)
{
  /* clamp tiny intervals so the division/loop below cannot degenerate */
  ev_tstamp interval = w->interval > MIN_INTERVAL ? w->interval : MIN_INTERVAL;
  ev_tstamp at = w->offset + interval * ev_floor ((ev_rt_now - w->offset) / interval);

  /* the above almost always errs on the low side */
  while (at <= ev_rt_now)
    {
      ev_tstamp nat = at + w->interval;

      /* when resolution fails us, we use ev_rt_now */
      /* (nat == at means the interval is below floating-point granularity at this magnitude) */
      if (expect_false (nat == at))
        {
          at = ev_rt_now;
          break;
        }

      at = nat;
    }

  ev_at (w) = at;
}
/* make periodics pending: pop every expired periodic off the heap,
 * recompute its next absolute time (or stop it), and feed in reverse */
inline_size void
periodics_reify (EV_P)
{
  EV_FREQUENT_CHECK;

  while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now)
    {
      do
        {
          ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]);

          /*assert (("libev: inactive timer on periodic heap detected", ev_is_active (w)));*/

          /* first reschedule or stop timer */
          if (w->reschedule_cb)
            {
              /* user callback decides the next expiry time */
              ev_at (w) = w->reschedule_cb (w, ev_rt_now);

              assert (("libev: ev_periodic reschedule callback returned time in the past", ev_at (w) >= ev_rt_now));

              ANHE_at_cache (periodics [HEAP0]);
              downheap (periodics, periodiccnt, HEAP0);
            }
          else if (w->interval)
            {
              periodic_recalc (EV_A_ w);
              ANHE_at_cache (periodics [HEAP0]);
              downheap (periodics, periodiccnt, HEAP0);
            }
          else
            ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */

          EV_FREQUENT_CHECK;
          feed_reverse (EV_A_ (W)w);
        }
      while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now);

      feed_reverse_done (EV_A_ EV_PERIODIC);
    }
}
/* simply recalculate all periodics */
/* TODO: maybe ensure that at least one event happens when jumping forward? */
/* called after a detected realtime clock jump; recomputes every periodic's
 * absolute expiry and rebuilds the whole heap in one pass */
static void noinline ecb_cold
periodics_reschedule (EV_P)
{
  int i;

  /* adjust periodics after time jump */
  for (i = HEAP0; i < periodiccnt + HEAP0; ++i)
    {
      ev_periodic *w = (ev_periodic *)ANHE_w (periodics [i]);

      if (w->reschedule_cb)
        ev_at (w) = w->reschedule_cb (w, ev_rt_now);
      else if (w->interval)
        periodic_recalc (EV_A_ w);

      ANHE_at_cache (periodics [i]);
    }

  /* expiry times changed arbitrarily, so re-establish the heap property wholesale */
  reheap (periodics, periodiccnt);
}
  2721. #endif
  2722. /* adjust all timers by a given offset */
  2723. static void noinline ecb_cold
  2724. timers_reschedule (EV_P_ ev_tstamp adjust)
  2725. {
  2726. int i;
  2727. for (i = 0; i < timercnt; ++i)
  2728. {
  2729. ANHE *he = timers + i + HEAP0;
  2730. ANHE_w (*he)->at += adjust;
  2731. ANHE_at_cache (*he);
  2732. }
  2733. }
/* fetch new monotonic and realtime times from the kernel */
/* also detect if there was a timejump, and act accordingly */
inline_speed void
time_update (EV_P_ ev_tstamp max_block)
{
#if EV_USE_MONOTONIC
  if (expect_true (have_monotonic))
    {
      int i;
      ev_tstamp odiff = rtmn_diff;

      mn_now = get_clock ();

      /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
      /* interpolate in the meantime */
      if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
        {
          ev_rt_now = rtmn_diff + mn_now;
          return;
        }

      now_floor = mn_now;
      ev_rt_now = ev_time ();

      /* loop a few times, before making important decisions.
       * on the choice of "4": one iteration isn't enough,
       * in case we get preempted during the calls to
       * ev_time and get_clock. a second call is almost guaranteed
       * to succeed in that case, though. and looping a few more times
       * doesn't hurt either as we only do this on time-jumps or
       * in the unlikely event of having been preempted here.
       */
      for (i = 4; --i; )
        {
          ev_tstamp diff;
          rtmn_diff = ev_rt_now - mn_now;

          diff = odiff - rtmn_diff;

          /* if the realtime/monotonic offset barely changed, there was no jump */
          if (expect_true ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP))
            return; /* all is well */

          ev_rt_now = ev_time ();
          mn_now = get_clock ();
          now_floor = mn_now;
        }

      /* a realtime jump was detected */
      /* no timer adjustment, as the monotonic clock doesn't jump */
      /* timers_reschedule (EV_A_ rtmn_diff - odiff) */
# if EV_PERIODIC_ENABLE
      periodics_reschedule (EV_A);
# endif
    }
  else
#endif
    {
      /* no monotonic clock: jump detection has to use the realtime clock alone */
      ev_rt_now = ev_time ();

      if (expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP))
        {
          /* adjust timers. this is easy, as the offset is the same for all of them */
          timers_reschedule (EV_A_ ev_rt_now - mn_now);
#if EV_PERIODIC_ENABLE
          periodics_reschedule (EV_A);
#endif
        }

      mn_now = ev_rt_now;
    }
}
/* the main event loop: repeatedly gather and dispatch events until no
 * watchers keep the loop alive, ev_break is called, or ONCE/NOWAIT flags
 * end the iteration; returns the number of still-active watchers */
int
ev_run (EV_P_ int flags)
{
#if EV_FEATURE_API
  ++loop_depth;
#endif

  assert (("libev: ev_loop recursion during release detected", loop_done != EVBREAK_RECURSE));

  loop_done = EVBREAK_CANCEL;

  EV_INVOKE_PENDING; /* in case we recurse, ensure ordering stays nice and clean */

  do
    {
#if EV_VERIFY >= 2
      ev_verify (EV_A);
#endif

#ifndef _WIN32
      if (expect_false (curpid)) /* penalise the forking check even more */
        if (expect_false (getpid () != curpid))
          {
            curpid = getpid ();
            postfork = 1;
          }
#endif

#if EV_FORK_ENABLE
      /* we might have forked, so queue fork handlers */
      if (expect_false (postfork))
        if (forkcnt)
          {
            queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
            EV_INVOKE_PENDING;
          }
#endif

#if EV_PREPARE_ENABLE
      /* queue prepare watchers (and execute them) */
      if (expect_false (preparecnt))
        {
          queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
          EV_INVOKE_PENDING;
        }
#endif

      /* a prepare/fork callback may have called ev_break */
      if (expect_false (loop_done))
        break;

      /* we might have forked, so reify kernel state if necessary */
      if (expect_false (postfork))
        loop_fork (EV_A);

      /* update fd-related kernel structures */
      fd_reify (EV_A);

      /* calculate blocking time */
      {
        ev_tstamp waittime  = 0.;
        ev_tstamp sleeptime = 0.;

        /* remember old timestamp for io_blocktime calculation */
        ev_tstamp prev_mn_now = mn_now;

        /* update time to cancel out callback processing overhead */
        time_update (EV_A_ 1e100);

        /* from now on, we want a pipe-wake-up */
        pipe_write_wanted = 1;

        ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */

        /* only block when nothing demands an immediate poll */
        if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
          {
            waittime = MAX_BLOCKTIME;

            /* cap the wait at the nearest timer expiry */
            if (timercnt)
              {
                ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now;
                if (waittime > to) waittime = to;
              }

#if EV_PERIODIC_ENABLE
            /* ...and at the nearest periodic expiry */
            if (periodiccnt)
              {
                ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now;
                if (waittime > to) waittime = to;
              }
#endif

            /* don't let timeouts decrease the waittime below timeout_blocktime */
            if (expect_false (waittime < timeout_blocktime))
              waittime = timeout_blocktime;

            /* at this point, we NEED to wait, so we have to ensure */
            /* to pass a minimum nonzero value to the backend */
            if (expect_false (waittime < backend_mintime))
              waittime = backend_mintime;

            /* extra check because io_blocktime is commonly 0 */
            if (expect_false (io_blocktime))
              {
                sleeptime = io_blocktime - (mn_now - prev_mn_now);

                if (sleeptime > waittime - backend_mintime)
                  sleeptime = waittime - backend_mintime;

                if (expect_true (sleeptime > 0.))
                  {
                    ev_sleep (sleeptime);
                    waittime -= sleeptime;
                  }
              }
          }

#if EV_FEATURE_API
        ++loop_count;
#endif

        assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */
        backend_poll (EV_A_ waittime);
        assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */

        pipe_write_wanted = 0; /* just an optimisation, no fence needed */

        ECB_MEMORY_FENCE_ACQUIRE;
        if (pipe_write_skipped)
          {
            /* a wake-up arrived while we skipped writing the pipe; deliver it now */
            assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w)));
            ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
          }

        /* update ev_rt_now, do magic */
        time_update (EV_A_ waittime + sleeptime);
      }

      /* queue pending timers and reschedule them */
      timers_reify (EV_A); /* relative timers called last */
#if EV_PERIODIC_ENABLE
      periodics_reify (EV_A); /* absolute timers called first */
#endif

#if EV_IDLE_ENABLE
      /* queue idle watchers unless other events are pending */
      idle_reify (EV_A);
#endif

#if EV_CHECK_ENABLE
      /* queue check watchers, to be executed first */
      if (expect_false (checkcnt))
        queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
#endif

      EV_INVOKE_PENDING;
    }
  while (expect_true (
    activecnt
    && !loop_done
    && !(flags & (EVRUN_ONCE | EVRUN_NOWAIT))
  ));

  /* EVBREAK_ONE only breaks this (possibly nested) ev_run invocation */
  if (loop_done == EVBREAK_ONE)
    loop_done = EVBREAK_CANCEL;

#if EV_FEATURE_API
  --loop_depth;
#endif

  return activecnt;
}
/* request that ev_run stops; how is EVBREAK_ONE (innermost invocation)
 * or EVBREAK_ALL, examined by ev_run's loop condition */
void
ev_break (EV_P_ int how) EV_THROW
{
  loop_done = how;
}
/* add a reference that keeps ev_run from returning */
void
ev_ref (EV_P) EV_THROW
{
  ++activecnt;
}
/* drop a reference; when activecnt reaches zero, ev_run returns */
void
ev_unref (EV_P) EV_THROW
{
  --activecnt;
}
/* force an update of ev_rt_now/mn_now from the kernel clocks */
void
ev_now_update (EV_P) EV_THROW
{
  /* 1e100 as max_block effectively disables the jump-vs-block heuristic */
  time_update (EV_A_ 1e100);
}
/* record current time before a suspend; paired with ev_resume */
void
ev_suspend (EV_P) EV_THROW
{
  ev_now_update (EV_A);
}
/* resume after ev_suspend: shift all relative timers by the time spent
 * suspended so they do not all fire at once */
void
ev_resume (EV_P) EV_THROW
{
  ev_tstamp mn_prev = mn_now;

  ev_now_update (EV_A);
  timers_reschedule (EV_A_ mn_now - mn_prev);
#if EV_PERIODIC_ENABLE
  /* TODO: really do this? */
  periodics_reschedule (EV_A);
#endif
}
  2966. /*****************************************************************************/
  2967. /* singly-linked list management, used when the expected list length is short */
/* prepend elem to the singly-linked watcher list at *head (O(1)) */
inline_size void
wlist_add (WL *head, WL elem)
{
  elem->next = *head;
  *head = elem;
}
  2974. inline_size void
  2975. wlist_del (WL *head, WL elem)
  2976. {
  2977. while (*head)
  2978. {
  2979. if (expect_true (*head == elem))
  2980. {
  2981. *head = elem->next;
  2982. break;
  2983. }
  2984. head = &(*head)->next;
  2985. }
  2986. }
/* internal, faster, version of ev_clear_pending */
inline_speed void
clear_pending (EV_P_ W w)
{
  if (w->pending)
    {
      /* keep the queue slot, but point it at the dummy pending_w watcher
       * so the already-queued event becomes a harmless no-op */
      pendings [ABSPRI (w)][w->pending - 1].w = (W)&pending_w;
      w->pending = 0;
    }
}
/* public variant of clear_pending: additionally returns the event mask
 * of the cancelled pending event, or 0 if nothing was pending */
int
ev_clear_pending (EV_P_ void *w) EV_THROW
{
  W w_ = (W)w;
  int pending = w_->pending;

  if (expect_true (pending))
    {
      ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1;

      /* redirect the queued slot to the dummy watcher instead of
       * shuffling the pending array */
      p->w = (W)&pending_w;
      w_->pending = 0;

      return p->events;
    }
  else
    return 0;
}
  3012. inline_size void
  3013. pri_adjust (EV_P_ W w)
  3014. {
  3015. int pri = ev_priority (w);
  3016. pri = pri < EV_MINPRI ? EV_MINPRI : pri;
  3017. pri = pri > EV_MAXPRI ? EV_MAXPRI : pri;
  3018. ev_set_priority (w, pri);
  3019. }
/* common start code for all watchers: clamp priority, record the
 * watcher-type-specific active value, and keep the loop referenced */
inline_speed void
ev_start (EV_P_ W w, int active)
{
  pri_adjust (EV_A_ w);
  w->active = active;
  ev_ref (EV_A);
}
/* common stop code for all watchers: drop the loop reference and
 * mark the watcher inactive */
inline_size void
ev_stop (EV_P_ W w)
{
  ev_unref (EV_A);
  w->active = 0;
}
  3033. /*****************************************************************************/
/* start watching a file descriptor: add the watcher to the per-fd list
 * and request reification of the fd in the backend */
void noinline
ev_io_start (EV_P_ ev_io *w) EV_THROW
{
  int fd = w->fd;

  if (expect_false (ev_is_active (w)))
    return;

  assert (("libev: ev_io_start called with negative fd", fd >= 0));
  assert (("libev: ev_io_start called with illegal event mask", !(w->events & ~(EV__IOFDSET | EV_READ | EV_WRITE))));

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, 1);
  array_needsize (ANFD, anfds, anfdmax, fd + 1, array_init_zero);
  wlist_add (&anfds[fd].head, (WL)w);

  /* common bug, apparently */
  assert (("libev: ev_io_start called with corrupted watcher", ((WL)w)->next != (WL)w));

  /* & binds tighter than |: forward EV__IOFDSET (if set) plus the reify request */
  fd_change (EV_A_ fd, w->events & EV__IOFDSET | EV_ANFD_REIFY);
  w->events &= ~EV__IOFDSET;

  EV_FREQUENT_CHECK;
}
/* stop watching a file descriptor: unlink from the per-fd list and
 * schedule the fd for re-reification in the backend */
void noinline
ev_io_stop (EV_P_ ev_io *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));

  EV_FREQUENT_CHECK;

  wlist_del (&anfds[w->fd].head, (WL)w);
  ev_stop (EV_A_ (W)w);

  fd_change (EV_A_ w->fd, EV_ANFD_REIFY);

  EV_FREQUENT_CHECK;
}
/* start a relative timer: convert the "after" value into an absolute
 * monotonic time and insert the watcher into the timer heap */
void noinline
ev_timer_start (EV_P_ ev_timer *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  /* "at" is relative while the timer is inactive, absolute while active */
  ev_at (w) += mn_now;

  assert (("libev: ev_timer_start called with negative timer repeat value", w->repeat >= 0.));

  EV_FREQUENT_CHECK;

  ++timercnt;
  /* the active value doubles as the watcher's index into the heap */
  ev_start (EV_A_ (W)w, timercnt + HEAP0 - 1);
  array_needsize (ANHE, timers, timermax, ev_active (w) + 1, EMPTY2);
  ANHE_w (timers [ev_active (w)]) = (WT)w;
  ANHE_at_cache (timers [ev_active (w)]);
  upheap (timers, ev_active (w));

  EV_FREQUENT_CHECK;

  /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/
}
/* stop a timer: remove it from the heap (filling the hole with the last
 * element) and convert "at" back to a relative value */
void noinline
ev_timer_stop (EV_P_ ev_timer *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    assert (("libev: internal timer heap corruption", ANHE_w (timers [active]) == (WT)w));

    --timercnt;

    /* unless we removed the last element, move it into the hole and fix the heap */
    if (expect_true (active < timercnt + HEAP0))
      {
        timers [active] = timers [timercnt + HEAP0];
        adjustheap (timers, timercnt, active);
      }
  }

  /* restore the relative "after" semantics of an inactive timer */
  ev_at (w) -= mn_now;

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
/* restart a timer as a repeating timeout: if active and repeating,
 * just push the expiry forward; if active and non-repeating, stop it;
 * if inactive but repeating, start it fresh */
void noinline
ev_timer_again (EV_P_ ev_timer *w) EV_THROW
{
  EV_FREQUENT_CHECK;

  clear_pending (EV_A_ (W)w);

  if (ev_is_active (w))
    {
      if (w->repeat)
        {
          ev_at (w) = mn_now + w->repeat;
          ANHE_at_cache (timers [ev_active (w)]);
          /* expiry moved; restore the heap property around this entry */
          adjustheap (timers, timercnt, ev_active (w));
        }
      else
        ev_timer_stop (EV_A_ w);
    }
  else if (w->repeat)
    {
      ev_at (w) = w->repeat;
      ev_timer_start (EV_A_ w);
    }

  EV_FREQUENT_CHECK;
}
/* return the time until the timer fires; for an inactive timer, "at" is
 * already relative, so nothing is subtracted */
ev_tstamp
ev_timer_remaining (EV_P_ ev_timer *w) EV_THROW
{
  return ev_at (w) - (ev_is_active (w) ? mn_now : 0.);
}
  3131. #if EV_PERIODIC_ENABLE
/* start a periodic watcher: compute its first absolute expiry (via the
 * user reschedule callback, interval recalculation, or a plain offset)
 * and insert it into the periodics heap */
void noinline
ev_periodic_start (EV_P_ ev_periodic *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  if (w->reschedule_cb)
    ev_at (w) = w->reschedule_cb (w, ev_rt_now);
  else if (w->interval)
    {
      assert (("libev: ev_periodic_start called with negative interval value", w->interval >= 0.));
      periodic_recalc (EV_A_ w);
    }
  else
    ev_at (w) = w->offset; /* one-shot at an absolute time */

  EV_FREQUENT_CHECK;

  ++periodiccnt;
  /* the active value doubles as the watcher's index into the heap */
  ev_start (EV_A_ (W)w, periodiccnt + HEAP0 - 1);
  array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1, EMPTY2);
  ANHE_w (periodics [ev_active (w)]) = (WT)w;
  ANHE_at_cache (periodics [ev_active (w)]);
  upheap (periodics, ev_active (w));

  EV_FREQUENT_CHECK;

  /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/
}
/* stop a periodic watcher: remove it from the heap, filling the hole
 * with the last element */
void noinline
ev_periodic_stop (EV_P_ ev_periodic *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    assert (("libev: internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w));

    --periodiccnt;

    /* unless we removed the last element, move it into the hole and fix the heap */
    if (expect_true (active < periodiccnt + HEAP0))
      {
        periodics [active] = periodics [periodiccnt + HEAP0];
        adjustheap (periodics, periodiccnt, active);
      }
  }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
/* re-arm a periodic after its parameters changed; implemented as a
 * full stop/start cycle for simplicity */
void noinline
ev_periodic_again (EV_P_ ev_periodic *w) EV_THROW
{
  /* TODO: use adjustheap and recalculation */
  ev_periodic_stop (EV_A_ w);
  ev_periodic_start (EV_A_ w);
}
  3183. #endif
  3184. #ifndef SA_RESTART
  3185. # define SA_RESTART 0
  3186. #endif
  3187. #if EV_SIGNAL_ENABLE
/* start a signal watcher: deliver the signal either through a signalfd
 * (linux, lazily created) or through a conventional handler + self-pipe */
void noinline
ev_signal_start (EV_P_ ev_signal *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG));

#if EV_MULTIPLICITY
  assert (("libev: a signal must not be attached to two different loops",
           !signals [w->signum - 1].loop || signals [w->signum - 1].loop == loop));

  signals [w->signum - 1].loop = EV_A;
  ECB_MEMORY_FENCE_RELEASE;
#endif

  EV_FREQUENT_CHECK;

#if EV_USE_SIGNALFD
  if (sigfd == -2) /* -2 means signalfd has not been tried yet */
    {
      sigfd = signalfd (-1, &sigfd_set, SFD_NONBLOCK | SFD_CLOEXEC);
      if (sigfd < 0 && errno == EINVAL)
        sigfd = signalfd (-1, &sigfd_set, 0); /* retry without flags */

      if (sigfd >= 0)
        {
          fd_intern (sigfd); /* doing it twice will not hurt */

          sigemptyset (&sigfd_set);

          ev_io_init (&sigfd_w, sigfdcb, sigfd, EV_READ);
          ev_set_priority (&sigfd_w, EV_MAXPRI);
          ev_io_start (EV_A_ &sigfd_w);
          ev_unref (EV_A); /* signalfd watcher should not keep loop alive */
        }
    }

  if (sigfd >= 0)
    {
      /* TODO: check .head */
      /* block the signal for normal delivery and add it to the signalfd set */
      sigaddset (&sigfd_set, w->signum);
      sigprocmask (SIG_BLOCK, &sigfd_set, 0);

      signalfd (sigfd, &sigfd_set, 0);
    }
#endif

  ev_start (EV_A_ (W)w, 1);
  wlist_add (&signals [w->signum - 1].head, (WL)w);

  /* install a handler only for the first watcher on this signal */
  if (!((WL)w)->next)
# if EV_USE_SIGNALFD
    if (sigfd < 0) /*TODO*/
# endif
      {
# ifdef _WIN32
        evpipe_init (EV_A);

        signal (w->signum, ev_sighandler);
# else
        struct sigaction sa;

        evpipe_init (EV_A);

        sa.sa_handler = ev_sighandler;
        sigfillset (&sa.sa_mask);
        sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */
        sigaction (w->signum, &sa, 0);

        if (origflags & EVFLAG_NOSIGMASK)
          {
            sigemptyset (&sa.sa_mask);
            sigaddset (&sa.sa_mask, w->signum);
            sigprocmask (SIG_UNBLOCK, &sa.sa_mask, 0);
          }
#endif
      }

  EV_FREQUENT_CHECK;
}
/* stop a signal watcher; when the last watcher for a signal goes away,
 * detach the loop and restore default signal handling */
void noinline
ev_signal_stop (EV_P_ ev_signal *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  wlist_del (&signals [w->signum - 1].head, (WL)w);
  ev_stop (EV_A_ (W)w);

  if (!signals [w->signum - 1].head)
    {
#if EV_MULTIPLICITY
      signals [w->signum - 1].loop = 0; /* unattach from signal */
#endif
#if EV_USE_SIGNALFD
      if (sigfd >= 0)
        {
          /* remove the signal from the signalfd set and unblock it again */
          sigset_t ss;

          sigemptyset (&ss);
          sigaddset (&ss, w->signum);
          sigdelset (&sigfd_set, w->signum);

          signalfd (sigfd, &sigfd_set, 0);
          sigprocmask (SIG_UNBLOCK, &ss, 0);
        }
      else
#endif
        signal (w->signum, SIG_DFL);
    }

  EV_FREQUENT_CHECK;
}
  3282. #endif
  3283. #if EV_CHILD_ENABLE
/* start a child watcher: add it to the pid hash chain (default loop only) */
void
ev_child_start (EV_P_ ev_child *w) EV_THROW
{
#if EV_MULTIPLICITY
  assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
#endif
  if (expect_false (ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, 1);
  wlist_add (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w);

  EV_FREQUENT_CHECK;
}
/* stop a child watcher: remove it from its pid hash chain */
void
ev_child_stop (EV_P_ ev_child *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  wlist_del (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w);
  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
  3308. #endif
  3309. #if EV_STAT_ENABLE
  3310. # ifdef _WIN32
  3311. # undef lstat
  3312. # define lstat(a,b) _stati64 (a,b)
  3313. # endif
  3314. #define DEF_STAT_INTERVAL 5.0074891
  3315. #define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */
  3316. #define MIN_STAT_INTERVAL 0.1074891
  3317. static void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents);
  3318. #if EV_USE_INOTIFY
  3319. /* the * 2 is to allow for alignment padding, which for some reason is >> 8 */
  3320. # define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX)
/* attach an inotify watch for a stat watcher and decide how often (if at
 * all) to additionally poll with the stat timer */
static void noinline
infy_add (EV_P_ ev_stat *w)
{
  w->wd = inotify_add_watch (fs_fd, w->path,
                             IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY
                             | IN_CREATE | IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO
                             | IN_DONT_FOLLOW | IN_MASK_ADD);

  if (w->wd >= 0)
    {
      struct statfs sfs;

      /* now local changes will be tracked by inotify, but remote changes won't */
      /* unless the filesystem is known to be local, we therefore still poll */
      /* also do poll on <2.6.25, but with normal frequency */

      if (!fs_2625)
        w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;
      else if (!statfs (w->path, &sfs)
               && (sfs.f_type == 0x1373 /* devfs */
                   || sfs.f_type == 0x4006 /* fat */
                   || sfs.f_type == 0x4d44 /* msdos */
                   || sfs.f_type == 0xEF53 /* ext2/3 */
                   || sfs.f_type == 0x72b6 /* jffs2 */
                   || sfs.f_type == 0x858458f6 /* ramfs */
                   || sfs.f_type == 0x5346544e /* ntfs */
                   || sfs.f_type == 0x3153464a /* jfs */
                   || sfs.f_type == 0x9123683e /* btrfs */
                   || sfs.f_type == 0x52654973 /* reiser3 */
                   || sfs.f_type == 0x01021994 /* tmpfs */
                   || sfs.f_type == 0x58465342 /* xfs */))
        w->timer.repeat = 0.; /* filesystem is local, kernel new enough */
      else
        w->timer.repeat = w->interval ? w->interval : NFS_STAT_INTERVAL; /* remote, use reduced frequency */
    }
  else
    {
      /* can't use inotify, continue to stat */
      w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;

      /* if path is not there, monitor some parent directory for speedup hints */
      /* note that exceeding the hardcoded path limit is not a correctness issue, */
      /* but an efficiency issue only */
      if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096)
        {
          char path [4096];
          strcpy (path, w->path);

          /* walk up the path one component at a time until a watchable ancestor is found */
          do
            {
              int mask = IN_MASK_ADD | IN_DELETE_SELF | IN_MOVE_SELF
                         | (errno == EACCES ? IN_ATTRIB : IN_CREATE | IN_MOVED_TO);

              char *pend = strrchr (path, '/');

              if (!pend || pend == path)
                break;

              *pend = 0;
              w->wd = inotify_add_watch (fs_fd, path, mask);
            }
          while (w->wd < 0 && (errno == ENOENT || errno == EACCES));
        }
    }

  if (w->wd >= 0)
    wlist_add (&fs_hash [w->wd & ((EV_INOTIFY_HASHSIZE) - 1)].head, (WL)w);

  /* now re-arm timer, if required */
  /* the ref/unref dance preserves the watcher's non-refcounting status */
  if (ev_is_active (&w->timer)) ev_ref (EV_A);
  ev_timer_again (EV_A_ &w->timer);
  if (ev_is_active (&w->timer)) ev_unref (EV_A);
}
/* detach a stat watcher from its inotify watch descriptor */
static void noinline
infy_del (EV_P_ ev_stat *w)
{
  int slot;
  int wd = w->wd;

  if (wd < 0)
    return;

  w->wd = -2; /* mark as "no inotify watch" */
  slot = wd & ((EV_INOTIFY_HASHSIZE) - 1);
  wlist_del (&fs_hash [slot].head, (WL)w);

  /* remove this watcher, if others are watching it, they will rearm */
  inotify_rm_watch (fs_fd, wd);
}
/* dispatch an inotify event to all stat watchers hashed to this watch
 * descriptor; slot < 0 (queue overflow) means rescan every hash slot */
static void noinline
infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev)
{
  if (slot < 0)
    /* overflow, need to check for all hash slots */
    for (slot = 0; slot < (EV_INOTIFY_HASHSIZE); ++slot)
      infy_wd (EV_A_ slot, wd, ev);
  else
    {
      WL w_;

      for (w_ = fs_hash [slot & ((EV_INOTIFY_HASHSIZE) - 1)].head; w_; )
        {
          ev_stat *w = (ev_stat *)w_;
          w_ = w_->next; /* lets us remove this watcher and all before it */

          if (w->wd == wd || wd == -1)
            {
              /* the watch descriptor went away; re-register from scratch */
              if (ev->mask & (IN_IGNORED | IN_UNMOUNT | IN_DELETE_SELF))
                {
                  wlist_del (&fs_hash [slot & ((EV_INOTIFY_HASHSIZE) - 1)].head, (WL)w);
                  w->wd = -1;
                  infy_add (EV_A_ w); /* re-add, no matter what */
                }

              /* run the stat check synchronously to pick up the change */
              stat_timer_cb (EV_A_ &w->timer, 0);
            }
        }
    }
}
/* io callback on the inotify fd: read a batch of inotify events and
 * dispatch each (events are variable-length, hence the offset walk) */
static void
infy_cb (EV_P_ ev_io *w, int revents)
{
  char buf [EV_INOTIFY_BUFSIZE];
  int ofs;
  int len = read (fs_fd, buf, sizeof (buf));

  /* a failed read (len < 0) simply skips the loop */
  for (ofs = 0; ofs < len; )
    {
      struct inotify_event *ev = (struct inotify_event *)(buf + ofs);
      infy_wd (EV_A_ ev->wd, ev->wd, ev);
      ofs += sizeof (struct inotify_event) + ev->len;
    }
}
/* detect whether the running kernel is new enough (>= 2.6.25, i.e.
 * version code 0x020619) for inotify to be fully trusted */
inline_size void ecb_cold
ev_check_2625 (EV_P)
{
  /* kernels < 2.6.25 are borked
   * http://www.ussg.indiana.edu/hypermail/linux/kernel/0711.3/1208.html
   */
  if (ev_linux_version () < 0x020619)
    return;

  fs_2625 = 1;
}
/* create a new inotify fd, preferring the flag-taking inotify_init1
 * (CLOEXEC|NONBLOCK) and falling back to plain inotify_init */
inline_size int
infy_newfd (void)
{
#if defined IN_CLOEXEC && defined IN_NONBLOCK
  int fd = inotify_init1 (IN_CLOEXEC | IN_NONBLOCK);
  if (fd >= 0)
    return fd;
#endif
  return inotify_init ();
}
/* lazily initialise inotify support for this loop (fs_fd == -2 means
 * "not yet attempted"); on success, starts the internal fs_w io watcher */
inline_size void
infy_init (EV_P)
{
  if (fs_fd != -2)
    return;

  fs_fd = -1; /* mark as attempted even if creation fails below */

  ev_check_2625 (EV_A);

  fs_fd = infy_newfd ();

  if (fs_fd >= 0)
    {
      fd_intern (fs_fd);
      ev_io_init (&fs_w, infy_cb, fs_fd, EV_READ);
      ev_set_priority (&fs_w, EV_MAXPRI);
      ev_io_start (EV_A_ &fs_w);
      ev_unref (EV_A); /* the internal watcher must not keep the loop alive */
    }
}
/* after a fork, the inotify fd is unusable: recreate it and re-register
 * every stat watcher (or fall back to stat polling on failure) */
inline_size void
infy_fork (EV_P)
{
  int slot;

  if (fs_fd < 0)
    return;

  ev_ref (EV_A); /* balance the ev_unref done in infy_init */
  ev_io_stop (EV_A_ &fs_w);
  close (fs_fd);
  fs_fd = infy_newfd ();

  if (fs_fd >= 0)
    {
      fd_intern (fs_fd);
      ev_io_set (&fs_w, fs_fd, EV_READ);
      ev_io_start (EV_A_ &fs_w);
      ev_unref (EV_A);
    }

  for (slot = 0; slot < (EV_INOTIFY_HASHSIZE); ++slot)
    {
      WL w_ = fs_hash [slot].head;
      fs_hash [slot].head = 0; /* infy_add re-links each watcher */

      while (w_)
        {
          ev_stat *w = (ev_stat *)w_;
          w_ = w_->next; /* lets us add this watcher */

          w->wd = -1;

          if (fs_fd >= 0)
            infy_add (EV_A_ w); /* re-add, no matter what */
          else
            {
              /* no inotify available anymore: fall back to timer polling */
              w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;
              if (ev_is_active (&w->timer)) ev_ref (EV_A);
              ev_timer_again (EV_A_ &w->timer);
              if (ev_is_active (&w->timer)) ev_unref (EV_A);
            }
        }
    }
}
  3512. #endif
  3513. #ifdef _WIN32
  3514. # define EV_LSTAT(p,b) _stati64 (p, b)
  3515. #else
  3516. # define EV_LSTAT(p,b) lstat (p, b)
  3517. #endif
/* refresh w->attr via lstat; st_nlink doubles as the "path exists" flag:
 * 0 on stat failure, forced to at least 1 on success */
void
ev_stat_stat (EV_P_ ev_stat *w) EV_THROW
{
  if (lstat (w->path, &w->attr) < 0)
    w->attr.st_nlink = 0;
  else if (!w->attr.st_nlink)
    w->attr.st_nlink = 1;
}
/* timer callback for stat polling: re-stat the path, compare field by
 * field against the previous attributes and feed EV_STAT on any change */
static void noinline
stat_timer_cb (EV_P_ ev_timer *w_, int revents)
{
  /* recover the enclosing ev_stat from its embedded timer member */
  ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer));

  ev_statdata prev = w->attr;
  ev_stat_stat (EV_A_ w);

  /* memcmp doesn't work on netbsd, they.... do stuff to their struct stat */
  if (
    prev.st_dev != w->attr.st_dev
    || prev.st_ino != w->attr.st_ino
    || prev.st_mode != w->attr.st_mode
    || prev.st_nlink != w->attr.st_nlink
    || prev.st_uid != w->attr.st_uid
    || prev.st_gid != w->attr.st_gid
    || prev.st_rdev != w->attr.st_rdev
    || prev.st_size != w->attr.st_size
    || prev.st_atime != w->attr.st_atime
    || prev.st_mtime != w->attr.st_mtime
    || prev.st_ctime != w->attr.st_ctime
  ) {
      /* we only update w->prev on actual differences */
      /* in case we test more often than invoke the callback, */
      /* to ensure that prev is always different to attr */
      w->prev = prev;

#if EV_USE_INOTIFY
      if (fs_fd >= 0)
        {
          /* re-register the inotify watch (the path may have been replaced) */
          infy_del (EV_A_ w);
          infy_add (EV_A_ w);
          ev_stat_stat (EV_A_ w); /* avoid race... */
        }
#endif

      ev_feed_event (EV_A_ w, EV_STAT);
    }
}
/* start a stat watcher: take an initial snapshot, set up the polling
 * timer, and prefer inotify-based change notification when available */
void
ev_stat_start (EV_P_ ev_stat *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  ev_stat_stat (EV_A_ w);

  /* clamp a nonzero user interval to the supported minimum */
  if (w->interval < MIN_STAT_INTERVAL && w->interval)
    w->interval = MIN_STAT_INTERVAL;

  ev_timer_init (&w->timer, stat_timer_cb, 0., w->interval ? w->interval : DEF_STAT_INTERVAL);
  ev_set_priority (&w->timer, ev_priority (w));

#if EV_USE_INOTIFY
  infy_init (EV_A);

  if (fs_fd >= 0)
    infy_add (EV_A_ w);
  else
#endif
    {
      /* pure polling: the internal timer must not keep the loop alive */
      ev_timer_again (EV_A_ &w->timer);
      ev_unref (EV_A);
    }

  ev_start (EV_A_ (W)w, 1);

  EV_FREQUENT_CHECK;
}
/* stop a stat watcher: drop the inotify registration and the internal
 * polling timer (restoring the loop reference it had given up) */
void
ev_stat_stop (EV_P_ ev_stat *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

#if EV_USE_INOTIFY
  infy_del (EV_A_ w);
#endif

  if (ev_is_active (&w->timer))
    {
      ev_ref (EV_A); /* balance the ev_unref done when the timer was armed */
      ev_timer_stop (EV_A_ &w->timer);
    }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
  3602. #endif
  3603. #if EV_IDLE_ENABLE
/* start an idle watcher: append it to the per-priority idle array and
 * bump the global idle count used by idle_reify */
void
ev_idle_start (EV_P_ ev_idle *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  /* clamp priority before ABSPRI is used for array indexing below */
  pri_adjust (EV_A_ (W)w);

  EV_FREQUENT_CHECK;

  {
    int active = ++idlecnt [ABSPRI (w)];

    ++idleall;
    ev_start (EV_A_ (W)w, active); /* active is the 1-based index into idles[pri] */

    array_needsize (ev_idle *, idles [ABSPRI (w)], idlemax [ABSPRI (w)], active, EMPTY2);
    idles [ABSPRI (w)][active - 1] = w;
  }

  EV_FREQUENT_CHECK;
}
  3620. void
  3621. ev_idle_stop (EV_P_ ev_idle *w) EV_THROW
  3622. {
  3623. clear_pending (EV_A_ (W)w);
  3624. if (expect_false (!ev_is_active (w)))
  3625. return;
  3626. EV_FREQUENT_CHECK;
  3627. {
  3628. int active = ev_active (w);
  3629. idles [ABSPRI (w)][active - 1] = idles [ABSPRI (w)][--idlecnt [ABSPRI (w)]];
  3630. ev_active (idles [ABSPRI (w)][active - 1]) = active;
  3631. ev_stop (EV_A_ (W)w);
  3632. --idleall;
  3633. }
  3634. EV_FREQUENT_CHECK;
  3635. }
  3636. #endif
  3637. #if EV_PREPARE_ENABLE
  3638. void
  3639. ev_prepare_start (EV_P_ ev_prepare *w) EV_THROW
  3640. {
  3641. if (expect_false (ev_is_active (w)))
  3642. return;
  3643. EV_FREQUENT_CHECK;
  3644. ev_start (EV_A_ (W)w, ++preparecnt);
  3645. array_needsize (ev_prepare *, prepares, preparemax, preparecnt, EMPTY2);
  3646. prepares [preparecnt - 1] = w;
  3647. EV_FREQUENT_CHECK;
  3648. }
  3649. void
  3650. ev_prepare_stop (EV_P_ ev_prepare *w) EV_THROW
  3651. {
  3652. clear_pending (EV_A_ (W)w);
  3653. if (expect_false (!ev_is_active (w)))
  3654. return;
  3655. EV_FREQUENT_CHECK;
  3656. {
  3657. int active = ev_active (w);
  3658. prepares [active - 1] = prepares [--preparecnt];
  3659. ev_active (prepares [active - 1]) = active;
  3660. }
  3661. ev_stop (EV_A_ (W)w);
  3662. EV_FREQUENT_CHECK;
  3663. }
  3664. #endif
  3665. #if EV_CHECK_ENABLE
  3666. void
  3667. ev_check_start (EV_P_ ev_check *w) EV_THROW
  3668. {
  3669. if (expect_false (ev_is_active (w)))
  3670. return;
  3671. EV_FREQUENT_CHECK;
  3672. ev_start (EV_A_ (W)w, ++checkcnt);
  3673. array_needsize (ev_check *, checks, checkmax, checkcnt, EMPTY2);
  3674. checks [checkcnt - 1] = w;
  3675. EV_FREQUENT_CHECK;
  3676. }
  3677. void
  3678. ev_check_stop (EV_P_ ev_check *w) EV_THROW
  3679. {
  3680. clear_pending (EV_A_ (W)w);
  3681. if (expect_false (!ev_is_active (w)))
  3682. return;
  3683. EV_FREQUENT_CHECK;
  3684. {
  3685. int active = ev_active (w);
  3686. checks [active - 1] = checks [--checkcnt];
  3687. ev_active (checks [active - 1]) = active;
  3688. }
  3689. ev_stop (EV_A_ (W)w);
  3690. EV_FREQUENT_CHECK;
  3691. }
  3692. #endif
  3693. #if EV_EMBED_ENABLE
/* Sweep the embedded loop once: run it non-blockingly so pending events
 * in the other loop are processed without stalling this one. */
void noinline
ev_embed_sweep (EV_P_ ev_embed *w) EV_THROW
{
  ev_run (w->other, EVRUN_NOWAIT);
}
/* I/O callback for the embedded loop's backend fd: recover the ev_embed
 * watcher from its embedded ev_io member (container-of via offsetof). */
static void
embed_io_cb (EV_P_ ev_io *io, int revents)
{
  ev_embed *w = (ev_embed *)(((char *)io) - offsetof (ev_embed, io));

  /* if the user supplied a callback, let them sweep; otherwise sweep
   * the embedded loop ourselves */
  if (ev_cb (w))
    ev_feed_event (EV_A_ (W)w, EV_EMBED);
  else
    ev_run (w->other, EVRUN_NOWAIT);
}
/* Prepare callback: before this loop blocks, flush pending fd changes
 * of the embedded loop so its backend fd reflects reality. */
static void
embed_prepare_cb (EV_P_ ev_prepare *prepare, int revents)
{
  ev_embed *w = (ev_embed *)(((char *)prepare) - offsetof (ev_embed, prepare));

  {
    /* EV_P declares a new loop variable, shadowing the outer loop with
     * the embedded one for the statements below */
    EV_P = w->other;

    while (fdchangecnt)
      {
        fd_reify (EV_A);
        ev_run (EV_A_ EVRUN_NOWAIT);
      }
  }
}
/* Fork callback: after a fork, re-register with the embedded loop by
 * stopping the embed watcher, forking/sweeping the other loop, and
 * starting the embed watcher again (re-acquiring the backend fd). */
static void
embed_fork_cb (EV_P_ ev_fork *fork_w, int revents)
{
  ev_embed *w = (ev_embed *)(((char *)fork_w) - offsetof (ev_embed, fork));

  ev_embed_stop (EV_A_ w);

  {
    /* operate on the embedded loop inside this scope */
    EV_P = w->other;

    ev_loop_fork (EV_A);
    ev_run (EV_A_ EVRUN_NOWAIT);
  }

  ev_embed_start (EV_A_ w);
}
#if 0
/* currently unused: idle callback that would stop its own idle watcher */
static void
embed_idle_cb (EV_P_ ev_idle *idle, int revents)
{
  ev_idle_stop (EV_A_ idle);
}
#endif
/* Start an embed watcher: hook the embedded loop's backend fd into this
 * loop and install the helper prepare/fork watchers that keep the two
 * loops in sync. */
void
ev_embed_start (EV_P_ ev_embed *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  {
    /* inspect the embedded loop: it must use an embeddable backend */
    EV_P = w->other;
    assert (("libev: loop to be embedded is not embeddable", backend & ev_embeddable_backends ()));
    ev_io_init (&w->io, embed_io_cb, backend_fd, EV_READ);
  }

  EV_FREQUENT_CHECK;

  ev_set_priority (&w->io, ev_priority (w));
  ev_io_start (EV_A_ &w->io);

  /* the prepare watcher runs at minimum priority so it fires after all
   * user prepare watchers */
  ev_prepare_init (&w->prepare, embed_prepare_cb);
  ev_set_priority (&w->prepare, EV_MINPRI);
  ev_prepare_start (EV_A_ &w->prepare);

  ev_fork_init (&w->fork, embed_fork_cb);
  ev_fork_start (EV_A_ &w->fork);

  /*ev_idle_init (&w->idle, embed_idle_cb);*/

  ev_start (EV_A_ (W)w, 1);

  EV_FREQUENT_CHECK;
}
/* Stop an embed watcher: tear down the io/prepare/fork helper watchers
 * installed by ev_embed_start, then deactivate the watcher itself. */
void
ev_embed_stop (EV_P_ ev_embed *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  ev_io_stop      (EV_A_ &w->io);
  ev_prepare_stop (EV_A_ &w->prepare);
  ev_fork_stop    (EV_A_ &w->fork);

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
  3775. #endif
  3776. #if EV_FORK_ENABLE
  3777. void
  3778. ev_fork_start (EV_P_ ev_fork *w) EV_THROW
  3779. {
  3780. if (expect_false (ev_is_active (w)))
  3781. return;
  3782. EV_FREQUENT_CHECK;
  3783. ev_start (EV_A_ (W)w, ++forkcnt);
  3784. array_needsize (ev_fork *, forks, forkmax, forkcnt, EMPTY2);
  3785. forks [forkcnt - 1] = w;
  3786. EV_FREQUENT_CHECK;
  3787. }
  3788. void
  3789. ev_fork_stop (EV_P_ ev_fork *w) EV_THROW
  3790. {
  3791. clear_pending (EV_A_ (W)w);
  3792. if (expect_false (!ev_is_active (w)))
  3793. return;
  3794. EV_FREQUENT_CHECK;
  3795. {
  3796. int active = ev_active (w);
  3797. forks [active - 1] = forks [--forkcnt];
  3798. ev_active (forks [active - 1]) = active;
  3799. }
  3800. ev_stop (EV_A_ (W)w);
  3801. EV_FREQUENT_CHECK;
  3802. }
  3803. #endif
  3804. #if EV_CLEANUP_ENABLE
  3805. void
  3806. ev_cleanup_start (EV_P_ ev_cleanup *w) EV_THROW
  3807. {
  3808. if (expect_false (ev_is_active (w)))
  3809. return;
  3810. EV_FREQUENT_CHECK;
  3811. ev_start (EV_A_ (W)w, ++cleanupcnt);
  3812. array_needsize (ev_cleanup *, cleanups, cleanupmax, cleanupcnt, EMPTY2);
  3813. cleanups [cleanupcnt - 1] = w;
  3814. /* cleanup watchers should never keep a refcount on the loop */
  3815. ev_unref (EV_A);
  3816. EV_FREQUENT_CHECK;
  3817. }
  3818. void
  3819. ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_THROW
  3820. {
  3821. clear_pending (EV_A_ (W)w);
  3822. if (expect_false (!ev_is_active (w)))
  3823. return;
  3824. EV_FREQUENT_CHECK;
  3825. ev_ref (EV_A);
  3826. {
  3827. int active = ev_active (w);
  3828. cleanups [active - 1] = cleanups [--cleanupcnt];
  3829. ev_active (cleanups [active - 1]) = active;
  3830. }
  3831. ev_stop (EV_A_ (W)w);
  3832. EV_FREQUENT_CHECK;
  3833. }
  3834. #endif
  3835. #if EV_ASYNC_ENABLE
  3836. void
  3837. ev_async_start (EV_P_ ev_async *w) EV_THROW
  3838. {
  3839. if (expect_false (ev_is_active (w)))
  3840. return;
  3841. w->sent = 0;
  3842. evpipe_init (EV_A);
  3843. EV_FREQUENT_CHECK;
  3844. ev_start (EV_A_ (W)w, ++asynccnt);
  3845. array_needsize (ev_async *, asyncs, asyncmax, asynccnt, EMPTY2);
  3846. asyncs [asynccnt - 1] = w;
  3847. EV_FREQUENT_CHECK;
  3848. }
  3849. void
  3850. ev_async_stop (EV_P_ ev_async *w) EV_THROW
  3851. {
  3852. clear_pending (EV_A_ (W)w);
  3853. if (expect_false (!ev_is_active (w)))
  3854. return;
  3855. EV_FREQUENT_CHECK;
  3856. {
  3857. int active = ev_active (w);
  3858. asyncs [active - 1] = asyncs [--asynccnt];
  3859. ev_active (asyncs [active - 1]) = active;
  3860. }
  3861. ev_stop (EV_A_ (W)w);
  3862. EV_FREQUENT_CHECK;
  3863. }
/* Signal an async watcher: flag it as sent, then wake the loop through
 * the internal pipe. The flag is set before the write so the loop sees
 * it once woken. NOTE(review): intended to be callable from other
 * threads/signal handlers — presumably evpipe_write provides the needed
 * synchronisation; confirm against its definition. */
void
ev_async_send (EV_P_ ev_async *w) EV_THROW
{
  w->sent = 1;
  evpipe_write (EV_A_ &async_pending);
}
  3870. #endif
  3871. /*****************************************************************************/
/* Bookkeeping for ev_once: a heap-allocated pair of watchers (io +
 * timeout) plus the user callback; freed when either watcher fires. */
struct ev_once
{
  ev_io io;        /* optional fd watcher (only started when fd >= 0) */
  ev_timer to;     /* optional timeout (only started when timeout >= 0.) */
  void (*cb)(int revents, void *arg);  /* user callback */
  void *arg;       /* opaque user data passed to cb */
};
/* Common completion path for ev_once: stop both watchers, release the
 * allocation, then invoke the user callback. cb/arg are copied out
 * first because `once` is freed before the call — the callback may
 * therefore safely re-enter ev_once or longjmp. */
static void
once_cb (EV_P_ struct ev_once *once, int revents)
{
  void (*cb)(int revents, void *arg) = once->cb;
  void *arg = once->arg;

  ev_io_stop    (EV_A_ &once->io);
  ev_timer_stop (EV_A_ &once->to);
  ev_free (once);

  cb (revents, arg);
}
/* io trampoline for ev_once: recover the ev_once container and merge in
 * any pending timer event before completing. */
static void
once_cb_io (EV_P_ ev_io *w, int revents)
{
  struct ev_once *once = (struct ev_once *)(((char *)w) - offsetof (struct ev_once, io));

  once_cb (EV_A_ once, revents | ev_clear_pending (EV_A_ &once->to));
}
/* timer trampoline for ev_once: recover the ev_once container and merge
 * in any pending io event before completing. */
static void
once_cb_to (EV_P_ ev_timer *w, int revents)
{
  struct ev_once *once = (struct ev_once *)(((char *)w) - offsetof (struct ev_once, to));

  once_cb (EV_A_ once, revents | ev_clear_pending (EV_A_ &once->io));
}
/* One-shot convenience: invoke cb exactly once when fd becomes ready
 * for `events` and/or after `timeout` seconds, whichever happens first.
 * A negative fd disables the io part; a negative timeout disables the
 * timer part. On allocation failure the callback is invoked immediately
 * with EV_ERROR (plus all event bits) instead. */
void
ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) EV_THROW
{
  struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once));

  if (expect_false (!once))
    {
      /* report OOM through the callback rather than failing silently */
      cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMER, arg);
      return;
    }

  once->cb  = cb;
  once->arg = arg;

  ev_init (&once->io, once_cb_io);
  if (fd >= 0)
    {
      ev_io_set (&once->io, fd, events);
      ev_io_start (EV_A_ &once->io);
    }

  ev_init (&once->to, once_cb_to);
  if (timeout >= 0.)
    {
      ev_timer_set (&once->to, timeout, 0.);
      ev_timer_start (EV_A_ &once->to);
    }
}
  3925. /*****************************************************************************/
  3926. #if EV_WALK_ENABLE
/* Walk all watchers of the requested `types`, invoking cb once per
 * watcher. Internal helper watchers (the loop's pipe watcher, inotify's
 * io watcher, and the embed helpers) are skipped or reported under
 * their owning high-level watcher type instead: embed io watchers are
 * reported as EV_EMBED, and stat timers as EV_STAT, each via
 * container-of pointer arithmetic. List-based watchers are walked via a
 * saved `wn` so cb may stop/remove the current watcher. */
void ecb_cold
ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) EV_THROW
{
  int i, j;
  ev_watcher_list *wl, *wn;

  if (types & (EV_IO | EV_EMBED))
    for (i = 0; i < anfdmax; ++i)
      for (wl = anfds [i].head; wl; )
        {
          wn = wl->next;  /* cb may unlink wl, so fetch next first */

#if EV_EMBED_ENABLE
          /* an embed watcher's internal io watcher: report the embed */
          if (ev_cb ((ev_io *)wl) == embed_io_cb)
            {
              if (types & EV_EMBED)
                cb (EV_A_ EV_EMBED, ((char *)wl) - offsetof (struct ev_embed, io));
            }
          else
#endif
#if EV_USE_INOTIFY
          /* the inotify fd watcher is purely internal: skip it */
          if (ev_cb ((ev_io *)wl) == infy_cb)
            ;
          else
#endif
          /* skip the loop's own wakeup pipe watcher */
          if ((ev_io *)wl != &pipe_w)
            if (types & EV_IO)
              cb (EV_A_ EV_IO, wl);

          wl = wn;
        }

  if (types & (EV_TIMER | EV_STAT))
    for (i = timercnt + HEAP0; i-- > HEAP0; )
#if EV_STAT_ENABLE
      /*TODO: timer is not always active*/
      /* a stat watcher's polling timer: report the stat watcher */
      if (ev_cb ((ev_timer *)ANHE_w (timers [i])) == stat_timer_cb)
        {
          if (types & EV_STAT)
            cb (EV_A_ EV_STAT, ((char *)ANHE_w (timers [i])) - offsetof (struct ev_stat, timer));
        }
      else
#endif
      if (types & EV_TIMER)
        cb (EV_A_ EV_TIMER, ANHE_w (timers [i]));

#if EV_PERIODIC_ENABLE
  if (types & EV_PERIODIC)
    for (i = periodiccnt + HEAP0; i-- > HEAP0; )
      cb (EV_A_ EV_PERIODIC, ANHE_w (periodics [i]));
#endif

#if EV_IDLE_ENABLE
  if (types & EV_IDLE)
    for (j = NUMPRI; j--; )
      for (i = idlecnt [j]; i--; )
        cb (EV_A_ EV_IDLE, idles [j][i]);
#endif

#if EV_FORK_ENABLE
  if (types & EV_FORK)
    for (i = forkcnt; i--; )
      /* skip the embed watcher's internal fork helper */
      if (ev_cb (forks [i]) != embed_fork_cb)
        cb (EV_A_ EV_FORK, forks [i]);
#endif

#if EV_ASYNC_ENABLE
  if (types & EV_ASYNC)
    for (i = asynccnt; i--; )
      cb (EV_A_ EV_ASYNC, asyncs [i]);
#endif

#if EV_PREPARE_ENABLE
  if (types & EV_PREPARE)
    for (i = preparecnt; i--; )
# if EV_EMBED_ENABLE
      /* skip the embed watcher's internal prepare helper */
      if (ev_cb (prepares [i]) != embed_prepare_cb)
# endif
        cb (EV_A_ EV_PREPARE, prepares [i]);
#endif

#if EV_CHECK_ENABLE
  if (types & EV_CHECK)
    for (i = checkcnt; i--; )
      cb (EV_A_ EV_CHECK, checks [i]);
#endif

#if EV_SIGNAL_ENABLE
  if (types & EV_SIGNAL)
    for (i = 0; i < EV_NSIG - 1; ++i)
      for (wl = signals [i].head; wl; )
        {
          wn = wl->next;
          cb (EV_A_ EV_SIGNAL, wl);
          wl = wn;
        }
#endif

#if EV_CHILD_ENABLE
  if (types & EV_CHILD)
    for (i = (EV_PID_HASHSIZE); i--; )
      for (wl = childs [i]; wl; )
        {
          wn = wl->next;
          cb (EV_A_ EV_CHILD, wl);
          wl = wn;
        }
#endif
  /* EV_STAT  0x00001000: stat data changed */
  /* EV_EMBED 0x00010000: embedded event loop needs sweep */
}
  4026. #endif
  4027. #if EV_MULTIPLICITY
  4028. #include "ev_wrap.h"
  4029. #endif