/*
 * libev event processing core, watcher management
 *
 * Copyright (c) 2007,2008,2009,2010,2011,2012,2013 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

/* this big block deduces configuration from config.h */
#ifndef EV_STANDALONE
# ifdef EV_CONFIG_H
#  include EV_CONFIG_H
# else
#  include "config.h"
# endif

# if HAVE_FLOOR
#  ifndef EV_USE_FLOOR
#   define EV_USE_FLOOR 1
#  endif
# endif

# if HAVE_CLOCK_SYSCALL
#  ifndef EV_USE_CLOCK_SYSCALL
#   define EV_USE_CLOCK_SYSCALL 1
#   ifndef EV_USE_REALTIME
#    define EV_USE_REALTIME 0
#   endif
#   ifndef EV_USE_MONOTONIC
#    define EV_USE_MONOTONIC 1
#   endif
#  endif
# elif !defined EV_USE_CLOCK_SYSCALL
#  define EV_USE_CLOCK_SYSCALL 0
# endif

# if HAVE_CLOCK_GETTIME
#  ifndef EV_USE_MONOTONIC
#   define EV_USE_MONOTONIC 1
#  endif
#  ifndef EV_USE_REALTIME
#   define EV_USE_REALTIME 0
#  endif
# else
#  ifndef EV_USE_MONOTONIC
#   define EV_USE_MONOTONIC 0
#  endif
#  ifndef EV_USE_REALTIME
#   define EV_USE_REALTIME 0
#  endif
# endif

# if HAVE_NANOSLEEP
#  ifndef EV_USE_NANOSLEEP
#   define EV_USE_NANOSLEEP EV_FEATURE_OS
#  endif
# else
#  undef EV_USE_NANOSLEEP
#  define EV_USE_NANOSLEEP 0
# endif

# if HAVE_SELECT && HAVE_SYS_SELECT_H
#  ifndef EV_USE_SELECT
#   define EV_USE_SELECT EV_FEATURE_BACKENDS
#  endif
# else
#  undef EV_USE_SELECT
#  define EV_USE_SELECT 0
# endif

# if HAVE_POLL && HAVE_POLL_H
#  ifndef EV_USE_POLL
#   define EV_USE_POLL EV_FEATURE_BACKENDS
#  endif
# else
#  undef EV_USE_POLL
#  define EV_USE_POLL 0
# endif

# if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H
#  ifndef EV_USE_EPOLL
#   define EV_USE_EPOLL EV_FEATURE_BACKENDS
#  endif
# else
#  undef EV_USE_EPOLL
#  define EV_USE_EPOLL 0
# endif

# if HAVE_KQUEUE && HAVE_SYS_EVENT_H
#  ifndef EV_USE_KQUEUE
#   define EV_USE_KQUEUE EV_FEATURE_BACKENDS
#  endif
# else
#  undef EV_USE_KQUEUE
#  define EV_USE_KQUEUE 0
# endif

# if HAVE_PORT_H && HAVE_PORT_CREATE
#  ifndef EV_USE_PORT
#   define EV_USE_PORT EV_FEATURE_BACKENDS
#  endif
# else
#  undef EV_USE_PORT
#  define EV_USE_PORT 0
# endif

# if HAVE_INOTIFY_INIT && HAVE_SYS_INOTIFY_H
#  ifndef EV_USE_INOTIFY
#   define EV_USE_INOTIFY EV_FEATURE_OS
#  endif
# else
#  undef EV_USE_INOTIFY
#  define EV_USE_INOTIFY 0
# endif

# if HAVE_SIGNALFD && HAVE_SYS_SIGNALFD_H
#  ifndef EV_USE_SIGNALFD
#   define EV_USE_SIGNALFD EV_FEATURE_OS
#  endif
# else
#  undef EV_USE_SIGNALFD
#  define EV_USE_SIGNALFD 0
# endif

# if HAVE_EVENTFD
#  ifndef EV_USE_EVENTFD
#   define EV_USE_EVENTFD EV_FEATURE_OS
#  endif
# else
#  undef EV_USE_EVENTFD
#  define EV_USE_EVENTFD 0
# endif

#endif

#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <stddef.h>

#include <stdio.h>

#include <assert.h>
#include <errno.h>
#include <sys/types.h>
#include <time.h>
#include <limits.h>

#include <signal.h>

#ifdef EV_H
# include EV_H
#else
# include "ev.h"
#endif

#if EV_NO_THREADS
# undef EV_NO_SMP
# define EV_NO_SMP 1
# undef ECB_NO_THREADS
# define ECB_NO_THREADS 1
#endif

#if EV_NO_SMP
# undef EV_NO_SMP
# define ECB_NO_SMP 1
#endif

#ifndef _WIN32
# include <sys/time.h>
# include <sys/wait.h>
# include <unistd.h>
#else
# include <io.h>
# define WIN32_LEAN_AND_MEAN
# include <winsock2.h>
# include <windows.h>
# ifndef EV_SELECT_IS_WINSOCKET
#  define EV_SELECT_IS_WINSOCKET 1
# endif
# undef EV_AVOID_STDIO
#endif

/* OS X, in its infinite idiocy, actually HARDCODES
 * a limit of 1024 into their select. Where people have brains,
 * OS X engineers apparently have a vacuum. Or maybe they were
 * ordered to have a vacuum, or they do anything for money.
 * This might help. Or not.
 */
#define _DARWIN_UNLIMITED_SELECT 1

/* this block tries to deduce configuration from header-defined symbols and defaults */

/* try to deduce the maximum number of signals on this platform */
#if defined EV_NSIG
/* use what's provided */
#elif defined NSIG
# define EV_NSIG (NSIG)
#elif defined _NSIG
# define EV_NSIG (_NSIG)
#elif defined SIGMAX
# define EV_NSIG (SIGMAX+1)
#elif defined SIG_MAX
# define EV_NSIG (SIG_MAX+1)
#elif defined _SIG_MAX
# define EV_NSIG (_SIG_MAX+1)
#elif defined MAXSIG
# define EV_NSIG (MAXSIG+1)
#elif defined MAX_SIG
# define EV_NSIG (MAX_SIG+1)
#elif defined SIGARRAYSIZE
# define EV_NSIG (SIGARRAYSIZE) /* Assume ary[SIGARRAYSIZE] */
#elif defined _sys_nsig
# define EV_NSIG (_sys_nsig) /* Solaris 2.5 */
#else
# define EV_NSIG (8 * sizeof (sigset_t) + 1)
#endif

#ifndef EV_USE_FLOOR
# define EV_USE_FLOOR 0
#endif

#ifndef EV_USE_CLOCK_SYSCALL
# if __linux && __GLIBC__ == 2 && __GLIBC_MINOR__ < 17
#  define EV_USE_CLOCK_SYSCALL EV_FEATURE_OS
# else
#  define EV_USE_CLOCK_SYSCALL 0
# endif
#endif

#if !(_POSIX_TIMERS > 0)
# ifndef EV_USE_MONOTONIC
#  define EV_USE_MONOTONIC 0
# endif
# ifndef EV_USE_REALTIME
#  define EV_USE_REALTIME 0
# endif
#endif

#ifndef EV_USE_MONOTONIC
# if defined _POSIX_MONOTONIC_CLOCK && _POSIX_MONOTONIC_CLOCK >= 0
#  define EV_USE_MONOTONIC EV_FEATURE_OS
# else
#  define EV_USE_MONOTONIC 0
# endif
#endif

#ifndef EV_USE_REALTIME
# define EV_USE_REALTIME !EV_USE_CLOCK_SYSCALL
#endif

#ifndef EV_USE_NANOSLEEP
# if _POSIX_C_SOURCE >= 199309L
#  define EV_USE_NANOSLEEP EV_FEATURE_OS
# else
#  define EV_USE_NANOSLEEP 0
# endif
#endif

#ifndef EV_USE_SELECT
# define EV_USE_SELECT EV_FEATURE_BACKENDS
#endif

#ifndef EV_USE_POLL
# ifdef _WIN32
#  define EV_USE_POLL 0
# else
#  define EV_USE_POLL EV_FEATURE_BACKENDS
# endif
#endif

#ifndef EV_USE_EPOLL
# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
#  define EV_USE_EPOLL EV_FEATURE_BACKENDS
# else
#  define EV_USE_EPOLL 0
# endif
#endif

#ifndef EV_USE_KQUEUE
# define EV_USE_KQUEUE 0
#endif

#ifndef EV_USE_PORT
# define EV_USE_PORT 0
#endif

#ifndef EV_USE_INOTIFY
# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
#  define EV_USE_INOTIFY EV_FEATURE_OS
# else
#  define EV_USE_INOTIFY 0
# endif
#endif

#ifndef EV_PID_HASHSIZE
# define EV_PID_HASHSIZE EV_FEATURE_DATA ? 16 : 1
#endif

#ifndef EV_INOTIFY_HASHSIZE
# define EV_INOTIFY_HASHSIZE EV_FEATURE_DATA ? 16 : 1
#endif

#ifndef EV_USE_EVENTFD
# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
#  define EV_USE_EVENTFD EV_FEATURE_OS
# else
#  define EV_USE_EVENTFD 0
# endif
#endif

#ifndef EV_USE_SIGNALFD
# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
#  define EV_USE_SIGNALFD EV_FEATURE_OS
# else
#  define EV_USE_SIGNALFD 0
# endif
#endif

#if 0 /* debugging */
# define EV_VERIFY 3
# define EV_USE_4HEAP 1
# define EV_HEAP_CACHE_AT 1
#endif

#ifndef EV_VERIFY
# define EV_VERIFY (EV_FEATURE_API ? 1 : 0)
#endif

#ifndef EV_USE_4HEAP
# define EV_USE_4HEAP EV_FEATURE_DATA
#endif

#ifndef EV_HEAP_CACHE_AT
# define EV_HEAP_CACHE_AT EV_FEATURE_DATA
#endif

#ifdef ANDROID
/* supposedly, android doesn't typedef fd_mask */
# undef EV_USE_SELECT
# define EV_USE_SELECT 0
/* supposedly, we need to include syscall.h, not sys/syscall.h, so just disable */
# undef EV_USE_CLOCK_SYSCALL
# define EV_USE_CLOCK_SYSCALL 0
#endif

/* aix's poll.h seems to cause lots of trouble */
#ifdef _AIX
/* AIX has a completely broken poll.h header */
# undef EV_USE_POLL
# define EV_USE_POLL 0
#endif

/* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */
/* which makes programs even slower. might work on other unices, too. */
#if EV_USE_CLOCK_SYSCALL
# include <sys/syscall.h>
# ifdef SYS_clock_gettime
#  define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts))
#  undef EV_USE_MONOTONIC
#  define EV_USE_MONOTONIC 1
# else
#  undef EV_USE_CLOCK_SYSCALL
#  define EV_USE_CLOCK_SYSCALL 0
# endif
#endif
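
/* for illustration: when the syscall is available, a call such as
 *   clock_gettime (CLOCK_MONOTONIC, &ts)
 * expands via the macro above to
 *   syscall (SYS_clock_gettime, (CLOCK_MONOTONIC), (&ts))
 * trading a somewhat slower call for not having to link against librt/libpthread
 * on old glibc */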

/* this block fixes any misconfiguration where we know we run into trouble otherwise */

#ifndef CLOCK_MONOTONIC
# undef EV_USE_MONOTONIC
# define EV_USE_MONOTONIC 0
#endif

#ifndef CLOCK_REALTIME
# undef EV_USE_REALTIME
# define EV_USE_REALTIME 0
#endif

#if !EV_STAT_ENABLE
# undef EV_USE_INOTIFY
# define EV_USE_INOTIFY 0
#endif

#if !EV_USE_NANOSLEEP
/* hp-ux has it in sys/time.h, which we unconditionally include above */
# if !defined _WIN32 && !defined __hpux
#  include <sys/select.h>
# endif
#endif

#if EV_USE_INOTIFY
# include <sys/statfs.h>
# include <sys/inotify.h>
/* some very old inotify.h headers don't have IN_DONT_FOLLOW */
# ifndef IN_DONT_FOLLOW
#  undef EV_USE_INOTIFY
#  define EV_USE_INOTIFY 0
# endif
#endif

#if EV_USE_EVENTFD
/* our minimum requirement is glibc 2.7 which has the stub, but not the header */
# include <stdint.h>
# ifndef EFD_NONBLOCK
#  define EFD_NONBLOCK O_NONBLOCK
# endif
# ifndef EFD_CLOEXEC
#  ifdef O_CLOEXEC
#   define EFD_CLOEXEC O_CLOEXEC
#  else
#   define EFD_CLOEXEC 02000000
#  endif
# endif
EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
#endif

#if EV_USE_SIGNALFD
/* our minimum requirement is glibc 2.7 which has the stub, but not the header */
# include <stdint.h>
# ifndef SFD_NONBLOCK
#  define SFD_NONBLOCK O_NONBLOCK
# endif
# ifndef SFD_CLOEXEC
#  ifdef O_CLOEXEC
#   define SFD_CLOEXEC O_CLOEXEC
#  else
#   define SFD_CLOEXEC 02000000
#  endif
# endif
EV_CPP (extern "C") int signalfd (int fd, const sigset_t *mask, int flags);

struct signalfd_siginfo
{
  uint32_t ssi_signo;
  char pad[128 - sizeof (uint32_t)];
};
#endif

/**/

#if EV_VERIFY >= 3
# define EV_FREQUENT_CHECK ev_verify (EV_A)
#else
# define EV_FREQUENT_CHECK do { } while (0)
#endif

/*
 * This is used to work around floating point rounding problems.
 * This value is good at least till the year 4000.
 */
#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */
/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */

#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */

#define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
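
/* example: for t = 2.75, EV_TS_SET (ts, t) stores tv_sec = 2 and
 * tv_nsec = 750000000, i.e. the whole seconds plus the fraction scaled to
 * nanoseconds; EV_TV_SET does the same with microseconds for a struct timeval */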

/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
/* ECB.H BEGIN */
/*
 * libecb - http://software.schmorp.de/pkg/libecb
 *
 * Copyright (©) 2009-2015 Marc Alexander Lehmann <libecb@schmorp.de>
 * Copyright (©) 2011 Emanuele Giaquinta
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

#ifndef ECB_H
#define ECB_H

/* 16 bits major, 16 bits minor */
#define ECB_VERSION 0x00010004

#ifdef _WIN32
  typedef signed char int8_t;
  typedef unsigned char uint8_t;
  typedef signed short int16_t;
  typedef unsigned short uint16_t;
  typedef signed int int32_t;
  typedef unsigned int uint32_t;
  #if __GNUC__
    typedef signed long long int64_t;
    typedef unsigned long long uint64_t;
  #else /* _MSC_VER || __BORLANDC__ */
    typedef signed __int64 int64_t;
    typedef unsigned __int64 uint64_t;
  #endif
  #ifdef _WIN64
    #define ECB_PTRSIZE 8
    typedef uint64_t uintptr_t;
    typedef int64_t intptr_t;
  #else
    #define ECB_PTRSIZE 4
    typedef uint32_t uintptr_t;
    typedef int32_t intptr_t;
  #endif
#else
  #include <inttypes.h>
  #if UINTMAX_MAX > 0xffffffffU
    #define ECB_PTRSIZE 8
  #else
    #define ECB_PTRSIZE 4
  #endif
#endif

#define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__)
#define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64)

/* work around x32 idiocy by defining proper macros */
#if ECB_GCC_AMD64 || ECB_MSVC_AMD64
  #if _ILP32
    #define ECB_AMD64_X32 1
  #else
    #define ECB_AMD64 1
  #endif
#endif

/* many compilers define _GNUC_ to some versions but then only implement
 * what their idiot authors think are the "more important" extensions,
 * causing enormous grief in return for some better fake benchmark numbers.
 * or so.
 * we try to detect these and simply assume they are not gcc - if they have
 * an issue with that they should have done it right in the first place.
 */
#if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__
  #define ECB_GCC_VERSION(major,minor) 0
#else
  #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
#endif

#define ECB_CLANG_VERSION(major,minor) (__clang_major__ > (major) || (__clang_major__ == (major) && __clang_minor__ >= (minor)))

#if __clang__ && defined __has_builtin
  #define ECB_CLANG_BUILTIN(x) __has_builtin (x)
#else
  #define ECB_CLANG_BUILTIN(x) 0
#endif

#if __clang__ && defined __has_extension
  #define ECB_CLANG_EXTENSION(x) __has_extension (x)
#else
  #define ECB_CLANG_EXTENSION(x) 0
#endif

#define ECB_CPP (__cplusplus+0)
#define ECB_CPP11 (__cplusplus >= 201103L)

#if ECB_CPP
  #define ECB_C 0
  #define ECB_STDC_VERSION 0
#else
  #define ECB_C 1
  #define ECB_STDC_VERSION __STDC_VERSION__
#endif

#define ECB_C99 (ECB_STDC_VERSION >= 199901L)
#define ECB_C11 (ECB_STDC_VERSION >= 201112L)

#if ECB_CPP
  #define ECB_EXTERN_C extern "C"
  #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
  #define ECB_EXTERN_C_END }
#else
  #define ECB_EXTERN_C extern
  #define ECB_EXTERN_C_BEG
  #define ECB_EXTERN_C_END
#endif

/*****************************************************************************/

/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
/* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */

#if ECB_NO_THREADS
  #define ECB_NO_SMP 1
#endif

#if ECB_NO_SMP
  #define ECB_MEMORY_FENCE do { } while (0)
#endif

/* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/compiler_ref/compiler_builtins.html */
#if __xlC__ && ECB_CPP
  #include <builtins.h>
#endif

#ifndef ECB_MEMORY_FENCE
  #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
    #if __i386 || __i386__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif ECB_GCC_AMD64
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
    #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
       || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
    #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
       || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
    #elif __aarch64__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory")
    #elif (__sparc || __sparc__) && !__sparcv8
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
    #elif defined __s390__ || defined __s390x__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
    #elif defined __mips__
      /* GNU/Linux emulates sync on mips1 architectures, so we force its use */
      /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
      #define ECB_MEMORY_FENCE __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
    #elif defined __alpha__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory")
    #elif defined __hppa__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
    #elif defined __ia64__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory")
    #elif defined __m68k__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
    #elif defined __m88k__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory")
    #elif defined __sh__
      #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
    #endif
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if ECB_GCC_VERSION(4,7)
    /* see comment below (stdatomic.h) about the C11 memory model. */
    #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
    #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
    #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
  #elif ECB_CLANG_EXTENSION(c_atomic)
    /* see comment below (stdatomic.h) about the C11 memory model. */
    #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
    #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
    #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
  #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
    #define ECB_MEMORY_FENCE __sync_synchronize ()
  #elif _MSC_VER >= 1500 /* VC++ 2008 */
    /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
    #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
    #define ECB_MEMORY_FENCE _ReadWriteBarrier (); MemoryBarrier()
    #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */
    #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier()
  #elif _MSC_VER >= 1400 /* VC++ 2005 */
    #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
    #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
    #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
    #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
  #elif defined _WIN32
    #include <WinNT.h>
    #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
  #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
    #include <mbarrier.h>
    #define ECB_MEMORY_FENCE __machine_rw_barrier ()
    #define ECB_MEMORY_FENCE_ACQUIRE __machine_r_barrier ()
    #define ECB_MEMORY_FENCE_RELEASE __machine_w_barrier ()
  #elif __xlC__
    #define ECB_MEMORY_FENCE __sync ()
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if ECB_C11 && !defined __STDC_NO_ATOMICS__
    /* we assume that these memory fences work on all variables/all memory accesses, */
    /* not just C11 atomics and atomic accesses */
    #include <stdatomic.h>
    /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
    /* any fence other than seq_cst, which isn't very efficient for us. */
    /* Why that is, we don't know - either the C11 memory model is quite useless */
    /* for most usages, or gcc and clang have a bug */
    /* I *currently* lean towards the latter, and inefficiently implement */
    /* all three of ecb's fences as a seq_cst fence */
    /* Update, gcc-4.8 generates mfence for all c++ fences, but nothing */
    /* for all __atomic_thread_fence's except seq_cst */
    #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
  #endif
#endif

#ifndef ECB_MEMORY_FENCE
  #if !ECB_AVOID_PTHREADS
    /*
     * if you get undefined symbol references to pthread_mutex_lock,
     * or failure to find pthread.h, then you should implement
     * the ECB_MEMORY_FENCE operations for your cpu/compiler
     * OR provide pthread.h and link against the posix thread library
     * of your system.
     */
    #include <pthread.h>
    #define ECB_NEEDS_PTHREADS 1
    #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1

    static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
    #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
  #endif
#endif

#if !defined ECB_MEMORY_FENCE_ACQUIRE && defined ECB_MEMORY_FENCE
  #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
#endif

#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
  #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
#endif

/*****************************************************************************/

#if ECB_CPP
  #define ecb_inline static inline
#elif ECB_GCC_VERSION(2,5)
  #define ecb_inline static __inline__
#elif ECB_C99
  #define ecb_inline static inline
#else
  #define ecb_inline static
#endif

#if ECB_GCC_VERSION(3,3)
  #define ecb_restrict __restrict__
#elif ECB_C99
  #define ecb_restrict restrict
#else
  #define ecb_restrict
#endif

typedef int ecb_bool;

#define ECB_CONCAT_(a, b) a ## b
#define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)
#define ECB_STRINGIFY_(a) # a
#define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)
#define ECB_STRINGIFY_EXPR(expr) ((expr), ECB_STRINGIFY_ (expr))
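
/* the two-level expansion ensures the argument is macro-expanded before being
 * stringified, e.g. ECB_STRINGIFY (ECB_VERSION) yields "0x00010004", whereas a
 * single-level # would yield "ECB_VERSION" */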

#define ecb_function_ ecb_inline

#if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8)
  #define ecb_attribute(attrlist) __attribute__ (attrlist)
#else
  #define ecb_attribute(attrlist)
#endif

#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_constant_p)
  #define ecb_is_constant(expr) __builtin_constant_p (expr)
#else
  /* possible C11 impl for integral types
  typedef struct ecb_is_constant_struct ecb_is_constant_struct;
  #define ecb_is_constant(expr) _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr)), ecb_is_constant_struct *: 0, default: 1)) */

  #define ecb_is_constant(expr) 0
#endif

#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_expect)
  #define ecb_expect(expr,value) __builtin_expect ((expr),(value))
#else
  #define ecb_expect(expr,value) (expr)
#endif

#if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_prefetch)
  #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
#else
  #define ecb_prefetch(addr,rw,locality)
#endif

/* no emulation for ecb_decltype */
#if ECB_CPP11
  // older implementations might have problems with decltype(x)::type, work around it
  template<class T> struct ecb_decltype_t { typedef T type; };
  #define ecb_decltype(x) ecb_decltype_t<decltype (x)>::type
#elif ECB_GCC_VERSION(3,0) || ECB_CLANG_VERSION(2,8)
  #define ecb_decltype(x) __typeof__ (x)
#endif

#if _MSC_VER >= 1300
  #define ecb_deprecated __declspec (deprecated)
#else
  #define ecb_deprecated ecb_attribute ((__deprecated__))
#endif

#if _MSC_VER >= 1500
  #define ecb_deprecated_message(msg) __declspec (deprecated (msg))
#elif ECB_GCC_VERSION(4,5)
  #define ecb_deprecated_message(msg) ecb_attribute ((__deprecated__ (msg)))
#else
  #define ecb_deprecated_message(msg) ecb_deprecated
#endif

#if _MSC_VER >= 1400
  #define ecb_noinline __declspec (noinline)
#else
  #define ecb_noinline ecb_attribute ((__noinline__))
#endif

#define ecb_unused ecb_attribute ((__unused__))
#define ecb_const ecb_attribute ((__const__))
#define ecb_pure ecb_attribute ((__pure__))

#if ECB_C11 || __IBMC_NORETURN
  /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/language_ref/noreturn.html */
  #define ecb_noreturn _Noreturn
#elif ECB_CPP11
  #define ecb_noreturn [[noreturn]]
#elif _MSC_VER >= 1200
  /* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx */
  #define ecb_noreturn __declspec (noreturn)
#else
  #define ecb_noreturn ecb_attribute ((__noreturn__))
#endif

#if ECB_GCC_VERSION(4,3)
  #define ecb_artificial ecb_attribute ((__artificial__))
  #define ecb_hot ecb_attribute ((__hot__))
  #define ecb_cold ecb_attribute ((__cold__))
#else
  #define ecb_artificial
  #define ecb_hot
  #define ecb_cold
#endif

/* put around conditional expressions if you are very sure that the */
/* expression is mostly true or mostly false. note that these return */
/* booleans, not the expression. */
#define ecb_expect_false(expr) ecb_expect (!!(expr), 0)
#define ecb_expect_true(expr) ecb_expect (!!(expr), 1)
/* for compatibility to the rest of the world */
#define ecb_likely(expr) ecb_expect_true (expr)
#define ecb_unlikely(expr) ecb_expect_false (expr)
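
/* typical use is to wrap a rarely-true condition so the compiler can lay out
 * the common path first, e.g.
 *   if (ecb_expect_false (ptr == 0))
 *     handle_allocation_failure ();
 * (handle_allocation_failure is only an illustration, not a function in this file) */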

/* count trailing zero bits and count # of one bits */
#if ECB_GCC_VERSION(3,4) \
    || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \
        && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \
        && ECB_CLANG_BUILTIN(__builtin_popcount))
  /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */
  #define ecb_ld32(x) (__builtin_clz (x) ^ 31)
  #define ecb_ld64(x) (__builtin_clzll (x) ^ 63)
  #define ecb_ctz32(x) __builtin_ctz (x)
  #define ecb_ctz64(x) __builtin_ctzll (x)
  #define ecb_popcount32(x) __builtin_popcount (x)
  /* no popcountll */
#else
  ecb_function_ ecb_const int ecb_ctz32 (uint32_t x);
  ecb_function_ ecb_const int
  ecb_ctz32 (uint32_t x)
  {
    int r = 0;

    x &= ~x + 1; /* this isolates the lowest bit */

#if ECB_branchless_on_i386
    r += !!(x & 0xaaaaaaaa) << 0;
    r += !!(x & 0xcccccccc) << 1;
    r += !!(x & 0xf0f0f0f0) << 2;
    r += !!(x & 0xff00ff00) << 3;
    r += !!(x & 0xffff0000) << 4;
#else
    if (x & 0xaaaaaaaa) r += 1;
    if (x & 0xcccccccc) r += 2;
    if (x & 0xf0f0f0f0) r += 4;
    if (x & 0xff00ff00) r += 8;
    if (x & 0xffff0000) r += 16;
#endif

    return r;
  }
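
  /* worked example: for x = 0x28, x &= ~x + 1 isolates bit 3 (0x08); that bit
   * falls under the 0xaaaaaaaa and 0xcccccccc masks only, so r = 1 + 2 = 3,
   * i.e. three trailing zero bits - each mask effectively tests one bit of the
   * index of the isolated bit */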

  ecb_function_ ecb_const int ecb_ctz64 (uint64_t x);
  ecb_function_ ecb_const int
  ecb_ctz64 (uint64_t x)
  {
    int shift = x & 0xffffffffU ? 0 : 32;
    return ecb_ctz32 (x >> shift) + shift;
  }

  ecb_function_ ecb_const int ecb_popcount32 (uint32_t x);
  ecb_function_ ecb_const int
  ecb_popcount32 (uint32_t x)
  {
    x -= (x >> 1) & 0x55555555;
    x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
    x = ((x >> 4) + x) & 0x0f0f0f0f;
    x *= 0x01010101;

    return x >> 24;
  }
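
  /* this is the classic SWAR popcount: bit pairs, then nibbles, then bytes are
   * summed in parallel, and the multiply by 0x01010101 adds all four byte
   * counts into the top byte, e.g. ecb_popcount32 (0xf0f00001) yields 9 */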

  ecb_function_ ecb_const int ecb_ld32 (uint32_t x);
  ecb_function_ ecb_const int ecb_ld32 (uint32_t x)
  {
    int r = 0;

    if (x >> 16) { x >>= 16; r += 16; }
    if (x >>  8) { x >>=  8; r +=  8; }
    if (x >>  4) { x >>=  4; r +=  4; }
    if (x >>  2) { x >>=  2; r +=  2; }
    if (x >>  1) {           r +=  1; }

    return r;
  }

  ecb_function_ ecb_const int ecb_ld64 (uint64_t x);
  ecb_function_ ecb_const int ecb_ld64 (uint64_t x)
  {
    int r = 0;

    if (x >> 32) { x >>= 32; r += 32; }

    return r + ecb_ld32 (x);
  }
#endif

ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x);
ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x);
ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); }

ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x);
ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x)
{
  return (  (x * 0x0802U & 0x22110U)
          | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
}

ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x);
ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x)
{
  x = ((x >> 1) & 0x5555) | ((x & 0x5555) << 1);
  x = ((x >> 2) & 0x3333) | ((x & 0x3333) << 2);
  x = ((x >> 4) & 0x0f0f) | ((x & 0x0f0f) << 4);
  x = ( x >> 8 ) | ( x << 8);

  return x;
}

ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x);
ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x)
{
  x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1);
  x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2);
  x = ((x >> 4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) << 4);
  x = ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8);
  x = ( x >> 16 ) | ( x << 16);

  return x;
}

/* popcount64 is only available on 64 bit cpus as gcc builtin */
/* so for this version we are lazy */
ecb_function_ ecb_const int ecb_popcount64 (uint64_t x);
ecb_function_ ecb_const int
ecb_popcount64 (uint64_t x)
{
  return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);
}

ecb_inline ecb_const uint8_t  ecb_rotl8  (uint8_t  x, unsigned int count);
ecb_inline ecb_const uint8_t  ecb_rotr8  (uint8_t  x, unsigned int count);
ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count);
ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count);
ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count);
ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count);
ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count);
ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count);

ecb_inline ecb_const uint8_t  ecb_rotl8  (uint8_t  x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); }
ecb_inline ecb_const uint8_t  ecb_rotr8  (uint8_t  x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); }
ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); }
ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); }
ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); }
ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); }
ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); }
ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); }
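
/* note: as written these rotate helpers expect 0 < count < width; a count of 0
 * would shift by the full word width in the 32 and 64 bit variants, which the
 * C standard leaves undefined */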

#if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
  #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16)
    #define ecb_bswap16(x) __builtin_bswap16 (x)
  #else
    #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
  #endif
  #define ecb_bswap32(x) __builtin_bswap32 (x)
  #define ecb_bswap64(x) __builtin_bswap64 (x)
#elif _MSC_VER
  #include <stdlib.h>
  #define ecb_bswap16(x) ((uint16_t)_byteswap_ushort ((uint16_t)(x)))
  #define ecb_bswap32(x) ((uint32_t)_byteswap_ulong ((uint32_t)(x)))
  #define ecb_bswap64(x) ((uint64_t)_byteswap_uint64 ((uint64_t)(x)))
#else
  ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x);
  ecb_function_ ecb_const uint16_t
  ecb_bswap16 (uint16_t x)
  {
    return ecb_rotl16 (x, 8);
  }

  ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x);
  ecb_function_ ecb_const uint32_t
  ecb_bswap32 (uint32_t x)
  {
    return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16);
  }

  ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x);
  ecb_function_ ecb_const uint64_t
  ecb_bswap64 (uint64_t x)
  {
    return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
  }
#endif

#if ECB_GCC_VERSION(4,5) || ECB_CLANG_BUILTIN(__builtin_unreachable)
  #define ecb_unreachable() __builtin_unreachable ()
#else
  /* this seems to work fine, but gcc always emits a warning for it :/ */
  ecb_inline ecb_noreturn void ecb_unreachable (void);
  ecb_inline ecb_noreturn void ecb_unreachable (void) { }
#endif

/* try to tell the compiler that some condition is definitely true */
#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0

ecb_inline ecb_const unsigned char ecb_byteorder_helper (void);
ecb_inline ecb_const unsigned char
ecb_byteorder_helper (void)
{
  /* the union code still generates code under pressure in gcc, */
  /* but less than using pointers, and always seems to */
  /* successfully return a constant. */
  /* the reason why we have this horrible preprocessor mess */
  /* is to avoid it in all cases, at least on common architectures */
  /* or when using a recent enough gcc version (>= 4.6) */
#if ((__i386 || __i386__) && !__VOS__) || _M_IX86 || ECB_GCC_AMD64 || ECB_MSVC_AMD64
  return 0x44;
#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  return 0x44;
#elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  return 0x11;
#else
  union
  {
    uint32_t i;
    uint8_t c;
  } u = { 0x11223344 };
  return u.c;
#endif
}

ecb_inline ecb_const ecb_bool ecb_big_endian    (void);
ecb_inline ecb_const ecb_bool ecb_big_endian    (void) { return ecb_byteorder_helper () == 0x11; }
ecb_inline ecb_const ecb_bool ecb_little_endian (void);
ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44; }
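
/* rationale: 0x44 is the lowest byte of the test constant 0x11223344, so
 * reading it back first means the least significant byte sits at the lowest
 * address, i.e. the machine is little endian; 0x11 likewise means big endian */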

#if ECB_GCC_VERSION(3,0) || ECB_C99
  #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
#else
  #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
#endif
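
/* unlike the C % operator, ecb_mod yields a non-negative result for positive n,
 * e.g. ecb_mod (-1, 5) is 4 whereas -1 % 5 is -1 under C99 truncation */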

#if ECB_CPP
  template<typename T>
  static inline T ecb_div_rd (T val, T div)
  {
    return val < 0 ? - ((-val + div - 1) / div) : (val          ) / div;
  }
  template<typename T>
  static inline T ecb_div_ru (T val, T div)
  {
    return val < 0 ? - ((-val          ) / div) : (val + div - 1) / div;
  }
#else
  #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val)              ) / (div))
  #define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val)              ) / (div)) : ((val) + (div) - 1) / (div))
#endif
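
/* these divide rounding towards negative respectively positive infinity
 * instead of towards zero, e.g. ecb_div_rd (-7, 4) is -2 and
 * ecb_div_ru (7, 4) is 2 */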

#if ecb_cplusplus_does_not_suck
  /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */
  template<typename T, int N>
  static inline int ecb_array_length (const T (&arr)[N])
  {
    return N;
  }
#else
  #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
#endif

/*******************************************************************************/
/* floating point stuff, can be disabled by defining ECB_NO_LIBM */

/* basically, everything uses "ieee pure-endian" floating point numbers */
/* the only noteworthy exception is ancient armle, which uses order 43218765 */
#if 0 \
    || __i386 || __i386__ \
    || ECB_GCC_AMD64 \
    || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
    || defined __s390__ || defined __s390x__ \
    || defined __mips__ \
    || defined __alpha__ \
    || defined __hppa__ \
    || defined __ia64__ \
    || defined __m68k__ \
    || defined __m88k__ \
    || defined __sh__ \
    || defined _M_IX86 || defined ECB_MSVC_AMD64 || defined _M_IA64 \
    || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
    || defined __aarch64__
  #define ECB_STDFP 1
  #include <string.h> /* for memcpy */
#else
  #define ECB_STDFP 0
#endif

#ifndef ECB_NO_LIBM

  #include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */

  /* only the oldest of old doesn't have this one. solaris. */
  #ifdef INFINITY
    #define ECB_INFINITY INFINITY
  #else
    #define ECB_INFINITY HUGE_VAL
  #endif

  #ifdef NAN
    #define ECB_NAN NAN
  #else
    #define ECB_NAN ECB_INFINITY
  #endif

  #if ECB_C99 || _XOPEN_VERSION >= 600 || _POSIX_VERSION >= 200112L
    #define ecb_ldexpf(x,e) ldexpf ((x), (e))
    #define ecb_frexpf(x,e) frexpf ((x), (e))
  #else
    #define ecb_ldexpf(x,e) (float) ldexp ((double) (x), (e))
    #define ecb_frexpf(x,e) (float) frexp ((double) (x), (e))
  #endif

  /* converts an ieee half/binary16 to a float */
  ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x);
  ecb_function_ ecb_const float
  ecb_binary16_to_float (uint16_t x)
  {
    int e = (x >> 10) & 0x1f;
    int m = x & 0x3ff;
    float r;

    if      (!e     ) r = ecb_ldexpf (m        ,    -24);
    else if (e != 31) r = ecb_ldexpf (m + 0x400, e - 25);
    else if (m      ) r = ECB_NAN;
    else              r = ECB_INFINITY;

    return x & 0x8000 ? -r : r;
  }
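
  /* the binary16 layout is 1 sign, 5 exponent and 10 mantissa bits; as a
   * sanity check, 0x3c00 (sign 0, e = 15, m = 0) decodes above to
   * ecb_ldexpf (0x400, 15 - 25) = 1.0f */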

  /* convert a float to ieee single/binary32 */
  ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x);
  ecb_function_ ecb_const uint32_t
  ecb_float_to_binary32 (float x)
  {
    uint32_t r;

    #if ECB_STDFP
      memcpy (&r, &x, 4);
    #else
      /* slow emulation, works for anything but -0 */
      uint32_t m;
      int e;

      if (x == 0e0f                    ) return 0x00000000U;
      if (x > +3.40282346638528860e+38f) return 0x7f800000U;
      if (x < -3.40282346638528860e+38f) return 0xff800000U;
      if (x != x                       ) return 0x7fbfffffU;

      m = ecb_frexpf (x, &e) * 0x1000000U;

      r = m & 0x80000000U;

      if (r)
        m = -m;

      if (e <= -126)
        {
          m &= 0xffffffU;
          m >>= (-125 - e);
          e = -126;
        }

      r |= (e + 126) << 23;
      r |= m & 0x7fffffU;
    #endif

    return r;
  }

  /* converts an ieee single/binary32 to a float */
  ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x);
  ecb_function_ ecb_const float
  ecb_binary32_to_float (uint32_t x)
  {
    float r;

    #if ECB_STDFP
      memcpy (&r, &x, 4);
    #else
      /* emulation, only works for normals and subnormals and +0 */
      int neg = x >> 31;
      int e = (x >> 23) & 0xffU;

      x &= 0x7fffffU;

      if (e)
        x |= 0x800000U;
      else
        e = 1;

      /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
      r = ecb_ldexpf (x * (0.5f / 0x800000U), e - 126);

      r = neg ? -r : r;
    #endif

    return r;
  }

  /* convert a double to ieee double/binary64 */
  ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x);
  ecb_function_ ecb_const uint64_t
  ecb_double_to_binary64 (double x)
  {
    uint64_t r;

    #if ECB_STDFP
      memcpy (&r, &x, 8);
    #else
      /* slow emulation, works for anything but -0 */
      uint64_t m;
      int e;

      if (x == 0e0                     ) return 0x0000000000000000U;
      if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;
      if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;
      if (x != x                       ) return 0x7ff7ffffffffffffU;

      m = frexp (x, &e) * 0x20000000000000U;

      r = m & 0x8000000000000000;

      if (r)
        m = -m;

      if (e <= -1022)
        {
          m &= 0x1fffffffffffffU;
          m >>= (-1021 - e);
          e = -1022;
        }

      r |= ((uint64_t)(e + 1022)) << 52;
      r |= m & 0xfffffffffffffU;
    #endif

    return r;
  }

  /* converts an ieee double/binary64 to a double */
  ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x);
  ecb_function_ ecb_const double
  ecb_binary64_to_double (uint64_t x)
  {
    double r;

    #if ECB_STDFP
      memcpy (&r, &x, 8);
    #else
      /* emulation, only works for normals and subnormals and +0 */
      int neg = x >> 63;
      int e = (x >> 52) & 0x7ffU;

      x &= 0xfffffffffffffU;

      if (e)
        x |= 0x10000000000000U;
      else
        e = 1;

      /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
      r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);

      r = neg ? -r : r;
    #endif

    return r;
  }

#endif

#endif

/* ECB.H END */

#if ECB_MEMORY_FENCE_NEEDS_PTHREADS
/* if your architecture doesn't need memory fences, e.g. because it is
 * single-cpu/core, or if you use libev in a project that doesn't use libev
 * from multiple threads, then you can define ECB_AVOID_PTHREADS when compiling
 * libev, in which cases the memory fences become nops.
 * alternatively, you can remove this #error and link against libpthread,
 * which will then provide the memory fences.
 */
# error "memory fences not defined for your architecture, please report"
#endif

#ifndef ECB_MEMORY_FENCE
# define ECB_MEMORY_FENCE do { } while (0)
# define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
# define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
#endif

#define expect_false(cond) ecb_expect_false (cond)
#define expect_true(cond)  ecb_expect_true  (cond)
#define noinline           ecb_noinline

#define inline_size        ecb_inline

#if EV_FEATURE_CODE
# define inline_speed      ecb_inline
#else
# define inline_speed      static noinline
#endif

#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)

#if EV_MINPRI == EV_MAXPRI
# define ABSPRI(w) (((W)w), 0)
#else
# define ABSPRI(w) (((W)w)->priority - EV_MINPRI)
#endif

#define EMPTY       /* required for microsofts broken pseudo-c compiler */
#define EMPTY2(a,b) /* used to suppress some warnings */

typedef ev_watcher *W;
typedef ev_watcher_list *WL;
typedef ev_watcher_time *WT;

#define ev_active(w) ((W)(w))->active
#define ev_at(w) ((WT)(w))->at

#if EV_USE_REALTIME
/* sig_atomic_t is used to avoid per-thread variables or locking but still */
/* giving it a reasonably high chance of working on typical architectures */
static EV_ATOMIC_T have_realtime; /* did clock_gettime (CLOCK_REALTIME) work? */
#endif

#if EV_USE_MONOTONIC
static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */
#endif

#ifndef EV_FD_TO_WIN32_HANDLE
# define EV_FD_TO_WIN32_HANDLE(fd) _get_osfhandle (fd)
#endif
#ifndef EV_WIN32_HANDLE_TO_FD
# define EV_WIN32_HANDLE_TO_FD(handle) _open_osfhandle (handle, 0)
#endif
#ifndef EV_WIN32_CLOSE_FD
# define EV_WIN32_CLOSE_FD(fd) close (fd)
#endif

#ifdef _WIN32
# include "ev_win32.c"
#endif

/*****************************************************************************/

/* define a suitable floor function (only used by periodics atm) */

#if EV_USE_FLOOR
# include <math.h>
# define ev_floor(v) floor (v)
#else

#include <float.h>

/* a floor() replacement function, should be independent of ev_tstamp type */
static ev_tstamp noinline
ev_floor (ev_tstamp v)
{
  /* the choice of shift factor is not terribly important */
#if FLT_RADIX != 2 /* assume FLT_RADIX == 10 */
  const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.;
#else
  const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;
#endif

  /* argument too large for an unsigned long? */
  if (expect_false (v >= shift))
    {
      ev_tstamp f;

      if (v == v - 1.)
        return v; /* very large number */

      f = shift * ev_floor (v * (1. / shift));
      return f + ev_floor (v - f);
    }

  /* special treatment for negative args? */
  if (expect_false (v < 0.))
    {
      ev_tstamp f = -ev_floor (-v);

      return f - (f == v ? 0 : 1);
    }

  /* fits into an unsigned long */
  return (unsigned long)v;
}

#endif
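
/* values too large for an unsigned long are floored recursively in two parts
 * (f and v - f); for arguments that do fit, the integer cast does all the
 * work, e.g. ev_floor (5.9) is 5. and ev_floor (-0.5) is -1. */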

/*****************************************************************************/

#ifdef __linux
# include <sys/utsname.h>
#endif

static unsigned int noinline ecb_cold
ev_linux_version (void)
{
#ifdef __linux
  unsigned int v = 0;
  struct utsname buf;
  int i;
  char *p = buf.release;

  if (uname (&buf))
    return 0;

  for (i = 3+1; --i; )
    {
      unsigned int c = 0;

      for (;;)
        {
          if (*p >= '0' && *p <= '9')
            c = c * 10 + *p++ - '0';
          else
            {
              p += *p == '.';
              break;
            }
        }

      v = (v << 8) | c;
    }

  return v;
#else
  return 0;
#endif
}
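
/* the release string is parsed into one byte per component, so a kernel
 * identifying itself as "2.6.32-foo" yields 0x020620, which turns version
 * checks into plain integer comparisons (e.g. >= 0x020620 for 2.6.32) */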
  1314. /*****************************************************************************/
  1315. #if EV_AVOID_STDIO
  1316. static void noinline ecb_cold
  1317. ev_printerr (const char *msg)
  1318. {
  1319. write (STDERR_FILENO, msg, strlen (msg));
  1320. }
  1321. #endif
  1322. static void (*syserr_cb)(const char *msg) EV_THROW;
  1323. void ecb_cold
  1324. ev_set_syserr_cb (void (*cb)(const char *msg) EV_THROW) EV_THROW
  1325. {
  1326. syserr_cb = cb;
  1327. }
  1328. static void noinline ecb_cold
  1329. ev_syserr (const char *msg)
  1330. {
  1331. if (!msg)
  1332. msg = "(libev) system error";
  1333. if (syserr_cb)
  1334. syserr_cb (msg);
  1335. else
  1336. {
  1337. #if EV_AVOID_STDIO
  1338. ev_printerr (msg);
  1339. ev_printerr (": ");
  1340. ev_printerr (strerror (errno));
  1341. ev_printerr ("\n");
  1342. #else
  1343. perror (msg);
  1344. #endif
  1345. abort ();
  1346. }
  1347. }
  1348. static void *
  1349. ev_realloc_emul (void *ptr, long size) EV_THROW
  1350. {
  1351. /* some systems, notably openbsd and darwin, fail to properly
  1352. * implement realloc (x, 0) (as required by both ansi c-89 and
  1353. * the single unix specification, so work around them here.
  1354. * recently, also (at least) fedora and debian started breaking it,
  1355. * despite documenting it otherwise.
  1356. */
  1357. if (size)
  1358. return realloc (ptr, size);
  1359. free (ptr);
  1360. return 0;
  1361. }
  1362. static void *(*alloc)(void *ptr, long size) EV_THROW = ev_realloc_emul;
  1363. void ecb_cold
  1364. ev_set_allocator (void *(*cb)(void *ptr, long size) EV_THROW) EV_THROW
  1365. {
  1366. alloc = cb;
  1367. }
  1368. inline_speed void *
  1369. ev_realloc (void *ptr, long size)
  1370. {
  1371. ptr = alloc (ptr, size);
  1372. if (!ptr && size)
  1373. {
  1374. #if EV_AVOID_STDIO
  1375. ev_printerr ("(libev) memory allocation failed, aborting.\n");
  1376. #else
  1377. fprintf (stderr, "(libev) cannot allocate %ld bytes, aborting.", size);
  1378. #endif
  1379. abort ();
  1380. }
  1381. return ptr;
  1382. }
  1383. #define ev_malloc(size) ev_realloc (0, (size))
  1384. #define ev_free(ptr) ev_realloc ((ptr), 0)
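#if 0
/* illustrative sketch, not compiled and not part of libev: a replacement
 * allocator must keep the realloc-style contract used above (size 0 frees),
 * but may e.g. wait and retry instead of letting ev_realloc abort.
 * "persistent_realloc" is a made-up name. */
static void *
persistent_realloc (void *ptr, long size)
{
  if (!size)
    {
      free (ptr);
      return 0;
    }

  for (;;)
    {
      void *newptr = realloc (ptr, size);

      if (newptr)
        return newptr;

      sleep (60); /* wait a bit and hope memory becomes available */
    }
}

/* somewhere during program startup: ev_set_allocator (persistent_realloc); */
#endif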
  1385. /*****************************************************************************/
  1386. /* set in reify when reification needed */
  1387. #define EV_ANFD_REIFY 1
  1388. /* file descriptor info structure */
  1389. typedef struct
  1390. {
  1391. WL head;
  1392. unsigned char events; /* the events watched for */
  1393. unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
  1394. unsigned char emask; /* the epoll backend stores the actual kernel mask in here */
  1395. unsigned char unused;
  1396. #if EV_USE_EPOLL
  1397. unsigned int egen; /* generation counter to counter epoll bugs */
  1398. #endif
  1399. #if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
  1400. SOCKET handle;
  1401. #endif
  1402. #if EV_USE_IOCP
  1403. OVERLAPPED or, ow;
  1404. #endif
  1405. } ANFD;
  1406. /* stores the pending event set for a given watcher */
  1407. typedef struct
  1408. {
  1409. W w;
  1410. int events; /* the pending event set for the given watcher */
  1411. } ANPENDING;
  1412. #if EV_USE_INOTIFY
  1413. /* hash table entry per inotify-id */
  1414. typedef struct
  1415. {
  1416. WL head;
  1417. } ANFS;
  1418. #endif
  1419. /* Heap Entry */
  1420. #if EV_HEAP_CACHE_AT
  1421. /* a heap element */
  1422. typedef struct {
  1423. ev_tstamp at;
  1424. WT w;
  1425. } ANHE;
  1426. #define ANHE_w(he) (he).w /* access watcher, read-write */
  1427. #define ANHE_at(he) (he).at /* access cached at, read-only */
  1428. #define ANHE_at_cache(he) (he).at = (he).w->at /* update at from watcher */
  1429. #else
  1430. /* a heap element */
  1431. typedef WT ANHE;
  1432. #define ANHE_w(he) (he)
  1433. #define ANHE_at(he) (he)->at
  1434. #define ANHE_at_cache(he)
  1435. #endif
  1436. #if EV_MULTIPLICITY
  1437. struct ev_loop
  1438. {
  1439. ev_tstamp ev_rt_now;
  1440. #define ev_rt_now ((loop)->ev_rt_now)
  1441. #define VAR(name,decl) decl;
  1442. #include "ev_vars.h"
  1443. #undef VAR
  1444. };
  1445. #include "ev_wrap.h"
  1446. static struct ev_loop default_loop_struct;
  1447. EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */
  1448. #else
  1449. EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a definition despite extern */
  1450. #define VAR(name,decl) static decl;
  1451. #include "ev_vars.h"
  1452. #undef VAR
  1453. static int ev_default_loop_ptr;
  1454. #endif
  1455. #if EV_FEATURE_API
  1456. # define EV_RELEASE_CB if (expect_false (release_cb)) release_cb (EV_A)
  1457. # define EV_ACQUIRE_CB if (expect_false (acquire_cb)) acquire_cb (EV_A)
  1458. # define EV_INVOKE_PENDING invoke_cb (EV_A)
  1459. #else
  1460. # define EV_RELEASE_CB (void)0
  1461. # define EV_ACQUIRE_CB (void)0
  1462. # define EV_INVOKE_PENDING ev_invoke_pending (EV_A)
  1463. #endif
  1464. #define EVBREAK_RECURSE 0x80
  1465. /*****************************************************************************/
  1466. #ifndef EV_HAVE_EV_TIME
  1467. ev_tstamp
  1468. ev_time (void) EV_THROW
  1469. {
  1470. #if EV_USE_REALTIME
  1471. if (expect_true (have_realtime))
  1472. {
  1473. struct timespec ts;
  1474. clock_gettime (CLOCK_REALTIME, &ts);
  1475. return ts.tv_sec + ts.tv_nsec * 1e-9;
  1476. }
  1477. #endif
  1478. struct timeval tv;
  1479. gettimeofday (&tv, 0);
  1480. return tv.tv_sec + tv.tv_usec * 1e-6;
  1481. }
  1482. #endif
  1483. inline_size ev_tstamp
  1484. get_clock (void)
  1485. {
  1486. #if EV_USE_MONOTONIC
  1487. if (expect_true (have_monotonic))
  1488. {
  1489. struct timespec ts;
  1490. clock_gettime (CLOCK_MONOTONIC, &ts);
  1491. return ts.tv_sec + ts.tv_nsec * 1e-9;
  1492. }
  1493. #endif
  1494. return ev_time ();
  1495. }
  1496. #if EV_MULTIPLICITY
  1497. ev_tstamp
  1498. ev_now (EV_P) EV_THROW
  1499. {
  1500. return ev_rt_now;
  1501. }
  1502. #endif
  1503. void
  1504. ev_sleep (ev_tstamp delay) EV_THROW
  1505. {
  1506. if (delay > 0.)
  1507. {
  1508. #if EV_USE_NANOSLEEP
  1509. struct timespec ts;
  1510. EV_TS_SET (ts, delay);
  1511. nanosleep (&ts, 0);
  1512. #elif defined _WIN32
  1513. Sleep ((unsigned long)(delay * 1e3));
  1514. #else
  1515. struct timeval tv;
  1516. /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */
  1517. /* something not guaranteed by newer posix versions, but guaranteed */
  1518. /* by older ones */
  1519. EV_TV_SET (tv, delay);
  1520. select (0, 0, 0, 0, &tv);
  1521. #endif
  1522. }
  1523. }
  1524. /*****************************************************************************/
  1525. #define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */
  1526. /* find a suitable new size for the given array, */
  1527. /* hopefully by rounding to a nice-to-malloc size */
  1528. inline_size int
  1529. array_nextsize (int elem, int cur, int cnt)
  1530. {
  1531. int ncur = cur + 1;
  1532. do
  1533. ncur <<= 1;
  1534. while (cnt > ncur);
  1535. /* if size is large, round to MALLOC_ROUND - 4 * longs to accommodate malloc overhead */
  1536. if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4)
  1537. {
  1538. ncur *= elem;
  1539. ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1);
  1540. ncur = ncur - sizeof (void *) * 4;
  1541. ncur /= elem;
  1542. }
  1543. return ncur;
  1544. }
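/* worked example (illustrative note): for 16 byte elements, cur == 32 and
 * cnt == 33, the loop yields ncur == (32 + 1) << 1 == 66; 66 * 16 bytes is well
 * below MALLOC_ROUND minus 4 pointers, so 66 is returned as-is.  only much
 * larger requests are rounded so that the allocation plus assumed malloc
 * overhead fills whole MALLOC_ROUND-sized chunks. */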
  1545. static void * noinline ecb_cold
  1546. array_realloc (int elem, void *base, int *cur, int cnt)
  1547. {
  1548. *cur = array_nextsize (elem, *cur, cnt);
  1549. return ev_realloc (base, elem * *cur);
  1550. }
  1551. #define array_init_zero(base,count) \
  1552. memset ((void *)(base), 0, sizeof (*(base)) * (count))
  1553. #define array_needsize(type,base,cur,cnt,init) \
  1554. if (expect_false ((cnt) > (cur))) \
  1555. { \
  1556. int ecb_unused ocur_ = (cur); \
  1557. (base) = (type *)array_realloc \
  1558. (sizeof (type), (base), &(cur), (cnt)); \
  1559. init ((base) + (ocur_), (cur) - ocur_); \
  1560. }
  1561. #if 0
  1562. #define array_slim(type,stem) \
  1563. if (stem ## max < array_roundsize (stem ## cnt >> 2)) \
  1564. { \
  1565. stem ## max = array_roundsize (stem ## cnt >> 1); \
  1566. base = (type *)ev_realloc (base, sizeof (type) * (stem ## max));\
  1567. fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\
  1568. }
  1569. #endif
  1570. #define array_free(stem, idx) \
  1571. ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0; stem ## s idx = 0
  1572. /*****************************************************************************/
  1573. /* dummy callback for pending events */
  1574. static void noinline
  1575. pendingcb (EV_P_ ev_prepare *w, int revents)
  1576. {
  1577. }
  1578. void noinline
  1579. ev_feed_event (EV_P_ void *w, int revents) EV_THROW
  1580. {
  1581. W w_ = (W)w;
  1582. int pri = ABSPRI (w_);
  1583. if (expect_false (w_->pending))
  1584. pendings [pri][w_->pending - 1].events |= revents;
  1585. else
  1586. {
  1587. w_->pending = ++pendingcnt [pri];
  1588. array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, EMPTY2);
  1589. pendings [pri][w_->pending - 1].w = w_;
  1590. pendings [pri][w_->pending - 1].events = revents;
  1591. }
  1592. pendingpri = NUMPRI - 1;
  1593. }
  1594. inline_speed void
  1595. feed_reverse (EV_P_ W w)
  1596. {
  1597. array_needsize (W, rfeeds, rfeedmax, rfeedcnt + 1, EMPTY2);
  1598. rfeeds [rfeedcnt++] = w;
  1599. }
  1600. inline_size void
  1601. feed_reverse_done (EV_P_ int revents)
  1602. {
  1603. do
  1604. ev_feed_event (EV_A_ rfeeds [--rfeedcnt], revents);
  1605. while (rfeedcnt);
  1606. }
  1607. inline_speed void
  1608. queue_events (EV_P_ W *events, int eventcnt, int type)
  1609. {
  1610. int i;
  1611. for (i = 0; i < eventcnt; ++i)
  1612. ev_feed_event (EV_A_ events [i], type);
  1613. }
  1614. /*****************************************************************************/
  1615. inline_speed void
  1616. fd_event_nocheck (EV_P_ int fd, int revents)
  1617. {
  1618. ANFD *anfd = anfds + fd;
  1619. ev_io *w;
  1620. for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
  1621. {
  1622. int ev = w->events & revents;
  1623. if (ev)
  1624. ev_feed_event (EV_A_ (W)w, ev);
  1625. }
  1626. }
  1627. /* do not submit kernel events for fds that have reify set */
  1628. /* because that means they changed while we were polling for new events */
  1629. inline_speed void
  1630. fd_event (EV_P_ int fd, int revents)
  1631. {
  1632. ANFD *anfd = anfds + fd;
  1633. if (expect_true (!anfd->reify))
  1634. fd_event_nocheck (EV_A_ fd, revents);
  1635. }
  1636. void
  1637. ev_feed_fd_event (EV_P_ int fd, int revents) EV_THROW
  1638. {
  1639. if (fd >= 0 && fd < anfdmax)
  1640. fd_event_nocheck (EV_A_ fd, revents);
  1641. }
  1642. /* make sure the external fd watch events are in-sync */
  1643. /* with the kernel/libev internal state */
  1644. inline_size void
  1645. fd_reify (EV_P)
  1646. {
  1647. int i;
  1648. #if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
  1649. for (i = 0; i < fdchangecnt; ++i)
  1650. {
  1651. int fd = fdchanges [i];
  1652. ANFD *anfd = anfds + fd;
  1653. if (anfd->reify & EV__IOFDSET && anfd->head)
  1654. {
  1655. SOCKET handle = EV_FD_TO_WIN32_HANDLE (fd);
  1656. if (handle != anfd->handle)
  1657. {
  1658. unsigned long arg;
  1659. assert (("libev: only socket fds supported in this configuration", ioctlsocket (handle, FIONREAD, &arg) == 0));
  1660. /* handle changed, but fd didn't - we need to do it in two steps */
  1661. backend_modify (EV_A_ fd, anfd->events, 0);
  1662. anfd->events = 0;
  1663. anfd->handle = handle;
  1664. }
  1665. }
  1666. }
  1667. #endif
  1668. for (i = 0; i < fdchangecnt; ++i)
  1669. {
  1670. int fd = fdchanges [i];
  1671. ANFD *anfd = anfds + fd;
  1672. ev_io *w;
  1673. unsigned char o_events = anfd->events;
  1674. unsigned char o_reify = anfd->reify;
  1675. anfd->reify = 0;
  1676. /*if (expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */
  1677. {
  1678. anfd->events = 0;
  1679. for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
  1680. anfd->events |= (unsigned char)w->events;
  1681. if (o_events != anfd->events)
  1682. o_reify = EV__IOFDSET; /* actually |= */
  1683. }
  1684. if (o_reify & EV__IOFDSET)
  1685. backend_modify (EV_A_ fd, o_events, anfd->events);
  1686. }
  1687. fdchangecnt = 0;
  1688. }
  1689. /* something about the given fd changed */
  1690. inline_size void
  1691. fd_change (EV_P_ int fd, int flags)
  1692. {
  1693. unsigned char reify = anfds [fd].reify;
  1694. anfds [fd].reify |= flags;
  1695. if (expect_true (!reify))
  1696. {
  1697. ++fdchangecnt;
  1698. array_needsize (int, fdchanges, fdchangemax, fdchangecnt, EMPTY2);
  1699. fdchanges [fdchangecnt - 1] = fd;
  1700. }
  1701. }
  1702. /* the given fd is invalid/unusable, so make sure it doesn't hurt us anymore */
  1703. inline_speed void ecb_cold
  1704. fd_kill (EV_P_ int fd)
  1705. {
  1706. ev_io *w;
  1707. while ((w = (ev_io *)anfds [fd].head))
  1708. {
  1709. ev_io_stop (EV_A_ w);
  1710. ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE);
  1711. }
  1712. }
  1713. /* check whether the given fd is actually valid, for error recovery */
  1714. inline_size int ecb_cold
  1715. fd_valid (int fd)
  1716. {
  1717. #ifdef _WIN32
  1718. return EV_FD_TO_WIN32_HANDLE (fd) != -1;
  1719. #else
  1720. return fcntl (fd, F_GETFD) != -1;
  1721. #endif
  1722. }
  1723. /* called on EBADF to verify fds */
  1724. static void noinline ecb_cold
  1725. fd_ebadf (EV_P)
  1726. {
  1727. int fd;
  1728. for (fd = 0; fd < anfdmax; ++fd)
  1729. if (anfds [fd].events)
  1730. if (!fd_valid (fd) && errno == EBADF)
  1731. fd_kill (EV_A_ fd);
  1732. }
  1733. /* called on ENOMEM in select/poll to kill some fds and retry */
  1734. static void noinline ecb_cold
  1735. fd_enomem (EV_P)
  1736. {
  1737. int fd;
  1738. for (fd = anfdmax; fd--; )
  1739. if (anfds [fd].events)
  1740. {
  1741. fd_kill (EV_A_ fd);
  1742. break;
  1743. }
  1744. }
  1745. /* usually called after fork if backend needs to re-arm all fds from scratch */
  1746. static void noinline
  1747. fd_rearm_all (EV_P)
  1748. {
  1749. int fd;
  1750. for (fd = 0; fd < anfdmax; ++fd)
  1751. if (anfds [fd].events)
  1752. {
  1753. anfds [fd].events = 0;
  1754. anfds [fd].emask = 0;
  1755. fd_change (EV_A_ fd, EV__IOFDSET | EV_ANFD_REIFY);
  1756. }
  1757. }
  1758. /* used to prepare libev internal fd's */
  1759. /* this is not fork-safe */
  1760. inline_speed void
  1761. fd_intern (int fd)
  1762. {
  1763. #ifdef _WIN32
  1764. unsigned long arg = 1;
  1765. ioctlsocket (EV_FD_TO_WIN32_HANDLE (fd), FIONBIO, &arg);
  1766. #else
  1767. fcntl (fd, F_SETFD, FD_CLOEXEC);
  1768. fcntl (fd, F_SETFL, O_NONBLOCK);
  1769. #endif
  1770. }
  1771. /*****************************************************************************/
  1772. /*
  1773. * the heap functions want a real array index. array index 0 is guaranteed to not
  1774. * be in-use at any time. the first heap entry is at array [HEAP0]. DHEAP gives
  1775. * the branching factor of the d-tree.
  1776. */
  1777. /*
  1778. * at the moment we allow libev the luxury of two heaps,
  1779. * a small-code-size 2-heap one and a ~1.5kb larger 4-heap
  1780. * which is more cache-efficient.
  1781. * the difference is about 5% with 50000+ watchers.
  1782. */
  1783. #if EV_USE_4HEAP
  1784. #define DHEAP 4
  1785. #define HEAP0 (DHEAP - 1) /* index of first element in heap */
  1786. #define HPARENT(k) ((((k) - HEAP0 - 1) / DHEAP) + HEAP0)
  1787. #define UPHEAP_DONE(p,k) ((p) == (k))
  1788. /* away from the root */
  1789. inline_speed void
  1790. downheap (ANHE *heap, int N, int k)
  1791. {
  1792. ANHE he = heap [k];
  1793. ANHE *E = heap + N + HEAP0;
  1794. for (;;)
  1795. {
  1796. ev_tstamp minat;
  1797. ANHE *minpos;
  1798. ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1;
  1799. /* find minimum child */
  1800. if (expect_true (pos + DHEAP - 1 < E))
  1801. {
  1802. /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
  1803. if ( ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
  1804. if ( ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
  1805. if ( ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
  1806. }
  1807. else if (pos < E)
  1808. {
  1809. /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
  1810. if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
  1811. if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
  1812. if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
  1813. }
  1814. else
  1815. break;
  1816. if (ANHE_at (he) <= minat)
  1817. break;
  1818. heap [k] = *minpos;
  1819. ev_active (ANHE_w (*minpos)) = k;
  1820. k = minpos - heap;
  1821. }
  1822. heap [k] = he;
  1823. ev_active (ANHE_w (he)) = k;
  1824. }
  1825. #else /* 4HEAP */
  1826. #define HEAP0 1
  1827. #define HPARENT(k) ((k) >> 1)
  1828. #define UPHEAP_DONE(p,k) (!(p))
  1829. /* away from the root */
  1830. inline_speed void
  1831. downheap (ANHE *heap, int N, int k)
  1832. {
  1833. ANHE he = heap [k];
  1834. for (;;)
  1835. {
  1836. int c = k << 1;
  1837. if (c >= N + HEAP0)
  1838. break;
  1839. c += c + 1 < N + HEAP0 && ANHE_at (heap [c]) > ANHE_at (heap [c + 1])
  1840. ? 1 : 0;
  1841. if (ANHE_at (he) <= ANHE_at (heap [c]))
  1842. break;
  1843. heap [k] = heap [c];
  1844. ev_active (ANHE_w (heap [k])) = k;
  1845. k = c;
  1846. }
  1847. heap [k] = he;
  1848. ev_active (ANHE_w (he)) = k;
  1849. }
  1850. #endif
  1851. /* towards the root */
  1852. inline_speed void
  1853. upheap (ANHE *heap, int k)
  1854. {
  1855. ANHE he = heap [k];
  1856. for (;;)
  1857. {
  1858. int p = HPARENT (k);
  1859. if (UPHEAP_DONE (p, k) || ANHE_at (heap [p]) <= ANHE_at (he))
  1860. break;
  1861. heap [k] = heap [p];
  1862. ev_active (ANHE_w (heap [k])) = k;
  1863. k = p;
  1864. }
  1865. heap [k] = he;
  1866. ev_active (ANHE_w (he)) = k;
  1867. }
  1868. /* move an element suitably so it is in a correct place */
  1869. inline_size void
  1870. adjustheap (ANHE *heap, int N, int k)
  1871. {
  1872. if (k > HEAP0 && ANHE_at (heap [k]) <= ANHE_at (heap [HPARENT (k)]))
  1873. upheap (heap, k);
  1874. else
  1875. downheap (heap, N, k);
  1876. }
  1877. /* rebuild the heap: this function is used only once and executed rarely */
  1878. inline_size void
  1879. reheap (ANHE *heap, int N)
  1880. {
  1881. int i;
1882. /* we don't use Floyd's algorithm, upheap is simpler and more cache-efficient */
  1883. /* also, this is easy to implement and correct for both 2-heaps and 4-heaps */
  1884. for (i = 0; i < N; ++i)
  1885. upheap (heap, i + HEAP0);
  1886. }
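/* worked example (illustrative note): with the 4-heap, DHEAP == 4 and HEAP0 == 3,
 * so the root lives at index 3, its children at 4..7 (DHEAP * (k - HEAP0) + HEAP0 + 1
 * for k == 3), the children of index 4 at 8..11, and HPARENT (9) == ((9 - 3 - 1) / 4) + 3 == 4;
 * with the 2-heap the usual k >> 1 parent and k << 1 child rules apply from HEAP0 == 1. */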
  1887. /*****************************************************************************/
1888. /* associate signal watchers to a signal */
  1889. typedef struct
  1890. {
  1891. EV_ATOMIC_T pending;
  1892. #if EV_MULTIPLICITY
  1893. EV_P;
  1894. #endif
  1895. WL head;
  1896. } ANSIG;
  1897. static ANSIG signals [EV_NSIG - 1];
  1898. /*****************************************************************************/
  1899. #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
  1900. static void noinline ecb_cold
  1901. evpipe_init (EV_P)
  1902. {
  1903. if (!ev_is_active (&pipe_w))
  1904. {
  1905. int fds [2];
  1906. # if EV_USE_EVENTFD
  1907. fds [0] = -1;
  1908. fds [1] = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC);
  1909. if (fds [1] < 0 && errno == EINVAL)
  1910. fds [1] = eventfd (0, 0);
  1911. if (fds [1] < 0)
  1912. # endif
  1913. {
  1914. while (pipe (fds))
  1915. ev_syserr ("(libev) error creating signal/async pipe");
  1916. fd_intern (fds [0]);
  1917. }
  1918. evpipe [0] = fds [0];
  1919. if (evpipe [1] < 0)
  1920. evpipe [1] = fds [1]; /* first call, set write fd */
  1921. else
  1922. {
  1923. /* on subsequent calls, do not change evpipe [1] */
  1924. /* so that evpipe_write can always rely on its value. */
  1925. /* this branch does not do anything sensible on windows, */
  1926. /* so must not be executed on windows */
  1927. dup2 (fds [1], evpipe [1]);
  1928. close (fds [1]);
  1929. }
  1930. fd_intern (evpipe [1]);
  1931. ev_io_set (&pipe_w, evpipe [0] < 0 ? evpipe [1] : evpipe [0], EV_READ);
  1932. ev_io_start (EV_A_ &pipe_w);
  1933. ev_unref (EV_A); /* watcher should not keep loop alive */
  1934. }
  1935. }
  1936. inline_speed void
  1937. evpipe_write (EV_P_ EV_ATOMIC_T *flag)
  1938. {
  1939. ECB_MEMORY_FENCE; /* push out the write before this function was called, acquire flag */
  1940. if (expect_true (*flag))
  1941. return;
  1942. *flag = 1;
  1943. ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */
  1944. pipe_write_skipped = 1;
  1945. ECB_MEMORY_FENCE; /* make sure pipe_write_skipped is visible before we check pipe_write_wanted */
  1946. if (pipe_write_wanted)
  1947. {
  1948. int old_errno;
  1949. pipe_write_skipped = 0;
  1950. ECB_MEMORY_FENCE_RELEASE;
  1951. old_errno = errno; /* save errno because write will clobber it */
  1952. #if EV_USE_EVENTFD
  1953. if (evpipe [0] < 0)
  1954. {
  1955. uint64_t counter = 1;
  1956. write (evpipe [1], &counter, sizeof (uint64_t));
  1957. }
  1958. else
  1959. #endif
  1960. {
  1961. #ifdef _WIN32
  1962. WSABUF buf;
  1963. DWORD sent;
1964. buf.buf = (char *)&buf; /* any byte will do, avoid incompatible pointer assignment */
  1965. buf.len = 1;
  1966. WSASend (EV_FD_TO_WIN32_HANDLE (evpipe [1]), &buf, 1, &sent, 0, 0, 0);
  1967. #else
  1968. write (evpipe [1], &(evpipe [1]), 1);
  1969. #endif
  1970. }
  1971. errno = old_errno;
  1972. }
  1973. }
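/* note (illustrative, not part of libev): pipe_write_wanted is only set while
 * ev_run is about to block in the backend, so waking an already-busy loop
 * usually degrades to setting pipe_write_skipped without any syscall; after
 * polling, ev_run re-checks pipe_write_skipped and feeds pipe_w by hand. */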
  1974. /* called whenever the libev signal pipe */
  1975. /* got some events (signal, async) */
  1976. static void
  1977. pipecb (EV_P_ ev_io *iow, int revents)
  1978. {
  1979. int i;
  1980. if (revents & EV_READ)
  1981. {
  1982. #if EV_USE_EVENTFD
  1983. if (evpipe [0] < 0)
  1984. {
  1985. uint64_t counter;
  1986. read (evpipe [1], &counter, sizeof (uint64_t));
  1987. }
  1988. else
  1989. #endif
  1990. {
  1991. char dummy[4];
  1992. #ifdef _WIN32
  1993. WSABUF buf;
  1994. DWORD recvd;
  1995. DWORD flags = 0;
  1996. buf.buf = dummy;
  1997. buf.len = sizeof (dummy);
  1998. WSARecv (EV_FD_TO_WIN32_HANDLE (evpipe [0]), &buf, 1, &recvd, &flags, 0, 0);
  1999. #else
  2000. read (evpipe [0], &dummy, sizeof (dummy));
  2001. #endif
  2002. }
  2003. }
  2004. pipe_write_skipped = 0;
  2005. ECB_MEMORY_FENCE; /* push out skipped, acquire flags */
  2006. #if EV_SIGNAL_ENABLE
  2007. if (sig_pending)
  2008. {
  2009. sig_pending = 0;
  2010. ECB_MEMORY_FENCE;
  2011. for (i = EV_NSIG - 1; i--; )
  2012. if (expect_false (signals [i].pending))
  2013. ev_feed_signal_event (EV_A_ i + 1);
  2014. }
  2015. #endif
  2016. #if EV_ASYNC_ENABLE
  2017. if (async_pending)
  2018. {
  2019. async_pending = 0;
  2020. ECB_MEMORY_FENCE;
  2021. for (i = asynccnt; i--; )
  2022. if (asyncs [i]->sent)
  2023. {
  2024. asyncs [i]->sent = 0;
  2025. ECB_MEMORY_FENCE_RELEASE;
  2026. ev_feed_event (EV_A_ asyncs [i], EV_ASYNC);
  2027. }
  2028. }
  2029. #endif
  2030. }
  2031. /*****************************************************************************/
  2032. void
  2033. ev_feed_signal (int signum) EV_THROW
  2034. {
  2035. #if EV_MULTIPLICITY
  2036. EV_P;
  2037. ECB_MEMORY_FENCE_ACQUIRE;
  2038. EV_A = signals [signum - 1].loop;
  2039. if (!EV_A)
  2040. return;
  2041. #endif
  2042. signals [signum - 1].pending = 1;
  2043. evpipe_write (EV_A_ &sig_pending);
  2044. }
  2045. static void
  2046. ev_sighandler (int signum)
  2047. {
  2048. #ifdef _WIN32
  2049. signal (signum, ev_sighandler);
  2050. #endif
  2051. ev_feed_signal (signum);
  2052. }
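#if 0
/* illustrative sketch, not compiled and not part of libev: ev_feed_signal ()
 * is safe to call from any context, including raw signal handlers and other
 * threads, which is why ev_sighandler above calls it directly; a program that
 * installs its own handlers can forward signals the same way.
 * "forward_signal" is a made-up name. */
static void
forward_signal (int signum)
{
  ev_feed_signal (signum); /* wakes whichever loop has a watcher for signum */
}
#endif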
  2053. void noinline
  2054. ev_feed_signal_event (EV_P_ int signum) EV_THROW
  2055. {
  2056. WL w;
  2057. if (expect_false (signum <= 0 || signum >= EV_NSIG))
  2058. return;
  2059. --signum;
  2060. #if EV_MULTIPLICITY
  2061. /* it is permissible to try to feed a signal to the wrong loop */
  2062. /* or, likely more useful, feeding a signal nobody is waiting for */
  2063. if (expect_false (signals [signum].loop != EV_A))
  2064. return;
  2065. #endif
  2066. signals [signum].pending = 0;
  2067. ECB_MEMORY_FENCE_RELEASE;
  2068. for (w = signals [signum].head; w; w = w->next)
  2069. ev_feed_event (EV_A_ (W)w, EV_SIGNAL);
  2070. }
  2071. #if EV_USE_SIGNALFD
  2072. static void
  2073. sigfdcb (EV_P_ ev_io *iow, int revents)
  2074. {
  2075. struct signalfd_siginfo si[2], *sip; /* these structs are big */
  2076. for (;;)
  2077. {
  2078. ssize_t res = read (sigfd, si, sizeof (si));
  2079. /* not ISO-C, as res might be -1, but works with SuS */
  2080. for (sip = si; (char *)sip < (char *)si + res; ++sip)
  2081. ev_feed_signal_event (EV_A_ sip->ssi_signo);
  2082. if (res < (ssize_t)sizeof (si))
  2083. break;
  2084. }
  2085. }
  2086. #endif
  2087. #endif
  2088. /*****************************************************************************/
  2089. #if EV_CHILD_ENABLE
  2090. static WL childs [EV_PID_HASHSIZE];
  2091. static ev_signal childev;
  2092. #ifndef WIFCONTINUED
  2093. # define WIFCONTINUED(status) 0
  2094. #endif
  2095. /* handle a single child status event */
  2096. inline_speed void
  2097. child_reap (EV_P_ int chain, int pid, int status)
  2098. {
  2099. ev_child *w;
  2100. int traced = WIFSTOPPED (status) || WIFCONTINUED (status);
  2101. for (w = (ev_child *)childs [chain & ((EV_PID_HASHSIZE) - 1)]; w; w = (ev_child *)((WL)w)->next)
  2102. {
  2103. if ((w->pid == pid || !w->pid)
  2104. && (!traced || (w->flags & 1)))
  2105. {
  2106. ev_set_priority (w, EV_MAXPRI); /* need to do it *now*, this *must* be the same prio as the signal watcher itself */
  2107. w->rpid = pid;
  2108. w->rstatus = status;
  2109. ev_feed_event (EV_A_ (W)w, EV_CHILD);
  2110. }
  2111. }
  2112. }
  2113. #ifndef WCONTINUED
  2114. # define WCONTINUED 0
  2115. #endif
  2116. /* called on sigchld etc., calls waitpid */
  2117. static void
  2118. childcb (EV_P_ ev_signal *sw, int revents)
  2119. {
  2120. int pid, status;
  2121. /* some systems define WCONTINUED but then fail to support it (linux 2.4) */
  2122. if (0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED)))
  2123. if (!WCONTINUED
  2124. || errno != EINVAL
  2125. || 0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED)))
  2126. return;
  2127. /* make sure we are called again until all children have been reaped */
  2128. /* we need to do it this way so that the callback gets called before we continue */
  2129. ev_feed_event (EV_A_ (W)sw, EV_SIGNAL);
  2130. child_reap (EV_A_ pid, pid, status);
  2131. if ((EV_PID_HASHSIZE) > 1)
  2132. child_reap (EV_A_ 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */
  2133. }
  2134. #endif
  2135. /*****************************************************************************/
  2136. #if EV_USE_IOCP
  2137. # include "ev_iocp.c"
  2138. #endif
  2139. #if EV_USE_PORT
  2140. # include "ev_port.c"
  2141. #endif
  2142. #if EV_USE_KQUEUE
  2143. # include "ev_kqueue.c"
  2144. #endif
  2145. #if EV_USE_EPOLL
  2146. # include "ev_epoll.c"
  2147. #endif
  2148. #if EV_USE_POLL
  2149. # include "ev_poll.c"
  2150. #endif
  2151. #if EV_USE_SELECT
  2152. # include "ev_select.c"
  2153. #endif
  2154. int ecb_cold
  2155. ev_version_major (void) EV_THROW
  2156. {
  2157. return EV_VERSION_MAJOR;
  2158. }
  2159. int ecb_cold
  2160. ev_version_minor (void) EV_THROW
  2161. {
  2162. return EV_VERSION_MINOR;
  2163. }
  2164. /* return true if we are running with elevated privileges and should ignore env variables */
  2165. int inline_size ecb_cold
  2166. enable_secure (void)
  2167. {
  2168. #ifdef _WIN32
  2169. return 0;
  2170. #else
  2171. return getuid () != geteuid ()
  2172. || getgid () != getegid ();
  2173. #endif
  2174. }
  2175. unsigned int ecb_cold
  2176. ev_supported_backends (void) EV_THROW
  2177. {
  2178. unsigned int flags = 0;
  2179. if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
  2180. if (EV_USE_KQUEUE) flags |= EVBACKEND_KQUEUE;
  2181. if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
  2182. if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
  2183. if (EV_USE_SELECT) flags |= EVBACKEND_SELECT;
  2184. return flags;
  2185. }
  2186. unsigned int ecb_cold
  2187. ev_recommended_backends (void) EV_THROW
  2188. {
  2189. unsigned int flags = ev_supported_backends ();
  2190. #if !defined(__NetBSD__) && !defined(__FreeBSD__)
2191. /* kqueue is borked on everything but netbsd and freebsd apparently */
  2192. /* it usually doesn't work correctly on anything but sockets and pipes */
  2193. flags &= ~EVBACKEND_KQUEUE;
  2194. #endif
  2195. #ifdef __APPLE__
  2196. /* only select works correctly on that "unix-certified" platform */
  2197. flags &= ~EVBACKEND_KQUEUE; /* horribly broken, even for sockets */
  2198. flags &= ~EVBACKEND_POLL; /* poll is based on kqueue from 10.5 onwards */
  2199. #endif
  2200. #ifdef __FreeBSD__
  2201. flags &= ~EVBACKEND_POLL; /* poll return value is unusable (http://forums.freebsd.org/archive/index.php/t-10270.html) */
  2202. #endif
  2203. return flags;
  2204. }
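#if 0
/* illustrative sketch, not compiled and not part of libev (assumes
 * EV_MULTIPLICITY): the backend masks can be inspected before creating a loop,
 * e.g. to insist on epoll where it is compiled in and fall back otherwise.
 * "loop_prefer_epoll" is a made-up name. */
static struct ev_loop *
loop_prefer_epoll (void)
{
  if (ev_supported_backends () & EVBACKEND_EPOLL)
    return ev_loop_new (EVBACKEND_EPOLL);

  return ev_loop_new (EVFLAG_AUTO);
}
#endif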
  2205. unsigned int ecb_cold
  2206. ev_embeddable_backends (void) EV_THROW
  2207. {
  2208. int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT;
  2209. /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
  2210. if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
  2211. flags &= ~EVBACKEND_EPOLL;
  2212. return flags;
  2213. }
  2214. unsigned int
  2215. ev_backend (EV_P) EV_THROW
  2216. {
  2217. return backend;
  2218. }
  2219. #if EV_FEATURE_API
  2220. unsigned int
  2221. ev_iteration (EV_P) EV_THROW
  2222. {
  2223. return loop_count;
  2224. }
  2225. unsigned int
  2226. ev_depth (EV_P) EV_THROW
  2227. {
  2228. return loop_depth;
  2229. }
  2230. void
  2231. ev_set_io_collect_interval (EV_P_ ev_tstamp interval) EV_THROW
  2232. {
  2233. io_blocktime = interval;
  2234. }
  2235. void
  2236. ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval) EV_THROW
  2237. {
  2238. timeout_blocktime = interval;
  2239. }
  2240. void
  2241. ev_set_userdata (EV_P_ void *data) EV_THROW
  2242. {
  2243. userdata = data;
  2244. }
  2245. void *
  2246. ev_userdata (EV_P) EV_THROW
  2247. {
  2248. return userdata;
  2249. }
  2250. void
  2251. ev_set_invoke_pending_cb (EV_P_ ev_loop_callback invoke_pending_cb) EV_THROW
  2252. {
  2253. invoke_cb = invoke_pending_cb;
  2254. }
  2255. void
  2256. ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_THROW, void (*acquire)(EV_P) EV_THROW) EV_THROW
  2257. {
  2258. release_cb = release;
  2259. acquire_cb = acquire;
  2260. }
  2261. #endif
  2262. /* initialise a loop structure, must be zero-initialised */
  2263. static void noinline ecb_cold
  2264. loop_init (EV_P_ unsigned int flags) EV_THROW
  2265. {
  2266. if (!backend)
  2267. {
  2268. origflags = flags;
  2269. #if EV_USE_REALTIME
  2270. if (!have_realtime)
  2271. {
  2272. struct timespec ts;
  2273. if (!clock_gettime (CLOCK_REALTIME, &ts))
  2274. have_realtime = 1;
  2275. }
  2276. #endif
  2277. #if EV_USE_MONOTONIC
  2278. if (!have_monotonic)
  2279. {
  2280. struct timespec ts;
  2281. if (!clock_gettime (CLOCK_MONOTONIC, &ts))
  2282. have_monotonic = 1;
  2283. }
  2284. #endif
  2285. /* pid check not overridable via env */
  2286. #ifndef _WIN32
  2287. if (flags & EVFLAG_FORKCHECK)
  2288. curpid = getpid ();
  2289. #endif
  2290. if (!(flags & EVFLAG_NOENV)
  2291. && !enable_secure ()
  2292. && getenv ("LIBEV_FLAGS"))
  2293. flags = atoi (getenv ("LIBEV_FLAGS"));
  2294. ev_rt_now = ev_time ();
  2295. mn_now = get_clock ();
  2296. now_floor = mn_now;
  2297. rtmn_diff = ev_rt_now - mn_now;
  2298. #if EV_FEATURE_API
  2299. invoke_cb = ev_invoke_pending;
  2300. #endif
  2301. io_blocktime = 0.;
  2302. timeout_blocktime = 0.;
  2303. backend = 0;
  2304. backend_fd = -1;
  2305. sig_pending = 0;
  2306. #if EV_ASYNC_ENABLE
  2307. async_pending = 0;
  2308. #endif
  2309. pipe_write_skipped = 0;
  2310. pipe_write_wanted = 0;
  2311. evpipe [0] = -1;
  2312. evpipe [1] = -1;
  2313. #if EV_USE_INOTIFY
  2314. fs_fd = flags & EVFLAG_NOINOTIFY ? -1 : -2;
  2315. #endif
  2316. #if EV_USE_SIGNALFD
  2317. sigfd = flags & EVFLAG_SIGNALFD ? -2 : -1;
  2318. #endif
  2319. if (!(flags & EVBACKEND_MASK))
  2320. flags |= ev_recommended_backends ();
  2321. #if EV_USE_IOCP
  2322. if (!backend && (flags & EVBACKEND_IOCP )) backend = iocp_init (EV_A_ flags);
  2323. #endif
  2324. #if EV_USE_PORT
  2325. if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags);
  2326. #endif
  2327. #if EV_USE_KQUEUE
  2328. if (!backend && (flags & EVBACKEND_KQUEUE)) backend = kqueue_init (EV_A_ flags);
  2329. #endif
  2330. #if EV_USE_EPOLL
  2331. if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init (EV_A_ flags);
  2332. #endif
  2333. #if EV_USE_POLL
  2334. if (!backend && (flags & EVBACKEND_POLL )) backend = poll_init (EV_A_ flags);
  2335. #endif
  2336. #if EV_USE_SELECT
  2337. if (!backend && (flags & EVBACKEND_SELECT)) backend = select_init (EV_A_ flags);
  2338. #endif
  2339. ev_prepare_init (&pending_w, pendingcb);
  2340. #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
  2341. ev_init (&pipe_w, pipecb);
  2342. ev_set_priority (&pipe_w, EV_MAXPRI);
  2343. #endif
  2344. }
  2345. }
  2346. /* free up a loop structure */
  2347. void ecb_cold
  2348. ev_loop_destroy (EV_P)
  2349. {
  2350. int i;
  2351. #if EV_MULTIPLICITY
  2352. /* mimic free (0) */
  2353. if (!EV_A)
  2354. return;
  2355. #endif
  2356. #if EV_CLEANUP_ENABLE
  2357. /* queue cleanup watchers (and execute them) */
  2358. if (expect_false (cleanupcnt))
  2359. {
  2360. queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP);
  2361. EV_INVOKE_PENDING;
  2362. }
  2363. #endif
  2364. #if EV_CHILD_ENABLE
  2365. if (ev_is_default_loop (EV_A) && ev_is_active (&childev))
  2366. {
  2367. ev_ref (EV_A); /* child watcher */
  2368. ev_signal_stop (EV_A_ &childev);
  2369. }
  2370. #endif
  2371. if (ev_is_active (&pipe_w))
  2372. {
  2373. /*ev_ref (EV_A);*/
  2374. /*ev_io_stop (EV_A_ &pipe_w);*/
  2375. if (evpipe [0] >= 0) EV_WIN32_CLOSE_FD (evpipe [0]);
  2376. if (evpipe [1] >= 0) EV_WIN32_CLOSE_FD (evpipe [1]);
  2377. }
  2378. #if EV_USE_SIGNALFD
  2379. if (ev_is_active (&sigfd_w))
  2380. close (sigfd);
  2381. #endif
  2382. #if EV_USE_INOTIFY
  2383. if (fs_fd >= 0)
  2384. close (fs_fd);
  2385. #endif
  2386. if (backend_fd >= 0)
  2387. close (backend_fd);
  2388. #if EV_USE_IOCP
  2389. if (backend == EVBACKEND_IOCP ) iocp_destroy (EV_A);
  2390. #endif
  2391. #if EV_USE_PORT
  2392. if (backend == EVBACKEND_PORT ) port_destroy (EV_A);
  2393. #endif
  2394. #if EV_USE_KQUEUE
  2395. if (backend == EVBACKEND_KQUEUE) kqueue_destroy (EV_A);
  2396. #endif
  2397. #if EV_USE_EPOLL
  2398. if (backend == EVBACKEND_EPOLL ) epoll_destroy (EV_A);
  2399. #endif
  2400. #if EV_USE_POLL
  2401. if (backend == EVBACKEND_POLL ) poll_destroy (EV_A);
  2402. #endif
  2403. #if EV_USE_SELECT
  2404. if (backend == EVBACKEND_SELECT) select_destroy (EV_A);
  2405. #endif
  2406. for (i = NUMPRI; i--; )
  2407. {
  2408. array_free (pending, [i]);
  2409. #if EV_IDLE_ENABLE
  2410. array_free (idle, [i]);
  2411. #endif
  2412. }
  2413. ev_free (anfds); anfds = 0; anfdmax = 0;
  2414. /* have to use the microsoft-never-gets-it-right macro */
  2415. array_free (rfeed, EMPTY);
  2416. array_free (fdchange, EMPTY);
  2417. array_free (timer, EMPTY);
  2418. #if EV_PERIODIC_ENABLE
  2419. array_free (periodic, EMPTY);
  2420. #endif
  2421. #if EV_FORK_ENABLE
  2422. array_free (fork, EMPTY);
  2423. #endif
  2424. #if EV_CLEANUP_ENABLE
  2425. array_free (cleanup, EMPTY);
  2426. #endif
  2427. array_free (prepare, EMPTY);
  2428. array_free (check, EMPTY);
  2429. #if EV_ASYNC_ENABLE
  2430. array_free (async, EMPTY);
  2431. #endif
  2432. backend = 0;
  2433. #if EV_MULTIPLICITY
  2434. if (ev_is_default_loop (EV_A))
  2435. #endif
  2436. ev_default_loop_ptr = 0;
  2437. #if EV_MULTIPLICITY
  2438. else
  2439. ev_free (EV_A);
  2440. #endif
  2441. }
  2442. #if EV_USE_INOTIFY
  2443. inline_size void infy_fork (EV_P);
  2444. #endif
  2445. inline_size void
  2446. loop_fork (EV_P)
  2447. {
  2448. #if EV_USE_PORT
  2449. if (backend == EVBACKEND_PORT ) port_fork (EV_A);
  2450. #endif
  2451. #if EV_USE_KQUEUE
  2452. if (backend == EVBACKEND_KQUEUE) kqueue_fork (EV_A);
  2453. #endif
  2454. #if EV_USE_EPOLL
  2455. if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A);
  2456. #endif
  2457. #if EV_USE_INOTIFY
  2458. infy_fork (EV_A);
  2459. #endif
  2460. #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
  2461. if (ev_is_active (&pipe_w))
  2462. {
  2463. /* pipe_write_wanted must be false now, so modifying fd vars should be safe */
  2464. ev_ref (EV_A);
  2465. ev_io_stop (EV_A_ &pipe_w);
  2466. if (evpipe [0] >= 0)
  2467. EV_WIN32_CLOSE_FD (evpipe [0]);
  2468. evpipe_init (EV_A);
  2469. /* iterate over everything, in case we missed something before */
  2470. ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
  2471. }
  2472. #endif
  2473. postfork = 0;
  2474. }
  2475. #if EV_MULTIPLICITY
  2476. struct ev_loop * ecb_cold
  2477. ev_loop_new (unsigned int flags) EV_THROW
  2478. {
  2479. EV_P = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop));
  2480. memset (EV_A, 0, sizeof (struct ev_loop));
  2481. loop_init (EV_A_ flags);
  2482. if (ev_backend (EV_A))
  2483. return EV_A;
  2484. ev_free (EV_A);
  2485. return 0;
  2486. }
  2487. #endif /* multiplicity */
  2488. #if EV_VERIFY
  2489. static void noinline ecb_cold
  2490. verify_watcher (EV_P_ W w)
  2491. {
  2492. assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI));
  2493. if (w->pending)
  2494. assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w));
  2495. }
  2496. static void noinline ecb_cold
  2497. verify_heap (EV_P_ ANHE *heap, int N)
  2498. {
  2499. int i;
  2500. for (i = HEAP0; i < N + HEAP0; ++i)
  2501. {
  2502. assert (("libev: active index mismatch in heap", ev_active (ANHE_w (heap [i])) == i));
  2503. assert (("libev: heap condition violated", i == HEAP0 || ANHE_at (heap [HPARENT (i)]) <= ANHE_at (heap [i])));
  2504. assert (("libev: heap at cache mismatch", ANHE_at (heap [i]) == ev_at (ANHE_w (heap [i]))));
  2505. verify_watcher (EV_A_ (W)ANHE_w (heap [i]));
  2506. }
  2507. }
  2508. static void noinline ecb_cold
  2509. array_verify (EV_P_ W *ws, int cnt)
  2510. {
  2511. while (cnt--)
  2512. {
  2513. assert (("libev: active index mismatch", ev_active (ws [cnt]) == cnt + 1));
  2514. verify_watcher (EV_A_ ws [cnt]);
  2515. }
  2516. }
  2517. #endif
  2518. #if EV_FEATURE_API
  2519. void ecb_cold
  2520. ev_verify (EV_P) EV_THROW
  2521. {
  2522. #if EV_VERIFY
  2523. int i;
  2524. WL w, w2;
  2525. assert (activecnt >= -1);
  2526. assert (fdchangemax >= fdchangecnt);
  2527. for (i = 0; i < fdchangecnt; ++i)
  2528. assert (("libev: negative fd in fdchanges", fdchanges [i] >= 0));
  2529. assert (anfdmax >= 0);
  2530. for (i = 0; i < anfdmax; ++i)
  2531. {
  2532. int j = 0;
  2533. for (w = w2 = anfds [i].head; w; w = w->next)
  2534. {
  2535. verify_watcher (EV_A_ (W)w);
  2536. if (j++ & 1)
  2537. {
  2538. assert (("libev: io watcher list contains a loop", w != w2));
  2539. w2 = w2->next;
  2540. }
  2541. assert (("libev: inactive fd watcher on anfd list", ev_active (w) == 1));
  2542. assert (("libev: fd mismatch between watcher and anfd", ((ev_io *)w)->fd == i));
  2543. }
  2544. }
  2545. assert (timermax >= timercnt);
  2546. verify_heap (EV_A_ timers, timercnt);
  2547. #if EV_PERIODIC_ENABLE
  2548. assert (periodicmax >= periodiccnt);
  2549. verify_heap (EV_A_ periodics, periodiccnt);
  2550. #endif
  2551. for (i = NUMPRI; i--; )
  2552. {
  2553. assert (pendingmax [i] >= pendingcnt [i]);
  2554. #if EV_IDLE_ENABLE
  2555. assert (idleall >= 0);
  2556. assert (idlemax [i] >= idlecnt [i]);
  2557. array_verify (EV_A_ (W *)idles [i], idlecnt [i]);
  2558. #endif
  2559. }
  2560. #if EV_FORK_ENABLE
  2561. assert (forkmax >= forkcnt);
  2562. array_verify (EV_A_ (W *)forks, forkcnt);
  2563. #endif
  2564. #if EV_CLEANUP_ENABLE
  2565. assert (cleanupmax >= cleanupcnt);
  2566. array_verify (EV_A_ (W *)cleanups, cleanupcnt);
  2567. #endif
  2568. #if EV_ASYNC_ENABLE
  2569. assert (asyncmax >= asynccnt);
  2570. array_verify (EV_A_ (W *)asyncs, asynccnt);
  2571. #endif
  2572. #if EV_PREPARE_ENABLE
  2573. assert (preparemax >= preparecnt);
  2574. array_verify (EV_A_ (W *)prepares, preparecnt);
  2575. #endif
  2576. #if EV_CHECK_ENABLE
  2577. assert (checkmax >= checkcnt);
  2578. array_verify (EV_A_ (W *)checks, checkcnt);
  2579. #endif
  2580. # if 0
  2581. #if EV_CHILD_ENABLE
  2582. for (w = (ev_child *)childs [chain & ((EV_PID_HASHSIZE) - 1)]; w; w = (ev_child *)((WL)w)->next)
  2583. for (signum = EV_NSIG; signum--; ) if (signals [signum].pending)
  2584. #endif
  2585. # endif
  2586. #endif
  2587. }
  2588. #endif
  2589. #if EV_MULTIPLICITY
  2590. struct ev_loop * ecb_cold
  2591. #else
  2592. int
  2593. #endif
  2594. ev_default_loop (unsigned int flags) EV_THROW
  2595. {
  2596. if (!ev_default_loop_ptr)
  2597. {
  2598. #if EV_MULTIPLICITY
  2599. EV_P = ev_default_loop_ptr = &default_loop_struct;
  2600. #else
  2601. ev_default_loop_ptr = 1;
  2602. #endif
  2603. loop_init (EV_A_ flags);
  2604. if (ev_backend (EV_A))
  2605. {
  2606. #if EV_CHILD_ENABLE
  2607. ev_signal_init (&childev, childcb, SIGCHLD);
  2608. ev_set_priority (&childev, EV_MAXPRI);
  2609. ev_signal_start (EV_A_ &childev);
  2610. ev_unref (EV_A); /* child watcher should not keep loop alive */
  2611. #endif
  2612. }
  2613. else
  2614. ev_default_loop_ptr = 0;
  2615. }
  2616. return ev_default_loop_ptr;
  2617. }
  2618. void
  2619. ev_loop_fork (EV_P) EV_THROW
  2620. {
  2621. postfork = 1;
  2622. }
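#if 0
/* illustrative sketch, not compiled and not part of libev: ev_loop_fork () only
 * sets postfork; the actual re-arming is done by loop_fork () at the start of
 * the next ev_run iteration, so a child process merely flags the loop it keeps
 * using, e.g. from a pthread_atfork child handler (assumes EV_MULTIPLICITY;
 * "post_fork_child" is a made-up name). */
static void
post_fork_child (void)
{
  ev_loop_fork (EV_DEFAULT); /* re-arming happens on the next ev_run iteration */
}
#endif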
  2623. /*****************************************************************************/
  2624. void
  2625. ev_invoke (EV_P_ void *w, int revents)
  2626. {
  2627. EV_CB_INVOKE ((W)w, revents);
  2628. }
  2629. unsigned int
  2630. ev_pending_count (EV_P) EV_THROW
  2631. {
  2632. int pri;
  2633. unsigned int count = 0;
  2634. for (pri = NUMPRI; pri--; )
  2635. count += pendingcnt [pri];
  2636. return count;
  2637. }
  2638. void noinline
  2639. ev_invoke_pending (EV_P)
  2640. {
  2641. pendingpri = NUMPRI;
  2642. while (pendingpri) /* pendingpri possibly gets modified in the inner loop */
  2643. {
  2644. --pendingpri;
  2645. while (pendingcnt [pendingpri])
  2646. {
  2647. ANPENDING *p = pendings [pendingpri] + --pendingcnt [pendingpri];
  2648. p->w->pending = 0;
  2649. EV_CB_INVOKE (p->w, p->events);
  2650. EV_FREQUENT_CHECK;
  2651. }
  2652. }
  2653. }
  2654. #if EV_IDLE_ENABLE
  2655. /* make idle watchers pending. this handles the "call-idle */
  2656. /* only when higher priorities are idle" logic */
  2657. inline_size void
  2658. idle_reify (EV_P)
  2659. {
  2660. if (expect_false (idleall))
  2661. {
  2662. int pri;
  2663. for (pri = NUMPRI; pri--; )
  2664. {
  2665. if (pendingcnt [pri])
  2666. break;
  2667. if (idlecnt [pri])
  2668. {
  2669. queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE);
  2670. break;
  2671. }
  2672. }
  2673. }
  2674. }
  2675. #endif
  2676. /* make timers pending */
  2677. inline_size void
  2678. timers_reify (EV_P)
  2679. {
  2680. EV_FREQUENT_CHECK;
  2681. if (timercnt && ANHE_at (timers [HEAP0]) < mn_now)
  2682. {
  2683. do
  2684. {
  2685. ev_timer *w = (ev_timer *)ANHE_w (timers [HEAP0]);
  2686. /*assert (("libev: inactive timer on timer heap detected", ev_is_active (w)));*/
  2687. /* first reschedule or stop timer */
  2688. if (w->repeat)
  2689. {
  2690. ev_at (w) += w->repeat;
  2691. if (ev_at (w) < mn_now)
  2692. ev_at (w) = mn_now;
  2693. assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.));
  2694. ANHE_at_cache (timers [HEAP0]);
  2695. downheap (timers, timercnt, HEAP0);
  2696. }
  2697. else
  2698. ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */
  2699. EV_FREQUENT_CHECK;
  2700. feed_reverse (EV_A_ (W)w);
  2701. }
  2702. while (timercnt && ANHE_at (timers [HEAP0]) < mn_now);
  2703. feed_reverse_done (EV_A_ EV_TIMER);
  2704. }
  2705. }
  2706. #if EV_PERIODIC_ENABLE
  2707. static void noinline
  2708. periodic_recalc (EV_P_ ev_periodic *w)
  2709. {
  2710. ev_tstamp interval = w->interval > MIN_INTERVAL ? w->interval : MIN_INTERVAL;
  2711. ev_tstamp at = w->offset + interval * ev_floor ((ev_rt_now - w->offset) / interval);
  2712. /* the above almost always errs on the low side */
  2713. while (at <= ev_rt_now)
  2714. {
  2715. ev_tstamp nat = at + w->interval;
  2716. /* when resolution fails us, we use ev_rt_now */
  2717. if (expect_false (nat == at))
  2718. {
  2719. at = ev_rt_now;
  2720. break;
  2721. }
  2722. at = nat;
  2723. }
  2724. ev_at (w) = at;
  2725. }
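/* worked example (illustrative note): offset == 0., interval == 3600. and
 * ev_rt_now == 5000. give at == 3600. * ev_floor (5000. / 3600.) == 3600.; the
 * loop then advances it once to 7200., the next full hour.  ev_floor erring low
 * is harmless because the loop catches it; only a zero-sized step falls back to
 * ev_rt_now itself. */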
  2726. /* make periodics pending */
  2727. inline_size void
  2728. periodics_reify (EV_P)
  2729. {
  2730. EV_FREQUENT_CHECK;
  2731. while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now)
  2732. {
  2733. do
  2734. {
  2735. ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]);
  2736. /*assert (("libev: inactive timer on periodic heap detected", ev_is_active (w)));*/
  2737. /* first reschedule or stop timer */
  2738. if (w->reschedule_cb)
  2739. {
  2740. ev_at (w) = w->reschedule_cb (w, ev_rt_now);
  2741. assert (("libev: ev_periodic reschedule callback returned time in the past", ev_at (w) >= ev_rt_now));
  2742. ANHE_at_cache (periodics [HEAP0]);
  2743. downheap (periodics, periodiccnt, HEAP0);
  2744. }
  2745. else if (w->interval)
  2746. {
  2747. periodic_recalc (EV_A_ w);
  2748. ANHE_at_cache (periodics [HEAP0]);
  2749. downheap (periodics, periodiccnt, HEAP0);
  2750. }
  2751. else
  2752. ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */
  2753. EV_FREQUENT_CHECK;
  2754. feed_reverse (EV_A_ (W)w);
  2755. }
  2756. while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now);
  2757. feed_reverse_done (EV_A_ EV_PERIODIC);
  2758. }
  2759. }
  2760. /* simply recalculate all periodics */
  2761. /* TODO: maybe ensure that at least one event happens when jumping forward? */
  2762. static void noinline ecb_cold
  2763. periodics_reschedule (EV_P)
  2764. {
  2765. int i;
  2766. /* adjust periodics after time jump */
  2767. for (i = HEAP0; i < periodiccnt + HEAP0; ++i)
  2768. {
  2769. ev_periodic *w = (ev_periodic *)ANHE_w (periodics [i]);
  2770. if (w->reschedule_cb)
  2771. ev_at (w) = w->reschedule_cb (w, ev_rt_now);
  2772. else if (w->interval)
  2773. periodic_recalc (EV_A_ w);
  2774. ANHE_at_cache (periodics [i]);
  2775. }
  2776. reheap (periodics, periodiccnt);
  2777. }
  2778. #endif
  2779. /* adjust all timers by a given offset */
  2780. static void noinline ecb_cold
  2781. timers_reschedule (EV_P_ ev_tstamp adjust)
  2782. {
  2783. int i;
  2784. for (i = 0; i < timercnt; ++i)
  2785. {
  2786. ANHE *he = timers + i + HEAP0;
  2787. ANHE_w (*he)->at += adjust;
  2788. ANHE_at_cache (*he);
  2789. }
  2790. }
  2791. /* fetch new monotonic and realtime times from the kernel */
  2792. /* also detect if there was a timejump, and act accordingly */
  2793. inline_speed void
  2794. time_update (EV_P_ ev_tstamp max_block)
  2795. {
  2796. #if EV_USE_MONOTONIC
  2797. if (expect_true (have_monotonic))
  2798. {
  2799. int i;
  2800. ev_tstamp odiff = rtmn_diff;
  2801. mn_now = get_clock ();
  2802. /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
  2803. /* interpolate in the meantime */
  2804. if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
  2805. {
  2806. ev_rt_now = rtmn_diff + mn_now;
  2807. return;
  2808. }
  2809. now_floor = mn_now;
  2810. ev_rt_now = ev_time ();
  2811. /* loop a few times, before making important decisions.
  2812. * on the choice of "4": one iteration isn't enough,
  2813. * in case we get preempted during the calls to
  2814. * ev_time and get_clock. a second call is almost guaranteed
  2815. * to succeed in that case, though. and looping a few more times
  2816. * doesn't hurt either as we only do this on time-jumps or
  2817. * in the unlikely event of having been preempted here.
  2818. */
  2819. for (i = 4; --i; )
  2820. {
  2821. ev_tstamp diff;
  2822. rtmn_diff = ev_rt_now - mn_now;
  2823. diff = odiff - rtmn_diff;
  2824. if (expect_true ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP))
  2825. return; /* all is well */
  2826. ev_rt_now = ev_time ();
  2827. mn_now = get_clock ();
  2828. now_floor = mn_now;
  2829. }
  2830. /* no timer adjustment, as the monotonic clock doesn't jump */
  2831. /* timers_reschedule (EV_A_ rtmn_diff - odiff) */
  2832. # if EV_PERIODIC_ENABLE
  2833. periodics_reschedule (EV_A);
  2834. # endif
  2835. }
  2836. else
  2837. #endif
  2838. {
  2839. ev_rt_now = ev_time ();
  2840. if (expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP))
  2841. {
  2842. /* adjust timers. this is easy, as the offset is the same for all of them */
  2843. timers_reschedule (EV_A_ ev_rt_now - mn_now);
  2844. #if EV_PERIODIC_ENABLE
  2845. periodics_reschedule (EV_A);
  2846. #endif
  2847. }
  2848. mn_now = ev_rt_now;
  2849. }
  2850. }
  2851. int
  2852. ev_run (EV_P_ int flags)
  2853. {
  2854. #if EV_FEATURE_API
  2855. ++loop_depth;
  2856. #endif
  2857. assert (("libev: ev_loop recursion during release detected", loop_done != EVBREAK_RECURSE));
  2858. loop_done = EVBREAK_CANCEL;
  2859. EV_INVOKE_PENDING; /* in case we recurse, ensure ordering stays nice and clean */
  2860. do
  2861. {
  2862. #if EV_VERIFY >= 2
  2863. ev_verify (EV_A);
  2864. #endif
  2865. #ifndef _WIN32
  2866. if (expect_false (curpid)) /* penalise the forking check even more */
  2867. if (expect_false (getpid () != curpid))
  2868. {
  2869. curpid = getpid ();
  2870. postfork = 1;
  2871. }
  2872. #endif
  2873. #if EV_FORK_ENABLE
  2874. /* we might have forked, so queue fork handlers */
  2875. if (expect_false (postfork))
  2876. if (forkcnt)
  2877. {
  2878. queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
  2879. EV_INVOKE_PENDING;
  2880. }
  2881. #endif
  2882. #if EV_PREPARE_ENABLE
  2883. /* queue prepare watchers (and execute them) */
  2884. if (expect_false (preparecnt))
  2885. {
  2886. queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
  2887. EV_INVOKE_PENDING;
  2888. }
  2889. #endif
  2890. if (expect_false (loop_done))
  2891. break;
  2892. /* we might have forked, so reify kernel state if necessary */
  2893. if (expect_false (postfork))
  2894. loop_fork (EV_A);
  2895. /* update fd-related kernel structures */
  2896. fd_reify (EV_A);
  2897. /* calculate blocking time */
  2898. {
  2899. ev_tstamp waittime = 0.;
  2900. ev_tstamp sleeptime = 0.;
  2901. /* remember old timestamp for io_blocktime calculation */
  2902. ev_tstamp prev_mn_now = mn_now;
  2903. /* update time to cancel out callback processing overhead */
  2904. time_update (EV_A_ 1e100);
  2905. /* from now on, we want a pipe-wake-up */
  2906. pipe_write_wanted = 1;
  2907. ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */
  2908. if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
  2909. {
  2910. waittime = MAX_BLOCKTIME;
  2911. if (timercnt)
  2912. {
  2913. ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now;
  2914. if (waittime > to) waittime = to;
  2915. }
  2916. #if EV_PERIODIC_ENABLE
  2917. if (periodiccnt)
  2918. {
  2919. ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now;
  2920. if (waittime > to) waittime = to;
  2921. }
  2922. #endif
  2923. /* don't let timeouts decrease the waittime below timeout_blocktime */
  2924. if (expect_false (waittime < timeout_blocktime))
  2925. waittime = timeout_blocktime;
  2926. /* at this point, we NEED to wait, so we have to ensure */
  2927. /* to pass a minimum nonzero value to the backend */
  2928. if (expect_false (waittime < backend_mintime))
  2929. waittime = backend_mintime;
  2930. /* extra check because io_blocktime is commonly 0 */
  2931. if (expect_false (io_blocktime))
  2932. {
  2933. sleeptime = io_blocktime - (mn_now - prev_mn_now);
  2934. if (sleeptime > waittime - backend_mintime)
  2935. sleeptime = waittime - backend_mintime;
  2936. if (expect_true (sleeptime > 0.))
  2937. {
  2938. ev_sleep (sleeptime);
  2939. waittime -= sleeptime;
  2940. }
  2941. }
  2942. }
  2943. #if EV_FEATURE_API
  2944. ++loop_count;
  2945. #endif
  2946. assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */
  2947. backend_poll (EV_A_ waittime);
  2948. assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */
  2949. pipe_write_wanted = 0; /* just an optimisation, no fence needed */
  2950. ECB_MEMORY_FENCE_ACQUIRE;
  2951. if (pipe_write_skipped)
  2952. {
  2953. assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w)));
  2954. ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
  2955. }
  2956. /* update ev_rt_now, do magic */
  2957. time_update (EV_A_ waittime + sleeptime);
  2958. }
  2959. /* queue pending timers and reschedule them */
  2960. timers_reify (EV_A); /* relative timers called last */
  2961. #if EV_PERIODIC_ENABLE
  2962. periodics_reify (EV_A); /* absolute timers called first */
  2963. #endif
  2964. #if EV_IDLE_ENABLE
  2965. /* queue idle watchers unless other events are pending */
  2966. idle_reify (EV_A);
  2967. #endif
  2968. #if EV_CHECK_ENABLE
  2969. /* queue check watchers, to be executed first */
  2970. if (expect_false (checkcnt))
  2971. queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
  2972. #endif
  2973. EV_INVOKE_PENDING;
  2974. }
  2975. while (expect_true (
  2976. activecnt
  2977. && !loop_done
  2978. && !(flags & (EVRUN_ONCE | EVRUN_NOWAIT))
  2979. ));
  2980. if (loop_done == EVBREAK_ONE)
  2981. loop_done = EVBREAK_CANCEL;
  2982. #if EV_FEATURE_API
  2983. --loop_depth;
  2984. #endif
  2985. return activecnt;
  2986. }
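#if 0
/* illustrative sketch, not compiled and not part of libev: minimal use of the
 * loop above - one stdin watcher, run until its callback breaks the loop
 * (assumes EV_MULTIPLICITY; "stdin_cb"/"run_stdin_once" are made-up names). */
static void
stdin_cb (EV_P_ ev_io *w, int revents)
{
  ev_io_stop (EV_A_ w);
  ev_break (EV_A_ EVBREAK_ALL);
}

static void
run_stdin_once (struct ev_loop *loop)
{
  ev_io stdin_w;

  ev_io_init (&stdin_w, stdin_cb, /* fd */ 0, EV_READ);
  ev_io_start (loop, &stdin_w);
  ev_run (loop, 0);
}
#endif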
  2987. void
  2988. ev_break (EV_P_ int how) EV_THROW
  2989. {
  2990. loop_done = how;
  2991. }
  2992. void
  2993. ev_ref (EV_P) EV_THROW
  2994. {
  2995. ++activecnt;
  2996. }
  2997. void
  2998. ev_unref (EV_P) EV_THROW
  2999. {
  3000. --activecnt;
  3001. }
  3002. void
  3003. ev_now_update (EV_P) EV_THROW
  3004. {
  3005. time_update (EV_A_ 1e100);
  3006. }
  3007. void
  3008. ev_suspend (EV_P) EV_THROW
  3009. {
  3010. ev_now_update (EV_A);
  3011. }
  3012. void
  3013. ev_resume (EV_P) EV_THROW
  3014. {
  3015. ev_tstamp mn_prev = mn_now;
  3016. ev_now_update (EV_A);
  3017. timers_reschedule (EV_A_ mn_now - mn_prev);
  3018. #if EV_PERIODIC_ENABLE
  3019. /* TODO: really do this? */
  3020. periodics_reschedule (EV_A);
  3021. #endif
  3022. }
  3023. /*****************************************************************************/
  3024. /* singly-linked list management, used when the expected list length is short */
  3025. inline_size void
  3026. wlist_add (WL *head, WL elem)
  3027. {
  3028. elem->next = *head;
  3029. *head = elem;
  3030. }
  3031. inline_size void
  3032. wlist_del (WL *head, WL elem)
  3033. {
  3034. while (*head)
  3035. {
  3036. if (expect_true (*head == elem))
  3037. {
  3038. *head = elem->next;
  3039. break;
  3040. }
  3041. head = &(*head)->next;
  3042. }
  3043. }
  3044. /* internal, faster, version of ev_clear_pending */
  3045. inline_speed void
  3046. clear_pending (EV_P_ W w)
  3047. {
  3048. if (w->pending)
  3049. {
  3050. pendings [ABSPRI (w)][w->pending - 1].w = (W)&pending_w;
  3051. w->pending = 0;
  3052. }
  3053. }
  3054. int
  3055. ev_clear_pending (EV_P_ void *w) EV_THROW
  3056. {
  3057. W w_ = (W)w;
  3058. int pending = w_->pending;
  3059. if (expect_true (pending))
  3060. {
  3061. ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1;
  3062. p->w = (W)&pending_w;
  3063. w_->pending = 0;
  3064. return p->events;
  3065. }
  3066. else
  3067. return 0;
  3068. }
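/* note (illustrative, not part of libev): cleared slots are not compacted but
 * redirected to the dummy pending_w prepare watcher, whose pendingcb does
 * nothing, so ev_invoke_pending can stay a plain backwards array walk. */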
  3069. inline_size void
  3070. pri_adjust (EV_P_ W w)
  3071. {
  3072. int pri = ev_priority (w);
  3073. pri = pri < EV_MINPRI ? EV_MINPRI : pri;
  3074. pri = pri > EV_MAXPRI ? EV_MAXPRI : pri;
  3075. ev_set_priority (w, pri);
  3076. }
  3077. inline_speed void
  3078. ev_start (EV_P_ W w, int active)
  3079. {
  3080. pri_adjust (EV_A_ w);
  3081. w->active = active;
  3082. ev_ref (EV_A);
  3083. }
  3084. inline_size void
  3085. ev_stop (EV_P_ W w)
  3086. {
  3087. ev_unref (EV_A);
  3088. w->active = 0;
  3089. }
  3090. /*****************************************************************************/
  3091. void noinline
  3092. ev_io_start (EV_P_ ev_io *w) EV_THROW
  3093. {
  3094. int fd = w->fd;
  3095. if (expect_false (ev_is_active (w)))
  3096. return;
  3097. assert (("libev: ev_io_start called with negative fd", fd >= 0));
  3098. assert (("libev: ev_io_start called with illegal event mask", !(w->events & ~(EV__IOFDSET | EV_READ | EV_WRITE))));
  3099. EV_FREQUENT_CHECK;
  3100. ev_start (EV_A_ (W)w, 1);
  3101. array_needsize (ANFD, anfds, anfdmax, fd + 1, array_init_zero);
  3102. wlist_add (&anfds[fd].head, (WL)w);
  3103. /* common bug, apparently */
  3104. assert (("libev: ev_io_start called with corrupted watcher", ((WL)w)->next != (WL)w));
  3105. fd_change (EV_A_ fd, w->events & EV__IOFDSET | EV_ANFD_REIFY);
  3106. w->events &= ~EV__IOFDSET;
  3107. EV_FREQUENT_CHECK;
  3108. }
  3109. void noinline
  3110. ev_io_stop (EV_P_ ev_io *w) EV_THROW
  3111. {
  3112. clear_pending (EV_A_ (W)w);
  3113. if (expect_false (!ev_is_active (w)))
  3114. return;
  3115. assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));
  3116. EV_FREQUENT_CHECK;
  3117. wlist_del (&anfds[w->fd].head, (WL)w);
  3118. ev_stop (EV_A_ (W)w);
  3119. fd_change (EV_A_ w->fd, EV_ANFD_REIFY);
  3120. EV_FREQUENT_CHECK;
  3121. }
  3122. void noinline
  3123. ev_timer_start (EV_P_ ev_timer *w) EV_THROW
  3124. {
  3125. if (expect_false (ev_is_active (w)))
  3126. return;
  3127. ev_at (w) += mn_now;
  3128. assert (("libev: ev_timer_start called with negative timer repeat value", w->repeat >= 0.));
  3129. EV_FREQUENT_CHECK;
  3130. ++timercnt;
  3131. ev_start (EV_A_ (W)w, timercnt + HEAP0 - 1);
  3132. array_needsize (ANHE, timers, timermax, ev_active (w) + 1, EMPTY2);
  3133. ANHE_w (timers [ev_active (w)]) = (WT)w;
  3134. ANHE_at_cache (timers [ev_active (w)]);
  3135. upheap (timers, ev_active (w));
  3136. EV_FREQUENT_CHECK;
  3137. /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/
  3138. }
  3139. void noinline
  3140. ev_timer_stop (EV_P_ ev_timer *w) EV_THROW
  3141. {
  3142. clear_pending (EV_A_ (W)w);
  3143. if (expect_false (!ev_is_active (w)))
  3144. return;
  3145. EV_FREQUENT_CHECK;
  3146. {
  3147. int active = ev_active (w);
  3148. assert (("libev: internal timer heap corruption", ANHE_w (timers [active]) == (WT)w));
  3149. --timercnt;
  3150. if (expect_true (active < timercnt + HEAP0))
  3151. {
  3152. timers [active] = timers [timercnt + HEAP0];
  3153. adjustheap (timers, timercnt, active);
  3154. }
  3155. }
  3156. ev_at (w) -= mn_now;
  3157. ev_stop (EV_A_ (W)w);
  3158. EV_FREQUENT_CHECK;
  3159. }
  3160. void noinline
  3161. ev_timer_again (EV_P_ ev_timer *w) EV_THROW
  3162. {
  3163. EV_FREQUENT_CHECK;
  3164. clear_pending (EV_A_ (W)w);
  3165. if (ev_is_active (w))
  3166. {
  3167. if (w->repeat)
  3168. {
  3169. ev_at (w) = mn_now + w->repeat;
  3170. ANHE_at_cache (timers [ev_active (w)]);
  3171. adjustheap (timers, timercnt, ev_active (w));
  3172. }
  3173. else
  3174. ev_timer_stop (EV_A_ w);
  3175. }
  3176. else if (w->repeat)
  3177. {
  3178. ev_at (w) = w->repeat;
  3179. ev_timer_start (EV_A_ w);
  3180. }
  3181. EV_FREQUENT_CHECK;
  3182. }
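
/* Illustrative sketch (not compiled): the usual ev_timer_again () pattern, a
 * 60 second inactivity timeout that is cheaply re-armed on every activity
 * instead of being stopped and restarted.  Assumes the default
 * EV_MULTIPLICITY build; timeout_cb, on_activity and the 60. value are
 * examples only.
 */
#if 0
static ev_timer timeout_watcher;

static void
timeout_cb (EV_P_ ev_timer *w, int revents)
{
  /* no activity for 60 seconds */
}

static void
setup_timeout (struct ev_loop *loop)
{
  ev_timer_init (&timeout_watcher, timeout_cb, 0., 60.);
  ev_timer_again (loop, &timeout_watcher); /* start the repeating timer */
}

static void
on_activity (struct ev_loop *loop)
{
  ev_timer_again (loop, &timeout_watcher); /* push the timeout back */
}
#endif
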
ev_tstamp
ev_timer_remaining (EV_P_ ev_timer *w) EV_THROW
{
  return ev_at (w) - (ev_is_active (w) ? mn_now : 0.);
}

#if EV_PERIODIC_ENABLE
void noinline
ev_periodic_start (EV_P_ ev_periodic *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  if (w->reschedule_cb)
    ev_at (w) = w->reschedule_cb (w, ev_rt_now);
  else if (w->interval)
    {
      assert (("libev: ev_periodic_start called with negative interval value", w->interval >= 0.));
      periodic_recalc (EV_A_ w);
    }
  else
    ev_at (w) = w->offset;

  EV_FREQUENT_CHECK;

  ++periodiccnt;
  ev_start (EV_A_ (W)w, periodiccnt + HEAP0 - 1);
  array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1, EMPTY2);
  ANHE_w (periodics [ev_active (w)]) = (WT)w;
  ANHE_at_cache (periodics [ev_active (w)]);
  upheap (periodics, ev_active (w));

  EV_FREQUENT_CHECK;

  /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/
}

void noinline
ev_periodic_stop (EV_P_ ev_periodic *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    assert (("libev: internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w));

    --periodiccnt;

    if (expect_true (active < periodiccnt + HEAP0))
      {
        periodics [active] = periodics [periodiccnt + HEAP0];
        adjustheap (periodics, periodiccnt, active);
      }
  }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}

void noinline
ev_periodic_again (EV_P_ ev_periodic *w) EV_THROW
{
  /* TODO: use adjustheap and recalculation */
  ev_periodic_stop (EV_A_ w);
  ev_periodic_start (EV_A_ w);
}
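
/* Illustrative sketch (not compiled): an ev_periodic that fires at the start
 * of every wall-clock hour; unlike an ev_timer it is scheduled against the
 * wall clock and adjusts when the system time is changed.  Assumes the
 * default EV_MULTIPLICITY build; hourly_cb is an example name only.
 */
#if 0
static void
hourly_cb (EV_P_ ev_periodic *w, int revents)
{
  /* runs at hh:00:00 wall-clock time */
}

static void
setup_hourly (struct ev_loop *loop)
{
  static ev_periodic hourly;

  ev_periodic_init (&hourly, hourly_cb, 0., 3600., 0);
  ev_periodic_start (loop, &hourly);
}
#endif
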
#endif

#ifndef SA_RESTART
# define SA_RESTART 0
#endif

#if EV_SIGNAL_ENABLE

void noinline
ev_signal_start (EV_P_ ev_signal *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG));

#if EV_MULTIPLICITY
  assert (("libev: a signal must not be attached to two different loops",
           !signals [w->signum - 1].loop || signals [w->signum - 1].loop == loop));

  signals [w->signum - 1].loop = EV_A;
  ECB_MEMORY_FENCE_RELEASE;
#endif

  EV_FREQUENT_CHECK;

#if EV_USE_SIGNALFD
  if (sigfd == -2)
    {
      sigfd = signalfd (-1, &sigfd_set, SFD_NONBLOCK | SFD_CLOEXEC);
      if (sigfd < 0 && errno == EINVAL)
        sigfd = signalfd (-1, &sigfd_set, 0); /* retry without flags */

      if (sigfd >= 0)
        {
          fd_intern (sigfd); /* doing it twice will not hurt */

          sigemptyset (&sigfd_set);

          ev_io_init (&sigfd_w, sigfdcb, sigfd, EV_READ);
          ev_set_priority (&sigfd_w, EV_MAXPRI);
          ev_io_start (EV_A_ &sigfd_w);
          ev_unref (EV_A); /* signalfd watcher should not keep loop alive */
        }
    }

  if (sigfd >= 0)
    {
      /* TODO: check .head */
      sigaddset (&sigfd_set, w->signum);
      sigprocmask (SIG_BLOCK, &sigfd_set, 0);

      signalfd (sigfd, &sigfd_set, 0);
    }
#endif

  ev_start (EV_A_ (W)w, 1);
  wlist_add (&signals [w->signum - 1].head, (WL)w);

  if (!((WL)w)->next)
# if EV_USE_SIGNALFD
    if (sigfd < 0) /*TODO*/
# endif
      {
# ifdef _WIN32
        evpipe_init (EV_A);

        signal (w->signum, ev_sighandler);
# else
        struct sigaction sa;

        evpipe_init (EV_A);

        sa.sa_handler = ev_sighandler;
        sigfillset (&sa.sa_mask);
        sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */
        sigaction (w->signum, &sa, 0);

        if (origflags & EVFLAG_NOSIGMASK)
          {
            sigemptyset (&sa.sa_mask);
            sigaddset (&sa.sa_mask, w->signum);
            sigprocmask (SIG_UNBLOCK, &sa.sa_mask, 0);
          }
#endif
      }

  EV_FREQUENT_CHECK;
}

void noinline
ev_signal_stop (EV_P_ ev_signal *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  wlist_del (&signals [w->signum - 1].head, (WL)w);
  ev_stop (EV_A_ (W)w);

  if (!signals [w->signum - 1].head)
    {
#if EV_MULTIPLICITY
      signals [w->signum - 1].loop = 0; /* unattach from signal */
#endif
#if EV_USE_SIGNALFD
      if (sigfd >= 0)
        {
          sigset_t ss;

          sigemptyset (&ss);
          sigaddset (&ss, w->signum);
          sigdelset (&sigfd_set, w->signum);

          signalfd (sigfd, &sigfd_set, 0);
          sigprocmask (SIG_UNBLOCK, &ss, 0);
        }
      else
#endif
        signal (w->signum, SIG_DFL);
    }

  EV_FREQUENT_CHECK;
}
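
/* Illustrative sketch (not compiled): catching SIGINT with a signal watcher
 * and breaking out of all nested ev_run calls from its callback.  Assumes
 * <signal.h> is included and the default EV_MULTIPLICITY build; sigint_cb
 * and setup_sigint are example names only.
 */
#if 0
static void
sigint_cb (EV_P_ ev_signal *w, int revents)
{
  ev_break (EV_A_ EVBREAK_ALL); /* leave ev_run */
}

static void
setup_sigint (struct ev_loop *loop)
{
  static ev_signal sigint_watcher;

  ev_signal_init (&sigint_watcher, sigint_cb, SIGINT);
  ev_signal_start (loop, &sigint_watcher);
}
#endif
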
#endif

#if EV_CHILD_ENABLE

void
ev_child_start (EV_P_ ev_child *w) EV_THROW
{
#if EV_MULTIPLICITY
  assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
#endif
  if (expect_false (ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, 1);
  wlist_add (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w);

  EV_FREQUENT_CHECK;
}

void
ev_child_stop (EV_P_ ev_child *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  wlist_del (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w);
  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}

#endif

#if EV_STAT_ENABLE

# ifdef _WIN32
#  undef lstat
#  define lstat(a,b) _stati64 (a,b)
# endif

#define DEF_STAT_INTERVAL  5.0074891
#define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */
#define MIN_STAT_INTERVAL  0.1074891

static void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents);

#if EV_USE_INOTIFY

/* the * 2 is to allow for alignment padding, which for some reason is >> 8 */
# define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX)

static void noinline
infy_add (EV_P_ ev_stat *w)
{
  w->wd = inotify_add_watch (fs_fd, w->path,
                             IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY
                             | IN_CREATE | IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO
                             | IN_DONT_FOLLOW | IN_MASK_ADD);

  if (w->wd >= 0)
    {
      struct statfs sfs;

      /* now local changes will be tracked by inotify, but remote changes won't */
      /* unless the filesystem is known to be local, we therefore still poll */
      /* also do poll on <2.6.25, but with normal frequency */

      if (!fs_2625)
        w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;
      else if (!statfs (w->path, &sfs)
               && (sfs.f_type == 0x1373 /* devfs */
                   || sfs.f_type == 0x4006 /* fat */
                   || sfs.f_type == 0x4d44 /* msdos */
                   || sfs.f_type == 0xEF53 /* ext2/3 */
                   || sfs.f_type == 0x72b6 /* jffs2 */
                   || sfs.f_type == 0x858458f6 /* ramfs */
                   || sfs.f_type == 0x5346544e /* ntfs */
                   || sfs.f_type == 0x3153464a /* jfs */
                   || sfs.f_type == 0x9123683e /* btrfs */
                   || sfs.f_type == 0x52654973 /* reiser3 */
                   || sfs.f_type == 0x01021994 /* tmpfs */
                   || sfs.f_type == 0x58465342 /* xfs */))
        w->timer.repeat = 0.; /* filesystem is local, kernel new enough */
      else
        w->timer.repeat = w->interval ? w->interval : NFS_STAT_INTERVAL; /* remote, use reduced frequency */
    }
  else
    {
      /* can't use inotify, continue to stat */
      w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;

      /* if path is not there, monitor some parent directory for speedup hints */
      /* note that exceeding the hardcoded path limit is not a correctness issue, */
      /* but an efficiency issue only */
      if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096)
        {
          char path [4096];
          strcpy (path, w->path);

          do
            {
              int mask = IN_MASK_ADD | IN_DELETE_SELF | IN_MOVE_SELF
                       | (errno == EACCES ? IN_ATTRIB : IN_CREATE | IN_MOVED_TO);

              char *pend = strrchr (path, '/');

              if (!pend || pend == path)
                break;

              *pend = 0;
              w->wd = inotify_add_watch (fs_fd, path, mask);
            }
          while (w->wd < 0 && (errno == ENOENT || errno == EACCES));
        }
    }

  if (w->wd >= 0)
    wlist_add (&fs_hash [w->wd & ((EV_INOTIFY_HASHSIZE) - 1)].head, (WL)w);

  /* now re-arm timer, if required */
  if (ev_is_active (&w->timer)) ev_ref (EV_A);
  ev_timer_again (EV_A_ &w->timer);
  if (ev_is_active (&w->timer)) ev_unref (EV_A);
}

static void noinline
infy_del (EV_P_ ev_stat *w)
{
  int slot;
  int wd = w->wd;

  if (wd < 0)
    return;

  w->wd = -2;
  slot = wd & ((EV_INOTIFY_HASHSIZE) - 1);
  wlist_del (&fs_hash [slot].head, (WL)w);

  /* remove this watcher, if others are watching it, they will rearm */
  inotify_rm_watch (fs_fd, wd);
}

static void noinline
infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev)
{
  if (slot < 0)
    /* overflow, need to check for all hash slots */
    for (slot = 0; slot < (EV_INOTIFY_HASHSIZE); ++slot)
      infy_wd (EV_A_ slot, wd, ev);
  else
    {
      WL w_;

      for (w_ = fs_hash [slot & ((EV_INOTIFY_HASHSIZE) - 1)].head; w_; )
        {
          ev_stat *w = (ev_stat *)w_;
          w_ = w_->next; /* lets us remove this watcher and all before it */

          if (w->wd == wd || wd == -1)
            {
              if (ev->mask & (IN_IGNORED | IN_UNMOUNT | IN_DELETE_SELF))
                {
                  wlist_del (&fs_hash [slot & ((EV_INOTIFY_HASHSIZE) - 1)].head, (WL)w);
                  w->wd = -1;
                  infy_add (EV_A_ w); /* re-add, no matter what */
                }

              stat_timer_cb (EV_A_ &w->timer, 0);
            }
        }
    }
}

static void
infy_cb (EV_P_ ev_io *w, int revents)
{
  char buf [EV_INOTIFY_BUFSIZE];
  int ofs;
  int len = read (fs_fd, buf, sizeof (buf));

  for (ofs = 0; ofs < len; )
    {
      struct inotify_event *ev = (struct inotify_event *)(buf + ofs);
      infy_wd (EV_A_ ev->wd, ev->wd, ev);
      ofs += sizeof (struct inotify_event) + ev->len;
    }
}

inline_size void ecb_cold
ev_check_2625 (EV_P)
{
  /* kernels < 2.6.25 are borked
   * http://www.ussg.indiana.edu/hypermail/linux/kernel/0711.3/1208.html
   */
  if (ev_linux_version () < 0x020619)
    return;

  fs_2625 = 1;
}

inline_size int
infy_newfd (void)
{
#if defined IN_CLOEXEC && defined IN_NONBLOCK
  int fd = inotify_init1 (IN_CLOEXEC | IN_NONBLOCK);
  if (fd >= 0)
    return fd;
#endif
  return inotify_init ();
}

inline_size void
infy_init (EV_P)
{
  if (fs_fd != -2)
    return;

  fs_fd = -1;

  ev_check_2625 (EV_A);

  fs_fd = infy_newfd ();

  if (fs_fd >= 0)
    {
      fd_intern (fs_fd);
      ev_io_init (&fs_w, infy_cb, fs_fd, EV_READ);
      ev_set_priority (&fs_w, EV_MAXPRI);
      ev_io_start (EV_A_ &fs_w);
      ev_unref (EV_A);
    }
}

inline_size void
infy_fork (EV_P)
{
  int slot;

  if (fs_fd < 0)
    return;

  ev_ref (EV_A);
  ev_io_stop (EV_A_ &fs_w);
  close (fs_fd);
  fs_fd = infy_newfd ();

  if (fs_fd >= 0)
    {
      fd_intern (fs_fd);
      ev_io_set (&fs_w, fs_fd, EV_READ);
      ev_io_start (EV_A_ &fs_w);
      ev_unref (EV_A);
    }

  for (slot = 0; slot < (EV_INOTIFY_HASHSIZE); ++slot)
    {
      WL w_ = fs_hash [slot].head;
      fs_hash [slot].head = 0;

      while (w_)
        {
          ev_stat *w = (ev_stat *)w_;

          w_ = w_->next; /* lets us add this watcher */

          w->wd = -1;

          if (fs_fd >= 0)
            infy_add (EV_A_ w); /* re-add, no matter what */
          else
            {
              w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;
              if (ev_is_active (&w->timer)) ev_ref (EV_A);
              ev_timer_again (EV_A_ &w->timer);
              if (ev_is_active (&w->timer)) ev_unref (EV_A);
            }
        }
    }
}

#endif

#ifdef _WIN32
# define EV_LSTAT(p,b) _stati64 (p, b)
#else
# define EV_LSTAT(p,b) lstat (p, b)
#endif

void
ev_stat_stat (EV_P_ ev_stat *w) EV_THROW
{
  if (lstat (w->path, &w->attr) < 0)
    w->attr.st_nlink = 0;
  else if (!w->attr.st_nlink)
    w->attr.st_nlink = 1;
}

static void noinline
stat_timer_cb (EV_P_ ev_timer *w_, int revents)
{
  ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer));

  ev_statdata prev = w->attr;
  ev_stat_stat (EV_A_ w);

  /* memcmp doesn't work on netbsd, they.... do stuff to their struct stat */
  if (
    prev.st_dev      != w->attr.st_dev
    || prev.st_ino   != w->attr.st_ino
    || prev.st_mode  != w->attr.st_mode
    || prev.st_nlink != w->attr.st_nlink
    || prev.st_uid   != w->attr.st_uid
    || prev.st_gid   != w->attr.st_gid
    || prev.st_rdev  != w->attr.st_rdev
    || prev.st_size  != w->attr.st_size
    || prev.st_atime != w->attr.st_atime
    || prev.st_mtime != w->attr.st_mtime
    || prev.st_ctime != w->attr.st_ctime
  ) {
      /* we only update w->prev on actual differences */
      /* in case we test more often than invoke the callback, */
      /* to ensure that prev is always different to attr */
      w->prev = prev;

#if EV_USE_INOTIFY
      if (fs_fd >= 0)
        {
          infy_del (EV_A_ w);
          infy_add (EV_A_ w);
          ev_stat_stat (EV_A_ w); /* avoid race... */
        }
#endif

      ev_feed_event (EV_A_ w, EV_STAT);
    }
}

void
ev_stat_start (EV_P_ ev_stat *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  ev_stat_stat (EV_A_ w);

  if (w->interval < MIN_STAT_INTERVAL && w->interval)
    w->interval = MIN_STAT_INTERVAL;

  ev_timer_init (&w->timer, stat_timer_cb, 0., w->interval ? w->interval : DEF_STAT_INTERVAL);
  ev_set_priority (&w->timer, ev_priority (w));

#if EV_USE_INOTIFY
  infy_init (EV_A);

  if (fs_fd >= 0)
    infy_add (EV_A_ w);
  else
#endif
    {
      ev_timer_again (EV_A_ &w->timer);
      ev_unref (EV_A);
    }

  ev_start (EV_A_ (W)w, 1);

  EV_FREQUENT_CHECK;
}

void
ev_stat_stop (EV_P_ ev_stat *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

#if EV_USE_INOTIFY
  infy_del (EV_A_ w);
#endif

  if (ev_is_active (&w->timer))
    {
      ev_ref (EV_A);
      ev_timer_stop (EV_A_ &w->timer);
    }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
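
/* Illustrative sketch (not compiled): watching a configuration file for
 * changes with an ev_stat watcher; libev falls back to polling where inotify
 * is unavailable.  Assumes the default EV_MULTIPLICITY build; the path is an
 * example and reload_configuration () is a hypothetical application function.
 */
#if 0
static ev_stat config_watcher;

static void
config_cb (EV_P_ ev_stat *w, int revents)
{
  if (w->attr.st_nlink)
    /* file exists; w->attr holds the new stat data, w->prev the previous one */
    reload_configuration (); /* hypothetical */
}

static void
setup_config_watch (struct ev_loop *loop)
{
  ev_stat_init (&config_watcher, config_cb, "/etc/myapp.conf", 0.);
  ev_stat_start (loop, &config_watcher);
}
#endif
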
#endif

#if EV_IDLE_ENABLE
void
ev_idle_start (EV_P_ ev_idle *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  pri_adjust (EV_A_ (W)w);

  EV_FREQUENT_CHECK;

  {
    int active = ++idlecnt [ABSPRI (w)];

    ++idleall;
    ev_start (EV_A_ (W)w, active);

    array_needsize (ev_idle *, idles [ABSPRI (w)], idlemax [ABSPRI (w)], active, EMPTY2);
    idles [ABSPRI (w)][active - 1] = w;
  }

  EV_FREQUENT_CHECK;
}

void
ev_idle_stop (EV_P_ ev_idle *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    idles [ABSPRI (w)][active - 1] = idles [ABSPRI (w)][--idlecnt [ABSPRI (w)]];
    ev_active (idles [ABSPRI (w)][active - 1]) = active;

    ev_stop (EV_A_ (W)w);
    --idleall;
  }

  EV_FREQUENT_CHECK;
}
#endif

#if EV_PREPARE_ENABLE
void
ev_prepare_start (EV_P_ ev_prepare *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, ++preparecnt);
  array_needsize (ev_prepare *, prepares, preparemax, preparecnt, EMPTY2);
  prepares [preparecnt - 1] = w;

  EV_FREQUENT_CHECK;
}

void
ev_prepare_stop (EV_P_ ev_prepare *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    prepares [active - 1] = prepares [--preparecnt];
    ev_active (prepares [active - 1]) = active;
  }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
#endif

#if EV_CHECK_ENABLE
void
ev_check_start (EV_P_ ev_check *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, ++checkcnt);
  array_needsize (ev_check *, checks, checkmax, checkcnt, EMPTY2);
  checks [checkcnt - 1] = w;

  EV_FREQUENT_CHECK;
}

void
ev_check_stop (EV_P_ ev_check *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    checks [active - 1] = checks [--checkcnt];
    ev_active (checks [active - 1]) = active;
  }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
#endif

#if EV_EMBED_ENABLE
void noinline
ev_embed_sweep (EV_P_ ev_embed *w) EV_THROW
{
  ev_run (w->other, EVRUN_NOWAIT);
}

static void
embed_io_cb (EV_P_ ev_io *io, int revents)
{
  ev_embed *w = (ev_embed *)(((char *)io) - offsetof (ev_embed, io));

  if (ev_cb (w))
    ev_feed_event (EV_A_ (W)w, EV_EMBED);
  else
    ev_run (w->other, EVRUN_NOWAIT);
}

static void
embed_prepare_cb (EV_P_ ev_prepare *prepare, int revents)
{
  ev_embed *w = (ev_embed *)(((char *)prepare) - offsetof (ev_embed, prepare));

  {
    EV_P = w->other;

    while (fdchangecnt)
      {
        fd_reify (EV_A);
        ev_run (EV_A_ EVRUN_NOWAIT);
      }
  }
}

static void
embed_fork_cb (EV_P_ ev_fork *fork_w, int revents)
{
  ev_embed *w = (ev_embed *)(((char *)fork_w) - offsetof (ev_embed, fork));

  ev_embed_stop (EV_A_ w);

  {
    EV_P = w->other;

    ev_loop_fork (EV_A);
    ev_run (EV_A_ EVRUN_NOWAIT);
  }

  ev_embed_start (EV_A_ w);
}

#if 0
static void
embed_idle_cb (EV_P_ ev_idle *idle, int revents)
{
  ev_idle_stop (EV_A_ idle);
}
#endif

void
ev_embed_start (EV_P_ ev_embed *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  {
    EV_P = w->other;
    assert (("libev: loop to be embedded is not embeddable", backend & ev_embeddable_backends ()));
    ev_io_init (&w->io, embed_io_cb, backend_fd, EV_READ);
  }

  EV_FREQUENT_CHECK;

  ev_set_priority (&w->io, ev_priority (w));
  ev_io_start (EV_A_ &w->io);

  ev_prepare_init (&w->prepare, embed_prepare_cb);
  ev_set_priority (&w->prepare, EV_MINPRI);
  ev_prepare_start (EV_A_ &w->prepare);

  ev_fork_init (&w->fork, embed_fork_cb);
  ev_fork_start (EV_A_ &w->fork);
  /*ev_idle_init (&w->idle, embed_idle_cb);*/
  ev_start (EV_A_ (W)w, 1);

  EV_FREQUENT_CHECK;
}

void
ev_embed_stop (EV_P_ ev_embed *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  ev_io_stop      (EV_A_ &w->io);
  ev_prepare_stop (EV_A_ &w->prepare);
  ev_fork_stop    (EV_A_ &w->fork);

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
#endif

#if EV_FORK_ENABLE
void
ev_fork_start (EV_P_ ev_fork *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, ++forkcnt);
  array_needsize (ev_fork *, forks, forkmax, forkcnt, EMPTY2);
  forks [forkcnt - 1] = w;

  EV_FREQUENT_CHECK;
}

void
ev_fork_stop (EV_P_ ev_fork *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    forks [active - 1] = forks [--forkcnt];
    ev_active (forks [active - 1]) = active;
  }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
#endif

#if EV_CLEANUP_ENABLE
void
ev_cleanup_start (EV_P_ ev_cleanup *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, ++cleanupcnt);
  array_needsize (ev_cleanup *, cleanups, cleanupmax, cleanupcnt, EMPTY2);
  cleanups [cleanupcnt - 1] = w;

  /* cleanup watchers should never keep a refcount on the loop */
  ev_unref (EV_A);

  EV_FREQUENT_CHECK;
}

void
ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;
  ev_ref (EV_A);

  {
    int active = ev_active (w);

    cleanups [active - 1] = cleanups [--cleanupcnt];
    ev_active (cleanups [active - 1]) = active;
  }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
#endif

#if EV_ASYNC_ENABLE
void
ev_async_start (EV_P_ ev_async *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  w->sent = 0;

  evpipe_init (EV_A);

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, ++asynccnt);
  array_needsize (ev_async *, asyncs, asyncmax, asynccnt, EMPTY2);
  asyncs [asynccnt - 1] = w;

  EV_FREQUENT_CHECK;
}

void
ev_async_stop (EV_P_ ev_async *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    asyncs [active - 1] = asyncs [--asynccnt];
    ev_active (asyncs [active - 1]) = active;
  }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}

void
ev_async_send (EV_P_ ev_async *w) EV_THROW
{
  w->sent = 1;
  evpipe_write (EV_A_ &async_pending);
}
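
/* Illustrative sketch (not compiled): waking the event loop from another
 * thread with an async watcher; ev_async_send () is one of the few libev
 * calls documented as safe to call from other threads.  Assumes <pthread.h>
 * and the default EV_MULTIPLICITY build; the job-queue handling is made up.
 */
#if 0
static ev_async wakeup_watcher;

static void
wakeup_cb (EV_P_ ev_async *w, int revents)
{
  /* runs in the loop thread; drain a hypothetical job queue here */
}

static void
setup_wakeup (struct ev_loop *loop)
{
  ev_async_init (&wakeup_watcher, wakeup_cb);
  ev_async_start (loop, &wakeup_watcher);
}

static void *
worker_thread (void *arg)
{
  struct ev_loop *loop = (struct ev_loop *)arg;

  /* ... enqueue work somewhere ... */
  ev_async_send (loop, &wakeup_watcher); /* wake the loop thread */
  return 0;
}
#endif
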
#endif

/*****************************************************************************/

struct ev_once
{
  ev_io io;
  ev_timer to;
  void (*cb)(int revents, void *arg);
  void *arg;
};

static void
once_cb (EV_P_ struct ev_once *once, int revents)
{
  void (*cb)(int revents, void *arg) = once->cb;
  void *arg = once->arg;

  ev_io_stop    (EV_A_ &once->io);
  ev_timer_stop (EV_A_ &once->to);
  ev_free (once);

  cb (revents, arg);
}

static void
once_cb_io (EV_P_ ev_io *w, int revents)
{
  struct ev_once *once = (struct ev_once *)(((char *)w) - offsetof (struct ev_once, io));

  once_cb (EV_A_ once, revents | ev_clear_pending (EV_A_ &once->to));
}

static void
once_cb_to (EV_P_ ev_timer *w, int revents)
{
  struct ev_once *once = (struct ev_once *)(((char *)w) - offsetof (struct ev_once, to));

  once_cb (EV_A_ once, revents | ev_clear_pending (EV_A_ &once->io));
}

void
ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) EV_THROW
{
  struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once));

  if (expect_false (!once))
    {
      cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMER, arg);
      return;
    }

  once->cb  = cb;
  once->arg = arg;

  ev_init (&once->io, once_cb_io);
  if (fd >= 0)
    {
      ev_io_set (&once->io, fd, events);
      ev_io_start (EV_A_ &once->io);
    }

  ev_init (&once->to, once_cb_to);
  if (timeout >= 0.)
    {
      ev_timer_set (&once->to, timeout, 0.);
      ev_timer_start (EV_A_ &once->to);
    }
}
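
/* Illustrative sketch (not compiled): waiting for a socket to become
 * readable, but for at most 10 seconds, without managing watchers by hand.
 * Assumes the default EV_MULTIPLICITY build; sock, the timeout value and the
 * callback body are examples only.
 */
#if 0
static void
socket_ready_cb (int revents, void *arg)
{
  if (revents & EV_TIMER)
    ;   /* timed out */
  else if (revents & EV_READ)
    ;   /* socket is readable */
}

static void
wait_for_socket (struct ev_loop *loop, int sock)
{
  ev_once (loop, sock, EV_READ, 10., socket_ready_cb, 0);
}
#endif
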
/*****************************************************************************/

#if EV_WALK_ENABLE
void ecb_cold
ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) EV_THROW
{
  int i, j;
  ev_watcher_list *wl, *wn;

  if (types & (EV_IO | EV_EMBED))
    for (i = 0; i < anfdmax; ++i)
      for (wl = anfds [i].head; wl; )
        {
          wn = wl->next;

#if EV_EMBED_ENABLE
          if (ev_cb ((ev_io *)wl) == embed_io_cb)
            {
              if (types & EV_EMBED)
                cb (EV_A_ EV_EMBED, ((char *)wl) - offsetof (struct ev_embed, io));
            }
          else
#endif
#if EV_USE_INOTIFY
          if (ev_cb ((ev_io *)wl) == infy_cb)
            ;
          else
#endif
          if ((ev_io *)wl != &pipe_w)
            if (types & EV_IO)
              cb (EV_A_ EV_IO, wl);

          wl = wn;
        }

  if (types & (EV_TIMER | EV_STAT))
    for (i = timercnt + HEAP0; i-- > HEAP0; )
#if EV_STAT_ENABLE
      /*TODO: timer is not always active*/
      if (ev_cb ((ev_timer *)ANHE_w (timers [i])) == stat_timer_cb)
        {
          if (types & EV_STAT)
            cb (EV_A_ EV_STAT, ((char *)ANHE_w (timers [i])) - offsetof (struct ev_stat, timer));
        }
      else
#endif
      if (types & EV_TIMER)
        cb (EV_A_ EV_TIMER, ANHE_w (timers [i]));

#if EV_PERIODIC_ENABLE
  if (types & EV_PERIODIC)
    for (i = periodiccnt + HEAP0; i-- > HEAP0; )
      cb (EV_A_ EV_PERIODIC, ANHE_w (periodics [i]));
#endif

#if EV_IDLE_ENABLE
  if (types & EV_IDLE)
    for (j = NUMPRI; j--; )
      for (i = idlecnt [j]; i--; )
        cb (EV_A_ EV_IDLE, idles [j][i]);
#endif

#if EV_FORK_ENABLE
  if (types & EV_FORK)
    for (i = forkcnt; i--; )
      if (ev_cb (forks [i]) != embed_fork_cb)
        cb (EV_A_ EV_FORK, forks [i]);
#endif

#if EV_ASYNC_ENABLE
  if (types & EV_ASYNC)
    for (i = asynccnt; i--; )
      cb (EV_A_ EV_ASYNC, asyncs [i]);
#endif

#if EV_PREPARE_ENABLE
  if (types & EV_PREPARE)
    for (i = preparecnt; i--; )
# if EV_EMBED_ENABLE
      if (ev_cb (prepares [i]) != embed_prepare_cb)
# endif
        cb (EV_A_ EV_PREPARE, prepares [i]);
#endif

#if EV_CHECK_ENABLE
  if (types & EV_CHECK)
    for (i = checkcnt; i--; )
      cb (EV_A_ EV_CHECK, checks [i]);
#endif

#if EV_SIGNAL_ENABLE
  if (types & EV_SIGNAL)
    for (i = 0; i < EV_NSIG - 1; ++i)
      for (wl = signals [i].head; wl; )
        {
          wn = wl->next;
          cb (EV_A_ EV_SIGNAL, wl);
          wl = wn;
        }
#endif

#if EV_CHILD_ENABLE
  if (types & EV_CHILD)
    for (i = (EV_PID_HASHSIZE); i--; )
      for (wl = childs [i]; wl; )
        {
          wn = wl->next;
          cb (EV_A_ EV_CHILD, wl);
          wl = wn;
        }
#endif
  /* EV_STAT  0x00001000  stat data changed */
  /* EV_EMBED 0x00010000  embedded event loop needs sweep */
}
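
/* Illustrative sketch (not compiled): using ev_walk () to enumerate all io
 * and timer watchers registered with a loop, e.g. for debugging output.
 * Assumes <stdio.h> and the default EV_MULTIPLICITY build; the printf
 * formats and function names are examples only.
 */
#if 0
static void
walk_cb (EV_P_ int type, void *w)
{
  if (type == EV_IO)
    printf ("io watcher on fd %d\n", ((ev_io *)w)->fd);
  else if (type == EV_TIMER)
    printf ("timer watcher, %f s remaining\n", ev_timer_remaining (loop, (ev_timer *)w));
}

static void
dump_watchers (struct ev_loop *loop)
{
  ev_walk (loop, EV_IO | EV_TIMER, walk_cb);
}
#endif
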
#endif

#if EV_MULTIPLICITY
#include "ev_wrap.h"
#endif