You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

5097 lines
126 KiB

12 years ago
10 years ago
12 years ago
10 years ago
12 years ago
10 years ago
12 years ago
10 years ago
12 years ago
10 years ago
12 years ago
9 years ago
12 years ago
10 years ago
12 years ago
12 years ago
12 years ago
9 years ago
12 years ago
9 years ago
10 years ago
12 years ago
9 years ago
12 years ago
10 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
12 years ago
12 years ago
12 years ago
10 years ago
12 years ago
10 years ago
12 years ago
10 years ago
12 years ago
10 years ago
12 years ago
9 years ago
12 years ago
10 years ago
12 years ago
10 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
10 years ago
9 years ago
10 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
10 years ago
9 years ago
10 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
12 years ago
12 years ago
9 years ago
12 years ago
12 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
12 years ago
12 years ago
9 years ago
12 years ago
12 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
12 years ago
12 years ago
12 years ago
9 years ago
9 years ago
12 years ago
9 years ago
12 years ago
12 years ago
9 years ago
12 years ago
10 years ago
9 years ago
10 years ago
12 years ago
10 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
9 years ago
12 years ago
12 years ago
10 years ago
12 years ago
12 years ago
10 years ago
12 years ago
12 years ago
  1. /*
  2. * libev event processing core, watcher management
  3. *
  4. * Copyright (c) 2007,2008,2009,2010,2011,2012,2013 Marc Alexander Lehmann <libev@schmorp.de>
  5. * All rights reserved.
  6. *
  7. * Redistribution and use in source and binary forms, with or without modifica-
  8. * tion, are permitted provided that the following conditions are met:
  9. *
  10. * 1. Redistributions of source code must retain the above copyright notice,
  11. * this list of conditions and the following disclaimer.
  12. *
  13. * 2. Redistributions in binary form must reproduce the above copyright
  14. * notice, this list of conditions and the following disclaimer in the
  15. * documentation and/or other materials provided with the distribution.
  16. *
  17. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
  18. * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
  19. * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
  20. * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
  21. * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  22. * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  23. * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  24. * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
  25. * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
  26. * OF THE POSSIBILITY OF SUCH DAMAGE.
  27. *
  28. * Alternatively, the contents of this file may be used under the terms of
  29. * the GNU General Public License ("GPL") version 2 or any later version,
  30. * in which case the provisions of the GPL are applicable instead of
  31. * the above. If you wish to allow the use of your version of this file
  32. * only under the terms of the GPL and not to allow others to use your
  33. * version of this file under the BSD license, indicate your decision
  34. * by deleting the provisions above and replace them with the notice
  35. * and other provisions required by the GPL. If you do not delete the
  36. * provisions above, a recipient may use your version of this file under
  37. * either the BSD or the GPL.
  38. */
  39. /* this big block deduces configuration from config.h */
  40. #ifndef EV_STANDALONE
  41. # ifdef EV_CONFIG_H
  42. # include EV_CONFIG_H
  43. # else
  44. # include "config.h"
  45. # endif
  46. # if HAVE_FLOOR
  47. # ifndef EV_USE_FLOOR
  48. # define EV_USE_FLOOR 1
  49. # endif
  50. # endif
  51. # if HAVE_CLOCK_SYSCALL
  52. # ifndef EV_USE_CLOCK_SYSCALL
  53. # define EV_USE_CLOCK_SYSCALL 1
  54. # ifndef EV_USE_REALTIME
  55. # define EV_USE_REALTIME 0
  56. # endif
  57. # ifndef EV_USE_MONOTONIC
  58. # define EV_USE_MONOTONIC 1
  59. # endif
  60. # endif
  61. # elif !defined EV_USE_CLOCK_SYSCALL
  62. # define EV_USE_CLOCK_SYSCALL 0
  63. # endif
  64. # if HAVE_CLOCK_GETTIME
  65. # ifndef EV_USE_MONOTONIC
  66. # define EV_USE_MONOTONIC 1
  67. # endif
  68. # ifndef EV_USE_REALTIME
  69. # define EV_USE_REALTIME 0
  70. # endif
  71. # else
  72. # ifndef EV_USE_MONOTONIC
  73. # define EV_USE_MONOTONIC 0
  74. # endif
  75. # ifndef EV_USE_REALTIME
  76. # define EV_USE_REALTIME 0
  77. # endif
  78. # endif
  79. # if HAVE_NANOSLEEP
  80. # ifndef EV_USE_NANOSLEEP
  81. # define EV_USE_NANOSLEEP EV_FEATURE_OS
  82. # endif
  83. # else
  84. # undef EV_USE_NANOSLEEP
  85. # define EV_USE_NANOSLEEP 0
  86. # endif
  87. # if HAVE_SELECT && HAVE_SYS_SELECT_H
  88. # ifndef EV_USE_SELECT
  89. # define EV_USE_SELECT EV_FEATURE_BACKENDS
  90. # endif
  91. # else
  92. # undef EV_USE_SELECT
  93. # define EV_USE_SELECT 0
  94. # endif
  95. # if HAVE_POLL && HAVE_POLL_H
  96. # ifndef EV_USE_POLL
  97. # define EV_USE_POLL EV_FEATURE_BACKENDS
  98. # endif
  99. # else
  100. # undef EV_USE_POLL
  101. # define EV_USE_POLL 0
  102. # endif
  103. # if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H
  104. # ifndef EV_USE_EPOLL
  105. # define EV_USE_EPOLL EV_FEATURE_BACKENDS
  106. # endif
  107. # else
  108. # undef EV_USE_EPOLL
  109. # define EV_USE_EPOLL 0
  110. # endif
  111. # if HAVE_KQUEUE && HAVE_SYS_EVENT_H
  112. # ifndef EV_USE_KQUEUE
  113. # define EV_USE_KQUEUE EV_FEATURE_BACKENDS
  114. # endif
  115. # else
  116. # undef EV_USE_KQUEUE
  117. # define EV_USE_KQUEUE 0
  118. # endif
  119. # if HAVE_PORT_H && HAVE_PORT_CREATE
  120. # ifndef EV_USE_PORT
  121. # define EV_USE_PORT EV_FEATURE_BACKENDS
  122. # endif
  123. # else
  124. # undef EV_USE_PORT
  125. # define EV_USE_PORT 0
  126. # endif
  127. # if HAVE_INOTIFY_INIT && HAVE_SYS_INOTIFY_H
  128. # ifndef EV_USE_INOTIFY
  129. # define EV_USE_INOTIFY EV_FEATURE_OS
  130. # endif
  131. # else
  132. # undef EV_USE_INOTIFY
  133. # define EV_USE_INOTIFY 0
  134. # endif
  135. # if HAVE_SIGNALFD && HAVE_SYS_SIGNALFD_H
  136. # ifndef EV_USE_SIGNALFD
  137. # define EV_USE_SIGNALFD EV_FEATURE_OS
  138. # endif
  139. # else
  140. # undef EV_USE_SIGNALFD
  141. # define EV_USE_SIGNALFD 0
  142. # endif
  143. # if HAVE_EVENTFD
  144. # ifndef EV_USE_EVENTFD
  145. # define EV_USE_EVENTFD EV_FEATURE_OS
  146. # endif
  147. # else
  148. # undef EV_USE_EVENTFD
  149. # define EV_USE_EVENTFD 0
  150. # endif
  151. #endif
  152. #include <stdlib.h>
  153. #include <string.h>
  154. #include <fcntl.h>
  155. #include <stddef.h>
  156. #include <stdio.h>
  157. #include <assert.h>
  158. #include <errno.h>
  159. #include <sys/types.h>
  160. #include <time.h>
  161. #include <limits.h>
  162. #include <signal.h>
  163. #ifdef EV_H
  164. # include EV_H
  165. #else
  166. # include "ev.h"
  167. #endif
  168. #if EV_NO_THREADS
  169. # undef EV_NO_SMP
  170. # define EV_NO_SMP 1
  171. # undef ECB_NO_THREADS
  172. # define ECB_NO_THREADS 1
  173. #endif
  174. #if EV_NO_SMP
  175. # undef EV_NO_SMP
  176. # define ECB_NO_SMP 1
  177. #endif
  178. #ifndef _WIN32
  179. # include <sys/time.h>
  180. # include <sys/wait.h>
  181. # include <unistd.h>
  182. #else
  183. # include <io.h>
  184. # define WIN32_LEAN_AND_MEAN
  185. # include <winsock2.h>
  186. # include <windows.h>
  187. # ifndef EV_SELECT_IS_WINSOCKET
  188. # define EV_SELECT_IS_WINSOCKET 1
  189. # endif
  190. # undef EV_AVOID_STDIO
  191. #endif
  192. /* OS X, in its infinite idiocy, actually HARDCODES
  193. * a limit of 1024 into their select. Where people have brains,
  194. * OS X engineers apparently have a vacuum. Or maybe they were
  195. * ordered to have a vacuum, or they do anything for money.
  196. * This might help. Or not.
  197. */
  198. #define _DARWIN_UNLIMITED_SELECT 1
  199. /* this block tries to deduce configuration from header-defined symbols and defaults */
  200. /* try to deduce the maximum number of signals on this platform */
  201. #if defined EV_NSIG
  202. /* use what's provided */
  203. #elif defined NSIG
  204. # define EV_NSIG (NSIG)
  205. #elif defined _NSIG
  206. # define EV_NSIG (_NSIG)
  207. #elif defined SIGMAX
  208. # define EV_NSIG (SIGMAX+1)
  209. #elif defined SIG_MAX
  210. # define EV_NSIG (SIG_MAX+1)
  211. #elif defined _SIG_MAX
  212. # define EV_NSIG (_SIG_MAX+1)
  213. #elif defined MAXSIG
  214. # define EV_NSIG (MAXSIG+1)
  215. #elif defined MAX_SIG
  216. # define EV_NSIG (MAX_SIG+1)
  217. #elif defined SIGARRAYSIZE
  218. # define EV_NSIG (SIGARRAYSIZE) /* Assume ary[SIGARRAYSIZE] */
  219. #elif defined _sys_nsig
  220. # define EV_NSIG (_sys_nsig) /* Solaris 2.5 */
  221. #else
  222. # define EV_NSIG (8 * sizeof (sigset_t) + 1)
  223. #endif
  224. #ifndef EV_USE_FLOOR
  225. # define EV_USE_FLOOR 0
  226. #endif
  227. #ifndef EV_USE_CLOCK_SYSCALL
  228. # if __linux && __GLIBC__ == 2 && __GLIBC_MINOR__ < 17
  229. # define EV_USE_CLOCK_SYSCALL EV_FEATURE_OS
  230. # else
  231. # define EV_USE_CLOCK_SYSCALL 0
  232. # endif
  233. #endif
  234. #if !(_POSIX_TIMERS > 0)
  235. # ifndef EV_USE_MONOTONIC
  236. # define EV_USE_MONOTONIC 0
  237. # endif
  238. # ifndef EV_USE_REALTIME
  239. # define EV_USE_REALTIME 0
  240. # endif
  241. #endif
  242. #ifndef EV_USE_MONOTONIC
  243. # if defined _POSIX_MONOTONIC_CLOCK && _POSIX_MONOTONIC_CLOCK >= 0
  244. # define EV_USE_MONOTONIC EV_FEATURE_OS
  245. # else
  246. # define EV_USE_MONOTONIC 0
  247. # endif
  248. #endif
  249. #ifndef EV_USE_REALTIME
  250. # define EV_USE_REALTIME !EV_USE_CLOCK_SYSCALL
  251. #endif
  252. #ifndef EV_USE_NANOSLEEP
  253. # if _POSIX_C_SOURCE >= 199309L
  254. # define EV_USE_NANOSLEEP EV_FEATURE_OS
  255. # else
  256. # define EV_USE_NANOSLEEP 0
  257. # endif
  258. #endif
  259. #ifndef EV_USE_SELECT
  260. # define EV_USE_SELECT EV_FEATURE_BACKENDS
  261. #endif
  262. #ifndef EV_USE_POLL
  263. # ifdef _WIN32
  264. # define EV_USE_POLL 0
  265. # else
  266. # define EV_USE_POLL EV_FEATURE_BACKENDS
  267. # endif
  268. #endif
  269. #ifndef EV_USE_EPOLL
  270. # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
  271. # define EV_USE_EPOLL EV_FEATURE_BACKENDS
  272. # else
  273. # define EV_USE_EPOLL 0
  274. # endif
  275. #endif
  276. #ifndef EV_USE_KQUEUE
  277. # define EV_USE_KQUEUE 0
  278. #endif
  279. #ifndef EV_USE_PORT
  280. # define EV_USE_PORT 0
  281. #endif
  282. #ifndef EV_USE_INOTIFY
  283. # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
  284. # define EV_USE_INOTIFY EV_FEATURE_OS
  285. # else
  286. # define EV_USE_INOTIFY 0
  287. # endif
  288. #endif
  289. #ifndef EV_PID_HASHSIZE
  290. # define EV_PID_HASHSIZE EV_FEATURE_DATA ? 16 : 1
  291. #endif
  292. #ifndef EV_INOTIFY_HASHSIZE
  293. # define EV_INOTIFY_HASHSIZE EV_FEATURE_DATA ? 16 : 1
  294. #endif
  295. #ifndef EV_USE_EVENTFD
  296. # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
  297. # define EV_USE_EVENTFD EV_FEATURE_OS
  298. # else
  299. # define EV_USE_EVENTFD 0
  300. # endif
  301. #endif
  302. #ifndef EV_USE_SIGNALFD
  303. # if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
  304. # define EV_USE_SIGNALFD EV_FEATURE_OS
  305. # else
  306. # define EV_USE_SIGNALFD 0
  307. # endif
  308. #endif
  309. #if 0 /* debugging */
  310. # define EV_VERIFY 3
  311. # define EV_USE_4HEAP 1
  312. # define EV_HEAP_CACHE_AT 1
  313. #endif
  314. #ifndef EV_VERIFY
  315. # define EV_VERIFY (EV_FEATURE_API ? 1 : 0)
  316. #endif
  317. #ifndef EV_USE_4HEAP
  318. # define EV_USE_4HEAP EV_FEATURE_DATA
  319. #endif
  320. #ifndef EV_HEAP_CACHE_AT
  321. # define EV_HEAP_CACHE_AT EV_FEATURE_DATA
  322. #endif
  323. #ifdef ANDROID
  324. /* supposedly, android doesn't typedef fd_mask */
  325. # undef EV_USE_SELECT
  326. # define EV_USE_SELECT 0
  327. /* supposedly, we need to include syscall.h, not sys/syscall.h, so just disable */
  328. # undef EV_USE_CLOCK_SYSCALL
  329. # define EV_USE_CLOCK_SYSCALL 0
  330. #endif
  331. /* aix's poll.h seems to cause lots of trouble */
  332. #ifdef _AIX
  333. /* AIX has a completely broken poll.h header */
  334. # undef EV_USE_POLL
  335. # define EV_USE_POLL 0
  336. #endif
  337. /* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */
  338. /* which makes programs even slower. might work on other unices, too. */
  339. #if EV_USE_CLOCK_SYSCALL
  340. # include <sys/syscall.h>
  341. # ifdef SYS_clock_gettime
  342. # define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts))
  343. # undef EV_USE_MONOTONIC
  344. # define EV_USE_MONOTONIC 1
  345. # else
  346. # undef EV_USE_CLOCK_SYSCALL
  347. # define EV_USE_CLOCK_SYSCALL 0
  348. # endif
  349. #endif
  350. /* this block fixes any misconfiguration where we know we run into trouble otherwise */
  351. #ifndef CLOCK_MONOTONIC
  352. # undef EV_USE_MONOTONIC
  353. # define EV_USE_MONOTONIC 0
  354. #endif
  355. #ifndef CLOCK_REALTIME
  356. # undef EV_USE_REALTIME
  357. # define EV_USE_REALTIME 0
  358. #endif
  359. #if !EV_STAT_ENABLE
  360. # undef EV_USE_INOTIFY
  361. # define EV_USE_INOTIFY 0
  362. #endif
  363. #if !EV_USE_NANOSLEEP
  364. /* hp-ux has it in sys/time.h, which we unconditionally include above */
  365. # if !defined _WIN32 && !defined __hpux
  366. # include <sys/select.h>
  367. # endif
  368. #endif
  369. #if EV_USE_INOTIFY
  370. # include <sys/statfs.h>
  371. # include <sys/inotify.h>
  372. /* some very old inotify.h headers don't have IN_DONT_FOLLOW */
  373. # ifndef IN_DONT_FOLLOW
  374. # undef EV_USE_INOTIFY
  375. # define EV_USE_INOTIFY 0
  376. # endif
  377. #endif
#if EV_USE_EVENTFD
/* our minimum requirement is glibc 2.7 which has the stub, but not the header */
# include <stdint.h>
/* fall back to the O_* flag values when old headers lack the EFD_* names */
# ifndef EFD_NONBLOCK
# define EFD_NONBLOCK O_NONBLOCK
# endif
# ifndef EFD_CLOEXEC
# ifdef O_CLOEXEC
# define EFD_CLOEXEC O_CLOEXEC
# else
# define EFD_CLOEXEC 02000000 /* NOTE(review): octal literal matching linux O_CLOEXEC — verify on other platforms */
# endif
# endif
/* declare eventfd ourselves, since the target glibc provides the stub but not the prototype */
EV_CPP(extern "C") int (eventfd) (unsigned int initval, int flags);
#endif
#if EV_USE_SIGNALFD
/* our minimum requirement is glibc 2.7 which has the stub, but not the header */
# include <stdint.h>
/* fall back to the O_* flag values when old headers lack the SFD_* names */
# ifndef SFD_NONBLOCK
# define SFD_NONBLOCK O_NONBLOCK
# endif
# ifndef SFD_CLOEXEC
# ifdef O_CLOEXEC
# define SFD_CLOEXEC O_CLOEXEC
# else
# define SFD_CLOEXEC 02000000 /* NOTE(review): octal literal matching linux O_CLOEXEC — verify on other platforms */
# endif
# endif
/* declare signalfd ourselves, since the target glibc provides the stub but not the prototype */
EV_CPP (extern "C") int signalfd (int fd, const sigset_t *mask, int flags);
/* minimal stand-in for the kernel's signalfd_siginfo: only ssi_signo is read here;
 * the pad keeps the struct at the kernel's fixed 128-byte record size */
struct signalfd_siginfo
{
  uint32_t ssi_signo;
  char pad[128 - sizeof (uint32_t)];
};
#endif
  413. /**/
  414. #if EV_VERIFY >= 3
  415. # define EV_FREQUENT_CHECK ev_verify (EV_A)
  416. #else
  417. # define EV_FREQUENT_CHECK do { } while (0)
  418. #endif
  419. /*
  420. * This is used to work around floating point rounding problems.
  421. * This value is good at least till the year 4000.
  422. */
  423. #define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */
  424. /*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */
  425. #define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
  426. #define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
  427. #define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
  428. #define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
  429. /* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
  430. /* ECB.H BEGIN */
  431. /*
  432. * libecb - http://software.schmorp.de/pkg/libecb
  433. *
  434. * Copyright (©) 2009-2015 Marc Alexander Lehmann <libecb@schmorp.de>
  435. * Copyright (©) 2011 Emanuele Giaquinta
  436. * All rights reserved.
  437. *
  438. * Redistribution and use in source and binary forms, with or without modifica-
  439. * tion, are permitted provided that the following conditions are met:
  440. *
  441. * 1. Redistributions of source code must retain the above copyright notice,
  442. * this list of conditions and the following disclaimer.
  443. *
  444. * 2. Redistributions in binary form must reproduce the above copyright
  445. * notice, this list of conditions and the following disclaimer in the
  446. * documentation and/or other materials provided with the distribution.
  447. *
  448. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
  449. * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
  450. * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
  451. * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
  452. * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  453. * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  454. * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  455. * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
  456. * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
  457. * OF THE POSSIBILITY OF SUCH DAMAGE.
  458. *
  459. * Alternatively, the contents of this file may be used under the terms of
  460. * the GNU General Public License ("GPL") version 2 or any later version,
  461. * in which case the provisions of the GPL are applicable instead of
  462. * the above. If you wish to allow the use of your version of this file
  463. * only under the terms of the GPL and not to allow others to use your
  464. * version of this file under the BSD license, indicate your decision
  465. * by deleting the provisions above and replace them with the notice
  466. * and other provisions required by the GPL. If you do not delete the
  467. * provisions above, a recipient may use your version of this file under
  468. * either the BSD or the GPL.
  469. */
  470. #ifndef ECB_H
  471. #define ECB_H
  472. /* 16 bits major, 16 bits minor */
  473. #define ECB_VERSION 0x00010005
  474. #ifdef _WIN32
  475. typedef signed char int8_t;
  476. typedef unsigned char uint8_t;
  477. typedef signed short int16_t;
  478. typedef unsigned short uint16_t;
  479. typedef signed int int32_t;
  480. typedef unsigned int uint32_t;
  481. #if __GNUC__
  482. typedef signed long long int64_t;
  483. typedef unsigned long long uint64_t;
  484. #else /* _MSC_VER || __BORLANDC__ */
  485. typedef signed __int64 int64_t;
  486. typedef unsigned __int64 uint64_t;
  487. #endif
  488. #ifdef _WIN64
  489. #define ECB_PTRSIZE 8
  490. typedef uint64_t uintptr_t;
  491. typedef int64_t intptr_t;
  492. #else
  493. #define ECB_PTRSIZE 4
  494. typedef uint32_t uintptr_t;
  495. typedef int32_t intptr_t;
  496. #endif
  497. #else
  498. #include <inttypes.h>
  499. #if (defined INTPTR_MAX ? INTPTR_MAX : ULONG_MAX) > 0xffffffffU
  500. #define ECB_PTRSIZE 8
  501. #else
  502. #define ECB_PTRSIZE 4
  503. #endif
  504. #endif
  505. #define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__)
  506. #define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64)
  507. /* work around x32 idiocy by defining proper macros */
  508. #if ECB_GCC_AMD64 || ECB_MSVC_AMD64
  509. #if _ILP32
  510. #define ECB_AMD64_X32 1
  511. #else
  512. #define ECB_AMD64 1
  513. #endif
  514. #endif
  515. /* many compilers define _GNUC_ to some versions but then only implement
  516. * what their idiot authors think are the "more important" extensions,
  517. * causing enormous grief in return for some better fake benchmark numbers.
  518. * or so.
  519. * we try to detect these and simply assume they are not gcc - if they have
  520. * an issue with that they should have done it right in the first place.
  521. */
  522. #if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__
  523. #define ECB_GCC_VERSION(major,minor) 0
  524. #else
  525. #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
  526. #endif
  527. #define ECB_CLANG_VERSION(major,minor) (__clang_major__ > (major) || (__clang_major__ == (major) && __clang_minor__ >= (minor)))
  528. #if __clang__ && defined __has_builtin
  529. #define ECB_CLANG_BUILTIN(x) __has_builtin (x)
  530. #else
  531. #define ECB_CLANG_BUILTIN(x) 0
  532. #endif
  533. #if __clang__ && defined __has_extension
  534. #define ECB_CLANG_EXTENSION(x) __has_extension (x)
  535. #else
  536. #define ECB_CLANG_EXTENSION(x) 0
  537. #endif
  538. #define ECB_CPP (__cplusplus+0)
  539. #define ECB_CPP11 (__cplusplus >= 201103L)
  540. #if ECB_CPP
  541. #define ECB_C 0
  542. #define ECB_STDC_VERSION 0
  543. #else
  544. #define ECB_C 1
  545. #define ECB_STDC_VERSION __STDC_VERSION__
  546. #endif
  547. #define ECB_C99 (ECB_STDC_VERSION >= 199901L)
  548. #define ECB_C11 (ECB_STDC_VERSION >= 201112L)
  549. #if ECB_CPP
  550. #define ECB_EXTERN_C extern "C"
  551. #define ECB_EXTERN_C_BEG ECB_EXTERN_C {
  552. #define ECB_EXTERN_C_END }
  553. #else
  554. #define ECB_EXTERN_C extern
  555. #define ECB_EXTERN_C_BEG
  556. #define ECB_EXTERN_C_END
  557. #endif
  558. /*****************************************************************************/
  559. /* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
  560. /* ECB_NO_SMP - ecb might be used in multiple threads, but only on a single cpu */
  561. #if ECB_NO_THREADS
  562. #define ECB_NO_SMP 1
  563. #endif
  564. #if ECB_NO_SMP
  565. #define ECB_MEMORY_FENCE do { } while (0)
  566. #endif
  567. /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/compiler_ref/compiler_builtins.html */
  568. #if __xlC__ && ECB_CPP
  569. #include <builtins.h>
  570. #endif
  571. #if 1400 <= _MSC_VER
  572. #include <intrin.h> /* fence functions _ReadBarrier, also bit search functions _BitScanReverse */
  573. #endif
  574. #ifndef ECB_MEMORY_FENCE
  575. #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
  576. #if __i386 || __i386__
  577. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
  578. #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
  579. #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
  580. #elif ECB_GCC_AMD64
  581. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mfence" : : : "memory")
  582. #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
  583. #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
  584. #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
  585. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("sync" : : : "memory")
  586. #elif defined __ARM_ARCH_2__ \
  587. || defined __ARM_ARCH_3__ || defined __ARM_ARCH_3M__ \
  588. || defined __ARM_ARCH_4__ || defined __ARM_ARCH_4T__ \
  589. || defined __ARM_ARCH_5__ || defined __ARM_ARCH_5E__ \
  590. || defined __ARM_ARCH_5T__ || defined __ARM_ARCH_5TE__ \
  591. || defined __ARM_ARCH_5TEJ__
  592. /* should not need any, unless running old code on newer cpu - arm doesn't support that */
  593. #elif defined __ARM_ARCH_6__ || defined __ARM_ARCH_6J__ \
  594. || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__ \
  595. || defined __ARM_ARCH_6T2__
  596. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
  597. #elif defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ \
  598. || defined __ARM_ARCH_7R__ || defined __ARM_ARCH_7M__
  599. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb" : : : "memory")
  600. #elif __aarch64__
  601. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("dmb ish" : : : "memory")
  602. #elif (__sparc || __sparc__) && !(__sparc_v8__ || defined __sparcv8)
  603. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
  604. #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
  605. #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
  606. #elif defined __s390__ || defined __s390x__
  607. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("bcr 15,0" : : : "memory")
  608. #elif defined __mips__
  609. /* GNU/Linux emulates sync on mips1 architectures, so we force its use */
  610. /* anybody else who still uses mips1 is supposed to send in their version, with detection code. */
  611. #define ECB_MEMORY_FENCE __asm__ __volatile__ (".set mips2; sync; .set mips0" : : : "memory")
  612. #elif defined __alpha__
  613. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mb" : : : "memory")
  614. #elif defined __hppa__
  615. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
  616. #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
  617. #elif defined __ia64__
  618. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("mf" : : : "memory")
  619. #elif defined __m68k__
  620. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
  621. #elif defined __m88k__
  622. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("tb1 0,%%r0,128" : : : "memory")
  623. #elif defined __sh__
  624. #define ECB_MEMORY_FENCE __asm__ __volatile__ ("" : : : "memory")
  625. #endif
  626. #endif
  627. #endif
  628. #ifndef ECB_MEMORY_FENCE
  629. #if ECB_GCC_VERSION(4,7)
  630. /* see comment below (stdatomic.h) about the C11 memory model. */
  631. #define ECB_MEMORY_FENCE __atomic_thread_fence (__ATOMIC_SEQ_CST)
  632. #define ECB_MEMORY_FENCE_ACQUIRE __atomic_thread_fence (__ATOMIC_ACQUIRE)
  633. #define ECB_MEMORY_FENCE_RELEASE __atomic_thread_fence (__ATOMIC_RELEASE)
  634. #elif ECB_CLANG_EXTENSION(c_atomic)
  635. /* see comment below (stdatomic.h) about the C11 memory model. */
  636. #define ECB_MEMORY_FENCE __c11_atomic_thread_fence (__ATOMIC_SEQ_CST)
  637. #define ECB_MEMORY_FENCE_ACQUIRE __c11_atomic_thread_fence (__ATOMIC_ACQUIRE)
  638. #define ECB_MEMORY_FENCE_RELEASE __c11_atomic_thread_fence (__ATOMIC_RELEASE)
  639. #elif ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
  640. #define ECB_MEMORY_FENCE __sync_synchronize ()
  641. #elif _MSC_VER >= 1500 /* VC++ 2008 */
  642. /* apparently, microsoft broke all the memory barrier stuff in Visual Studio 2008... */
  643. #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
  644. #define ECB_MEMORY_FENCE _ReadWriteBarrier (); MemoryBarrier()
  645. #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier (); MemoryBarrier() /* according to msdn, _ReadBarrier is not a load fence */
  646. #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier (); MemoryBarrier()
  647. #elif _MSC_VER >= 1400 /* VC++ 2005 */
  648. #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
  649. #define ECB_MEMORY_FENCE _ReadWriteBarrier ()
  650. #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
  651. #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
  652. #elif defined _WIN32
  653. #include <WinNT.h>
  654. #define ECB_MEMORY_FENCE MemoryBarrier () /* actually just xchg on x86... scary */
  655. #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
  656. #include <mbarrier.h>
  657. #define ECB_MEMORY_FENCE __machine_rw_barrier ()
  658. #define ECB_MEMORY_FENCE_ACQUIRE __machine_r_barrier ()
  659. #define ECB_MEMORY_FENCE_RELEASE __machine_w_barrier ()
  660. #elif __xlC__
  661. #define ECB_MEMORY_FENCE __sync ()
  662. #endif
  663. #endif
  664. #ifndef ECB_MEMORY_FENCE
  665. #if ECB_C11 && !defined __STDC_NO_ATOMICS__
  666. /* we assume that these memory fences work on all variables/all memory accesses, */
  667. /* not just C11 atomics and atomic accesses */
  668. #include <stdatomic.h>
  669. /* Unfortunately, neither gcc 4.7 nor clang 3.1 generate any instructions for */
  670. /* any fence other than seq_cst, which isn't very efficient for us. */
  671. /* Why that is, we don't know - either the C11 memory model is quite useless */
  672. /* for most usages, or gcc and clang have a bug */
  673. /* I *currently* lean towards the latter, and inefficiently implement */
  674. /* all three of ecb's fences as a seq_cst fence */
  675. /* Update, gcc-4.8 generates mfence for all c++ fences, but nothing */
  676. /* for all __atomic_thread_fence's except seq_cst */
  677. #define ECB_MEMORY_FENCE atomic_thread_fence (memory_order_seq_cst)
  678. #endif
  679. #endif
  680. #ifndef ECB_MEMORY_FENCE
  681. #if !ECB_AVOID_PTHREADS
  682. /*
  683. * if you get undefined symbol references to pthread_mutex_lock,
  684. * or failure to find pthread.h, then you should implement
  685. * the ECB_MEMORY_FENCE operations for your cpu/compiler
  686. * OR provide pthread.h and link against the posix thread library
  687. * of your system.
  688. */
  689. #include <pthread.h>
  690. #define ECB_NEEDS_PTHREADS 1
  691. #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1
  692. static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
  693. #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
  694. #endif
  695. #endif
  696. #if !defined ECB_MEMORY_FENCE_ACQUIRE && defined ECB_MEMORY_FENCE
  697. #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
  698. #endif
  699. #if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
  700. #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
  701. #endif
  702. /*****************************************************************************/
  703. #if ECB_CPP
  704. #define ecb_inline static inline
  705. #elif ECB_GCC_VERSION(2,5)
  706. #define ecb_inline static __inline__
  707. #elif ECB_C99
  708. #define ecb_inline static inline
  709. #else
  710. #define ecb_inline static
  711. #endif
  712. #if ECB_GCC_VERSION(3,3)
  713. #define ecb_restrict __restrict__
  714. #elif ECB_C99
  715. #define ecb_restrict restrict
  716. #else
  717. #define ecb_restrict
  718. #endif
  719. typedef int ecb_bool;
  720. #define ECB_CONCAT_(a, b) a ## b
  721. #define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)
  722. #define ECB_STRINGIFY_(a) # a
  723. #define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)
  724. #define ECB_STRINGIFY_EXPR(expr) ((expr), ECB_STRINGIFY_ (expr))
  725. #define ecb_function_ ecb_inline
  726. #if ECB_GCC_VERSION(3,1) || ECB_CLANG_VERSION(2,8)
  727. #define ecb_attribute(attrlist) __attribute__ (attrlist)
  728. #else
  729. #define ecb_attribute(attrlist)
  730. #endif
  731. #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_constant_p)
  732. #define ecb_is_constant(expr) __builtin_constant_p (expr)
  733. #else
  734. /* possible C11 impl for integral types
  735. typedef struct ecb_is_constant_struct ecb_is_constant_struct;
  736. #define ecb_is_constant(expr) _Generic ((1 ? (struct ecb_is_constant_struct *)0 : (void *)((expr) - (expr)), ecb_is_constant_struct *: 0, default: 1)) */
  737. #define ecb_is_constant(expr) 0
  738. #endif
  739. #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_expect)
  740. #define ecb_expect(expr,value) __builtin_expect ((expr),(value))
  741. #else
  742. #define ecb_expect(expr,value) (expr)
  743. #endif
  744. #if ECB_GCC_VERSION(3,1) || ECB_CLANG_BUILTIN(__builtin_prefetch)
  745. #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
  746. #else
  747. #define ecb_prefetch(addr,rw,locality)
  748. #endif
  749. /* no emulation for ecb_decltype */
  750. #if ECB_CPP11
  751. // older implementations might have problems with decltype(x)::type, work around it
  752. template<class T> struct ecb_decltype_t { typedef T type; };
  753. #define ecb_decltype(x) ecb_decltype_t<decltype (x)>::type
  754. #elif ECB_GCC_VERSION(3,0) || ECB_CLANG_VERSION(2,8)
  755. #define ecb_decltype(x) __typeof__ (x)
  756. #endif
  757. #if _MSC_VER >= 1300
  758. #define ecb_deprecated __declspec (deprecated)
  759. #else
  760. #define ecb_deprecated ecb_attribute ((__deprecated__))
  761. #endif
  762. #if _MSC_VER >= 1500
  763. #define ecb_deprecated_message(msg) __declspec (deprecated (msg))
  764. #elif ECB_GCC_VERSION(4,5)
  765. #define ecb_deprecated_message(msg) ecb_attribute ((__deprecated__ (msg))
  766. #else
  767. #define ecb_deprecated_message(msg) ecb_deprecated
  768. #endif
  769. #if _MSC_VER >= 1400
  770. #define ecb_noinline __declspec (noinline)
  771. #else
  772. #define ecb_noinline ecb_attribute ((__noinline__))
  773. #endif
  774. #define ecb_unused ecb_attribute ((__unused__))
  775. #define ecb_const ecb_attribute ((__const__))
  776. #define ecb_pure ecb_attribute ((__pure__))
  777. #if ECB_C11 || __IBMC_NORETURN
  778. /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/language_ref/noreturn.html */
  779. #define ecb_noreturn _Noreturn
  780. #elif ECB_CPP11
  781. #define ecb_noreturn [[noreturn]]
  782. #elif _MSC_VER >= 1200
  783. /* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx */
  784. #define ecb_noreturn __declspec (noreturn)
  785. #else
  786. #define ecb_noreturn ecb_attribute ((__noreturn__))
  787. #endif
  788. #if ECB_GCC_VERSION(4,3)
  789. #define ecb_artificial ecb_attribute ((__artificial__))
  790. #define ecb_hot ecb_attribute ((__hot__))
  791. #define ecb_cold ecb_attribute ((__cold__))
  792. #else
  793. #define ecb_artificial
  794. #define ecb_hot
  795. #define ecb_cold
  796. #endif
  797. /* put around conditional expressions if you are very sure that the */
  798. /* expression is mostly true or mostly false. note that these return */
  799. /* booleans, not the expression. */
  800. #define ecb_expect_false(expr) ecb_expect (!!(expr), 0)
  801. #define ecb_expect_true(expr) ecb_expect (!!(expr), 1)
  802. /* for compatibility to the rest of the world */
  803. #define ecb_likely(expr) ecb_expect_true (expr)
  804. #define ecb_unlikely(expr) ecb_expect_false (expr)
  805. /* count trailing zero bits and count # of one bits */
  806. #if ECB_GCC_VERSION(3,4) \
  807. || (ECB_CLANG_BUILTIN(__builtin_clz) && ECB_CLANG_BUILTIN(__builtin_clzll) \
  808. && ECB_CLANG_BUILTIN(__builtin_ctz) && ECB_CLANG_BUILTIN(__builtin_ctzll) \
  809. && ECB_CLANG_BUILTIN(__builtin_popcount))
  810. /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */
  811. #define ecb_ld32(x) (__builtin_clz (x) ^ 31)
  812. #define ecb_ld64(x) (__builtin_clzll (x) ^ 63)
  813. #define ecb_ctz32(x) __builtin_ctz (x)
  814. #define ecb_ctz64(x) __builtin_ctzll (x)
  815. #define ecb_popcount32(x) __builtin_popcount (x)
  816. /* no popcountll */
  817. #else
ecb_function_ ecb_const int ecb_ctz32 (uint32_t x);
ecb_function_ ecb_const int
ecb_ctz32 (uint32_t x)
{
  /* count trailing zero bits, i.e. the index of the least-significant set
   * bit.  NOTE(review): like the gcc builtin this emulates, the result is
   * presumably undefined for x == 0 - confirm callers never pass 0. */
#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
  /* MSVC >= 2005 on x86/amd64/ia64/arm: use the bit-scan intrinsic */
  unsigned long r;
  _BitScanForward (&r, x);
  return (int)r;
#else
  int r = 0;

  x &= ~x + 1; /* this isolates the lowest bit */

#if ECB_branchless_on_i386
  /* branch-free variant: each test contributes one bit of the index */
  r += !!(x & 0xaaaaaaaa) << 0;
  r += !!(x & 0xcccccccc) << 1;
  r += !!(x & 0xf0f0f0f0) << 2;
  r += !!(x & 0xff00ff00) << 3;
  r += !!(x & 0xffff0000) << 4;
#else
  /* binary search on the position of the single isolated bit */
  if (x & 0xaaaaaaaa) r +=  1;
  if (x & 0xcccccccc) r +=  2;
  if (x & 0xf0f0f0f0) r +=  4;
  if (x & 0xff00ff00) r +=  8;
  if (x & 0xffff0000) r += 16;
#endif

  return r;
#endif
}
ecb_function_ ecb_const int ecb_ctz64 (uint64_t x);
ecb_function_ ecb_const int
ecb_ctz64 (uint64_t x)
{
  /* count trailing zero bits of a 64 bit value; same caveat as ecb_ctz32
   * regarding x == 0 */
#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
  unsigned long r;
  _BitScanForward64 (&r, x);
  return (int)r;
#else
  /* if the low 32 bits are all zero, the answer lies 32 bits further up */
  int shift = x & 0xffffffff ? 0 : 32;
  return ecb_ctz32 (x >> shift) + shift;
#endif
}
  858. ecb_function_ ecb_const int ecb_popcount32 (uint32_t x);
  859. ecb_function_ ecb_const int
  860. ecb_popcount32 (uint32_t x)
  861. {
  862. x -= (x >> 1) & 0x55555555;
  863. x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
  864. x = ((x >> 4) + x) & 0x0f0f0f0f;
  865. x *= 0x01010101;
  866. return x >> 24;
  867. }
ecb_function_ ecb_const int ecb_ld32 (uint32_t x);
ecb_function_ ecb_const int ecb_ld32 (uint32_t x)
{
  /* "discrete log2": index of the most-significant set bit.
   * NOTE(review): this fallback returns 0 for x == 0, but the builtin
   * paths used elsewhere are documented as undefined for 0 - callers
   * should not rely on the x == 0 result. */
#if 1400 <= _MSC_VER && (_M_IX86 || _M_X64 || _M_IA64 || _M_ARM)
  unsigned long r;
  _BitScanReverse (&r, x);
  return (int)r;
#else
  int r = 0;

  /* binary search: shift the value down while accumulating the bit index */
  if (x >> 16) { x >>= 16; r += 16; }
  if (x >>  8) { x >>=  8; r +=  8; }
  if (x >>  4) { x >>=  4; r +=  4; }
  if (x >>  2) { x >>=  2; r +=  2; }
  if (x >>  1) {           r +=  1; }

  return r;
#endif
}
ecb_function_ ecb_const int ecb_ld64 (uint64_t x);
ecb_function_ ecb_const int ecb_ld64 (uint64_t x)
{
  /* index of the most-significant set bit of a 64 bit value */
#if 1400 <= _MSC_VER && (_M_X64 || _M_IA64 || _M_ARM)
  unsigned long r;
  _BitScanReverse64 (&r, x);
  return (int)r;
#else
  int r = 0;

  /* reduce to the 32 bit case by folding away the high half first */
  if (x >> 32) { x >>= 32; r += 32; }

  return r + ecb_ld32 (x);
#endif
}
  898. #endif
  899. ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x);
  900. ecb_function_ ecb_const ecb_bool ecb_is_pot32 (uint32_t x) { return !(x & (x - 1)); }
  901. ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x);
  902. ecb_function_ ecb_const ecb_bool ecb_is_pot64 (uint64_t x) { return !(x & (x - 1)); }
  903. ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x);
  904. ecb_function_ ecb_const uint8_t ecb_bitrev8 (uint8_t x)
  905. {
  906. return ( (x * 0x0802U & 0x22110U)
  907. | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16;
  908. }
ecb_function_ ecb_const uint16_t ecb_bitrev16 (uint16_t x);
ecb_function_ ecb_const uint16_t
ecb_bitrev16 (uint16_t x)
{
  /* reverse bit order by swapping progressively larger groups:
   * single bits, pairs, nibbles, then the two bytes */
  x = ((x >> 1) & 0x5555) | ((x & 0x5555) << 1);
  x = ((x >> 2) & 0x3333) | ((x & 0x3333) << 2);
  x = ((x >> 4) & 0x0f0f) | ((x & 0x0f0f) << 4);
  x = ( x >> 8           ) | ( x << 8);

  return x;
}
ecb_function_ ecb_const uint32_t ecb_bitrev32 (uint32_t x);
ecb_function_ ecb_const uint32_t
ecb_bitrev32 (uint32_t x)
{
  /* reverse bit order by swapping progressively larger groups:
   * single bits, pairs, nibbles, bytes, then the two halfwords */
  x = ((x >>  1) & 0x55555555) | ((x & 0x55555555) <<  1);
  x = ((x >>  2) & 0x33333333) | ((x & 0x33333333) <<  2);
  x = ((x >>  4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) <<  4);
  x = ((x >>  8) & 0x00ff00ff) | ((x & 0x00ff00ff) <<  8);
  x = ( x >> 16              ) | ( x << 16);

  return x;
}
/* popcount64 is only available on 64 bit cpus as gcc builtin */
/* so for this version we are lazy */
ecb_function_ ecb_const int ecb_popcount64 (uint64_t x);
ecb_function_ ecb_const int
ecb_popcount64 (uint64_t x)
{
  /* sum the popcounts of the two 32 bit halves */
  return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);
}
/* bit rotations, implemented as a pair of complementary shifts.
 * NOTE(review): for the 32/64 bit variants a count of 0 makes the
 * complementary shift equal to the full type width, which is undefined
 * behaviour in C - presumably callers always pass 1 <= count < width;
 * the 8/16 bit variants are rescued by integer promotion. */
ecb_inline ecb_const uint8_t  ecb_rotl8  (uint8_t  x, unsigned int count);
ecb_inline ecb_const uint8_t  ecb_rotr8  (uint8_t  x, unsigned int count);
ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count);
ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count);
ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count);
ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count);
ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count);
ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count);

ecb_inline ecb_const uint8_t  ecb_rotl8  (uint8_t  x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); }
ecb_inline ecb_const uint8_t  ecb_rotr8  (uint8_t  x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); }
ecb_inline ecb_const uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); }
ecb_inline ecb_const uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); }
ecb_inline ecb_const uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); }
ecb_inline ecb_const uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); }
ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); }
ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); }
/* byte swapping (endianness conversion), preferring compiler builtins */
#if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
  #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16)
  #define ecb_bswap16(x) __builtin_bswap16 (x)
  #else
  /* no 16 bit builtin: swap 32 bits and keep the interesting half */
  #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
  #endif
  #define ecb_bswap32(x) __builtin_bswap32 (x)
  #define ecb_bswap64(x) __builtin_bswap64 (x)
#elif _MSC_VER
  /* MSVC: use the _byteswap intrinsics from stdlib.h */
  #include <stdlib.h>
  #define ecb_bswap16(x) ((uint16_t)_byteswap_ushort ((uint16_t)(x)))
  #define ecb_bswap32(x) ((uint32_t)_byteswap_ulong  ((uint32_t)(x)))
  #define ecb_bswap64(x) ((uint64_t)_byteswap_uint64 ((uint64_t)(x)))
#else
  /* portable fallbacks built bottom-up from rotations */
  ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x);
  ecb_function_ ecb_const uint16_t
  ecb_bswap16 (uint16_t x)
  {
    /* swapping the two bytes of a 16 bit value is a rotation by 8 */
    return ecb_rotl16 (x, 8);
  }

  ecb_function_ ecb_const uint32_t ecb_bswap32 (uint32_t x);
  ecb_function_ ecb_const uint32_t
  ecb_bswap32 (uint32_t x)
  {
    /* swap the two halfwords, swapping the bytes inside each */
    return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16);
  }

  ecb_function_ ecb_const uint64_t ecb_bswap64 (uint64_t x);
  ecb_function_ ecb_const uint64_t
  ecb_bswap64 (uint64_t x)
  {
    /* swap the two words, swapping the bytes inside each */
    return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
  }
#endif
/* mark a code path as unreachable, allowing the optimiser to drop it */
#if ECB_GCC_VERSION(4,5) || ECB_CLANG_BUILTIN(__builtin_unreachable)
  #define ecb_unreachable() __builtin_unreachable ()
#else
  /* this seems to work fine, but gcc always emits a warning for it :/ */
  ecb_inline ecb_noreturn void ecb_unreachable (void);
  ecb_inline ecb_noreturn void ecb_unreachable (void) { }
#endif

/* try to tell the compiler that some condition is definitely true */
#define ecb_assume(cond) if (!(cond)) ecb_unreachable (); else 0
ecb_inline ecb_const uint32_t ecb_byteorder_helper (void);
ecb_inline ecb_const uint32_t
ecb_byteorder_helper (void)
{
  /* the union code still generates code under pressure in gcc, */
  /* but less than using pointers, and always seems to */
  /* successfully return a constant. */
  /* the reason why we have this horrible preprocessor mess */
  /* is to avoid it in all cases, at least on common architectures */
  /* or when using a recent enough gcc version (>= 4.6) */
#if (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
    || ((__i386 || __i386__ || _M_IX86 || ECB_GCC_AMD64 || ECB_MSVC_AMD64) && !__VOS__)
  #define ECB_LITTLE_ENDIAN 1
  return 0x44332211;
#elif (defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) \
      || ((__AARCH64EB__ || __MIPSEB__ || __ARMEB__) && !__VOS__)
  #define ECB_BIG_ENDIAN 1
  return 0x11223344;
#else
  /* runtime detection: store the byte pattern 11 22 33 44 and read it
   * back as a 32 bit word - the value reveals the byte order */
  union
  {
    uint8_t c[4];
    uint32_t u;
  } u = { 0x11, 0x22, 0x33, 0x44 };

  return u.u;
#endif
}

/* endianness predicates - constant-foldable on known architectures */
ecb_inline ecb_const ecb_bool ecb_big_endian    (void);
ecb_inline ecb_const ecb_bool ecb_big_endian    (void) { return ecb_byteorder_helper () == 0x11223344; }
ecb_inline ecb_const ecb_bool ecb_little_endian (void);
ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44332211; }
/* floor-style modulo: the result has the sign of n, unlike C's "%" */
#if ECB_GCC_VERSION(3,0) || ECB_C99
  /* C99 defines "%" to truncate towards zero, so a simple correction works */
  #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
#else
  /* pre-C99, the sign of "%" with negative operands is implementation-defined */
  #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
#endif

/* integer division rounding down (rd) / up (ru); templates in C++ avoid
 * the multiple evaluation inherent in the macro versions */
#if ECB_CPP
template<typename T>
static inline T ecb_div_rd (T val, T div)
{
  return val < 0 ? - ((-val + div - 1) / div) : (val          ) / div;
}
template<typename T>
static inline T ecb_div_ru (T val, T div)
{
  return val < 0 ? - ((-val          ) / div) : (val + div - 1) / div;
}
#else
  #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val)            ) / (div))
  #define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val)            ) / (div)) : ((val) + (div) - 1) / (div))
#endif

#if ecb_cplusplus_does_not_suck
/* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */
template<typename T, int N>
static inline int ecb_array_length (const T (&arr)[N])
{
  return N;
}
#else
/* number of elements in a statically-sized array (NOT a pointer!) */
#define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
#endif
  1055. ecb_function_ ecb_const uint32_t ecb_binary16_to_binary32 (uint32_t x);
  1056. ecb_function_ ecb_const uint32_t
  1057. ecb_binary16_to_binary32 (uint32_t x)
  1058. {
  1059. unsigned int s = (x & 0x8000) << (31 - 15);
  1060. int e = (x >> 10) & 0x001f;
  1061. unsigned int m = x & 0x03ff;
  1062. if (ecb_expect_false (e == 31))
  1063. /* infinity or NaN */
  1064. e = 255 - (127 - 15);
  1065. else if (ecb_expect_false (!e))
  1066. {
  1067. if (ecb_expect_true (!m))
  1068. /* zero, handled by code below by forcing e to 0 */
  1069. e = 0 - (127 - 15);
  1070. else
  1071. {
  1072. /* subnormal, renormalise */
  1073. unsigned int s = 10 - ecb_ld32 (m);
  1074. m = (m << s) & 0x3ff; /* mask implicit bit */
  1075. e -= s - 1;
  1076. }
  1077. }
  1078. /* e and m now are normalised, or zero, (or inf or nan) */
  1079. e += 127 - 15;
  1080. return s | (e << 23) | (m << (23 - 10));
  1081. }
ecb_function_ ecb_const uint16_t ecb_binary32_to_binary16 (uint32_t x);
ecb_function_ ecb_const uint16_t
ecb_binary32_to_binary16 (uint32_t x)
{
  /* convert an ieee single (binary32) bit pattern to the nearest half
   * (binary16) bit pattern, using round-to-nearest-even */
  unsigned int s = (x >> 16) & 0x00008000; /* sign bit, the easy part */
  unsigned int e = ((x >> 23) & 0x000000ff) - (127 - 15); /* the desired exponent */
  unsigned int m = x & 0x007fffff;

  x &= 0x7fffffff; /* drop the sign for the magnitude tests below */

  /* if it's within range of binary16 normals, use fast path */
  if (ecb_expect_true (0x38800000 <= x && x <= 0x477fefff))
    {
      /* mantissa round-to-even */
      m += 0x00000fff + ((m >> (23 - 10)) & 1);

      /* handle overflow */
      if (ecb_expect_false (m >= 0x00800000))
        {
          m >>= 1;
          e +=  1;
        }

      return s | (e << 10) | (m >> (23 - 10));
    }

  /* handle large numbers and infinity */
  if (ecb_expect_true (0x477fefff < x && x <= 0x7f800000))
    return s | 0x7c00; /* +-infinity */

  /* handle zero, subnormals and small numbers */
  if (ecb_expect_true (x < 0x38800000))
    {
      /* zero */
      if (ecb_expect_true (!x))
        return s;

      /* handle subnormals */

      /* too small, will be zero */
      if (e < (14 - 24)) /* might not be sharp, but is good enough */
        return s;

      m |= 0x00800000; /* make implicit bit explicit */

      /* very tricky - we need to round to the nearest e (+10) bit value */
      {
        unsigned int bits = 14 - e;              /* number of mantissa bits shifted out */
        unsigned int half = (1 << (bits - 1)) - 1; /* just below one half ulp */
        unsigned int even = (m >> bits) & 1;     /* tie-break towards even */

        /* if this overflows, we will end up with a normalised number */
        m = (m + half + even) >> bits;
      }

      return s | m;
    }

  /* handle NaNs, preserve leftmost nan bits, but make sure we don't turn them into infinities */
  m >>= 13;

  return s | 0x7c00 | m | !m;
}
  1131. /*******************************************************************************/
  1132. /* floating point stuff, can be disabled by defining ECB_NO_LIBM */
  1133. /* basically, everything uses "ieee pure-endian" floating point numbers */
  1134. /* the only noteworthy exception is ancient armle, which uses order 43218765 */
  1135. #if 0 \
  1136. || __i386 || __i386__ \
  1137. || ECB_GCC_AMD64 \
  1138. || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
  1139. || defined __s390__ || defined __s390x__ \
  1140. || defined __mips__ \
  1141. || defined __alpha__ \
  1142. || defined __hppa__ \
  1143. || defined __ia64__ \
  1144. || defined __m68k__ \
  1145. || defined __m88k__ \
  1146. || defined __sh__ \
  1147. || defined _M_IX86 || defined ECB_MSVC_AMD64 || defined _M_IA64 \
  1148. || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
  1149. || defined __aarch64__
  1150. #define ECB_STDFP 1
  1151. #include <string.h> /* for memcpy */
  1152. #else
  1153. #define ECB_STDFP 0
  1154. #endif
  1155. #ifndef ECB_NO_LIBM
  1156. #include <math.h> /* for frexp*, ldexp*, INFINITY, NAN */
  1157. /* only the oldest of old doesn't have this one. solaris. */
  1158. #ifdef INFINITY
  1159. #define ECB_INFINITY INFINITY
  1160. #else
  1161. #define ECB_INFINITY HUGE_VAL
  1162. #endif
  1163. #ifdef NAN
  1164. #define ECB_NAN NAN
  1165. #else
  1166. #define ECB_NAN ECB_INFINITY
  1167. #endif
  1168. #if ECB_C99 || _XOPEN_VERSION >= 600 || _POSIX_VERSION >= 200112L
  1169. #define ecb_ldexpf(x,e) ldexpf ((x), (e))
  1170. #define ecb_frexpf(x,e) frexpf ((x), (e))
  1171. #else
  1172. #define ecb_ldexpf(x,e) (float) ldexp ((double) (x), (e))
  1173. #define ecb_frexpf(x,e) (float) frexp ((double) (x), (e))
  1174. #endif
  1175. /* convert a float to ieee single/binary32 */
ecb_function_ ecb_const uint32_t ecb_float_to_binary32 (float x);
ecb_function_ ecb_const uint32_t
ecb_float_to_binary32 (float x)
{
  uint32_t r;

#if ECB_STDFP
  /* native ieee-754 single: the bit pattern can simply be copied */
  memcpy (&r, &x, 4);
#else
  /* slow emulation, works for anything but -0 */
  uint32_t m;
  int e;

  if (x ==  0e0f                    ) return 0x00000000U; /* zero (-0 is mis-encoded as +0) */
  if (x > +3.40282346638528860e+38f ) return 0x7f800000U; /* +infinity */
  if (x < -3.40282346638528860e+38f ) return 0xff800000U; /* -infinity */
  if (x !=  x                       ) return 0x7fbfffffU; /* NaN */

  m = ecb_frexpf (x, &e) * 0x1000000U; /* mantissa scaled to 24 bits */

  r = m & 0x80000000U; /* sign bit */

  if (r)
    m = -m;

  if (e <= -126)
    {
      /* subnormal: shift the mantissa into place, pin the exponent */
      m &= 0xffffffU;
      m >>= (-125 - e);
      e = -126;
    }

  r |= (e + 126) << 23; /* rebias the exponent */
  r |= m & 0x7fffffU;   /* mantissa without the implicit bit */
#endif

  return r;
}
/* converts an ieee single/binary32 to a float */
ecb_function_ ecb_const float ecb_binary32_to_float (uint32_t x);
ecb_function_ ecb_const float
ecb_binary32_to_float (uint32_t x)
{
  float r;

#if ECB_STDFP
  /* native ieee-754 single: copy the bits verbatim */
  memcpy (&r, &x, 4);
#else
  /* emulation, only works for normals and subnormals and +0 */
  int neg = x >> 31;         /* sign bit */
  int e = (x >> 23) & 0xffU; /* biased exponent */

  x &= 0x7fffffU; /* mantissa */

  if (e)
    x |= 0x800000U; /* normal: restore the implicit bit */
  else
    e = 1; /* subnormal: same effective exponent as e == 1, no implicit bit */

  /* we distrust ldexpf a bit and do the 2**-24 scaling by an extra multiply */
  r = ecb_ldexpf (x * (0.5f / 0x800000U), e - 126);

  r = neg ? -r : r;
#endif

  return r;
}
  1229. /* convert a double to ieee double/binary64 */
  1230. ecb_function_ ecb_const uint64_t ecb_double_to_binary64 (double x);
  1231. ecb_function_ ecb_const uint64_t
  1232. ecb_double_to_binary64 (double x)
  1233. {
  1234. uint64_t r;
  1235. #if ECB_STDFP
  1236. memcpy (&r, &x, 8);
  1237. #else
  1238. /* slow emulation, works for anything but -0 */
  1239. uint64_t m;
  1240. int e;
  1241. if (x == 0e0 ) return 0x0000000000000000U;
  1242. if (x > +1.79769313486231470e+308) return 0x7ff0000000000000U;
  1243. if (x < -1.79769313486231470e+308) return 0xfff0000000000000U;
  1244. if (x != x ) return 0X7ff7ffffffffffffU;
  1245. m = frexp (x, &e) * 0x20000000000000U;
  1246. r = m & 0x8000000000000000;;
  1247. if (r)
  1248. m = -m;
  1249. if (e <= -1022)
  1250. {
  1251. m &= 0x1fffffffffffffU;
  1252. m >>= (-1021 - e);
  1253. e = -1022;
  1254. }
  1255. r |= ((uint64_t)(e + 1022)) << 52;
  1256. r |= m & 0xfffffffffffffU;
  1257. #endif
  1258. return r;
  1259. }
/* converts an ieee double/binary64 to a double */
ecb_function_ ecb_const double ecb_binary64_to_double (uint64_t x);
ecb_function_ ecb_const double
ecb_binary64_to_double (uint64_t x)
{
  double r;

#if ECB_STDFP
  /* native ieee-754 double: copy the bits verbatim */
  memcpy (&r, &x, 8);
#else
  /* emulation, only works for normals and subnormals and +0 */
  int neg = x >> 63;          /* sign bit */
  int e = (x >> 52) & 0x7ffU; /* biased exponent */

  x &= 0xfffffffffffffU; /* mantissa */

  if (e)
    x |= 0x10000000000000U; /* normal: restore the implicit bit */
  else
    e = 1; /* subnormal: same effective exponent as e == 1, no implicit bit */

  /* we distrust ldexp a bit and do the 2**-53 scaling by an extra multiply */
  r = ldexp (x * (0.5 / 0x10000000000000U), e - 1022);

  r = neg ? -r : r;
#endif

  return r;
}
/* convert a float to ieee half/binary16 */
ecb_function_ ecb_const uint16_t ecb_float_to_binary16 (float x);
ecb_function_ ecb_const uint16_t
ecb_float_to_binary16 (float x)
{
  /* go via binary32; rounding happens in the binary32->binary16 step */
  return ecb_binary32_to_binary16 (ecb_float_to_binary32 (x));
}

/* convert an ieee half/binary16 to float */
ecb_function_ ecb_const float ecb_binary16_to_float (uint16_t x);
ecb_function_ ecb_const float
ecb_binary16_to_float (uint16_t x)
{
  /* go via binary32; the binary16->binary32 expansion is exact */
  return ecb_binary32_to_float (ecb_binary16_to_binary32 (x));
}
  1297. #endif
  1298. #endif
  1299. /* ECB.H END */
  1300. #if ECB_MEMORY_FENCE_NEEDS_PTHREADS
  1301. /* if your architecture doesn't need memory fences, e.g. because it is
  1302. * single-cpu/core, or if you use libev in a project that doesn't use libev
  1303. * from multiple threads, then you can define ECB_AVOID_PTHREADS when compiling
  1304. * libev, in which cases the memory fences become nops.
  1305. * alternatively, you can remove this #error and link against libpthread,
  1306. * which will then provide the memory fences.
  1307. */
  1308. # error "memory fences not defined for your architecture, please report"
  1309. #endif
  1310. #ifndef ECB_MEMORY_FENCE
  1311. # define ECB_MEMORY_FENCE do { } while (0)
  1312. # define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
  1313. # define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
  1314. #endif
  1315. #define expect_false(cond) ecb_expect_false (cond)
  1316. #define expect_true(cond) ecb_expect_true (cond)
  1317. #define noinline ecb_noinline
  1318. #define inline_size ecb_inline
  1319. #if EV_FEATURE_CODE
  1320. # define inline_speed ecb_inline
  1321. #else
  1322. # define inline_speed static noinline
  1323. #endif
  1324. #define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
  1325. #if EV_MINPRI == EV_MAXPRI
  1326. # define ABSPRI(w) (((W)w), 0)
  1327. #else
  1328. # define ABSPRI(w) (((W)w)->priority - EV_MINPRI)
  1329. #endif
  1330. #define EMPTY /* required for microsofts broken pseudo-c compiler */
  1331. #define EMPTY2(a,b) /* used to suppress some warnings */
  1332. typedef ev_watcher *W;
  1333. typedef ev_watcher_list *WL;
  1334. typedef ev_watcher_time *WT;
  1335. #define ev_active(w) ((W)(w))->active
  1336. #define ev_at(w) ((WT)(w))->at
  1337. #if EV_USE_REALTIME
  1338. /* sig_atomic_t is used to avoid per-thread variables or locking but still */
  1339. /* giving it a reasonably high chance of working on typical architectures */
  1340. static EV_ATOMIC_T have_realtime; /* did clock_gettime (CLOCK_REALTIME) work? */
  1341. #endif
  1342. #if EV_USE_MONOTONIC
  1343. static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */
  1344. #endif
  1345. #ifndef EV_FD_TO_WIN32_HANDLE
  1346. # define EV_FD_TO_WIN32_HANDLE(fd) _get_osfhandle (fd)
  1347. #endif
  1348. #ifndef EV_WIN32_HANDLE_TO_FD
  1349. # define EV_WIN32_HANDLE_TO_FD(handle) _open_osfhandle (handle, 0)
  1350. #endif
  1351. #ifndef EV_WIN32_CLOSE_FD
  1352. # define EV_WIN32_CLOSE_FD(fd) close (fd)
  1353. #endif
  1354. #ifdef _WIN32
  1355. # include "ev_win32.c"
  1356. #endif
  1357. /*****************************************************************************/
  1358. /* define a suitable floor function (only used by periodics atm) */
  1359. #if EV_USE_FLOOR
  1360. # include <math.h>
  1361. # define ev_floor(v) floor (v)
  1362. #else
  1363. #include <float.h>
  1364. /* a floor() replacement function, should be independent of ev_tstamp type */
/* compute floor (v) without libm, for any reasonable ev_tstamp type */
static ev_tstamp noinline
ev_floor (ev_tstamp v)
{
  /* the choice of shift factor is not terribly important */
#if FLT_RADIX != 2 /* assume FLT_RADIX == 10 */
  const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.;
#else
  const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;
#endif

  /* argument too large for an unsigned long? */
  if (expect_false (v >= shift))
    {
      ev_tstamp f;

      if (v == v - 1.)
        return v; /* very large number - no fractional precision left, v is already integral */

      /* split v into a multiple of shift plus a remainder, floor both recursively */
      f = shift * ev_floor (v * (1. / shift));
      return f + ev_floor (v - f);
    }

  /* special treatment for negative args? */
  if (expect_false (v < 0.))
    {
      /* floor (-v) negated rounds towards zero; subtract 1 unless v was integral */
      ev_tstamp f = -ev_floor (-v);

      return f - (f == v ? 0 : 1);
    }

  /* fits into an unsigned long - truncation is floor for non-negative v */
  return (unsigned long)v;
}
  1392. #endif
  1393. /*****************************************************************************/
  1394. #ifdef __linux
  1395. # include <sys/utsname.h>
  1396. #endif
/* parse the running linux kernel version "a.b.c" into 0x00aabbcc
 * (one byte per component), or return 0 when it cannot be determined
 * or when not running on linux */
static unsigned int noinline ecb_cold
ev_linux_version (void)
{
#ifdef __linux
  unsigned int v = 0;
  struct utsname buf;
  int i;
  char *p = buf.release;

  if (uname (&buf))
    return 0;

  /* parse up to three dot-separated decimal components of the release string */
  for (i = 3+1; --i; )
    {
      unsigned int c = 0;

      for (;;)
        {
          if (*p >= '0' && *p <= '9')
            c = c * 10 + *p++ - '0';
          else
            {
              p += *p == '.'; /* skip the separator, stop at anything else */
              break;
            }
        }

      v = (v << 8) | c; /* pack this component into the low byte */
    }

  return v;
#else
  return 0;
#endif
}
  1427. /*****************************************************************************/
  1428. #if EV_AVOID_STDIO
/* stdio-free error output: best-effort write of msg to stderr
 * (the return value of write is deliberately ignored) */
static void noinline ecb_cold
ev_printerr (const char *msg)
{
  write (STDERR_FILENO, msg, strlen (msg));
}
  1434. #endif
/* user-installable callback for fatal system errors; 0 means use the default
 * behaviour (print and abort, see ev_syserr below) */
static void (*syserr_cb)(const char *msg) EV_THROW;

/* install (or with cb == 0, remove) the system error callback */
void ecb_cold
ev_set_syserr_cb (void (*cb)(const char *msg) EV_THROW) EV_THROW
{
  syserr_cb = cb;
}
/* report a fatal system error: hand it to the user callback if one is
 * installed, otherwise print msg and errno to stderr and abort */
static void noinline ecb_cold
ev_syserr (const char *msg)
{
  if (!msg)
    msg = "(libev) system error";

  if (syserr_cb)
    syserr_cb (msg);
  else
    {
#if EV_AVOID_STDIO
      ev_printerr (msg);
      ev_printerr (": ");
      ev_printerr (strerror (errno));
      ev_printerr ("\n");
#else
      perror (msg);
#endif
      abort ();
    }
}
  1461. static void *
  1462. ev_realloc_emul (void *ptr, long size) EV_THROW
  1463. {
  1464. /* some systems, notably openbsd and darwin, fail to properly
  1465. * implement realloc (x, 0) (as required by both ansi c-89 and
  1466. * the single unix specification, so work around them here.
  1467. * recently, also (at least) fedora and debian started breaking it,
  1468. * despite documenting it otherwise.
  1469. */
  1470. if (size)
  1471. return realloc (ptr, size);
  1472. free (ptr);
  1473. return 0;
  1474. }
/* the allocator used for all libev memory management; user-replaceable,
 * defaults to the realloc emulation above */
static void *(*alloc)(void *ptr, long size) EV_THROW = ev_realloc_emul;

/* replace the allocator; cb must follow realloc semantics
 * (allocate, resize or free depending on its arguments) */
void ecb_cold
ev_set_allocator (void *(*cb)(void *ptr, long size) EV_THROW) EV_THROW
{
  alloc = cb;
}
/* allocate/resize/free memory via the configured allocator, aborting the
 * process when an actual allocation (size != 0) fails */
inline_speed void *
ev_realloc (void *ptr, long size)
{
  ptr = alloc (ptr, size);

  if (!ptr && size)
    {
#if EV_AVOID_STDIO
      ev_printerr ("(libev) memory allocation failed, aborting.\n");
#else
      fprintf (stderr, "(libev) cannot allocate %ld bytes, aborting.", size);
#endif
      abort ();
    }

  return ptr;
}
  1496. #define ev_malloc(size) ev_realloc (0, (size))
  1497. #define ev_free(ptr) ev_realloc ((ptr), 0)
  1498. /*****************************************************************************/
  1499. /* set in reify when reification needed */
  1500. #define EV_ANFD_REIFY 1
/* file descriptor info structure - one per potentially watched fd */
typedef struct
{
  WL head;              /* linked list of watchers registered for this fd */
  unsigned char events; /* the events watched for */
  unsigned char reify;  /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
  unsigned char emask;  /* the epoll backend stores the actual kernel mask in here */
  unsigned char unused; /* padding */
#if EV_USE_EPOLL
  unsigned int egen;    /* generation counter to counter epoll bugs */
#endif
#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
  SOCKET handle;        /* the underlying winsock SOCKET for this fd */
#endif
#if EV_USE_IOCP
  OVERLAPPED or, ow;    /* NOTE(review): presumably per-fd overlapped state for
                         * read/write operations - confirm against ev_iocp.c */
#endif
} ANFD;
/* stores the pending event set for a given watcher */
typedef struct
{
  W w;        /* the watcher the events belong to */
  int events; /* the pending event set for the given watcher */
} ANPENDING;

#if EV_USE_INOTIFY
/* hash table entry per inotify-id */
typedef struct
{
  WL head; /* list of watchers hashed onto this inotify id */
} ANFS;
#endif
/* Heap Entry */
#if EV_HEAP_CACHE_AT
/* a heap element that caches the timestamp next to the watcher pointer,
 * trading memory for fewer pointer dereferences during heap operations */
typedef struct {
  ev_tstamp at; /* cached copy of w->at */
  WT w;
} ANHE;

#define ANHE_w(he)        (he).w     /* access watcher, read-write */
#define ANHE_at(he)       (he).at    /* access cached at, read-only */
#define ANHE_at_cache(he) (he).at = (he).w->at /* update at from watcher */
#else
/* a heap element is just the watcher pointer itself */
typedef WT ANHE;

#define ANHE_w(he)        (he)
#define ANHE_at(he)       (he)->at
#define ANHE_at_cache(he)
#endif
#if EV_MULTIPLICITY
/* the event loop structure: its members are generated from ev_vars.h via
 * the VAR macro, and ev_wrap.h provides accessor macros so the same code
 * also compiles in the single-loop (static variables) configuration below */
struct ev_loop
{
  ev_tstamp ev_rt_now;
#define ev_rt_now ((loop)->ev_rt_now)
#define VAR(name,decl) decl;
#include "ev_vars.h"
#undef VAR
};
#include "ev_wrap.h"

static struct ev_loop default_loop_struct;
EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */
#else
/* single-loop build: the loop state becomes file-scope static variables */
EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a definition despite extern */
#define VAR(name,decl) static decl;
#include "ev_vars.h"
#undef VAR
static int ev_default_loop_ptr;
#endif
  1568. #if EV_FEATURE_API
  1569. # define EV_RELEASE_CB if (expect_false (release_cb)) release_cb (EV_A)
  1570. # define EV_ACQUIRE_CB if (expect_false (acquire_cb)) acquire_cb (EV_A)
  1571. # define EV_INVOKE_PENDING invoke_cb (EV_A)
  1572. #else
  1573. # define EV_RELEASE_CB (void)0
  1574. # define EV_ACQUIRE_CB (void)0
  1575. # define EV_INVOKE_PENDING ev_invoke_pending (EV_A)
  1576. #endif
  1577. #define EVBREAK_RECURSE 0x80
  1578. /*****************************************************************************/
#ifndef EV_HAVE_EV_TIME
/* return the current wall-clock time as a libev timestamp
 * (seconds since the epoch, as a double) */
ev_tstamp
ev_time (void) EV_THROW
{
#if EV_USE_REALTIME
  if (expect_true (have_realtime))
    {
      struct timespec ts;
      clock_gettime (CLOCK_REALTIME, &ts);
      return ts.tv_sec + ts.tv_nsec * 1e-9;
    }
#endif

  /* fallback: gettimeofday only has microsecond resolution */
  struct timeval tv;
  gettimeofday (&tv, 0);
  return tv.tv_sec + tv.tv_usec * 1e-6;
}
#endif
/* return the current time from the monotonic clock when available,
 * falling back to wall-clock time otherwise */
inline_size ev_tstamp
get_clock (void)
{
#if EV_USE_MONOTONIC
  if (expect_true (have_monotonic))
    {
      struct timespec ts;
      clock_gettime (CLOCK_MONOTONIC, &ts);
      return ts.tv_sec + ts.tv_nsec * 1e-9;
    }
#endif

  return ev_time ();
}
#if EV_MULTIPLICITY
/* return the loop's cached wall-clock time (updated at the start of
 * each iteration, not the actual current time) */
ev_tstamp
ev_now (EV_P) EV_THROW
{
  return ev_rt_now;
}
#endif
/* delay execution for the given (fractional) number of seconds;
 * non-positive delays return immediately */
void
ev_sleep (ev_tstamp delay) EV_THROW
{
  if (delay > 0.)
    {
#if EV_USE_NANOSLEEP
      struct timespec ts;

      EV_TS_SET (ts, delay);
      nanosleep (&ts, 0);
#elif defined _WIN32
      Sleep ((unsigned long)(delay * 1e3));
#else
      struct timeval tv;

      /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */
      /* something not guaranteed by newer posix versions, but guaranteed */
      /* by older ones */
      EV_TV_SET (tv, delay);
      select (0, 0, 0, 0, &tv);
#endif
    }
}
  1637. /*****************************************************************************/
  1638. #define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */
  1639. /* find a suitable new size for the given array, */
  1640. /* hopefully by rounding to a nice-to-malloc size */
  1641. inline_size int
  1642. array_nextsize (int elem, int cur, int cnt)
  1643. {
  1644. int ncur = cur + 1;
  1645. do
  1646. ncur <<= 1;
  1647. while (cnt > ncur);
  1648. /* if size is large, round to MALLOC_ROUND - 4 * longs to accommodate malloc overhead */
  1649. if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4)
  1650. {
  1651. ncur *= elem;
  1652. ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1);
  1653. ncur = ncur - sizeof (void *) * 4;
  1654. ncur /= elem;
  1655. }
  1656. return ncur;
  1657. }
/* grow an array to hold at least cnt elements: computes the new capacity,
 * stores it back through cur, and returns the reallocated base pointer */
static void * noinline ecb_cold
array_realloc (int elem, void *base, int *cur, int cnt)
{
  *cur = array_nextsize (elem, *cur, cnt);
  return ev_realloc (base, elem * *cur);
}
/* zero-fill count freshly-allocated elements starting at base */
#define array_init_zero(base,count)	\
  memset ((void *)(base), 0, sizeof (*(base)) * (count))

/* ensure the array can hold cnt elements, growing it if necessary and
 * running init over the newly-added tail elements */
#define array_needsize(type,base,cur,cnt,init)			\
  if (expect_false ((cnt) > (cur)))				\
    {								\
      int ecb_unused ocur_ = (cur);				\
      (base) = (type *)array_realloc				\
         (sizeof (type), (base), &(cur), (cnt));		\
      init ((base) + (ocur_), (cur) - ocur_);			\
    }

#if 0
/* shrink an array again when it is mostly unused - currently disabled */
#define array_slim(type,stem)					\
  if (stem ## max < array_roundsize (stem ## cnt >> 2))		\
    {								\
      stem ## max = array_roundsize (stem ## cnt >> 1);		\
      base = (type *)ev_realloc (base, sizeof (type) * (stem ## max));\
      fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\
    }
#endif

/* free an array and reset its cnt/max/base variables (idx selects e.g. a priority index) */
#define array_free(stem, idx) \
  ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0; stem ## s idx = 0
  1685. /*****************************************************************************/
/* dummy callback for pending events */
static void noinline
pendingcb (EV_P_ ev_prepare *w, int revents)
{
}

/* queue an event for the given watcher: if the watcher is already pending,
 * merge the new events into the existing entry, otherwise append a new
 * entry to the pending array for the watcher's priority */
void noinline
ev_feed_event (EV_P_ void *w, int revents) EV_THROW
{
  W w_ = (W)w;
  int pri = ABSPRI (w_);

  if (expect_false (w_->pending))
    pendings [pri][w_->pending - 1].events |= revents;
  else
    {
      /* pending is the 1-based index into the pending array, 0 = not pending */
      w_->pending = ++pendingcnt [pri];
      array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, EMPTY2);
      pendings [pri][w_->pending - 1].w      = w_;
      pendings [pri][w_->pending - 1].events = revents;
    }

  /* restart pending processing from the highest priority */
  pendingpri = NUMPRI - 1;
}

/* buffer a watcher so its event can be fed later in reverse order */
inline_speed void
feed_reverse (EV_P_ W w)
{
  array_needsize (W, rfeeds, rfeedmax, rfeedcnt + 1, EMPTY2);
  rfeeds [rfeedcnt++] = w;
}

/* drain the reverse-feed buffer, feeding revents to each watcher
 * last-in-first-out; assumes rfeedcnt > 0 */
inline_size void
feed_reverse_done (EV_P_ int revents)
{
  do
    ev_feed_event (EV_A_ rfeeds [--rfeedcnt], revents);
  while (rfeedcnt);
}

/* feed the same event type to an array of watchers in order */
inline_speed void
queue_events (EV_P_ W *events, int eventcnt, int type)
{
  int i;

  for (i = 0; i < eventcnt; ++i)
    ev_feed_event (EV_A_ events [i], type);
}
  1727. /*****************************************************************************/
  1728. inline_speed void
  1729. fd_event_nocheck (EV_P_ int fd, int revents)
  1730. {
  1731. ANFD *anfd = anfds + fd;
  1732. ev_io *w;
  1733. for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
  1734. {
  1735. int ev = w->events & revents;
  1736. if (ev)
  1737. ev_feed_event (EV_A_ (W)w, ev);
  1738. }
  1739. }
  1740. /* do not submit kernel events for fds that have reify set */
  1741. /* because that means they changed while we were polling for new events */
  1742. inline_speed void
  1743. fd_event (EV_P_ int fd, int revents)
  1744. {
  1745. ANFD *anfd = anfds + fd;
  1746. if (expect_true (!anfd->reify))
  1747. fd_event_nocheck (EV_A_ fd, revents);
  1748. }
  1749. void
  1750. ev_feed_fd_event (EV_P_ int fd, int revents) EV_THROW
  1751. {
  1752. if (fd >= 0 && fd < anfdmax)
  1753. fd_event_nocheck (EV_A_ fd, revents);
  1754. }
/* make sure the external fd watch events are in-sync */
/* with the kernel/libev internal state */
inline_size void
fd_reify (EV_P)
{
  int i;

#if EV_SELECT_IS_WINSOCKET || EV_USE_IOCP
  /* on windows the underlying socket handle of an fd can change;
   * detect that first and force a full kernel re-registration */
  for (i = 0; i < fdchangecnt; ++i)
    {
      int fd = fdchanges [i];
      ANFD *anfd = anfds + fd;

      if (anfd->reify & EV__IOFDSET && anfd->head)
        {
          SOCKET handle = EV_FD_TO_WIN32_HANDLE (fd);

          if (handle != anfd->handle)
            {
              unsigned long arg;

              assert (("libev: only socket fds supported in this configuration", ioctlsocket (handle, FIONREAD, &arg) == 0));

              /* handle changed, but fd didn't - we need to do it in two steps */
              backend_modify (EV_A_ fd, anfd->events, 0);
              anfd->events = 0;
              anfd->handle = handle;
            }
        }
    }
#endif

  for (i = 0; i < fdchangecnt; ++i)
    {
      int fd = fdchanges [i];
      ANFD *anfd = anfds + fd;
      ev_io *w;

      unsigned char o_events = anfd->events;
      unsigned char o_reify  = anfd->reify;

      anfd->reify = 0;

      /*if (expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */
        {
          /* recompute the union of all watcher event masks for this fd */
          anfd->events = 0;

          for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
            anfd->events |= (unsigned char)w->events;

          if (o_events != anfd->events)
            o_reify = EV__IOFDSET; /* actually |= */
        }

      /* only tell the backend when the effective event set changed */
      if (o_reify & EV__IOFDSET)
        backend_modify (EV_A_ fd, o_events, anfd->events);
    }

  fdchangecnt = 0;
}
/* something about the given fd changed */
inline_size void
fd_change (EV_P_ int fd, int flags)
{
  unsigned char reify = anfds [fd].reify;
  anfds [fd].reify |= flags;

  /* only enqueue the fd once: a nonzero previous reify means it is
   * already in the fdchanges array awaiting fd_reify */
  if (expect_true (!reify))
    {
      ++fdchangecnt;
      array_needsize (int, fdchanges, fdchangemax, fdchangecnt, EMPTY2);
      fdchanges [fdchangecnt - 1] = fd;
    }
}
/* the given fd is invalid/unusable, so make sure it doesn't hurt us anymore */
inline_speed void ecb_cold
fd_kill (EV_P_ int fd)
{
  ev_io *w;

  /* stop every watcher on the fd and notify it with EV_ERROR */
  while ((w = (ev_io *)anfds [fd].head))
    {
      ev_io_stop (EV_A_ w);
      ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE);
    }
}

/* check whether the given fd is actually valid, for error recovery */
inline_size int ecb_cold
fd_valid (int fd)
{
#ifdef _WIN32
  return EV_FD_TO_WIN32_HANDLE (fd) != -1;
#else
  /* F_GETFD fails with EBADF on a closed/invalid descriptor */
  return fcntl (fd, F_GETFD) != -1;
#endif
}

/* called on EBADF to verify fds */
static void noinline ecb_cold
fd_ebadf (EV_P)
{
  int fd;

  for (fd = 0; fd < anfdmax; ++fd)
    if (anfds [fd].events)
      if (!fd_valid (fd) && errno == EBADF)
        fd_kill (EV_A_ fd);
}

/* called on ENOMEM in select/poll to kill some fds and retry */
static void noinline ecb_cold
fd_enomem (EV_P)
{
  int fd;

  /* kill only the single highest-numbered watched fd, then let the caller retry */
  for (fd = anfdmax; fd--; )
    if (anfds [fd].events)
      {
        fd_kill (EV_A_ fd);
        break;
      }
}

/* usually called after fork if backend needs to re-arm all fds from scratch */
static void noinline
fd_rearm_all (EV_P)
{
  int fd;

  for (fd = 0; fd < anfdmax; ++fd)
    if (anfds [fd].events)
      {
        anfds [fd].events = 0;
        anfds [fd].emask  = 0;
        fd_change (EV_A_ fd, EV__IOFDSET | EV_ANFD_REIFY);
      }
}

/* used to prepare libev internal fd's */
/* this is not fork-safe */
inline_speed void
fd_intern (int fd)
{
#ifdef _WIN32
  unsigned long arg = 1;
  ioctlsocket (EV_FD_TO_WIN32_HANDLE (fd), FIONBIO, &arg);
#else
  /* close-on-exec and non-blocking for libev's own descriptors */
  fcntl (fd, F_SETFD, FD_CLOEXEC);
  fcntl (fd, F_SETFL, O_NONBLOCK);
#endif
}
  1884. /*****************************************************************************/
  1885. /*
  1886. * the heap functions want a real array index. array index 0 is guaranteed to not
  1887. * be in-use at any time. the first heap entry is at array [HEAP0]. DHEAP gives
  1888. * the branching factor of the d-tree.
  1889. */
  1890. /*
  1891. * at the moment we allow libev the luxury of two heaps,
  1892. * a small-code-size 2-heap one and a ~1.5kb larger 4-heap
  1893. * which is more cache-efficient.
  1894. * the difference is about 5% with 50000+ watchers.
  1895. */
#if EV_USE_4HEAP

#define DHEAP 4
#define HEAP0 (DHEAP - 1) /* index of first element in heap */
#define HPARENT(k) ((((k) - HEAP0 - 1) / DHEAP) + HEAP0)
#define UPHEAP_DONE(p,k) ((p) == (k))

/* away from the root: sift the element at index k down the 4-ary heap
 * until all of its (up to DHEAP) children have a later timestamp */
inline_speed void
downheap (ANHE *heap, int N, int k)
{
  ANHE he = heap [k];
  ANHE *E = heap + N + HEAP0; /* one past the last element */

  for (;;)
    {
      ev_tstamp minat;
      ANHE *minpos;
      ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1; /* first child */

      /* find minimum child */
      if (expect_true (pos + DHEAP - 1 < E))
        {
          /* fast path: all four children exist, no bounds checks needed */
          /* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
          if (               ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
          if (               ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
          if (               ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
        }
      else if (pos < E)
        {
          /* slow path: partial set of children, check each index */
          /* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
          if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
          if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
          if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
        }
      else
        break; /* k is a leaf */

      if (ANHE_at (he) <= minat)
        break; /* heap property restored */

      /* move the smallest child up and continue below it */
      heap [k] = *minpos;
      ev_active (ANHE_w (*minpos)) = k;

      k = minpos - heap;
    }

  heap [k] = he;
  ev_active (ANHE_w (he)) = k; /* keep the watcher's active index in sync */
}

#else /* 4HEAP */

#define HEAP0 1
#define HPARENT(k) ((k) >> 1)
#define UPHEAP_DONE(p,k) (!(p))

/* away from the root: classic binary-heap sift-down */
inline_speed void
downheap (ANHE *heap, int N, int k)
{
  ANHE he = heap [k];

  for (;;)
    {
      int c = k << 1; /* left child */

      if (c >= N + HEAP0)
        break;

      /* pick the smaller of the two children, if the right one exists */
      c += c + 1 < N + HEAP0 && ANHE_at (heap [c]) > ANHE_at (heap [c + 1])
           ? 1 : 0;

      if (ANHE_at (he) <= ANHE_at (heap [c]))
        break;

      heap [k] = heap [c];
      ev_active (ANHE_w (heap [k])) = k;

      k = c;
    }

  heap [k] = he;
  ev_active (ANHE_w (he)) = k;
}
#endif
/* towards the root: sift the element at index k up while its parent
 * has a later timestamp */
inline_speed void
upheap (ANHE *heap, int k)
{
  ANHE he = heap [k];

  for (;;)
    {
      int p = HPARENT (k);

      if (UPHEAP_DONE (p, k) || ANHE_at (heap [p]) <= ANHE_at (he))
        break;

      heap [k] = heap [p];
      ev_active (ANHE_w (heap [k])) = k;
      k = p;
    }

  heap [k] = he;
  ev_active (ANHE_w (he)) = k;
}

/* move an element suitably so it is in a correct place */
inline_size void
adjustheap (ANHE *heap, int N, int k)
{
  /* <= on the parent means upheap is also taken for equal timestamps;
   * otherwise the element can only need to move down */
  if (k > HEAP0 && ANHE_at (heap [k]) <= ANHE_at (heap [HPARENT (k)]))
    upheap (heap, k);
  else
    downheap (heap, N, k);
}

/* rebuild the heap: this function is used only once and executed rarely */
inline_size void
reheap (ANHE *heap, int N)
{
  int i;

  /* we don't use floyds algorithm, upheap is simpler and is more cache-efficient */
  /* also, this is easy to implement and correct for both 2-heaps and 4-heaps */
  for (i = 0; i < N; ++i)
    upheap (heap, i + HEAP0);
}
  2000. /*****************************************************************************/
/* associate signal watchers to a signal signal */
typedef struct
{
  EV_ATOMIC_T pending; /* set asynchronously from the signal handler */
#if EV_MULTIPLICITY
  EV_P;                /* the loop this signal is routed to */
#endif
  WL head;             /* list of ev_signal watchers for this signal */
} ANSIG;

/* indexed by signum - 1 (signal numbers start at 1) */
static ANSIG signals [EV_NSIG - 1];
  2011. /*****************************************************************************/
  2012. #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
/* create the self-pipe (or eventfd) used to wake the loop from signal
 * handlers and other threads, and start the internal pipe watcher;
 * no-op if the pipe watcher is already active */
static void noinline ecb_cold
evpipe_init (EV_P)
{
  if (!ev_is_active (&pipe_w))
    {
      int fds [2];

# if EV_USE_EVENTFD
      /* prefer a single eventfd over a pipe pair; fds [0] stays -1 then */
      fds [0] = -1;
      fds [1] = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC);
      if (fds [1] < 0 && errno == EINVAL)
        fds [1] = eventfd (0, 0); /* retry without flags for old kernels */

      if (fds [1] < 0)
# endif
        {
          while (pipe (fds))
            ev_syserr ("(libev) error creating signal/async pipe");

          fd_intern (fds [0]);
        }

      evpipe [0] = fds [0];

      if (evpipe [1] < 0)
        evpipe [1] = fds [1]; /* first call, set write fd */
      else
        {
          /* on subsequent calls, do not change evpipe [1] */
          /* so that evpipe_write can always rely on its value. */
          /* this branch does not do anything sensible on windows, */
          /* so must not be executed on windows */

          dup2 (fds [1], evpipe [1]);
          close (fds [1]);
        }

      fd_intern (evpipe [1]);

      /* with eventfd, evpipe [0] is -1 and the single fd is read and written */
      ev_io_set (&pipe_w, evpipe [0] < 0 ? evpipe [1] : evpipe [0], EV_READ);
      ev_io_start (EV_A_ &pipe_w);
      ev_unref (EV_A); /* watcher should not keep loop alive */
    }
}
/* wake up the event loop by writing to the self-pipe/eventfd, unless the
 * given flag says a wakeup is already pending; async-signal-safe and
 * callable from other threads — the fence/flag ordering below is load-bearing */
inline_speed void
evpipe_write (EV_P_ EV_ATOMIC_T *flag)
{
  ECB_MEMORY_FENCE; /* push out the write before this function was called, acquire flag */

  if (expect_true (*flag))
    return; /* somebody already signalled this condition */

  *flag = 1;
  ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */

  /* record the intent to write; the loop only wants an actual write while
   * it is blocked in the backend (pipe_write_wanted) */
  pipe_write_skipped = 1;

  ECB_MEMORY_FENCE; /* make sure pipe_write_skipped is visible before we check pipe_write_wanted */

  if (pipe_write_wanted)
    {
      int old_errno;

      pipe_write_skipped = 0;
      ECB_MEMORY_FENCE_RELEASE;

      old_errno = errno; /* save errno because write will clobber it */

#if EV_USE_EVENTFD
      if (evpipe [0] < 0)
        {
          uint64_t counter = 1;
          write (evpipe [1], &counter, sizeof (uint64_t));
        }
      else
#endif
        {
#ifdef _WIN32
          WSABUF buf;
          DWORD sent;
          /* the byte written is a dummy; its value is irrelevant.
           * NOTE(review): &buf is a WSABUF*, assigned to the char* member
           * without a cast — relies on an implicit pointer conversion */
          buf.buf = &buf;
          buf.len = 1;
          WSASend (EV_FD_TO_WIN32_HANDLE (evpipe [1]), &buf, 1, &sent, 0, 0, 0);
#else
          /* write any single byte; the address of evpipe [1] is just a
           * convenient readable byte */
          write (evpipe [1], &(evpipe [1]), 1);
#endif
        }

      errno = old_errno;
    }
}
/* called whenever the libev signal pipe */
/* got some events (signal, async) */
static void
pipecb (EV_P_ ev_io *iow, int revents)
{
  int i;

  if (revents & EV_READ)
    {
      /* drain the wakeup fd so it doesn't stay readable */
#if EV_USE_EVENTFD
      if (evpipe [0] < 0)
        {
          uint64_t counter;
          read (evpipe [1], &counter, sizeof (uint64_t));
        }
      else
#endif
        {
          char dummy[4];
#ifdef _WIN32
          WSABUF buf;
          DWORD recvd;
          DWORD flags = 0;
          buf.buf = dummy;
          buf.len = sizeof (dummy);
          WSARecv (EV_FD_TO_WIN32_HANDLE (evpipe [0]), &buf, 1, &recvd, &flags, 0, 0);
#else
          /* see the pipe_write_skipped comment below: a few bytes suffice */
          read (evpipe [0], &dummy, sizeof (dummy));
#endif
        }
    }

  /* any wakeup requested while we were already here has been serviced */
  pipe_write_skipped = 0;

  ECB_MEMORY_FENCE; /* push out skipped, acquire flags */

#if EV_SIGNAL_ENABLE
  if (sig_pending)
    {
      sig_pending = 0;
      ECB_MEMORY_FENCE;

      /* deliver every signal whose handler flagged it since the last run */
      for (i = EV_NSIG - 1; i--; )
        if (expect_false (signals [i].pending))
          ev_feed_signal_event (EV_A_ i + 1);
    }
#endif

#if EV_ASYNC_ENABLE
  if (async_pending)
    {
      async_pending = 0;
      ECB_MEMORY_FENCE;

      /* deliver every async watcher whose sent flag was raised */
      for (i = asynccnt; i--; )
        if (asyncs [i]->sent)
          {
            asyncs [i]->sent = 0;
            ECB_MEMORY_FENCE_RELEASE;
            ev_feed_event (EV_A_ asyncs [i], EV_ASYNC);
          }
    }
#endif
}
  2144. /*****************************************************************************/
  2145. void
  2146. ev_feed_signal (int signum) EV_THROW
  2147. {
  2148. #if EV_MULTIPLICITY
  2149. EV_P;
  2150. ECB_MEMORY_FENCE_ACQUIRE;
  2151. EV_A = signals [signum - 1].loop;
  2152. if (!EV_A)
  2153. return;
  2154. #endif
  2155. signals [signum - 1].pending = 1;
  2156. evpipe_write (EV_A_ &sig_pending);
  2157. }
/* the actual signal handler installed by libev: forwards to ev_feed_signal */
static void
ev_sighandler (int signum)
{
#ifdef _WIN32
  /* windows resets the handler to SIG_DFL on delivery, so re-install it */
  signal (signum, ev_sighandler);
#endif

  ev_feed_signal (signum);
}
/* deliver EV_SIGNAL to every watcher registered for signum on this loop;
 * out-of-range signals and signals belonging to another loop are ignored */
void noinline
ev_feed_signal_event (EV_P_ int signum) EV_THROW
{
  WL w;

  if (expect_false (signum <= 0 || signum >= EV_NSIG))
    return;

  --signum; /* signals [] is indexed by signum - 1 */

#if EV_MULTIPLICITY
  /* it is permissible to try to feed a signal to the wrong loop */
  /* or, likely more useful, feeding a signal nobody is waiting for */

  if (expect_false (signals [signum].loop != EV_A))
    return;
#endif

  signals [signum].pending = 0;
  ECB_MEMORY_FENCE_RELEASE;

  for (w = signals [signum].head; w; w = w->next)
    ev_feed_event (EV_A_ (W)w, EV_SIGNAL);
}
#if EV_USE_SIGNALFD
/* io watcher callback for the signalfd: read queued siginfo records and
 * feed each as a signal event; loops until a short read indicates the
 * fd is drained */
static void
sigfdcb (EV_P_ ev_io *iow, int revents)
{
  struct signalfd_siginfo si[2], *sip; /* these structs are big */

  for (;;)
    {
      ssize_t res = read (sigfd, si, sizeof (si));

      /* not ISO-C, as res might be -1, but works with SuS */
      for (sip = si; (char *)sip < (char *)si + res; ++sip)
        ev_feed_signal_event (EV_A_ sip->ssi_signo);

      if (res < (ssize_t)sizeof (si))
        break;
    }
}
#endif
  2200. #endif
  2201. /*****************************************************************************/
  2202. #if EV_CHILD_ENABLE
/* hash of child watchers by pid, plus the internal SIGCHLD watcher */
static WL childs [EV_PID_HASHSIZE];
static ev_signal childev;

#ifndef WIFCONTINUED
# define WIFCONTINUED(status) 0
#endif

/* handle a single child status event */
inline_speed void
child_reap (EV_P_ int chain, int pid, int status)
{
  ev_child *w;
  int traced = WIFSTOPPED (status) || WIFCONTINUED (status);

  /* walk the hash chain; a watcher with pid == 0 matches any child */
  for (w = (ev_child *)childs [chain & ((EV_PID_HASHSIZE) - 1)]; w; w = (ev_child *)((WL)w)->next)
    {
      if ((w->pid == pid || !w->pid)
          && (!traced || (w->flags & 1)))
        {
          ev_set_priority (w, EV_MAXPRI); /* need to do it *now*, this *must* be the same prio as the signal watcher itself */
          w->rpid    = pid;
          w->rstatus = status;
          ev_feed_event (EV_A_ (W)w, EV_CHILD);
        }
    }
}

#ifndef WCONTINUED
# define WCONTINUED 0
#endif

/* called on sigchld etc., calls waitpid */
static void
childcb (EV_P_ ev_signal *sw, int revents)
{
  int pid, status;

  /* some systems define WCONTINUED but then fail to support it (linux 2.4) */
  if (0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED)))
    if (!WCONTINUED
        || errno != EINVAL
        || 0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED)))
      return;

  /* make sure we are called again until all children have been reaped */
  /* we need to do it this way so that the callback gets called before we continue */
  ev_feed_event (EV_A_ (W)sw, EV_SIGNAL);

  child_reap (EV_A_ pid, pid, status);
  if ((EV_PID_HASHSIZE) > 1)
    child_reap (EV_A_ 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */
}
  2247. #endif
  2248. /*****************************************************************************/
  2249. #if EV_USE_IOCP
  2250. # include "ev_iocp.c"
  2251. #endif
  2252. #if EV_USE_PORT
  2253. # include "ev_port.c"
  2254. #endif
  2255. #if EV_USE_KQUEUE
  2256. # include "ev_kqueue.c"
  2257. #endif
  2258. #if EV_USE_EPOLL
  2259. # include "ev_epoll.c"
  2260. #endif
  2261. #if EV_USE_POLL
  2262. # include "ev_poll.c"
  2263. #endif
  2264. #if EV_USE_SELECT
  2265. # include "ev_select.c"
  2266. #endif
/* return the compile-time major version of this libev */
int ecb_cold
ev_version_major (void) EV_THROW
{
  return EV_VERSION_MAJOR;
}

/* return the compile-time minor version of this libev */
int ecb_cold
ev_version_minor (void) EV_THROW
{
  return EV_VERSION_MINOR;
}
/* return true if we are running with elevated privileges and should ignore env variables */
int inline_size ecb_cold
enable_secure (void)
{
#ifdef _WIN32
  return 0;
#else
  /* setuid/setgid detection via real-vs-effective id mismatch;
   * NOTE(review): does not catch all privilege-elevation cases
   * (e.g. capabilities) — matches the upstream heuristic */
  return getuid () != geteuid ()
      || getgid () != getegid ();
#endif
}
  2288. unsigned int ecb_cold
  2289. ev_supported_backends (void) EV_THROW
  2290. {
  2291. unsigned int flags = 0;
  2292. if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
  2293. if (EV_USE_KQUEUE) flags |= EVBACKEND_KQUEUE;
  2294. if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
  2295. if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
  2296. if (EV_USE_SELECT) flags |= EVBACKEND_SELECT;
  2297. return flags;
  2298. }
/* return the supported backends minus those known to be broken on the
 * build platform; used as the default backend set by loop_init */
unsigned int ecb_cold
ev_recommended_backends (void) EV_THROW
{
  unsigned int flags = ev_supported_backends ();

#if !defined(__NetBSD__) && !defined(__FreeBSD__)
  /* kqueue is borked on everything but netbsd apparently */
  /* it usually doesn't work correctly on anything but sockets and pipes */
  flags &= ~EVBACKEND_KQUEUE;
#endif
#ifdef __APPLE__
  /* only select works correctly on that "unix-certified" platform */
  flags &= ~EVBACKEND_KQUEUE; /* horribly broken, even for sockets */
  flags &= ~EVBACKEND_POLL;   /* poll is based on kqueue from 10.5 onwards */
#endif
#ifdef __FreeBSD__
  flags &= ~EVBACKEND_POLL;   /* poll return value is unusable (http://forums.freebsd.org/archive/index.php/t-10270.html) */
#endif

  return flags;
}
  2318. unsigned int ecb_cold
  2319. ev_embeddable_backends (void) EV_THROW
  2320. {
  2321. int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT;
  2322. /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
  2323. if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
  2324. flags &= ~EVBACKEND_EPOLL;
  2325. return flags;
  2326. }
/* return the EVBACKEND_* flag of the backend this loop is using
 * (0 if the loop was never initialised) */
unsigned int
ev_backend (EV_P) EV_THROW
{
  return backend;
}
#if EV_FEATURE_API
/* number of loop iterations performed so far */
unsigned int
ev_iteration (EV_P) EV_THROW
{
  return loop_count;
}

/* current recursion depth of ev_run invocations */
unsigned int
ev_depth (EV_P) EV_THROW
{
  return loop_depth;
}

/* set how long the loop may additionally block to collect io events */
void
ev_set_io_collect_interval (EV_P_ ev_tstamp interval) EV_THROW
{
  io_blocktime = interval;
}

/* set how long the loop may additionally block to collect timeouts */
void
ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval) EV_THROW
{
  timeout_blocktime = interval;
}

/* attach an opaque user pointer to the loop */
void
ev_set_userdata (EV_P_ void *data) EV_THROW
{
  userdata = data;
}

/* retrieve the opaque user pointer set via ev_set_userdata */
void *
ev_userdata (EV_P) EV_THROW
{
  return userdata;
}

/* override the callback used to invoke pending watchers */
void
ev_set_invoke_pending_cb (EV_P_ ev_loop_callback invoke_pending_cb) EV_THROW
{
  invoke_cb = invoke_pending_cb;
}

/* install release/acquire hooks called around blocking waits
 * (see EV_RELEASE_CB / EV_ACQUIRE_CB) */
void
ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_THROW, void (*acquire)(EV_P) EV_THROW) EV_THROW
{
  release_cb = release;
  acquire_cb = acquire;
}
#endif
/* initialise a loop structure, must be zero-initialised */
static void noinline ecb_cold
loop_init (EV_P_ unsigned int flags) EV_THROW
{
  /* a nonzero backend means the loop is already initialised */
  if (!backend)
    {
      origflags = flags;

#if EV_USE_REALTIME
      /* probe once whether CLOCK_REALTIME is usable */
      if (!have_realtime)
        {
          struct timespec ts;

          if (!clock_gettime (CLOCK_REALTIME, &ts))
            have_realtime = 1;
        }
#endif

#if EV_USE_MONOTONIC
      /* probe once whether CLOCK_MONOTONIC is usable */
      if (!have_monotonic)
        {
          struct timespec ts;

          if (!clock_gettime (CLOCK_MONOTONIC, &ts))
            have_monotonic = 1;
        }
#endif

      /* pid check not overridable via env */
#ifndef _WIN32
      if (flags & EVFLAG_FORKCHECK)
        curpid = getpid ();
#endif

      /* LIBEV_FLAGS may override flags, but not for set[ug]id programs */
      if (!(flags & EVFLAG_NOENV)
          && !enable_secure ()
          && getenv ("LIBEV_FLAGS"))
        flags = atoi (getenv ("LIBEV_FLAGS"));

      /* establish the initial time base and the realtime/monotonic offset */
      ev_rt_now          = ev_time ();
      mn_now             = get_clock ();
      now_floor          = mn_now;
      rtmn_diff          = ev_rt_now - mn_now;
#if EV_FEATURE_API
      invoke_cb          = ev_invoke_pending;
#endif

      io_blocktime       = 0.;
      timeout_blocktime  = 0.;
      backend            = 0;
      backend_fd         = -1;
      sig_pending        = 0;
#if EV_ASYNC_ENABLE
      async_pending      = 0;
#endif
      pipe_write_skipped = 0;
      pipe_write_wanted  = 0;
      evpipe [0]         = -1;
      evpipe [1]         = -1;
#if EV_USE_INOTIFY
      fs_fd              = flags & EVFLAG_NOINOTIFY ? -1 : -2;
#endif
#if EV_USE_SIGNALFD
      sigfd              = flags & EVFLAG_SIGNALFD  ? -2 : -1;
#endif

      if (!(flags & EVBACKEND_MASK))
        flags |= ev_recommended_backends ();

      /* try backends in order of preference until one initialises */
#if EV_USE_IOCP
      if (!backend && (flags & EVBACKEND_IOCP  )) backend = iocp_init   (EV_A_ flags);
#endif
#if EV_USE_PORT
      if (!backend && (flags & EVBACKEND_PORT  )) backend = port_init   (EV_A_ flags);
#endif
#if EV_USE_KQUEUE
      if (!backend && (flags & EVBACKEND_KQUEUE)) backend = kqueue_init (EV_A_ flags);
#endif
#if EV_USE_EPOLL
      if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init  (EV_A_ flags);
#endif
#if EV_USE_POLL
      if (!backend && (flags & EVBACKEND_POLL  )) backend = poll_init   (EV_A_ flags);
#endif
#if EV_USE_SELECT
      if (!backend && (flags & EVBACKEND_SELECT)) backend = select_init (EV_A_ flags);
#endif

      ev_prepare_init (&pending_w, pendingcb);

#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
      ev_init (&pipe_w, pipecb);
      ev_set_priority (&pipe_w, EV_MAXPRI);
#endif
    }
}
/* free up a loop structure */
void ecb_cold
ev_loop_destroy (EV_P)
{
  int i;

#if EV_MULTIPLICITY
  /* mimic free (0) */
  if (!EV_A)
    return;
#endif

#if EV_CLEANUP_ENABLE
  /* queue cleanup watchers (and execute them) */
  if (expect_false (cleanupcnt))
    {
      queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP);
      EV_INVOKE_PENDING;
    }
#endif

#if EV_CHILD_ENABLE
  /* the default loop owns the internal SIGCHLD watcher; undo its ev_unref */
  if (ev_is_default_loop (EV_A) && ev_is_active (&childev))
    {
      ev_ref (EV_A); /* child watcher */
      ev_signal_stop (EV_A_ &childev);
    }
#endif

  if (ev_is_active (&pipe_w))
    {
      /*ev_ref (EV_A);*/
      /*ev_io_stop (EV_A_ &pipe_w);*/

      if (evpipe [0] >= 0) EV_WIN32_CLOSE_FD (evpipe [0]);
      if (evpipe [1] >= 0) EV_WIN32_CLOSE_FD (evpipe [1]);
    }

#if EV_USE_SIGNALFD
  if (ev_is_active (&sigfd_w))
    close (sigfd);
#endif

#if EV_USE_INOTIFY
  if (fs_fd >= 0)
    close (fs_fd);
#endif

  if (backend_fd >= 0)
    close (backend_fd);

  /* tear down whichever backend this loop was using */
#if EV_USE_IOCP
  if (backend == EVBACKEND_IOCP  ) iocp_destroy   (EV_A);
#endif
#if EV_USE_PORT
  if (backend == EVBACKEND_PORT  ) port_destroy   (EV_A);
#endif
#if EV_USE_KQUEUE
  if (backend == EVBACKEND_KQUEUE) kqueue_destroy (EV_A);
#endif
#if EV_USE_EPOLL
  if (backend == EVBACKEND_EPOLL ) epoll_destroy  (EV_A);
#endif
#if EV_USE_POLL
  if (backend == EVBACKEND_POLL  ) poll_destroy   (EV_A);
#endif
#if EV_USE_SELECT
  if (backend == EVBACKEND_SELECT) select_destroy (EV_A);
#endif

  /* release the per-priority pending (and idle) arrays */
  for (i = NUMPRI; i--; )
    {
      array_free (pending, [i]);
#if EV_IDLE_ENABLE
      array_free (idle, [i]);
#endif
    }

  ev_free (anfds); anfds = 0; anfdmax = 0;

  /* have to use the microsoft-never-gets-it-right macro */
  array_free (rfeed, EMPTY);
  array_free (fdchange, EMPTY);
  array_free (timer, EMPTY);
#if EV_PERIODIC_ENABLE
  array_free (periodic, EMPTY);
#endif
#if EV_FORK_ENABLE
  array_free (fork, EMPTY);
#endif
#if EV_CLEANUP_ENABLE
  array_free (cleanup, EMPTY);
#endif
  array_free (prepare, EMPTY);
  array_free (check, EMPTY);
#if EV_ASYNC_ENABLE
  array_free (async, EMPTY);
#endif

  backend = 0;

#if EV_MULTIPLICITY
  if (ev_is_default_loop (EV_A))
#endif
    ev_default_loop_ptr = 0;
#if EV_MULTIPLICITY
  else
    /* non-default loops were heap-allocated by ev_loop_new */
    ev_free (EV_A);
#endif
}
  2555. #if EV_USE_INOTIFY
  2556. inline_size void infy_fork (EV_P);
  2557. #endif
/* re-establish kernel state after a fork: backends whose kernel handles do
 * not survive fork() recreate them, inotify state is rebuilt, and the
 * internal signal/async wakeup pipe is recreated (unless postfork == 2,
 * which requests a fork without pipe recreation). */
inline_size void
loop_fork (EV_P)
{
#if EV_USE_PORT
  if (backend == EVBACKEND_PORT  ) port_fork   (EV_A);
#endif
#if EV_USE_KQUEUE
  if (backend == EVBACKEND_KQUEUE) kqueue_fork (EV_A);
#endif
#if EV_USE_EPOLL
  if (backend == EVBACKEND_EPOLL ) epoll_fork  (EV_A);
#endif
#if EV_USE_INOTIFY
  infy_fork (EV_A);
#endif

#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
  if (ev_is_active (&pipe_w) && postfork != 2)
    {
      /* pipe_write_wanted must be false now, so modifying fd vars should be safe */

      ev_ref (EV_A);
      ev_io_stop (EV_A_ &pipe_w);

      if (evpipe [0] >= 0)
        EV_WIN32_CLOSE_FD (evpipe [0]);

      evpipe_init (EV_A);
      /* iterate over everything, in case we missed something before */
      ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
    }
#endif

  postfork = 0;
}
#if EV_MULTIPLICITY

/* allocate and initialise a new (non-default) event loop.
 * Returns the loop on success, or 0 when no backend could be initialised
 * for the given flags (the allocation is freed in that case).
 * NOTE(review): ev_malloc's result is used unchecked here — presumably the
 * configured allocator aborts on failure; confirm against ev_realloc. */
struct ev_loop * ecb_cold
ev_loop_new (unsigned int flags) EV_THROW
{
  EV_P = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop));

  memset (EV_A, 0, sizeof (struct ev_loop));
  loop_init (EV_A_ flags);

  if (ev_backend (EV_A))
    return EV_A;

  ev_free (EV_A);
  return 0;
}

#endif /* multiplicity */
#if EV_VERIFY
/* internal consistency checks, compiled in only when EV_VERIFY is enabled */

/* check that a watcher has a legal priority and, if it is pending, that
 * the corresponding pending-queue slot points back at it */
static void noinline ecb_cold
verify_watcher (EV_P_ W w)
{
  assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI));

  if (w->pending)
    assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w));
}

/* verify the invariants of a timer/periodic heap: each element stores its
 * own heap index, parents sort before children, and the cached timestamp
 * matches the watcher's actual ev_at */
static void noinline ecb_cold
verify_heap (EV_P_ ANHE *heap, int N)
{
  int i;

  for (i = HEAP0; i < N + HEAP0; ++i)
    {
      assert (("libev: active index mismatch in heap", ev_active (ANHE_w (heap [i])) == i));
      assert (("libev: heap condition violated", i == HEAP0 || ANHE_at (heap [HPARENT (i)]) <= ANHE_at (heap [i])));
      assert (("libev: heap at cache mismatch", ANHE_at (heap [i]) == ev_at (ANHE_w (heap [i]))));

      verify_watcher (EV_A_ (W)ANHE_w (heap [i]));
    }
}

/* verify a dense watcher array: each watcher's active field must hold its
 * 1-based index into the array */
static void noinline ecb_cold
array_verify (EV_P_ W *ws, int cnt)
{
  while (cnt--)
    {
      assert (("libev: active index mismatch", ev_active (ws [cnt]) == cnt + 1));
      verify_watcher (EV_A_ ws [cnt]);
    }
}
#endif
#if EV_FEATURE_API
/* run the full set of EV_VERIFY consistency checks over every data
 * structure in the loop; compiles to a no-op unless EV_VERIFY is set */
void ecb_cold
ev_verify (EV_P) EV_THROW
{
#if EV_VERIFY
  int i;
  WL w, w2;

  assert (activecnt >= -1);

  assert (fdchangemax >= fdchangecnt);
  for (i = 0; i < fdchangecnt; ++i)
    assert (("libev: negative fd in fdchanges", fdchanges [i] >= 0));

  assert (anfdmax >= 0);
  for (i = 0; i < anfdmax; ++i)
    {
      int j = 0;

      for (w = w2 = anfds [i].head; w; w = w->next)
        {
          verify_watcher (EV_A_ (W)w);

          /* tortoise-and-hare cycle detection: advance w2 every second
           * iteration; if the list contains a loop, w catches up with w2 */
          if (j++ & 1)
            {
              assert (("libev: io watcher list contains a loop", w != w2));
              w2 = w2->next;
            }

          assert (("libev: inactive fd watcher on anfd list", ev_active (w) == 1));
          assert (("libev: fd mismatch between watcher and anfd", ((ev_io *)w)->fd == i));
        }
    }

  assert (timermax >= timercnt);
  verify_heap (EV_A_ timers, timercnt);

#if EV_PERIODIC_ENABLE
  assert (periodicmax >= periodiccnt);
  verify_heap (EV_A_ periodics, periodiccnt);
#endif

  for (i = NUMPRI; i--; )
    {
      assert (pendingmax [i] >= pendingcnt [i]);
#if EV_IDLE_ENABLE
      assert (idleall >= 0);
      assert (idlemax [i] >= idlecnt [i]);
      array_verify (EV_A_ (W *)idles [i], idlecnt [i]);
#endif
    }

#if EV_FORK_ENABLE
  assert (forkmax >= forkcnt);
  array_verify (EV_A_ (W *)forks, forkcnt);
#endif

#if EV_CLEANUP_ENABLE
  assert (cleanupmax >= cleanupcnt);
  array_verify (EV_A_ (W *)cleanups, cleanupcnt);
#endif

#if EV_ASYNC_ENABLE
  assert (asyncmax >= asynccnt);
  array_verify (EV_A_ (W *)asyncs, asynccnt);
#endif

#if EV_PREPARE_ENABLE
  assert (preparemax >= preparecnt);
  array_verify (EV_A_ (W *)prepares, preparecnt);
#endif

#if EV_CHECK_ENABLE
  assert (checkmax >= checkcnt);
  array_verify (EV_A_ (W *)checks, checkcnt);
#endif

/* dead code kept as a reminder: child/signal structures are not verified */
# if 0
#if EV_CHILD_ENABLE
  for (w = (ev_child *)childs [chain & ((EV_PID_HASHSIZE) - 1)]; w; w = (ev_child *)((WL)w)->next)
  for (signum = EV_NSIG; signum--; ) if (signals [signum].pending)
#endif
# endif
#endif
}
#endif
#if EV_MULTIPLICITY
struct ev_loop * ecb_cold
#else
int
#endif
/* return the default loop, lazily initialising it on first call.
 * With EV_MULTIPLICITY this returns a loop pointer, otherwise a boolean
 * int. Returns 0 when backend initialisation fails. The SIGCHLD watcher
 * is installed here because child watchers only work on the default loop. */
ev_default_loop (unsigned int flags) EV_THROW
{
  if (!ev_default_loop_ptr)
    {
#if EV_MULTIPLICITY
      EV_P = ev_default_loop_ptr = &default_loop_struct;
#else
      ev_default_loop_ptr = 1;
#endif

      loop_init (EV_A_ flags);

      if (ev_backend (EV_A))
        {
#if EV_CHILD_ENABLE
          ev_signal_init (&childev, childcb, SIGCHLD);
          ev_set_priority (&childev, EV_MAXPRI);
          ev_signal_start (EV_A_ &childev);
          ev_unref (EV_A); /* child watcher should not keep loop alive */
#endif
        }
      else
        ev_default_loop_ptr = 0;
    }

  return ev_default_loop_ptr;
}
/* mark the loop as forked; the actual reinitialisation is deferred to the
 * next ev_run iteration (see loop_fork) */
void
ev_loop_fork (EV_P) EV_THROW
{
  postfork = 1;
}

/*****************************************************************************/

/* invoke a watcher callback directly with the given revents */
void
ev_invoke (EV_P_ void *w, int revents)
{
  EV_CB_INVOKE ((W)w, revents);
}
  2742. unsigned int
  2743. ev_pending_count (EV_P) EV_THROW
  2744. {
  2745. int pri;
  2746. unsigned int count = 0;
  2747. for (pri = NUMPRI; pri--; )
  2748. count += pendingcnt [pri];
  2749. return count;
  2750. }
/* invoke all pending watchers, highest priority first, draining each band
 * completely before moving to the next. pendingpri is loop state rather
 * than a local because callbacks may queue new, higher-priority events
 * while we run — NOTE(review): presumably feed paths reset it; confirm. */
void noinline
ev_invoke_pending (EV_P)
{
  pendingpri = NUMPRI;

  while (pendingpri) /* pendingpri possibly gets modified in the inner loop */
    {
      --pendingpri;

      while (pendingcnt [pendingpri])
        {
          /* pop the last entry of this band and clear its pending flag
           * before invoking, so the callback may re-arm the watcher */
          ANPENDING *p = pendings [pendingpri] + --pendingcnt [pendingpri];

          p->w->pending = 0;
          EV_CB_INVOKE (p->w, p->events);
          EV_FREQUENT_CHECK;
        }
    }
}
#if EV_IDLE_ENABLE
/* make idle watchers pending. this handles the "call-idle */
/* only when higher priorities are idle" logic */
inline_size void
idle_reify (EV_P)
{
  if (expect_false (idleall))
    {
      int pri;

      /* scan from the highest priority down: any pending event at or
       * above a band suppresses that band's idle watchers */
      for (pri = NUMPRI; pri--; )
        {
          if (pendingcnt [pri])
            break;

          if (idlecnt [pri])
            {
              queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE);
              break;
            }
        }
    }
}
#endif
/* make timers pending: pop every expired timer off the heap, rescheduling
 * repeating timers and stopping one-shot ones; expired watchers are fed in
 * reverse and then delivered in expiry order via feed_reverse_done */
inline_size void
timers_reify (EV_P)
{
  EV_FREQUENT_CHECK;

  if (timercnt && ANHE_at (timers [HEAP0]) < mn_now)
    {
      do
        {
          ev_timer *w = (ev_timer *)ANHE_w (timers [HEAP0]);

          /*assert (("libev: inactive timer on timer heap detected", ev_is_active (w)));*/

          /* first reschedule or stop timer */
          if (w->repeat)
            {
              ev_at (w) += w->repeat;
              /* clamp into the present so a slow callback cannot cause a
               * burst of back-to-back expirations */
              if (ev_at (w) < mn_now)
                ev_at (w) = mn_now;

              assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.));

              ANHE_at_cache (timers [HEAP0]);
              downheap (timers, timercnt, HEAP0);
            }
          else
            ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */

          EV_FREQUENT_CHECK;
          feed_reverse (EV_A_ (W)w);
        }
      while (timercnt && ANHE_at (timers [HEAP0]) < mn_now);

      feed_reverse_done (EV_A_ EV_TIMER);
    }
}
#if EV_PERIODIC_ENABLE

/* compute the next trigger time of an interval periodic: the smallest
 * offset + k*interval that lies strictly in the future. The interval is
 * clamped to MIN_INTERVAL so the floor-based formula stays numerically
 * meaningful at current timestamp magnitudes. */
static void noinline
periodic_recalc (EV_P_ ev_periodic *w)
{
  ev_tstamp interval = w->interval > MIN_INTERVAL ? w->interval : MIN_INTERVAL;
  ev_tstamp at = w->offset + interval * ev_floor ((ev_rt_now - w->offset) / interval);

  /* the above almost always errs on the low side */
  while (at <= ev_rt_now)
    {
      ev_tstamp nat = at + w->interval;

      /* when resolution fails us, we use ev_rt_now */
      /* (nat == at means the addition was absorbed by fp rounding) */
      if (expect_false (nat == at))
        {
          at = ev_rt_now;
          break;
        }

      at = nat;
    }

  ev_at (w) = at;
}
/* make periodics pending: pop every periodic whose absolute time has
 * passed, rescheduling via the user callback or the interval formula,
 * or stopping one-shot periodics; delivery order is restored with
 * feed_reverse/feed_reverse_done as in timers_reify */
inline_size void
periodics_reify (EV_P)
{
  EV_FREQUENT_CHECK;

  while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now)
    {
      do
        {
          ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]);

          /*assert (("libev: inactive timer on periodic heap detected", ev_is_active (w)));*/

          /* first reschedule or stop timer */
          if (w->reschedule_cb)
            {
              ev_at (w) = w->reschedule_cb (w, ev_rt_now);

              assert (("libev: ev_periodic reschedule callback returned time in the past", ev_at (w) >= ev_rt_now));

              ANHE_at_cache (periodics [HEAP0]);
              downheap (periodics, periodiccnt, HEAP0);
            }
          else if (w->interval)
            {
              periodic_recalc (EV_A_ w);
              ANHE_at_cache (periodics [HEAP0]);
              downheap (periodics, periodiccnt, HEAP0);
            }
          else
            ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */

          EV_FREQUENT_CHECK;
          feed_reverse (EV_A_ (W)w);
        }
      while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now);

      feed_reverse_done (EV_A_ EV_PERIODIC);
    }
}
/* simply recalculate all periodics */
/* TODO: maybe ensure that at least one event happens when jumping forward? */
/* called after a detected realtime clock jump: every periodic's trigger
 * time is recomputed against the new ev_rt_now, then the heap is rebuilt
 * wholesale since individual ordering may have changed arbitrarily */
static void noinline ecb_cold
periodics_reschedule (EV_P)
{
  int i;

  /* adjust periodics after time jump */
  for (i = HEAP0; i < periodiccnt + HEAP0; ++i)
    {
      ev_periodic *w = (ev_periodic *)ANHE_w (periodics [i]);

      if (w->reschedule_cb)
        ev_at (w) = w->reschedule_cb (w, ev_rt_now);
      else if (w->interval)
        periodic_recalc (EV_A_ w);

      ANHE_at_cache (periodics [i]);
    }

  reheap (periodics, periodiccnt);
}
#endif
/* adjust all timers by a given offset */
/* adding the same offset to every element preserves heap order, so no
 * reheap is required — only the cached timestamps need refreshing */
static void noinline ecb_cold
timers_reschedule (EV_P_ ev_tstamp adjust)
{
  int i;

  for (i = 0; i < timercnt; ++i)
    {
      ANHE *he = timers + i + HEAP0;
      ANHE_w (*he)->at += adjust;
      ANHE_at_cache (*he);
    }
}
/* fetch new monotonic and realtime times from the kernel */
/* also detect if there was a timejump, and act accordingly */
/* max_block bounds how long we may have been asleep; it is used to decide
 * whether a realtime change counts as a jump when no monotonic clock is
 * available */
inline_speed void
time_update (EV_P_ ev_tstamp max_block)
{
#if EV_USE_MONOTONIC
  if (expect_true (have_monotonic))
    {
      int i;
      ev_tstamp odiff = rtmn_diff;

      mn_now = get_clock ();

      /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
      /* interpolate in the meantime */
      if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
        {
          ev_rt_now = rtmn_diff + mn_now;
          return;
        }

      now_floor = mn_now;
      ev_rt_now = ev_time ();

      /* loop a few times, before making important decisions.
       * on the choice of "4": one iteration isn't enough,
       * in case we get preempted during the calls to
       * ev_time and get_clock. a second call is almost guaranteed
       * to succeed in that case, though. and looping a few more times
       * doesn't hurt either as we only do this on time-jumps or
       * in the unlikely event of having been preempted here.
       */
      for (i = 4; --i; )
        {
          ev_tstamp diff;
          rtmn_diff = ev_rt_now - mn_now;

          diff = odiff - rtmn_diff;

          if (expect_true ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP))
            return; /* all is well */

          ev_rt_now = ev_time ();
          mn_now    = get_clock ();
          now_floor = mn_now;
        }

      /* a realtime jump was detected: only periodics care, since they are
       * scheduled in absolute time */
      /* no timer adjustment, as the monotonic clock doesn't jump */
      /* timers_reschedule (EV_A_ rtmn_diff - odiff) */
# if EV_PERIODIC_ENABLE
      periodics_reschedule (EV_A);
# endif
    }
  else
#endif
    {
      ev_rt_now = ev_time ();

      /* without a monotonic clock, mn_now shadows realtime; a jump is
       * detected when realtime moved backwards or further forward than
       * our maximum sleep plus tolerance */
      if (expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP))
        {
          /* adjust timers. this is easy, as the offset is the same for all of them */
          timers_reschedule (EV_A_ ev_rt_now - mn_now);
#if EV_PERIODIC_ENABLE
          periodics_reschedule (EV_A);
#endif
        }

      mn_now = ev_rt_now;
    }
}
/* the main event loop: repeatedly queue fork/prepare watchers, reify fd
 * changes, compute how long we may sleep, poll the backend, then deliver
 * timer/periodic/idle/check events — until no watcher keeps the loop
 * alive, ev_break is called, or EVRUN_ONCE/EVRUN_NOWAIT ends the pass.
 * Returns the current activecnt (nonzero if watchers remain active). */
int
ev_run (EV_P_ int flags)
{
#if EV_FEATURE_API
  ++loop_depth;
#endif

  assert (("libev: ev_loop recursion during release detected", loop_done != EVBREAK_RECURSE));

  loop_done = EVBREAK_CANCEL;

  EV_INVOKE_PENDING; /* in case we recurse, ensure ordering stays nice and clean */

  do
    {
#if EV_VERIFY >= 2
      ev_verify (EV_A);
#endif

#ifndef _WIN32
      /* detect a fork that happened without ev_loop_fork being called */
      if (expect_false (curpid)) /* penalise the forking check even more */
        if (expect_false (getpid () != curpid))
          {
            curpid = getpid ();
            postfork = 1;
          }
#endif

#if EV_FORK_ENABLE
      /* we might have forked, so queue fork handlers */
      if (expect_false (postfork))
        if (forkcnt)
          {
            queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
            EV_INVOKE_PENDING;
          }
#endif

#if EV_PREPARE_ENABLE
      /* queue prepare watchers (and execute them) */
      if (expect_false (preparecnt))
        {
          queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
          EV_INVOKE_PENDING;
        }
#endif

      if (expect_false (loop_done))
        break;

      /* we might have forked, so reify kernel state if necessary */
      if (expect_false (postfork))
        loop_fork (EV_A);

      /* update fd-related kernel structures */
      fd_reify (EV_A);

      /* calculate blocking time */
      {
        ev_tstamp waittime  = 0.;
        ev_tstamp sleeptime = 0.;

        /* remember old timestamp for io_blocktime calculation */
        ev_tstamp prev_mn_now = mn_now;

        /* update time to cancel out callback processing overhead */
        time_update (EV_A_ 1e100);

        /* from now on, we want a pipe-wake-up */
        pipe_write_wanted = 1;

        ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */

        /* only block if nothing forbids it: no NOWAIT flag, no idle
         * watchers, at least one active watcher, no skipped pipe write */
        if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
          {
            waittime = MAX_BLOCKTIME;

            /* shrink the wait to the earliest timer/periodic deadline */
            if (timercnt)
              {
                ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now;
                if (waittime > to) waittime = to;
              }

#if EV_PERIODIC_ENABLE
            if (periodiccnt)
              {
                ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now;
                if (waittime > to) waittime = to;
              }
#endif

            /* don't let timeouts decrease the waittime below timeout_blocktime */
            if (expect_false (waittime < timeout_blocktime))
              waittime = timeout_blocktime;

            /* at this point, we NEED to wait, so we have to ensure */
            /* to pass a minimum nonzero value to the backend */
            if (expect_false (waittime < backend_mintime))
              waittime = backend_mintime;

            /* extra check because io_blocktime is commonly 0 */
            if (expect_false (io_blocktime))
              {
                sleeptime = io_blocktime - (mn_now - prev_mn_now);

                if (sleeptime > waittime - backend_mintime)
                  sleeptime = waittime - backend_mintime;

                if (expect_true (sleeptime > 0.))
                  {
                    ev_sleep (sleeptime);
                    waittime -= sleeptime;
                  }
              }
          }

#if EV_FEATURE_API
        ++loop_count;
#endif
        assert ((loop_done = EVBREAK_RECURSE, 1)); /* assert for side effect */
        backend_poll (EV_A_ waittime);
        assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */

        pipe_write_wanted = 0; /* just an optimisation, no fence needed */

        ECB_MEMORY_FENCE_ACQUIRE;
        if (pipe_write_skipped)
          {
            assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w)));
            ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
          }

        /* update ev_rt_now, do magic */
        time_update (EV_A_ waittime + sleeptime);
      }

      /* queue pending timers and reschedule them */
      timers_reify (EV_A); /* relative timers called last */
#if EV_PERIODIC_ENABLE
      periodics_reify (EV_A); /* absolute timers called first */
#endif

#if EV_IDLE_ENABLE
      /* queue idle watchers unless other events are pending */
      idle_reify (EV_A);
#endif

#if EV_CHECK_ENABLE
      /* queue check watchers, to be executed first */
      if (expect_false (checkcnt))
        queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
#endif

      EV_INVOKE_PENDING;
    }
  while (expect_true (
    activecnt
    && !loop_done
    && !(flags & (EVRUN_ONCE | EVRUN_NOWAIT))
  ));

  if (loop_done == EVBREAK_ONE)
    loop_done = EVBREAK_CANCEL;

#if EV_FEATURE_API
  --loop_depth;
#endif

  return activecnt;
}
/* request that ev_run stop; how is EVBREAK_ONE (current ev_run) or
 * EVBREAK_ALL (all nested ev_run calls) */
void
ev_break (EV_P_ int how) EV_THROW
{
  loop_done = how;
}

/* increment the loop's activity refcount (keeps ev_run alive) */
void
ev_ref (EV_P) EV_THROW
{
  ++activecnt;
}

/* decrement the loop's activity refcount */
void
ev_unref (EV_P) EV_THROW
{
  --activecnt;
}

/* force an immediate update of ev_rt_now / mn_now from the kernel clocks */
void
ev_now_update (EV_P) EV_THROW
{
  time_update (EV_A_ 1e100);
}

/* record the current time before a suspend; paired with ev_resume */
void
ev_suspend (EV_P) EV_THROW
{
  ev_now_update (EV_A);
}

/* after a suspend: shift all relative timers by the time spent suspended
 * so they fire relative to wall progress, and recompute periodics */
void
ev_resume (EV_P) EV_THROW
{
  ev_tstamp mn_prev = mn_now;

  ev_now_update (EV_A);
  timers_reschedule (EV_A_ mn_now - mn_prev);
#if EV_PERIODIC_ENABLE
  /* TODO: really do this? */
  periodics_reschedule (EV_A);
#endif
}
  3136. /*****************************************************************************/
  3137. /* singly-linked list management, used when the expected list length is short */
  3138. inline_size void
  3139. wlist_add (WL *head, WL elem)
  3140. {
  3141. elem->next = *head;
  3142. *head = elem;
  3143. }
  3144. inline_size void
  3145. wlist_del (WL *head, WL elem)
  3146. {
  3147. while (*head)
  3148. {
  3149. if (expect_true (*head == elem))
  3150. {
  3151. *head = elem->next;
  3152. break;
  3153. }
  3154. head = &(*head)->next;
  3155. }
  3156. }
/* internal, faster, version of ev_clear_pending */
/* neutralise the watcher's queued event by redirecting its pending slot
 * to the dummy pending_w watcher instead of compacting the queue */
inline_speed void
clear_pending (EV_P_ W w)
{
  if (w->pending)
    {
      pendings [ABSPRI (w)][w->pending - 1].w = (W)&pending_w;
      w->pending = 0;
    }
}
  3167. int
  3168. ev_clear_pending (EV_P_ void *w) EV_THROW
  3169. {
  3170. W w_ = (W)w;
  3171. int pending = w_->pending;
  3172. if (expect_true (pending))
  3173. {
  3174. ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1;
  3175. p->w = (W)&pending_w;
  3176. w_->pending = 0;
  3177. return p->events;
  3178. }
  3179. else
  3180. return 0;
  3181. }
  3182. inline_size void
  3183. pri_adjust (EV_P_ W w)
  3184. {
  3185. int pri = ev_priority (w);
  3186. pri = pri < EV_MINPRI ? EV_MINPRI : pri;
  3187. pri = pri > EV_MAXPRI ? EV_MAXPRI : pri;
  3188. ev_set_priority (w, pri);
  3189. }
/* common bookkeeping when activating any watcher: clamp its priority,
 * record the (type-specific) active value, and keep the loop alive */
inline_speed void
ev_start (EV_P_ W w, int active)
{
  pri_adjust (EV_A_ w);
  w->active = active;
  ev_ref (EV_A);
}

/* common bookkeeping when deactivating any watcher */
inline_size void
ev_stop (EV_P_ W w)
{
  ev_unref (EV_A);
  w->active = 0;
}
  3203. /*****************************************************************************/
  3204. void noinline
  3205. ev_io_start (EV_P_ ev_io *w) EV_THROW
  3206. {
  3207. int fd = w->fd;
  3208. if (expect_false (ev_is_active (w)))
  3209. return;
  3210. assert (("libev: ev_io_start called with negative fd", fd >= 0));
  3211. assert (("libev: ev_io_start called with illegal event mask", !(w->events & ~(EV__IOFDSET | EV_READ | EV_WRITE))));
  3212. EV_FREQUENT_CHECK;
  3213. ev_start (EV_A_ (W)w, 1);
  3214. array_needsize (ANFD, anfds, anfdmax, fd + 1, array_init_zero);
  3215. wlist_add (&anfds[fd].head, (WL)w);
  3216. /* common bug, apparently */
  3217. assert (("libev: ev_io_start called with corrupted watcher", ((WL)w)->next != (WL)w));
  3218. fd_change (EV_A_ fd, w->events & EV__IOFDSET | EV_ANFD_REIFY);
  3219. w->events &= ~EV__IOFDSET;
  3220. EV_FREQUENT_CHECK;
  3221. }
/* deactivate an io watcher: drop any pending event, unlink it from the
 * fd's watcher list and schedule a kernel-state update for that fd */
void noinline
ev_io_stop (EV_P_ ev_io *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));

  EV_FREQUENT_CHECK;

  wlist_del (&anfds[w->fd].head, (WL)w);
  ev_stop (EV_A_ (W)w);

  fd_change (EV_A_ w->fd, EV_ANFD_REIFY);

  EV_FREQUENT_CHECK;
}
/* activate a relative timer: convert its 'after' value to an absolute
 * monotonic deadline and insert it into the timer heap; the watcher's
 * active field doubles as its heap index */
void noinline
ev_timer_start (EV_P_ ev_timer *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  ev_at (w) += mn_now;

  assert (("libev: ev_timer_start called with negative timer repeat value", w->repeat >= 0.));

  EV_FREQUENT_CHECK;

  ++timercnt;
  ev_start (EV_A_ (W)w, timercnt + HEAP0 - 1);
  array_needsize (ANHE, timers, timermax, ev_active (w) + 1, EMPTY2);
  ANHE_w (timers [ev_active (w)]) = (WT)w;
  ANHE_at_cache (timers [ev_active (w)]);
  upheap (timers, ev_active (w));

  EV_FREQUENT_CHECK;

  /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/
}
/* deactivate a timer: remove it from the heap by moving the last element
 * into its slot and re-sifting, then convert its deadline back to a
 * relative value so the watcher can be restarted */
void noinline
ev_timer_stop (EV_P_ ev_timer *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    assert (("libev: internal timer heap corruption", ANHE_w (timers [active]) == (WT)w));

    --timercnt;

    if (expect_true (active < timercnt + HEAP0))
      {
        timers [active] = timers [timercnt + HEAP0];
        adjustheap (timers, timercnt, active);
      }
  }

  ev_at (w) -= mn_now;

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
/* restart a repeating timer: if active, push its deadline repeat seconds
 * into the future (or stop it when repeat is 0); if inactive with a
 * nonzero repeat, start it fresh. Any pending event is discarded. */
void noinline
ev_timer_again (EV_P_ ev_timer *w) EV_THROW
{
  EV_FREQUENT_CHECK;

  clear_pending (EV_A_ (W)w);

  if (ev_is_active (w))
    {
      if (w->repeat)
        {
          ev_at (w) = mn_now + w->repeat;
          ANHE_at_cache (timers [ev_active (w)]);
          adjustheap (timers, timercnt, ev_active (w));
        }
      else
        ev_timer_stop (EV_A_ w);
    }
  else if (w->repeat)
    {
      ev_at (w) = w->repeat;
      ev_timer_start (EV_A_ w);
    }

  EV_FREQUENT_CHECK;
}

/* time until the timer fires: for an active timer ev_at is absolute, so
 * subtract mn_now; for an inactive one ev_at already holds the relative
 * value */
ev_tstamp
ev_timer_remaining (EV_P_ ev_timer *w) EV_THROW
{
  return ev_at (w) - (ev_is_active (w) ? mn_now : 0.);
}
#if EV_PERIODIC_ENABLE

/* activate a periodic watcher: compute its first absolute trigger time
 * (via the reschedule callback, the interval formula, or the bare offset)
 * and insert it into the periodic heap */
void noinline
ev_periodic_start (EV_P_ ev_periodic *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  if (w->reschedule_cb)
    ev_at (w) = w->reschedule_cb (w, ev_rt_now);
  else if (w->interval)
    {
      assert (("libev: ev_periodic_start called with negative interval value", w->interval >= 0.));
      periodic_recalc (EV_A_ w);
    }
  else
    ev_at (w) = w->offset;

  EV_FREQUENT_CHECK;

  ++periodiccnt;
  ev_start (EV_A_ (W)w, periodiccnt + HEAP0 - 1);
  array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1, EMPTY2);
  ANHE_w (periodics [ev_active (w)]) = (WT)w;
  ANHE_at_cache (periodics [ev_active (w)]);
  upheap (periodics, ev_active (w));

  EV_FREQUENT_CHECK;

  /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/
}
/* deactivate a periodic watcher: remove it from the heap by replacing its
 * slot with the last element and re-sifting */
void noinline
ev_periodic_stop (EV_P_ ev_periodic *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    assert (("libev: internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w));

    --periodiccnt;

    if (expect_true (active < periodiccnt + HEAP0))
      {
        periodics [active] = periodics [periodiccnt + HEAP0];
        adjustheap (periodics, periodiccnt, active);
      }
  }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
/* recompute a periodic's schedule by a simple stop/start cycle */
void noinline
ev_periodic_again (EV_P_ ev_periodic *w) EV_THROW
{
  /* TODO: use adjustheap and recalculation */
  ev_periodic_stop (EV_A_ w);
  ev_periodic_start (EV_A_ w);
}
#endif
  3354. #ifndef SA_RESTART
  3355. # define SA_RESTART 0
  3356. #endif
#if EV_SIGNAL_ENABLE

/* activate a signal watcher: bind the signal to this loop, prefer a
 * signalfd when available, otherwise install a sigaction/signal handler
 * for the first watcher on a given signal */
void noinline
ev_signal_start (EV_P_ ev_signal *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG));

#if EV_MULTIPLICITY
  assert (("libev: a signal must not be attached to two different loops",
           !signals [w->signum - 1].loop || signals [w->signum - 1].loop == loop));

  signals [w->signum - 1].loop = EV_A;
  ECB_MEMORY_FENCE_RELEASE;
#endif

  EV_FREQUENT_CHECK;

#if EV_USE_SIGNALFD
  /* sigfd == -2 means "not tried yet": lazily create the signalfd */
  if (sigfd == -2)
    {
      sigfd = signalfd (-1, &sigfd_set, SFD_NONBLOCK | SFD_CLOEXEC);
      if (sigfd < 0 && errno == EINVAL)
        sigfd = signalfd (-1, &sigfd_set, 0); /* retry without flags */

      if (sigfd >= 0)
        {
          fd_intern (sigfd); /* doing it twice will not hurt */

          sigemptyset (&sigfd_set);

          ev_io_init (&sigfd_w, sigfdcb, sigfd, EV_READ);
          ev_set_priority (&sigfd_w, EV_MAXPRI);
          ev_io_start (EV_A_ &sigfd_w);
          ev_unref (EV_A); /* signalfd watcher should not keep loop alive */
        }
    }

  if (sigfd >= 0)
    {
      /* TODO: check .head */
      /* route this signal through the signalfd: block normal delivery */
      sigaddset (&sigfd_set, w->signum);
      sigprocmask (SIG_BLOCK, &sigfd_set, 0);

      signalfd (sigfd, &sigfd_set, 0);
    }
#endif

  ev_start (EV_A_ (W)w, 1);
  wlist_add (&signals [w->signum - 1].head, (WL)w);

  /* only the first watcher for a signal installs the handler */
  if (!((WL)w)->next)
# if EV_USE_SIGNALFD
    if (sigfd < 0) /*TODO*/
# endif
      {
# ifdef _WIN32
        evpipe_init (EV_A);

        signal (w->signum, ev_sighandler);
# else
        struct sigaction sa;

        evpipe_init (EV_A);

        sa.sa_handler = ev_sighandler;
        sigfillset (&sa.sa_mask);
        sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */
        sigaction (w->signum, &sa, 0);

        if (origflags & EVFLAG_NOSIGMASK)
          {
            sigemptyset (&sa.sa_mask);
            sigaddset (&sa.sa_mask, w->signum);
            sigprocmask (SIG_UNBLOCK, &sa.sa_mask, 0);
          }
#endif
      }

  EV_FREQUENT_CHECK;
}
/* deactivate a signal watcher: when the last watcher for the signal goes
 * away, detach the signal from the loop and restore default handling
 * (removing it from the signalfd set and unblocking it, or SIG_DFL) */
void noinline
ev_signal_stop (EV_P_ ev_signal *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  wlist_del (&signals [w->signum - 1].head, (WL)w);
  ev_stop (EV_A_ (W)w);

  if (!signals [w->signum - 1].head)
    {
#if EV_MULTIPLICITY
      signals [w->signum - 1].loop = 0; /* unattach from signal */
#endif
#if EV_USE_SIGNALFD
      if (sigfd >= 0)
        {
          /* stop routing this signal through the signalfd and unblock it */
          sigset_t ss;

          sigemptyset (&ss);
          sigaddset (&ss, w->signum);
          sigdelset (&sigfd_set, w->signum);

          signalfd (sigfd, &sigfd_set, 0);
          sigprocmask (SIG_UNBLOCK, &ss, 0);
        }
      else
#endif
        signal (w->signum, SIG_DFL);
    }

  EV_FREQUENT_CHECK;
}
#endif
#if EV_CHILD_ENABLE

/* activate a child watcher: hang it off the pid hash bucket; the actual
 * SIGCHLD handling is done by the default loop's childev watcher */
void
ev_child_start (EV_P_ ev_child *w) EV_THROW
{
#if EV_MULTIPLICITY
  assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
#endif
  if (expect_false (ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, 1);
  wlist_add (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w);

  EV_FREQUENT_CHECK;
}

/* deactivate a child watcher: unlink it from its pid hash bucket */
void
ev_child_stop (EV_P_ ev_child *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  wlist_del (&childs [w->pid & ((EV_PID_HASHSIZE) - 1)], (WL)w);
  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
#endif
  3479. #if EV_STAT_ENABLE
  3480. # ifdef _WIN32
  3481. # undef lstat
  3482. # define lstat(a,b) _stati64 (a,b)
  3483. # endif
  3484. #define DEF_STAT_INTERVAL 5.0074891
  3485. #define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */
  3486. #define MIN_STAT_INTERVAL 0.1074891
  3487. static void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents);
  3488. #if EV_USE_INOTIFY
  3489. /* the * 2 is to allow for alignment padding, which for some reason is >> 8 */
  3490. # define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX)
/* arm (or re-arm) inotify-based change detection for a stat watcher,
 * and configure the fallback polling timer depending on how well
 * inotify can cover the path */
static void noinline
infy_add (EV_P_ ev_stat *w)
{
  w->wd = inotify_add_watch (fs_fd, w->path,
                             IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY
                             | IN_CREATE | IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO
                             | IN_DONT_FOLLOW | IN_MASK_ADD);

  if (w->wd >= 0)
    {
      struct statfs sfs;

      /* now local changes will be tracked by inotify, but remote changes won't */
      /* unless the filesystem is known to be local, we therefore still poll */
      /* also do poll on <2.6.25, but with normal frequency */
      if (!fs_2625)
        w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;
      else if (!statfs (w->path, &sfs)
               && (sfs.f_type == 0x1373 /* devfs */
                   || sfs.f_type == 0x4006 /* fat */
                   || sfs.f_type == 0x4d44 /* msdos */
                   || sfs.f_type == 0xEF53 /* ext2/3 */
                   || sfs.f_type == 0x72b6 /* jffs2 */
                   || sfs.f_type == 0x858458f6 /* ramfs */
                   || sfs.f_type == 0x5346544e /* ntfs */
                   || sfs.f_type == 0x3153464a /* jfs */
                   || sfs.f_type == 0x9123683e /* btrfs */
                   || sfs.f_type == 0x52654973 /* reiser3 */
                   || sfs.f_type == 0x01021994 /* tmpfs */
                   || sfs.f_type == 0x58465342 /* xfs */))
        w->timer.repeat = 0.; /* filesystem is local, kernel new enough */
      else
        w->timer.repeat = w->interval ? w->interval : NFS_STAT_INTERVAL; /* remote, use reduced frequency */
    }
  else
    {
      /* can't use inotify, continue to stat */
      w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;

      /* if path is not there, monitor some parent directory for speedup hints */
      /* note that exceeding the hardcoded path limit is not a correctness issue, */
      /* but an efficiency issue only */
      if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096)
        {
          char path [4096];
          strcpy (path, w->path);

          do
            {
              /* on EACCES watch attribute changes of the parent itself,
               * otherwise watch for the missing entry appearing in it */
              int mask = IN_MASK_ADD | IN_DELETE_SELF | IN_MOVE_SELF
                         | (errno == EACCES ? IN_ATTRIB : IN_CREATE | IN_MOVED_TO);

              char *pend = strrchr (path, '/');

              /* stop climbing when there is no parent component left */
              if (!pend || pend == path)
                break;

              *pend = 0;
              w->wd = inotify_add_watch (fs_fd, path, mask);
            }
          while (w->wd < 0 && (errno == ENOENT || errno == EACCES));
        }
    }

  if (w->wd >= 0)
    wlist_add (&fs_hash [w->wd & ((EV_INOTIFY_HASHSIZE) - 1)].head, (WL)w);

  /* now re-arm timer, if required; the ref/unref pair keeps the loop's
   * refcount unchanged regardless of what ev_timer_again does */
  if (ev_is_active (&w->timer)) ev_ref (EV_A);
  ev_timer_again (EV_A_ &w->timer);
  if (ev_is_active (&w->timer)) ev_unref (EV_A);
}
/* tear down the inotify side of a stat watcher: unhash it and drop
 * the kernel watch */
static void noinline
infy_del (EV_P_ ev_stat *w)
{
  int slot;
  int wd = w->wd;

  if (wd < 0)
    return;

  /* mark the watcher as having no kernel watch */
  w->wd = -2;
  slot = wd & ((EV_INOTIFY_HASHSIZE) - 1);
  wlist_del (&fs_hash [slot].head, (WL)w);

  /* remove this watcher, if others are watching it, they will rearm */
  inotify_rm_watch (fs_fd, wd);
}
/* dispatch one inotify event to all stat watchers registered under wd;
 * slot < 0 broadcasts across every hash slot, wd == -1 matches every
 * watcher within a slot */
static void noinline
infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev)
{
  if (slot < 0)
    /* overflow, need to check for all hash slots */
    for (slot = 0; slot < (EV_INOTIFY_HASHSIZE); ++slot)
      infy_wd (EV_A_ slot, wd, ev);
  else
    {
      WL w_;

      for (w_ = fs_hash [slot & ((EV_INOTIFY_HASHSIZE) - 1)].head; w_; )
        {
          ev_stat *w = (ev_stat *)w_;
          w_ = w_->next; /* lets us remove this watcher and all before it */

          if (w->wd == wd || wd == -1)
            {
              if (ev->mask & (IN_IGNORED | IN_UNMOUNT | IN_DELETE_SELF))
                {
                  /* the kernel watch is gone - unhash and register anew */
                  wlist_del (&fs_hash [slot & ((EV_INOTIFY_HASHSIZE) - 1)].head, (WL)w);
                  w->wd = -1;
                  infy_add (EV_A_ w); /* re-add, no matter what */
                }

              /* run the stat comparison immediately instead of waiting for the timer */
              stat_timer_cb (EV_A_ &w->timer, 0);
            }
        }
    }
}
/* ev_io callback on the inotify fd: read a batch of events and hand
 * each one to infy_wd */
static void
infy_cb (EV_P_ ev_io *w, int revents)
{
  char buf [EV_INOTIFY_BUFSIZE];
  int ofs;
  int len = read (fs_fd, buf, sizeof (buf)); /* on read error len < 0, loop is skipped */

  for (ofs = 0; ofs < len; )
    {
      struct inotify_event *ev = (struct inotify_event *)(buf + ofs);
      infy_wd (EV_A_ ev->wd, ev->wd, ev);
      /* events are variable-sized: fixed header plus ev->len name bytes */
      ofs += sizeof (struct inotify_event) + ev->len;
    }
}
  3607. inline_size void ecb_cold
  3608. ev_check_2625 (EV_P)
  3609. {
  3610. /* kernels < 2.6.25 are borked
  3611. * http://www.ussg.indiana.edu/hypermail/linux/kernel/0711.3/1208.html
  3612. */
  3613. if (ev_linux_version () < 0x020619)
  3614. return;
  3615. fs_2625 = 1;
  3616. }
  3617. inline_size int
  3618. infy_newfd (void)
  3619. {
  3620. #if defined IN_CLOEXEC && defined IN_NONBLOCK
  3621. int fd = inotify_init1 (IN_CLOEXEC | IN_NONBLOCK);
  3622. if (fd >= 0)
  3623. return fd;
  3624. #endif
  3625. return inotify_init ();
  3626. }
/* lazily create the per-loop inotify fd and start the internal io
 * watcher on it; fs_fd == -2 means "not attempted yet" */
inline_size void
infy_init (EV_P)
{
  if (fs_fd != -2)
    return;

  fs_fd = -1; /* mark as attempted, in case creation fails below */

  ev_check_2625 (EV_A);

  fs_fd = infy_newfd ();

  if (fs_fd >= 0)
    {
      fd_intern (fs_fd);
      ev_io_init (&fs_w, infy_cb, fs_fd, EV_READ);
      ev_set_priority (&fs_w, EV_MAXPRI);
      ev_io_start (EV_A_ &fs_w);
      ev_unref (EV_A); /* the internal watcher must not keep the loop alive */
    }
}
/* after a fork the inotify fd must not be shared with the parent:
 * reopen it and re-register every stat watcher, falling back to
 * timer polling if no new fd can be created */
inline_size void
infy_fork (EV_P)
{
  int slot;

  if (fs_fd < 0)
    return;

  ev_ref (EV_A); /* balance the unref done in infy_init before stopping */
  ev_io_stop (EV_A_ &fs_w);
  close (fs_fd);
  fs_fd = infy_newfd ();

  if (fs_fd >= 0)
    {
      fd_intern (fs_fd);
      ev_io_set (&fs_w, fs_fd, EV_READ);
      ev_io_start (EV_A_ &fs_w);
      ev_unref (EV_A);
    }

  for (slot = 0; slot < (EV_INOTIFY_HASHSIZE); ++slot)
    {
      WL w_ = fs_hash [slot].head;
      fs_hash [slot].head = 0; /* empty the chain; re-add rebuilds it */

      while (w_)
        {
          ev_stat *w = (ev_stat *)w_;
          w_ = w_->next; /* lets us add this watcher */

          w->wd = -1;

          if (fs_fd >= 0)
            infy_add (EV_A_ w); /* re-add, no matter what */
          else
            {
              /* no inotify any more - fall back to pure timer polling */
              w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;
              if (ev_is_active (&w->timer)) ev_ref (EV_A);
              ev_timer_again (EV_A_ &w->timer);
              if (ev_is_active (&w->timer)) ev_unref (EV_A);
            }
        }
    }
}
  3682. #endif
#ifdef _WIN32
# define EV_LSTAT(p,b) _stati64 (p, b)
#else
# define EV_LSTAT(p,b) lstat (p, b)
#endif

/* refresh w->attr with the current stat data of w->path; on failure
 * st_nlink is forced to 0, on success it is forced nonzero, so
 * st_nlink doubles as an "exists" flag for comparisons */
void
ev_stat_stat (EV_P_ ev_stat *w) EV_THROW
{
  if (lstat (w->path, &w->attr) < 0)
    w->attr.st_nlink = 0;
  else if (!w->attr.st_nlink)
    w->attr.st_nlink = 1;
}
/* compare the current stat data against the last snapshot and feed
 * EV_STAT on any difference; called from the polling timer and, with
 * revents == 0, directly from the inotify path */
static void noinline
stat_timer_cb (EV_P_ ev_timer *w_, int revents)
{
  /* recover the owning ev_stat from its embedded timer member */
  ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer));

  ev_statdata prev = w->attr;
  ev_stat_stat (EV_A_ w);

  /* memcmp doesn't work on netbsd, they.... do stuff to their struct stat */
  if (
    prev.st_dev != w->attr.st_dev
    || prev.st_ino != w->attr.st_ino
    || prev.st_mode != w->attr.st_mode
    || prev.st_nlink != w->attr.st_nlink
    || prev.st_uid != w->attr.st_uid
    || prev.st_gid != w->attr.st_gid
    || prev.st_rdev != w->attr.st_rdev
    || prev.st_size != w->attr.st_size
    || prev.st_atime != w->attr.st_atime
    || prev.st_mtime != w->attr.st_mtime
    || prev.st_ctime != w->attr.st_ctime
  ) {
      /* we only update w->prev on actual differences */
      /* in case we test more often than invoke the callback, */
      /* to ensure that prev is always different to attr */
      w->prev = prev;

#if EV_USE_INOTIFY
      if (fs_fd >= 0)
        {
          /* the path may have been replaced - refresh the kernel watch */
          infy_del (EV_A_ w);
          infy_add (EV_A_ w);
          ev_stat_stat (EV_A_ w); /* avoid race... */
        }
#endif

      ev_feed_event (EV_A_ w, EV_STAT);
    }
}
/* start a stat watcher: take the initial snapshot, clamp a too-small
 * interval, then use inotify when available and the polling timer
 * otherwise */
void
ev_stat_start (EV_P_ ev_stat *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  ev_stat_stat (EV_A_ w); /* establish the baseline attr */

  /* a nonzero interval below the minimum is raised to the minimum */
  if (w->interval < MIN_STAT_INTERVAL && w->interval)
    w->interval = MIN_STAT_INTERVAL;

  ev_timer_init (&w->timer, stat_timer_cb, 0., w->interval ? w->interval : DEF_STAT_INTERVAL);
  ev_set_priority (&w->timer, ev_priority (w));

#if EV_USE_INOTIFY
  infy_init (EV_A);

  if (fs_fd >= 0)
    infy_add (EV_A_ w);
  else
#endif
    {
      ev_timer_again (EV_A_ &w->timer);
      ev_unref (EV_A); /* the internal timer must not keep the loop alive */
    }

  ev_start (EV_A_ (W)w, 1);

  EV_FREQUENT_CHECK;
}
/* stop a stat watcher, tearing down its inotify watch and/or timer */
void
ev_stat_stop (EV_P_ ev_stat *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

#if EV_USE_INOTIFY
  infy_del (EV_A_ w);
#endif

  if (ev_is_active (&w->timer))
    {
      ev_ref (EV_A); /* balance the unref done when the timer was armed */
      ev_timer_stop (EV_A_ &w->timer);
    }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
  3772. #endif
#if EV_IDLE_ENABLE
/* start an idle watcher: one dense array per priority, with idleall
 * counting active idle watchers over all priorities */
void
ev_idle_start (EV_P_ ev_idle *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  /* normalise the priority before using ABSPRI as an array index */
  pri_adjust (EV_A_ (W)w);

  EV_FREQUENT_CHECK;

  {
    int active = ++idlecnt [ABSPRI (w)];

    ++idleall;
    ev_start (EV_A_ (W)w, active);

    array_needsize (ev_idle *, idles [ABSPRI (w)], idlemax [ABSPRI (w)], active, EMPTY2);
    idles [ABSPRI (w)][active - 1] = w;
  }

  EV_FREQUENT_CHECK;
}

/* stop an idle watcher: move the last watcher of the same priority
 * into its slot to keep the array dense */
void
ev_idle_stop (EV_P_ ev_idle *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    idles [ABSPRI (w)][active - 1] = idles [ABSPRI (w)][--idlecnt [ABSPRI (w)]];
    ev_active (idles [ABSPRI (w)][active - 1]) = active;

    ev_stop (EV_A_ (W)w);
    --idleall;
  }

  EV_FREQUENT_CHECK;
}
#endif
#if EV_PREPARE_ENABLE
/* start a prepare watcher: append it to the dense prepares array */
void
ev_prepare_start (EV_P_ ev_prepare *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, ++preparecnt);
  array_needsize (ev_prepare *, prepares, preparemax, preparecnt, EMPTY2);
  prepares [preparecnt - 1] = w;

  EV_FREQUENT_CHECK;
}

/* stop a prepare watcher: swap-with-last removal from the array */
void
ev_prepare_stop (EV_P_ ev_prepare *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    prepares [active - 1] = prepares [--preparecnt];
    ev_active (prepares [active - 1]) = active;
  }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
#endif
#if EV_CHECK_ENABLE
/* start a check watcher: append it to the dense checks array */
void
ev_check_start (EV_P_ ev_check *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, ++checkcnt);
  array_needsize (ev_check *, checks, checkmax, checkcnt, EMPTY2);
  checks [checkcnt - 1] = w;

  EV_FREQUENT_CHECK;
}

/* stop a check watcher: swap-with-last removal from the array */
void
ev_check_stop (EV_P_ ev_check *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    checks [active - 1] = checks [--checkcnt];
    ev_active (checks [active - 1]) = active;
  }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
#endif
#if EV_EMBED_ENABLE

/* run one non-blocking iteration of the embedded loop, processing
 * whatever events are currently pending there */
void noinline
ev_embed_sweep (EV_P_ ev_embed *w) EV_THROW
{
  ev_run (w->other, EVRUN_NOWAIT);
}
  3869. static void
  3870. embed_io_cb (EV_P_ ev_io *io, int revents)
  3871. {
  3872. ev_embed *w = (ev_embed *)(((char *)io) - offsetof (ev_embed, io));
  3873. if (ev_cb (w))
  3874. ev_feed_event (EV_A_ (W)w, EV_EMBED);
  3875. else
  3876. ev_run (w->other, EVRUN_NOWAIT);
  3877. }
/* before the outer loop blocks, flush pending fd changes of the
 * embedded loop and sweep it, so the embedded loop never blocks */
static void
embed_prepare_cb (EV_P_ ev_prepare *prepare, int revents)
{
  /* recover the owning ev_embed from its embedded prepare member */
  ev_embed *w = (ev_embed *)(((char *)prepare) - offsetof (ev_embed, prepare));

  {
    EV_P = w->other; /* declares a shadowing loop variable: switch to the embedded loop */

    while (fdchangecnt)
      {
        fd_reify (EV_A);
        ev_run (EV_A_ EVRUN_NOWAIT);
      }
  }
}
/* on fork: stop embedding, let the embedded loop process the fork,
 * then start embedding again from scratch */
static void
embed_fork_cb (EV_P_ ev_fork *fork_w, int revents)
{
  /* recover the owning ev_embed from its embedded fork member */
  ev_embed *w = (ev_embed *)(((char *)fork_w) - offsetof (ev_embed, fork));

  ev_embed_stop (EV_A_ w);

  {
    EV_P = w->other; /* declares a shadowing loop variable: switch to the embedded loop */

    ev_loop_fork (EV_A);
    ev_run (EV_A_ EVRUN_NOWAIT);
  }

  ev_embed_start (EV_A_ w);
}
#if 0
/* disabled code: idle callback that would stop its own idle watcher */
static void
embed_idle_cb (EV_P_ ev_idle *idle, int revents)
{
  ev_idle_stop (EV_A_ idle);
}
#endif
/* start embedding: watch the embedded loop's backend fd for readiness,
 * sweep it from a minimum-priority prepare watcher, and re-establish
 * everything after a fork */
void
ev_embed_start (EV_P_ ev_embed *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  {
    EV_P = w->other; /* declares a shadowing loop variable: inspect the embedded loop */
    assert (("libev: loop to be embedded is not embeddable", backend & ev_embeddable_backends ()));
    ev_io_init (&w->io, embed_io_cb, backend_fd, EV_READ);
  }

  EV_FREQUENT_CHECK;

  ev_set_priority (&w->io, ev_priority (w));
  ev_io_start (EV_A_ &w->io);

  ev_prepare_init (&w->prepare, embed_prepare_cb);
  ev_set_priority (&w->prepare, EV_MINPRI);
  ev_prepare_start (EV_A_ &w->prepare);

  ev_fork_init (&w->fork, embed_fork_cb);
  ev_fork_start (EV_A_ &w->fork);

  /*ev_idle_init (&w->idle, embed_idle_cb);*/

  ev_start (EV_A_ (W)w, 1);

  EV_FREQUENT_CHECK;
}
/* stop embedding: stop all three internal helper watchers, then the
 * embed watcher itself */
void
ev_embed_stop (EV_P_ ev_embed *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  ev_io_stop      (EV_A_ &w->io);
  ev_prepare_stop (EV_A_ &w->prepare);
  ev_fork_stop    (EV_A_ &w->fork);

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
  3945. #endif
#if EV_FORK_ENABLE
/* start a fork watcher: append it to the dense forks array */
void
ev_fork_start (EV_P_ ev_fork *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, ++forkcnt);
  array_needsize (ev_fork *, forks, forkmax, forkcnt, EMPTY2);
  forks [forkcnt - 1] = w;

  EV_FREQUENT_CHECK;
}

/* stop a fork watcher: swap-with-last removal from the array */
void
ev_fork_stop (EV_P_ ev_fork *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    forks [active - 1] = forks [--forkcnt];
    ev_active (forks [active - 1]) = active;
  }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
#endif
#if EV_CLEANUP_ENABLE
/* start a cleanup watcher; unlike other watchers it must not keep the
 * loop alive, so the loop refcount is dropped here */
void
ev_cleanup_start (EV_P_ ev_cleanup *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, ++cleanupcnt);
  array_needsize (ev_cleanup *, cleanups, cleanupmax, cleanupcnt, EMPTY2);
  cleanups [cleanupcnt - 1] = w;

  /* cleanup watchers should never keep a refcount on the loop */
  ev_unref (EV_A);

  EV_FREQUENT_CHECK;
}

/* stop a cleanup watcher, restoring the refcount dropped at start */
void
ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;
  ev_ref (EV_A);

  {
    int active = ev_active (w);

    cleanups [active - 1] = cleanups [--cleanupcnt];
    ev_active (cleanups [active - 1]) = active;
  }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
#endif
#if EV_ASYNC_ENABLE
/* start an async watcher: ensure the loop's wakeup mechanism exists
 * and append the watcher to the dense asyncs array */
void
ev_async_start (EV_P_ ev_async *w) EV_THROW
{
  if (expect_false (ev_is_active (w)))
    return;

  w->sent = 0; /* no wakeup queued yet */

  evpipe_init (EV_A); /* set up the internal wakeup pipe/eventfd */

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, ++asynccnt);
  array_needsize (ev_async *, asyncs, asyncmax, asynccnt, EMPTY2);
  asyncs [asynccnt - 1] = w;

  EV_FREQUENT_CHECK;
}

/* stop an async watcher: swap-with-last removal from the array */
void
ev_async_stop (EV_P_ ev_async *w) EV_THROW
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    asyncs [active - 1] = asyncs [--asynccnt];
    ev_active (asyncs [active - 1]) = active;
  }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}

/* wake the loop on behalf of w: mark it as sent, then poke the loop
 * through the internal wakeup pipe */
void
ev_async_send (EV_P_ ev_async *w) EV_THROW
{
  w->sent = 1;
  evpipe_write (EV_A_ &async_pending);
}
#endif
  4041. /*****************************************************************************/
/* state for ev_once: an io and a timer watcher race to invoke the
 * user callback exactly once, after which the struct is freed */
struct ev_once
{
  ev_io io;    /* started only when a valid fd was given */
  ev_timer to; /* started only when a non-negative timeout was given */
  void (*cb)(int revents, void *arg); /* user callback */
  void *arg;                          /* user data passed to cb */
};
/* common completion path for ev_once: stop both watchers and free the
 * state *before* invoking the user callback, so the callback can
 * safely start another ev_once */
static void
once_cb (EV_P_ struct ev_once *once, int revents)
{
  /* copy out cb/arg before freeing the containing struct */
  void (*cb)(int revents, void *arg) = once->cb;
  void *arg = once->arg;

  ev_io_stop    (EV_A_ &once->io);
  ev_timer_stop (EV_A_ &once->to);
  ev_free (once);

  cb (revents, arg);
}
  4059. static void
  4060. once_cb_io (EV_P_ ev_io *w, int revents)
  4061. {
  4062. struct ev_once *once = (struct ev_once *)(((char *)w) - offsetof (struct ev_once, io));
  4063. once_cb (EV_A_ once, revents | ev_clear_pending (EV_A_ &once->to));
  4064. }
  4065. static void
  4066. once_cb_to (EV_P_ ev_timer *w, int revents)
  4067. {
  4068. struct ev_once *once = (struct ev_once *)(((char *)w) - offsetof (struct ev_once, to));
  4069. once_cb (EV_A_ once, revents | ev_clear_pending (EV_A_ &once->io));
  4070. }
/* one-shot convenience: watch fd for events (if fd >= 0) and/or a
 * relative timeout (if timeout >= 0.), then call cb exactly once with
 * whatever fired and clean up automatically */
void
ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) EV_THROW
{
  struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once));

  if (expect_false (!once))
    {
      /* allocation failure is reported through the callback itself */
      cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMER, arg);
      return;
    }

  once->cb  = cb;
  once->arg = arg;

  ev_init (&once->io, once_cb_io);
  if (fd >= 0)
    {
      ev_io_set (&once->io, fd, events);
      ev_io_start (EV_A_ &once->io);
    }

  ev_init (&once->to, once_cb_to);
  if (timeout >= 0.)
    {
      ev_timer_set (&once->to, timeout, 0.);
      ev_timer_start (EV_A_ &once->to);
    }
}
  4095. /*****************************************************************************/
#if EV_WALK_ENABLE
/* invoke cb once for every watcher of the requested types; internal
 * watchers (the signal pipe, the inotify io watcher, and the helper
 * watchers owned by embed watchers) are skipped or reported under
 * their owning watcher's type instead */
void ecb_cold
ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) EV_THROW
{
  int i, j;
  ev_watcher_list *wl, *wn;

  if (types & (EV_IO | EV_EMBED))
    for (i = 0; i < anfdmax; ++i)
      for (wl = anfds [i].head; wl; )
        {
          wn = wl->next; /* cb may stop/remove wl */

#if EV_EMBED_ENABLE
          /* an io watcher embedded in an ev_embed is reported as EV_EMBED */
          if (ev_cb ((ev_io *)wl) == embed_io_cb)
            {
              if (types & EV_EMBED)
                cb (EV_A_ EV_EMBED, ((char *)wl) - offsetof (struct ev_embed, io));
            }
          else
#endif
#if EV_USE_INOTIFY
          if (ev_cb ((ev_io *)wl) == infy_cb)
            ; /* internal inotify watcher, never reported */
          else
#endif
          if ((ev_io *)wl != &pipe_w) /* skip the internal signal/async pipe */
            if (types & EV_IO)
              cb (EV_A_ EV_IO, wl);

          wl = wn;
        }

  if (types & (EV_TIMER | EV_STAT))
    for (i = timercnt + HEAP0; i-- > HEAP0; )
#if EV_STAT_ENABLE
      /*TODO: timer is not always active*/
      /* a timer embedded in an ev_stat is reported as EV_STAT */
      if (ev_cb ((ev_timer *)ANHE_w (timers [i])) == stat_timer_cb)
        {
          if (types & EV_STAT)
            cb (EV_A_ EV_STAT, ((char *)ANHE_w (timers [i])) - offsetof (struct ev_stat, timer));
        }
      else
#endif
      if (types & EV_TIMER)
        cb (EV_A_ EV_TIMER, ANHE_w (timers [i]));

#if EV_PERIODIC_ENABLE
  if (types & EV_PERIODIC)
    for (i = periodiccnt + HEAP0; i-- > HEAP0; )
      cb (EV_A_ EV_PERIODIC, ANHE_w (periodics [i]));
#endif

#if EV_IDLE_ENABLE
  if (types & EV_IDLE)
    for (j = NUMPRI; j--; )
      for (i = idlecnt [j]; i--; )
        cb (EV_A_ EV_IDLE, idles [j][i]);
#endif

#if EV_FORK_ENABLE
  if (types & EV_FORK)
    for (i = forkcnt; i--; )
      /* fork watchers owned by embed watchers are internal, skip them */
      if (ev_cb (forks [i]) != embed_fork_cb)
        cb (EV_A_ EV_FORK, forks [i]);
#endif

#if EV_ASYNC_ENABLE
  if (types & EV_ASYNC)
    for (i = asynccnt; i--; )
      cb (EV_A_ EV_ASYNC, asyncs [i]);
#endif

#if EV_PREPARE_ENABLE
  if (types & EV_PREPARE)
    for (i = preparecnt; i--; )
# if EV_EMBED_ENABLE
      /* prepare watchers owned by embed watchers are internal, skip them */
      if (ev_cb (prepares [i]) != embed_prepare_cb)
# endif
        cb (EV_A_ EV_PREPARE, prepares [i]);
#endif

#if EV_CHECK_ENABLE
  if (types & EV_CHECK)
    for (i = checkcnt; i--; )
      cb (EV_A_ EV_CHECK, checks [i]);
#endif

#if EV_SIGNAL_ENABLE
  if (types & EV_SIGNAL)
    for (i = 0; i < EV_NSIG - 1; ++i)
      for (wl = signals [i].head; wl; )
        {
          wn = wl->next; /* cb may stop/remove wl */
          cb (EV_A_ EV_SIGNAL, wl);
          wl = wn;
        }
#endif

#if EV_CHILD_ENABLE
  if (types & EV_CHILD)
    for (i = (EV_PID_HASHSIZE); i--; )
      for (wl = childs [i]; wl; )
        {
          wn = wl->next; /* cb may stop/remove wl */
          cb (EV_A_ EV_CHILD, wl);
          wl = wn;
        }
#endif
  /* EV_STAT  0x00001000 (stat data changed) - handled above via the timer heap */
  /* EV_EMBED 0x00010000 (embedded event loop needs sweep) - handled above via the io lists */
}
#endif
  4197. #if EV_MULTIPLICITY
  4198. #include "ev_wrap.h"
  4199. #endif