From dc542b5742da70098945177e530f9ac54653a419 Mon Sep 17 00:00:00 2001
From: Mike Frysinger
Date: Fri, 25 May 2007 18:34:46 +0000
Subject: Jean-Christian de Rivaz writes:

I suspect this code in the file uClibc/libc/sysdeps/linux/common/poll.c:

    tval.tv_nsec = (timeout % 1000) *1000;    <==== makes only usec!

Coming from milliseconds, this really needs a * 1000000 to make
nanoseconds. Without this, an 1100 millisecond timeout is converted
into a 1 second and 100 microsecond timeout! This can explain the
weird result of the test code.
---
 libc/sysdeps/linux/common/poll.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libc/sysdeps/linux/common/poll.c b/libc/sysdeps/linux/common/poll.c
index a8366cd27..4fc5a3267 100644
--- a/libc/sysdeps/linux/common/poll.c
+++ b/libc/sysdeps/linux/common/poll.c
@@ -36,7 +36,7 @@ int __libc_poll(struct pollfd *fds, nfds_t nfds, int timeout)
 	struct timespec *ts = NULL, tval;
 	if (timeout > 0) {
 		tval.tv_sec = timeout / 1000;
-		tval.tv_nsec = (timeout % 1000) *1000;
+		tval.tv_nsec = (timeout % 1000) * 1000000;
 		ts = &tval;
 	}
 	return ppoll(fds, nfds, ts, NULL);
--
cgit v1.2.3
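
For illustration, here is a minimal standalone sketch of the
millisecond-to-timespec conversion the patch corrects. It is not
uClibc code; the helper name ms_to_timespec is hypothetical and
chosen only to demonstrate the unit math (1 ms = 1,000,000 ns).

    /* Sketch only, not uClibc source: convert a poll()-style
     * millisecond timeout into a struct timespec as ppoll() expects.
     * The helper name ms_to_timespec is hypothetical. */
    #include <stdio.h>
    #include <time.h>

    static struct timespec ms_to_timespec(int timeout_ms)
    {
    	struct timespec ts;
    	ts.tv_sec = timeout_ms / 1000;	/* whole seconds */
    	/* 1 ms = 1000000 ns; multiplying by only 1000 (the old bug)
    	 * stores microseconds in a nanosecond field. */
    	ts.tv_nsec = (long)(timeout_ms % 1000) * 1000000;
    	return ts;
    }

    int main(void)
    {
    	struct timespec ts = ms_to_timespec(1100);
    	/* Prints "1 s, 100000000 ns", i.e. the intended 1.1 s;
    	 * the buggy code would have produced 1 s, 100000 ns. */
    	printf("%ld s, %ld ns\n", (long)ts.tv_sec, ts.tv_nsec);
    	return 0;
    }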