From 0c4fad8e9a13cefebb4c858f9f34b90748b3e680 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Wed, 26 Oct 2016 11:31:29 +0200 Subject: [PATCH 01/53] multiprocessing: always exit subprocess Exceptions during the execution of the target function in the subprocess caused the execution to continue and return from the start() function. --- multiprocessing/multiprocessing.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/multiprocessing/multiprocessing.py b/multiprocessing/multiprocessing.py index 9d68df1b3..0711f57b8 100644 --- a/multiprocessing/multiprocessing.py +++ b/multiprocessing/multiprocessing.py @@ -15,10 +15,12 @@ def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): def start(self): self.pid = os.fork() if not self.pid: - if self.r: - self.r.close() - self.target(*self.args, **self.kwargs) - os._exit(0) + try: + if self.r: + self.r.close() + self.target(*self.args, **self.kwargs) + finally: + os._exit(0) else: if self.w: self.w.close() From ea8e4ca16a2da45aea8f9326eb061e75205dab83 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Wed, 26 Oct 2016 11:32:30 +0200 Subject: [PATCH 02/53] os.linux: add package --- os.linux/metadata.txt | 4 ++ os.linux/os/linux/__init__.py | 41 +++++++++++++++ os.linux/os/linux/blkdev.py | 31 +++++++++++ os.linux/os/linux/evdev.py | 18 +++++++ os.linux/os/linux/i2c.py | 13 +++++ os.linux/os/linux/ioctl.py | 27 ++++++++++ os.linux/os/linux/mtd.py | 67 ++++++++++++++++++++++++ os.linux/os/linux/spidev.py | 17 ++++++ os.linux/os/linux/syslog.py | 99 +++++++++++++++++++++++++++++++++++ os.linux/setup.py | 17 ++++++ 10 files changed, 334 insertions(+) create mode 100644 os.linux/metadata.txt create mode 100644 os.linux/os/linux/__init__.py create mode 100644 os.linux/os/linux/blkdev.py create mode 100644 os.linux/os/linux/evdev.py create mode 100644 os.linux/os/linux/i2c.py create mode 100644 os.linux/os/linux/ioctl.py create mode 100644 os.linux/os/linux/mtd.py create mode 
100644 os.linux/os/linux/spidev.py create mode 100644 os.linux/os/linux/syslog.py create mode 100644 os.linux/setup.py diff --git a/os.linux/metadata.txt b/os.linux/metadata.txt new file mode 100644 index 000000000..59295ca73 --- /dev/null +++ b/os.linux/metadata.txt @@ -0,0 +1,4 @@ +srctype = micropython-lib +type = package +version = 0.0.1 +author = Delio Brignoli diff --git a/os.linux/os/linux/__init__.py b/os.linux/os/linux/__init__.py new file mode 100644 index 000000000..463c0c24b --- /dev/null +++ b/os.linux/os/linux/__init__.py @@ -0,0 +1,41 @@ + +import ffilib +import os + + +libc = ffilib.libc() +sleep = libc.func('I', 'sleep', 'I') +_mount = libc.func('i', 'mount', 'sssLs') +_umount = libc.func('i', 'umount', 's') +_setenv = libc.func('i', 'setenv', 'ssi') + + +def mount(source, target, fstype, flags = 0, opts = None): + e = _mount(source, target, fstype, flags, opts) + os.check_error(e) + + +def umount(target): + e = _umount(target) + os.check_error(e) + + +def execv(path, args = []): + assert args, '`args` argument cannot be empty' + _args = [path] + args + [None] + _execl = libc.func('i', 'execl', 's'*len(_args)) + e = _execl(*_args) + os.check_error(e) + + +def execvp(executable, args = []): + assert args, '`args` argument cannot be empty' + _args = [executable] + args + [None] + _execlp = libc.func('i', 'execlp', 's'*len(_args)) + e = _execlp(*_args) + os.check_error(e) + + +def setenv(name, value, overwrite = True): + e = _setenv(name, value, 1 if overwrite else 0) + os.check_error(e) diff --git a/os.linux/os/linux/blkdev.py b/os.linux/os/linux/blkdev.py new file mode 100644 index 000000000..78f368299 --- /dev/null +++ b/os.linux/os/linux/blkdev.py @@ -0,0 +1,31 @@ + +# buffer data from src_fobj (it could be the stdin pipe) before writing +# it out in blocksz bytes chunks +def block_copy(src_fobj, dst_fobj, byte_count = 0, blocksz=512, progress_func=None): + buf = bytearray(blocksz) + buf_view = memoryview(buf) + read_bytes = 0 + left_to_read = 
byte_count + total_written_bytes = 0 + while True: + sz = src_fobj.readinto(buf_view[read_bytes:]) + #print('readinto() -> {}, {}'.format(read_bytes, sz), file=sys.stderr) + # in blocking mode sz will be zero only on EOF + left_to_read -= sz + read_bytes += sz + if not sz or (byte_count != 0 and left_to_read <= 0): + cnt = read_bytes + if byte_count != 0 and left_to_read <= 0: + cnt += left_to_read + dst_fobj.write(buf_view[:cnt]) + total_written_bytes += cnt + if progress_func is not None: + progress_func(total_written_bytes) + break + if read_bytes == blocksz: + dst_fobj.write(buf_view) + #print('write({})'.format(read_bytes), file=sys.stderr) + total_written_bytes += read_bytes + read_bytes = 0 + if progress_func is not None: + progress_func(total_written_bytes) diff --git a/os.linux/os/linux/evdev.py b/os.linux/os/linux/evdev.py new file mode 100644 index 000000000..ddedf6a8b --- /dev/null +++ b/os.linux/os/linux/evdev.py @@ -0,0 +1,18 @@ + +import builtins +import os +from os.linux import ioctl + +DEV_PATH_TPL = '/dev/input/event{}' + +KEY_SELECT = 0x161 + +def EVIOCGKEY(len): + return ioctl._ioc(ioctl._IOC_READ, ord('E'), 0x18, len) + +def get_global_keystate(dev, byte_array): + r = ioctl.ioctl_p(dev.fileno(), EVIOCGKEY(len(byte_array)), byte_array) + os.check_error(r) + +def open(index=0): + return builtins.open(DEV_PATH_TPL.format(index), 'r+b') diff --git a/os.linux/os/linux/i2c.py b/os.linux/os/linux/i2c.py new file mode 100644 index 000000000..28aeed439 --- /dev/null +++ b/os.linux/os/linux/i2c.py @@ -0,0 +1,13 @@ + +import builtins +from os.linux import ioctl + +DEV_PATH_TPL = '/dev/i2c-{}' + +I2C_SLAVE = 0x0703 + +def set_slave_addr(dev_fd, addr): + ioctl.ioctl_l(dev_fd, I2C_SLAVE, addr) + +def open(index=0): + return builtins.open(DEV_PATH_TPL.format(index), 'r+b') diff --git a/os.linux/os/linux/ioctl.py b/os.linux/os/linux/ioctl.py new file mode 100644 index 000000000..2af183573 --- /dev/null +++ b/os.linux/os/linux/ioctl.py @@ -0,0 +1,27 @@ + 
+import ffilib + +libc = ffilib.libc() + +TIOCCONS = 0x541D + +_IOC_WRITE = 1 +_IOC_READ = 2 + +_IOC_NRBITS = 8 +_IOC_TYPEBITS = 8 +_IOC_SIZEBITS = 14 +_IOC_DIRBITS = 2 + +_IOC_NRSHIFT = 0 +_IOC_TYPESHIFT = (_IOC_NRSHIFT+_IOC_NRBITS) +_IOC_SIZESHIFT = (_IOC_TYPESHIFT+_IOC_TYPEBITS) +_IOC_DIRSHIFT = (_IOC_SIZESHIFT+_IOC_SIZEBITS) + +def _ioc(dir, type, num, size): + return ((num << _IOC_NRSHIFT) | (type << _IOC_TYPESHIFT) | + (size << _IOC_SIZESHIFT) | (dir << _IOC_DIRSHIFT)) + +ioctl_p = libc.func("i", "ioctl", "iip") +ioctl_l = libc.func("i", "ioctl", "iil") +del libc diff --git a/os.linux/os/linux/mtd.py b/os.linux/os/linux/mtd.py new file mode 100644 index 000000000..6c12a2fb5 --- /dev/null +++ b/os.linux/os/linux/mtd.py @@ -0,0 +1,67 @@ + +import os +from os.linux import ioctl +try: + import ustruct as struct +except: + import struct + + +# struct erase_info_user { +# __u32 start; +# __u32 length; +# }; + +MTD_NORFLASH = 3 + +def _mtd_ioc(dir, num, size): + return ioctl._ioc(dir, ord('M'), num, size) + +MEM_INFO_STRUCT = 'BIIIIIII' +_MEM_INFO_LEN = struct.calcsize(MEM_INFO_STRUCT) +ERASE_INFO_STRUCT = 'II' +_ERASE_INFO_LEN = struct.calcsize(ERASE_INFO_STRUCT) +MEMGETINFO = _mtd_ioc(ioctl._IOC_READ, 1, _MEM_INFO_LEN) +MEMERASE = _mtd_ioc(ioctl._IOC_WRITE, 2, _ERASE_INFO_LEN) +MEMUNLOCK = _mtd_ioc(ioctl._IOC_WRITE, 6, _ERASE_INFO_LEN) + + +def _pack_erase_info(start, length): + return struct.pack(ERASE_INFO_STRUCT, start, length) + + +def _check_offset(offset, eblock): + assert offset % eblock == 0, \ + 'offset {} is not a multiple of erase block {}'.format(offset, eblock) + + +def info(mtd_dev): + mem_info = bytearray(_MEM_INFO_LEN) + e = ioctl.ioctl_p(mtd_dev.fileno(), MEMGETINFO, mem_info) + os.check_error(e) + # do not return padding words + return struct.unpack(MEM_INFO_STRUCT, mem_info)[:-2] + + +def unlock(mtd_dev, offset, eblock_size): + _check_offset(offset, eblock_size) + erase_info_user = _pack_erase_info(offset, eblock_size) + e = 
ioctl.ioctl_p(mtd_dev.fileno(), MEMUNLOCK, erase_info_user) + os.check_error(e) + + +def erase(mtd_dev, offset, eblock_size): + _check_offset(offset, eblock_size) + erase_info_user = _pack_erase_info(offset, eblock_size) + e = ioctl.ioctl_p(mtd_dev.fileno(), MEMERASE, erase_info_user) + os.check_error(e) + + +def read(mtd_dev, offset, length): + assert mtd_dev.seek(offset) == offset + return mtd_dev.read(length) + + +def write(mtd_dev, offset, data): + assert mtd_dev.seek(offset) == offset + return mtd_dev.write(data) diff --git a/os.linux/os/linux/spidev.py b/os.linux/os/linux/spidev.py new file mode 100644 index 000000000..ebf642ef3 --- /dev/null +++ b/os.linux/os/linux/spidev.py @@ -0,0 +1,17 @@ + +import builtins +from os.linux import ioctl + +DEV_PATH_TPL = '/dev/spidev{}.{}' + +def _spidev_ioc(dir, num, size): + return ioctl._ioc(dir, ord('k'), num, size) + +SPI_IOC_RD_BITS_PER_WORD = _spidev_ioc(ioctl._IOC_READ, 3, 1) +SPI_IOC_WR_BITS_PER_WORD = _spidev_ioc(ioctl._IOC_WRITE, 3, 1) + +SPI_IOC_RD_MAX_SPEED_HZ = _spidev_ioc(ioctl._IOC_READ, 4, 4) +SPI_IOC_WR_MAX_SPEED_HZ = _spidev_ioc(ioctl._IOC_WRITE, 4, 4) + +def open(bus_idx=0, cs_idx=0): + return builtins.open(DEV_PATH_TPL.format(bus_idx, cs_idx), 'r+b') diff --git a/os.linux/os/linux/syslog.py b/os.linux/os/linux/syslog.py new file mode 100644 index 000000000..81d1e27ba --- /dev/null +++ b/os.linux/os/linux/syslog.py @@ -0,0 +1,99 @@ + +import sys +import os +from os import libc + +openlog_ = libc.func("v", "openlog", "sii") +setlogmask_ = libc.func("i", "setlogmask", "i") +syslog_ = libc.func("v", "syslog", "is") +isatty_ = libc.func("i", "isatty", "i") + +# Syslog priorities +CRITICAL = 2 +ERROR = 3 +WARNING = 4 +NOTICE = 5 +INFO = 6 +DEBUG = 7 +NOTSET = 0 + +# Facility codes +LOG_USER = (1<<3) # random user-level messages + +# Option flags for openlog +LOG_PID = 0x01 # log the pid with each message +LOG_CONS = 0x02 # log on the console if errors in sending +LOG_ODELAY = 0x04 # delay open until first 
syslog() (default) +LOG_NDELAY = 0x08 # don't delay open +LOG_NOWAIT = 0x10 # don't wait for console forks: DEPRECATED +LOG_PERROR = 0x20 # log to stderr as well + +def _logmask_upto(pri): + return ((1<<((pri)+1))-1) + +class Logger: + + def __init__(self, name): + self.name = name + + def log(self, level, msg, *args): + if self.name is not None: + s = ('%s:'+ msg) % ((self.name,) + args) + else: + s = msg % args + syslog_(level, s) + + def debug(self, msg, *args): + self.log(DEBUG, msg, *args) + + def info(self, msg, *args): + self.log(INFO, msg, *args) + + def warning(self, msg, *args): + self.log(WARNING, msg, *args) + + def error(self, msg, *args): + self.log(ERROR, msg, *args) + + def critical(self, msg, *args): + self.log(CRITICAL, msg, *args) + + +_level = ERROR +_loggers = {} + +r = isatty_(sys.stdout.fileno()) +os.check_error(r) + +flags = LOG_CONS | LOG_PID +# if we are outputting to a tty log also to stderr +if r > 0: + flags |= LOG_PERROR +ident = 'python' +if len(sys.argv): + ident = sys.argv[0] +openlog_(ident, flags, LOG_USER) + +r = setlogmask_(_logmask_upto(_level)) +os.check_error(r) + +def getLogger(name): + if name in _loggers: + return _loggers[name] + l = Logger(name) + _loggers[name] = l + return l + +def info(msg, *args): + getLogger(None).info(msg, *args) + +def debug(msg, *args): + getLogger(None).debug(msg, *args) + +def basicConfig(level=INFO, filename=None, format=None): + global _level + _level = level + if filename is not None: + print("logging.basicConfig: filename arg is not supported") + if format is not None: + print("logging.basicConfig: format arg is not supported") diff --git a/os.linux/setup.py b/os.linux/setup.py new file mode 100644 index 000000000..aa8214738 --- /dev/null +++ b/os.linux/setup.py @@ -0,0 +1,17 @@ +import sys +# Remove current dir from sys.path, otherwise setuptools will peek up our +# module instead of system. 
+sys.path.pop(0) +from setuptools import setup + + +setup(name='micropython-os.linux', + version='0.0.1', + description='os.linux module for MicroPython', + long_description="Linux specific OS functions not included in micropython-lib's os module", + author='Delio Brignoli', + author_email='dbrignoli@audioscience.com', + maintainer='Delio Brignoli', + maintainer_email='dbrignoli@audioscience.com', + license='MIT', + packages=['os.linux']) From 2dbcd8cd64ba005420e73ab6c14e266ccdeda1cb Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Wed, 26 Oct 2016 11:33:06 +0200 Subject: [PATCH 03/53] itertools: add product() implementation --- itertools/itertools.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/itertools/itertools.py b/itertools/itertools.py index 5c00952ff..eacad309b 100644 --- a/itertools/itertools.py +++ b/itertools/itertools.py @@ -55,3 +55,12 @@ def tee(iterable, n=2): def starmap(function, iterable): for args in iterable: yield function(*args) + +def product(*args, repeat=1): + if not args: + yield () + else: + args = args*repeat + for a in args[0]: + for prod in product(*args[1:]): + yield (a,)+prod From 120087415aa7afd1938614075c3bcbb3bc9e53a4 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Wed, 26 Oct 2016 11:33:47 +0200 Subject: [PATCH 04/53] functools: fix partial(), return value from partial closure invocation --- functools/functools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functools/functools.py b/functools/functools.py index ae8c2cf0e..244842650 100644 --- a/functools/functools.py +++ b/functools/functools.py @@ -2,7 +2,7 @@ def partial(func, *args, **kwargs): def _partial(*more_args, **more_kwargs): kw = kwargs.copy() kw.update(more_kwargs) - func(*(args + more_args), **kw) + return func(*(args + more_args), **kw) return _partial From e79cfcba4d745b1881b96fb7eb149d77edee7f97 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Wed, 26 Oct 2016 11:34:32 +0200 Subject: [PATCH 05/53] uasyncio: fix calls 
to log.warning() --- uasyncio/uasyncio/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/uasyncio/uasyncio/__init__.py b/uasyncio/uasyncio/__init__.py index 504f1e928..5d0c0e5c6 100644 --- a/uasyncio/uasyncio/__init__.py +++ b/uasyncio/uasyncio/__init__.py @@ -81,7 +81,7 @@ def read(self, n=-1): res = self.s.read(n) if res is not None: break - log.warn("Empty read") + log.warning("Empty read") if not res: yield IOReadDone(self.s) return res @@ -96,7 +96,7 @@ def readline(self): res = self.s.readline() if res is not None: break - log.warn("Empty read") + log.warning("Empty read") if not res: yield IOReadDone(self.s) if __debug__: From 99bccc91d924fd3c271dc9e624a02f7b89ef0f4c Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Wed, 26 Oct 2016 11:36:09 +0200 Subject: [PATCH 06/53] uasyncio.core: fix early return from wait() wait(delay) will return earlier than the specified delay if some I/O event occurs, in that case the old code would immediately run the task dequeue before calling wait(). After this commit if the task isn't due yet it gets re-queued and the queue is re-examined after wait() returns. 
--- uasyncio.core/uasyncio/core.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/uasyncio.core/uasyncio/core.py b/uasyncio.core/uasyncio/core.py index 87d0e605b..2286383c7 100644 --- a/uasyncio.core/uasyncio/core.py +++ b/uasyncio.core/uasyncio/core.py @@ -55,7 +55,9 @@ def run_forever(self): tnow = self.time() delay = t - tnow if delay > 0: + self.call_at(t, cb, *args) self.wait(delay) + continue else: self.wait(-1) # Assuming IO completion scheduled some tasks From 12f41c7e0bf3f7284705c1696c1d71cf805e8e31 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Thu, 18 Aug 2016 00:49:33 +0200 Subject: [PATCH 07/53] logging: add module level error() function --- logging/logging.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/logging/logging.py b/logging/logging.py index 1c3ef0d84..900ad0603 100644 --- a/logging/logging.py +++ b/logging/logging.py @@ -64,6 +64,9 @@ def info(msg, *args): def debug(msg, *args): getLogger(None).debug(msg, *args) +def error(msg, *args): + getLogger(None).error(msg, *args) + def basicConfig(level=INFO, filename=None, stream=None, format=None): global _level, _stream _level = level From fd1a13dc0a9b86dbdbdf4a03353a010c10fa1ba4 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Thu, 18 Aug 2016 00:50:30 +0200 Subject: [PATCH 08/53] uasyncio: add support for task cancellation --- uasyncio.core/uasyncio/core.py | 39 ++++++++++++++++++++++++++-------- uasyncio/uasyncio/__init__.py | 6 ++++++ 2 files changed, 36 insertions(+), 9 deletions(-) diff --git a/uasyncio.core/uasyncio/core.py b/uasyncio.core/uasyncio/core.py index 2286383c7..236cdfc1f 100644 --- a/uasyncio.core/uasyncio/core.py +++ b/uasyncio.core/uasyncio/core.py @@ -5,6 +5,8 @@ import uheapq as heapq import logging +class CancelledError(Exception): + pass log = logging.getLogger("asyncio") @@ -30,11 +32,11 @@ def call_soon(self, callback, *args): def call_later(self, delay, callback, *args): self.call_at(self.time() + delay, callback, *args) - def call_at(self, time, 
callback, *args): + def call_at(self, time, callback, *args, exc=None): # Including self.cnt is a workaround per heapq docs if __debug__: - log.debug("Scheduling %s", (time, self.cnt, callback, args)) - heapq.heappush(self.q, (time, self.cnt, callback, args)) + log.debug("Scheduling %s", (time, self.cnt, callback, args, exc)) + heapq.heappush(self.q, (time, self.cnt, callback, args, exc)) # print(self.q) self.cnt += 1 @@ -45,16 +47,28 @@ def wait(self, delay): log.debug("Sleeping for: %s", delay) time.sleep(delay) + def cancel(self, callback, exc = CancelledError): + _id = id(callback) + for idx, item in enumerate(self.q): + t, cnt, cb, args, exc = item + if id(cb) != _id: + continue + del self.q[idx] + heapq.heapify(self.q) + self.call_at(0, cb, *args, exc=exc) + return + self.remove_polled_cb(callback) + def run_forever(self): while True: if self.q: - t, cnt, cb, args = heapq.heappop(self.q) + t, cnt, cb, args, exc = heapq.heappop(self.q) if __debug__: - log.debug("Next coroutine to run: %s", (t, cnt, cb, args)) + log.debug("Next coroutine to run: %s", (t, cnt, cb, args, exc)) # __main__.mem_info() tnow = self.time() delay = t - tnow - if delay > 0: + if delay > 0 and not exc: self.call_at(t, cb, *args) self.wait(delay) continue @@ -62,14 +76,21 @@ def run_forever(self): self.wait(-1) # Assuming IO completion scheduled some tasks continue - if callable(cb): + # cancelled callbacks aren't called and nor rescheduled + if callable(cb) and not exc: cb(*args) else: delay = 0 try: if __debug__: - log.debug("Coroutine %s send args: %s", cb, args) - if args == (): + log.debug("Coroutine %s send args: %s, %s", cb, args, exc) + if exc: + try: + ret = cb.throw(exc) + except exc: + # ret == None reschedules a canceled task, next round it should raise StopIteration + ret = None + elif args == (): ret = next(cb) else: ret = cb.send(*args) diff --git a/uasyncio/uasyncio/__init__.py b/uasyncio/uasyncio/__init__.py index 5d0c0e5c6..244598e1e 100644 --- 
a/uasyncio/uasyncio/__init__.py +++ b/uasyncio/uasyncio/__init__.py @@ -11,6 +11,12 @@ def __init__(self): self.poller = select.poll() self.objmap = {} + def remove_polled_cb(self, _id): + for fd, cb in self.objmap.items(): + if id(cb) == _id: + self.poller.unregister(fd) + break + def add_reader(self, fd, cb, *args): if __debug__: log.debug("add_reader%s", (fd, cb, args)) From 08ccfa807e89db03c518e287ac91f8dd1ed4ef91 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Thu, 18 Aug 2016 10:53:28 +0200 Subject: [PATCH 09/53] uasyncio: WIP lots of fixes and changes needs splitting and documenting --- uasyncio.core/uasyncio/core.py | 33 +++++++----- uasyncio/uasyncio/__init__.py | 96 +++++++++++++++++++++------------- 2 files changed, 81 insertions(+), 48 deletions(-) diff --git a/uasyncio.core/uasyncio/core.py b/uasyncio.core/uasyncio/core.py index 236cdfc1f..84994944b 100644 --- a/uasyncio.core/uasyncio/core.py +++ b/uasyncio.core/uasyncio/core.py @@ -36,7 +36,7 @@ def call_at(self, time, callback, *args, exc=None): # Including self.cnt is a workaround per heapq docs if __debug__: log.debug("Scheduling %s", (time, self.cnt, callback, args, exc)) - heapq.heappush(self.q, (time, self.cnt, callback, args, exc)) + heapq.heappush(self.q, (time, self.cnt, callback, args, exc, False)) # print(self.q) self.cnt += 1 @@ -50,24 +50,30 @@ def wait(self, delay): def cancel(self, callback, exc = CancelledError): _id = id(callback) for idx, item in enumerate(self.q): - t, cnt, cb, args, exc = item + t, cnt, cb, args, _exc = item if id(cb) != _id: continue - del self.q[idx] - heapq.heapify(self.q) + if __debug__: + log.debug("Setting discard flag on: %s at index %d", (t, cnt, cb, args, _exc), idx) + self.q[idx] = t, cnt, cb, args, _exc, True self.call_at(0, cb, *args, exc=exc) - return self.remove_polled_cb(callback) def run_forever(self): while True: if self.q: - t, cnt, cb, args, exc = heapq.heappop(self.q) - if __debug__: - log.debug("Next coroutine to run: %s", (t, cnt, cb, 
args, exc)) -# __main__.mem_info() tnow = self.time() + if __debug__: + log.debug('*'*20+' sched step start at %s, num tasks in queue %d', tnow, len(self.q)) + t, cnt, cb, args, exc, discard = heapq.heappop(self.q) delay = t - tnow + if __debug__: + log.debug("Next coroutine to run in %s: %s", delay, (t, cnt, cb, args, exc)) + if discard: + if __debug__: + log.debug("Discarding: %s", (t, cnt, cb, args, exc, discard)) + continue +# __main__.mem_info() if delay > 0 and not exc: self.call_at(t, cb, *args) self.wait(delay) @@ -77,8 +83,9 @@ def run_forever(self): # Assuming IO completion scheduled some tasks continue # cancelled callbacks aren't called and nor rescheduled - if callable(cb) and not exc: - cb(*args) + if callable(cb): + if not exc: + cb(*args) else: delay = 0 try: @@ -111,9 +118,9 @@ def run_forever(self): self.add_writer(arg.fileno(), cb) continue elif isinstance(ret, IOReadDone): - self.remove_reader(arg.fileno()) + self.remove_reader(arg.fileno(), cb) elif isinstance(ret, IOWriteDone): - self.remove_writer(arg.fileno()) + self.remove_writer(arg.fileno(), cb) elif isinstance(ret, StopLoop): return arg elif isinstance(ret, type_gen): diff --git a/uasyncio/uasyncio/__init__.py b/uasyncio/uasyncio/__init__.py index 244598e1e..39f13b45a 100644 --- a/uasyncio/uasyncio/__init__.py +++ b/uasyncio/uasyncio/__init__.py @@ -11,55 +11,64 @@ def __init__(self): self.poller = select.poll() self.objmap = {} - def remove_polled_cb(self, _id): - for fd, cb in self.objmap.items(): - if id(cb) == _id: - self.poller.unregister(fd) - break + def _unregister_fd(self, fd): + self.objmap.pop(fd, None) + try: + self.poller.unregister(fd) + except OSError as e: + if e.args[0] != errno.ENOENT: + raise + + def remove_polled_cb(self, cb): + _id = id(cb) + for fd, cbs in self.objmap.items(): + cbs.pop(id(cb), None) + if not cbs: + self._unregister_fd(fd) def add_reader(self, fd, cb, *args): if __debug__: log.debug("add_reader%s", (fd, cb, args)) + cbs = self.objmap.setdefault(fd, 
{}) + self.poller.register(fd, select.POLLIN) if args: - self.poller.register(fd, select.POLLIN) - self.objmap[fd] = (cb, args) + cbs[id(cb)] = (cb, args) else: - self.poller.register(fd, select.POLLIN) - self.objmap[fd] = cb + cbs[id(cb)] = (cb, None) - def remove_reader(self, fd): + def remove_reader(self, fd, cb): if __debug__: - log.debug("remove_reader(%s)", fd) - self.poller.unregister(fd) - del self.objmap[fd] + log.debug("remove_reader(%s)", (fd, cb)) + cbs = self.objmap.get(fd, {}) + cbs.pop(id(cb), None) + if not cbs: + self._unregister_fd(fd) def add_writer(self, fd, cb, *args): if __debug__: log.debug("add_writer%s", (fd, cb, args)) + cbs = self.objmap.setdefault(fd, {}) + self.poller.register(fd, select.POLLOUT) if args: - self.poller.register(fd, select.POLLOUT) - self.objmap[fd] = (cb, args) + cbs[id(cb)] = (cb, args) else: - self.poller.register(fd, select.POLLOUT) - self.objmap[fd] = cb + cbs[id(cb)] = (cb, None) - def remove_writer(self, fd): + def remove_writer(self, fd, cb): if __debug__: log.debug("remove_writer(%s)", fd) - try: - self.poller.unregister(fd) - self.objmap.pop(fd, None) - except OSError as e: - # StreamWriter.awrite() first tries to write to an fd, - # and if that succeeds, yield IOWrite may never be called - # for that fd, and it will never be added to poller. So, - # ignore such error. 
- if e.args[0] != errno.ENOENT: - raise + cbs = self.objmap.get(fd, {}) + cbs.pop(id(cb), None) + if not cbs: + self._unregister_fd(fd) def wait(self, delay): if __debug__: - log.debug("epoll.wait(%d)", delay) + log.debug("epoll.wait(%s)", delay) + for fd, cbs in self.objmap.items(): + for cb, args in cbs.values(): + log.debug("epoll.registered(%d) %s", fd, (cb, args)) + # We need one-shot behavior (second arg of 1 to .poll()) if delay == -1: res = self.poller.poll(-1, 1) @@ -67,13 +76,30 @@ def wait(self, delay): res = self.poller.poll(int(delay * 1000), 1) #log.debug("epoll result: %s", res) for fd, ev in res: - cb = self.objmap[fd] + # Remove the registered callbacks dictionary from its parent + # so when callbacks are invoked they can add their registrations + # to a fresh dictionary. + cbs = self.objmap.pop(fd, {}) + if not cbs: + log.error("Event %d on fd %r but no callback registered", ev, fd) + continue if __debug__: - log.debug("Calling IO callback: %r", cb) - if isinstance(cb, tuple): - cb[0](*cb[1]) - else: - self.call_soon(cb) + s = '\n'.join(str(v) for v in cbs.values()) + log.debug("Matching IO callbacks for %r:\n%s", (fd, ev), s) + while cbs: + _id, data = cbs.popitem() + cb, args = data + if args is None: + if __debug__: + log.debug("Scheduling IO coro: %r", (fd, ev, cb)) + self.call_soon(cb) + else: + if __debug__: + log.debug("Calling IO callback: %r", (fd, ev, cb, args)) + cb(*args) + # If no callback registered an event for this fd unregister it + if not self.objmap.get(fd, None): + self._unregister_fd(fd) class StreamReader: From d011a316c7b465e7bbd535b603742a6d82cdd484 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Wed, 26 Oct 2016 15:12:21 +0200 Subject: [PATCH 10/53] os.linux: add time module --- os.linux/os/linux/time.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 os.linux/os/linux/time.py diff --git a/os.linux/os/linux/time.py b/os.linux/os/linux/time.py new file mode 100644 index 
000000000..a8f8adefe --- /dev/null +++ b/os.linux/os/linux/time.py @@ -0,0 +1,27 @@ + +import ustruct +import ffilib +import os + + +TIMESPEC_FMT = 'll' +TIMESPEC_SIZE = ustruct.calcsize(TIMESPEC_FMT) + +CLOCK_MONOTONIC = 1 +CLOCK_MONOTONIC_RAW = 4 + +librt = ffilib.open('librt') +_clock_gettime = librt.func('i', 'clock_gettime', 'ip') +_ts_buf = bytearray(TIMESPEC_SIZE) + +def clock_gettime(clk_id): + e = _clock_gettime(clk_id, _ts_buf) + os.check_error(e) + s, ns = ustruct.unpack(TIMESPEC_FMT, _ts_buf) + return (s*10**9)+ns + +def monotime(): + return clock_gettime(CLOCK_MONOTONIC) + +def monotime_raw(): + return clock_gettime(CLOCK_MONOTONIC_RAW) From e6f01b3995f3352f86106dc67194490bb4d15e2f Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Wed, 26 Oct 2016 15:22:12 +0200 Subject: [PATCH 11/53] ubus: add package --- ubus/metadata.txt | 4 + ubus/setup.py | 17 + ubus/ubus/__init__.py | 655 ++++++++++++++++++++++++++++++++++++ ubus/ubus/uasyncio_utils.py | 141 ++++++++ 4 files changed, 817 insertions(+) create mode 100644 ubus/metadata.txt create mode 100644 ubus/setup.py create mode 100644 ubus/ubus/__init__.py create mode 100644 ubus/ubus/uasyncio_utils.py diff --git a/ubus/metadata.txt b/ubus/metadata.txt new file mode 100644 index 000000000..59295ca73 --- /dev/null +++ b/ubus/metadata.txt @@ -0,0 +1,4 @@ +srctype = micropython-lib +type = package +version = 0.0.1 +author = Delio Brignoli diff --git a/ubus/setup.py b/ubus/setup.py new file mode 100644 index 000000000..592002653 --- /dev/null +++ b/ubus/setup.py @@ -0,0 +1,17 @@ +import sys +# Remove current dir from sys.path, otherwise setuptools will peek up our +# module instead of system. 
+sys.path.pop(0) +from setuptools import setup + + +setup(name='micropython-ubus', + version='0.0.1', + description='ubus interface for MicroPython', + long_description="", + author='Delio Brignoli', + author_email='brignoli.delio@gmail.com', + maintainer='Delio Brignoli', + maintainer_email='brignoli.delio@gmail.com', + license='MIT', + packages=['ubus']) diff --git a/ubus/ubus/__init__.py b/ubus/ubus/__init__.py new file mode 100644 index 000000000..738cd025b --- /dev/null +++ b/ubus/ubus/__init__.py @@ -0,0 +1,655 @@ + +import os +import _ubus +import contextlib +import uasyncio +import logging +import functools +from . import uasyncio_utils + + +def check_error(retval): + if retval == 0: + return retval + elif retval < 0: + return os.check_error(retval) + else: + raise RuntimeError(retval) + + +_instances = {} + +def _complete_handler(deferred, _ctx, _req, _ret): + deferred.set_result(_ret) + + +def blob_len(blob_attr): + return (blob_attr.id_len & 0x00ffffff) - uct.sizeof(blob_attr) + + +def _req_data_accumulator(_list): + def _data_cb(_ctx, _req, _type, _msg): + _list.append(_msg) + return _data_cb + + +async def retry_ubus_func(peer, func, *args): + while True: + ret = func(*args) + if ret == _ubus.UBUS_STATUS_OK or peer.conn.up: + return ret + logging.info('retry_ubus_func() %s %s returned %s', func, args, ret) + await peer.conn.until_up() + + +async def call_once_conn_up(conn, func, *args): + while True: + try: + if not conn.up: + await conn.until_up() + return await func(*args) + except uasyncio.CancelledError: + continue + + +async def call_once_object_available(peer, obj, func, *args): + while True: + try: + await peer.waitfor_obj(obj) + return await func(*args) + except uasyncio.CancelledError: + continue + + +def process_recv_data(ctx, ev_loop, fd): + # process incoming messages + ctx.process_recv_data() + # re-arm poll fd + ev_loop.add_reader(fd, process_recv_data, ctx, ev_loop, fd) + + +class RemoteObjectRemoved(Exception): + pass + + +class 
UBusObjNotificationSubscriber: + def __init__(self, ubus_peer, obj_proxy, subscriber_obj): + self.peer = ubus_peer + self.obj_proxy = obj_proxy + self._subscribed = False + self._subscriber_obj = subscriber_obj + wq = uasyncio_utils.AFuture() + self.notification_wq = wq + self.obj_proxy._notification_wqs[wq] = wq + + async def __aenter__(self): + await self.setup() + return self + + async def __aexit__(self, exc_type, exc, tb): + self.teardown() + + async def setup(self): + return await call_once_object_available(self.peer, self.obj_proxy, self._setup) + + def teardown(self): + self.obj_proxy._notification_wqs.pop(self.notification_wq, None) + + async def _setup(self): + if self.notification_wq.cancelled or not self._subscribed: + ctx = self.peer.conn.ctx + # (re)subscribe to notification by object id + ret = ctx.register_subscriber(self._subscriber_obj) + if ret in (_ubus.UBUS_STATUS_CONNECTION_FAILED, _ubus.UBUS_STATUS_TIMEOUT, _ubus.UBUS_STATUS_NOT_FOUND): + logging.error('register_subscriber() ret %s', ret) + raise uasyncio.CancelledError + check_error(ret) + ret = ctx.subscribe(self._subscriber_obj, self.obj_proxy.obj_id) + if ret in (_ubus.UBUS_STATUS_CONNECTION_FAILED, _ubus.UBUS_STATUS_TIMEOUT, _ubus.UBUS_STATUS_NOT_FOUND): + logging.error('ctx.subscribe() ret %s', ret) + raise uasyncio.CancelledError + check_error(ret) + self.notification_wq.reset() + self._subscribed = True + + async def next_notification(self): + if not self.notification_wq.cancelled and self.notification_wq.has_result(): + return self.notification_wq.result_nowait() + # Subscribe if not subscribed + if self.notification_wq.cancelled or not self._subscribed: + await self.setup() + # Wait for next notification + return await self.notification_wq.result() + + +class UBusObjProxy: + def __init__(self, ubus_peer, obj_path, obj_id, type_id): + self.peer = ubus_peer + self.path = obj_path + self.type_id = type_id + self.obj_id = obj_id + self._subscribed = False + self._subscriber_obj = 
_ubus.subscriber(self._handle_obj_notification) + self._cancel_on_remove = {} + self._notification_wqs = {} + + def is_stale(self): + """Return True if the remote object was removed at some point. + + This is useful to know in advance if a remote call may wait, possibly + forever, for an object to become available again. + """ + return not self.peer.obj_cache.is_obj_cached(self) + + def update_id(self, obj_path, obj_id, type_id): + if self.path != obj_path: + raise ValueError('Identity of proxy cannot change {} -> {}'.format(self.path, obj_path)) + self.obj_id = obj_id + self.type_id = type_id + + # cache known objects by proxy_obj's reference (.id() not ubus_id) + # a proxy_obj's path is what identifies the object globally + # so when a call is placed if the object .id() is not present it means + # the object went away and we should wait for its *path* to come back + # once the path is available again the new ubus_id is assigned to the + # existing proxy_obj instance. If type_id has changed an exception is raised. 
+ + def cancel_on_remove_nocontext(self, future): + if self.is_stale(): + future.cancel() + else: + if future not in self._cancel_on_remove: + self._cancel_on_remove[future] = future + + def cancel_on_remove_discard(self, future): + self._cancel_on_remove.pop(future, None) + + @contextlib.contextmanager + def cancel_on_remove(self, future): + try: + self.cancel_on_remove_nocontext(future) + yield + finally: + self._cancel_on_remove.pop(future, None) + + def removed(self): + for future in self._cancel_on_remove.values(): + future.cancel() + self._cancel_on_remove.clear() + # Cancelled but not removed + for future in self._notification_wqs.values(): + future.cancel() + + async def invoke_method(self, method, data=None): + try: + return await call_once_object_available(self.peer, self, self._invoke_method, method, data) + finally: + self.peer.conn.ctx.process_pending() + + async def _invoke_method(self, method, data): + conn = self.peer.conn + ctx = self.peer.conn.ctx + # FIXME: turn _complete_handler into a lambda + with uasyncio_utils.AsyncCallback(_complete_handler) as req, \ + self.cancel_on_remove(req): + try: + res = [] + _req = _ubus.request(req.cb, _req_data_accumulator(res)) + ret = ctx.invoke_async(self.obj_id, method, data, _req) + if ret in (_ubus.UBUS_STATUS_CONNECTION_FAILED, _ubus.UBUS_STATUS_TIMEOUT, _ubus.UBUS_STATUS_NOT_FOUND): + raise uasyncio.CancelledError + check_error(ret) + ctx.complete_request_async(_req) + status = await req.done() + return (status, res, -1) + except: + ctx.abort_request(_req) + raise + + # Notifications + + def _handle_obj_notification(self, _ctx, _sub, notification, msg): + for wq in self._notification_wqs: + wq.set_result((self, notification, [msg])) + + def notification_subscriber(self): + return UBusObjNotificationSubscriber(self.peer, self, self._subscriber_obj) + + +class UBusObjInstance: + def __init__(self, peer, ubus_obj_name, dispatch_func): + self.peer = peer + self._obj = _ubus.object(ubus_obj_name, 
dispatch_func) + + async def publish(self): + return await call_once_conn_up(self.peer.conn, self._publish) + + async def _publish(self): + ret = self.peer.conn.ctx.add_object(self._obj) + if ret in (_ubus.UBUS_STATUS_CONNECTION_FAILED, _ubus.UBUS_STATUS_TIMEOUT): + raise uasyncio.CancelledError + check_error(ret) + return ret + + async def withdraw(self): + return await call_once_conn_up(self.peer.conn, self._withdraw) + + async def _withdraw(self): + ret = self.peer.conn.ctx.remove_object(self._obj) + if ret in (_ubus.UBUS_STATUS_CONNECTION_FAILED, _ubus.UBUS_STATUS_TIMEOUT): + raise uasyncio.CancelledError + check_error(ret) + return ret + + def notify_subscribers(self, notification, msg): + ret = self.peer.conn.ctx.notify(self._obj, notification, msg) + check_error(ret) + + async def wait_request(self, payload): + pass + + async def send_reply(self, request, payload): + pass + + +class UBusObjProxyCache: + def __init__(self): + self._obj_by_path = {} + self._ubusid_by_obj = {} + self._obj_by_id = {} + + def is_obj_cached(self, proxy_obj): + return proxy_obj in self._ubusid_by_obj + + def lookup(self, path): + return self._obj_by_path.get(path, None) + + def lookup_id(self, _id): + return self._obj_by_id.get(_id, None) + + def add(self, num_id, path, obj): + assert path not in self._obj_by_path + self._obj_by_path[path] = obj + assert obj not in self._ubusid_by_obj + self._ubusid_by_obj[obj] = num_id + assert id not in self._obj_by_id + self._obj_by_id[num_id] = obj + + def remove(self, path): + obj = self._obj_by_path.get(path, None) + if obj: + del self._obj_by_path[path] + del self._ubusid_by_obj[obj] + del self._obj_by_id[obj.obj_id] + obj.removed() + return obj + + def flush(self): + for obj in self._obj_by_path.values(): + obj.removed() + self._obj_by_path.clear() + self._ubusid_by_obj.clear() + self._obj_by_id.clear() + + +class UBusConnection: + def __init__(self, socket_path, peer_disconnect_cb, obj_event_handler=None): + self.path = socket_path + 
self.ctx = _ubus.ctx() + self.up = False + self.established = uasyncio_utils.AFuture() + self.peer_disconnect_cb = peer_disconnect_cb + self.lost = uasyncio_utils.AsyncCallback(self._disconnect_cb) + self.lost.set_result(True) + self.obj_event_handler = obj_event_handler + self._cancel_on_disconnect = {} + + def _disconnect_cb(self, deferred, _ctx): + # Hold everyone up until we are connected again + self.up = False + deferred.set_result(True) + self.established.reset() + self.peer_disconnect_cb() + for future in self._cancel_on_disconnect.values(): + future.cancel() + self._cancel_on_disconnect.clear() + + async def until_up(self): + return await self.established.result(consume=False) + + async def until_down(self): + return await self.lost.result(consume=False) + + def try_connect(self): + ctx = self.ctx + path = self.path + ev_loop = uasyncio.get_event_loop() + try: + ret = ctx.connect(path, self.lost.cb) + check_error(ret) + if self.obj_event_handler: + ret = ctx.register_event_handler(self.obj_event_handler, 'ubus.object.add') + check_error(ret) + ret = ctx.register_event_handler(self.obj_event_handler, 'ubus.object.remove') + check_error(ret) + logging.info('connecting to {} succeded'.format(path)) + except Exception as e: + logging.error('connecting to {} failed: {}'.format(path, e)) + raise + # Setup I/O callback + ev_loop = uasyncio.get_event_loop() + fd = ctx.fileno() + ev_loop.add_reader(fd, process_recv_data, ctx, ev_loop, fd) + # Signal we are connected + self.up = True + self.established.set_result(True) + self.lost.reset() + + async def _retry_connect(self, retry_interval): + while True: + with contextlib.suppress(Exception): + self.try_connect() + break + await uasyncio.sleep(retry_interval) + return True + + async def connect_and_maintain(self, retry_interval=1): + ctx = self.ctx + path = self.path + connection_lost = self.lost.result + ev_loop = uasyncio.get_event_loop() + try: + while True: + if not self.up: + # Keep trying to reconnect until we 
succeed + await self._retry_connect(retry_interval) + # Wait until connection is severed + if not await connection_lost(): + continue + logging.info('disconnected from {}'.format(path)) + # Stop polling the socket's file descriptor + ev_loop.remove_reader(ctx.fileno(), process_recv_data) + finally: + with contextlib.suppress(KeyError): + uasyncio.get_event_loop().remove_reader(ctx.fileno(), process_recv_data) + if self.obj_event_handler: + ctx.unregister_event_handler(self.obj_event_handler) + ctx.shutdown() + self.established.reset() + + def cancel_on_disconnect_nocontext(self, future): + if not self.up: + future.cancel() + else: + if future not in self._cancel_on_disconnect: + self._cancel_on_disconnect[future] = future + + def cancel_on_disconnect_discard(self, future): + self._cancel_on_disconnect.pop(future, None) + + @contextlib.contextmanager + def cancel_on_disconnect(self, future): + try: + self.cancel_on_disconnect_nocontext(future) + yield + finally: + self._cancel_on_disconnect.pop(future, None) + + +class UBusEventSubscriber: + def __init__(self, peer): + self.peer = peer + self._ev_handler = _ubus.event_handler(self._ev_callback) + self.patterns = [] + self.ev_q = uasyncio_utils.AFuture() + peer.conn.cancel_on_disconnect_nocontext(self.ev_q) + + def _ev_callback(self, _ctx, _ev, ev, msg): + self.ev_q.set_result((ev, msg)) + + async def _refresh_registrations(self): + for pattern in self.patterns: + ret = self.peer.conn.ctx.register_event_handler(self._ev_handler, pattern) + if ret in (_ubus.UBUS_STATUS_CONNECTION_FAILED, _ubus.UBUS_STATUS_TIMEOUT): + raise uasyncio.CancelledError + check_error(ret) + + async def register_pattern(self, pattern): + return await call_once_conn_up(self.peer.conn, self._register_pattern, pattern) + + async def _register_pattern(self, pattern): + if pattern in self.patterns: + return + conn = self.peer.conn + conn.cancel_on_disconnect_nocontext(self.ev_q) + if self.ev_q.cancelled: + await self._refresh_registrations() + 
self.ev_q.reset() + conn.cancel_on_disconnect_nocontext(self.ev_q) + ret = conn.ctx.register_event_handler(self._ev_handler, pattern) + if ret in (_ubus.UBUS_STATUS_CONNECTION_FAILED, _ubus.UBUS_STATUS_TIMEOUT): + raise uasyncio.CancelledError + check_error(ret) + self.patterns.append(pattern) + + async def event(self): + return await call_once_conn_up(self.peer.conn, self._event) + + async def _event(self): + if not self.ev_q.cancelled and self.ev_q.has_result(): + return self.ev_q.result_nowait() + conn = self.peer.conn + conn.cancel_on_disconnect_nocontext(self.ev_q) + if self.ev_q.cancelled: + await self._refresh_registrations() + self.ev_q.reset() + conn.cancel_on_disconnect_nocontext(self.ev_q) + return await self.ev_q.result() + + def close(self): + self.patterns.clear() + self.peer.conn.cancel_on_disconnect_discard(self.ev_q) + ret = self.peer.conn.ctx.unregister_event_handler(self._ev_handler) + # Ignore failures to unregister to avoid errors on shutdown + if ret in (_ubus.UBUS_STATUS_CONNECTION_FAILED, _ubus.UBUS_STATUS_TIMEOUT): + return + check_error(ret) + + +class UBusPeer: + def __init__(self, socket_path): + ev = _ubus.event_handler(self._handle_obj_event) + self.conn = UBusConnection(socket_path, self._disconnect_cb, obj_event_handler=ev) + self.obj_cache = UBusObjProxyCache() + self._run_task = None + self._waiting_objs = {} + + # Peer lifecycle management + + def __enter__(self): + self.conn.try_connect() + self.run() + return self + + def __exit__(self, type, value, tb): + self.shutdown() + + async def __aenter__(self): + self.run() + await self.conn.until_up() + return self + + async def __aexit__(self, type, value, tb): + self.shutdown() + + def try_connect(self): + self.conn.try_connect() + + def run(self, connect_retry_interval=1): + ubus_connect = self.conn.connect_and_maintain(retry_interval=connect_retry_interval) + self._run_task = uasyncio.ensure_future(ubus_connect) + + def shutdown(self): + if self._run_task: + self._run_task.close() 
+ + def _disconnect_cb(self): + # Make all circulating proxy objs stale + self.obj_cache.flush() + # Wake up anyone waiting for a path with an exception + for wq_list in self._waiting_objs.values(): + for wq in wq_list: + wq.cancel() + self._waiting_objs.clear() + + # Object lookup and caching + + def _handle_obj_event(self, _ctx, _ev, ev, msg): + attrs = dict(_ubus.blob_decode(msg)) + path = attrs['path'] + if ev == 'ubus.object.add': + self._process_obj_added(path) + elif ev == 'ubus.object.remove': + self._process_obj_removed(path) + else: + logging.error('Unexpected object event: %s', attrs) + + def _process_obj_added(self, path): + # FIXME: error on adding a wait queue when the connection is down + wait_queues = self._waiting_objs.get(path, ()) + for q in wait_queues: + q.set_result(path) + if wait_queues: + del self._waiting_objs[path] + + def _process_obj_removed(self, path): + self.obj_cache.remove(path) + + async def _lookup(self, path): + if path.endswith('*'): + raise ValueError('wildcard lookup not implemented') + lookup_cb = lambda f,r:f.set_result(r) + conn = self.conn + with uasyncio_utils.AsyncCallback(lookup_cb) as lookup_future, \ + conn.cancel_on_disconnect(lookup_future): + ret = conn.ctx.lookup(path, lookup_future.cb) + if ret == _ubus.UBUS_STATUS_CONNECTION_FAILED: + raise uasyncio.CancelledError + if ret == _ubus.UBUS_STATUS_NOT_FOUND: + return None + check_error(ret) + return await lookup_future.result() + + async def lookup(self, path, proxy_factory = UBusObjProxy): + cached = self.obj_cache.lookup(path) + if cached: + return cached + # not cached, look it up on the server + res = await call_once_conn_up(self.conn, self._lookup, path) + if not res: + return res + # create proxy object and cache it + o = proxy_factory(self, *res) + path, num_id, type_id = res + self.obj_cache.add(num_id, path, o) + return o + + # Event management + async def send_event(self, event, payload): + try: + return await call_once_conn_up(self.conn, 
self._send_event, event, payload) + finally: + self.conn.ctx.process_pending() + + async def _send_event(self, event, payload): + conn = self.conn + ctx = self.conn.ctx + ret = ctx.send_event(event, payload) + if ret == _ubus.UBUS_STATUS_CONNECTION_FAILED: + raise uasyncio.CancelledError + check_error(ret) + + @contextlib.contextmanager + def event_subscriber(self): + ev_sub = UBusEventSubscriber(self) + try: + yield ev_sub + finally: + ev_sub.close() + + async def waitfor_paths(self, obj_paths): + return await call_once_conn_up(self.conn, self._waitfor_paths, obj_paths) + + async def _waitfor_paths(self, obj_paths): + conn = self.conn + with uasyncio_utils.AFuture() as wq, \ + conn.cancel_on_disconnect(wq): + for path in obj_paths: + wq_set = self._waiting_objs.setdefault(path, set()) + wq_set.add(wq) + outstanding = set(obj_paths) + for path in obj_paths: + res = await self._lookup(path) + if res: + assert res[0] == path + outstanding.remove(path) + self._waiting_objs.get(path, set()).discard(wq) + while outstanding: + path = await wq.result() + outstanding.discard(path) + return obj_paths + + async def waitfor_obj(self, obj_proxy): + return await call_once_conn_up(self.conn, self._waitfor_obj, obj_proxy) + + async def _waitfor_obj(self, obj_proxy): + while not self.obj_cache.is_obj_cached(obj_proxy): + res = await self._lookup(obj_proxy.path) + if not res: + await self._waitfor_paths([obj_proxy.path]) + continue + obj_proxy.update_id(*res) + path, num_id, type_id = res + self.obj_cache.add(num_id, path, obj_proxy) + break + return obj_proxy + + # Local object creation + + def object(self, ubus_obj_name, dispatch_func): + return UBusObjInstance(self, ubus_obj_name, dispatch_func) + + # Remote method invokation is handled by the obj proxy + + +def peer(socket_path=None): + """ + >>> p.peer() + >>> try: + >>> p.startup() + >>> # do something with p + >>> finally: + >>> p.shutdown() + + or + + >>> with peer() as p: + >>> # do something with p + >>> + """ + if 
socket_path not in _instances: + _instances[socket_path] = UBusPeer(socket_path) + return _instances[socket_path] + + +async def connected_peer(socket_path=None): + """ + >>> try: + >>> p = await connected_peer() + >>> # do something with p + >>> finally: + >>> p.shutdown() + """ + p = peer(socket_path=socket_path).startup() + await p.conn.until_up() + return p diff --git a/ubus/ubus/uasyncio_utils.py b/ubus/ubus/uasyncio_utils.py new file mode 100644 index 000000000..00f519f0d --- /dev/null +++ b/ubus/ubus/uasyncio_utils.py @@ -0,0 +1,141 @@ + +import os +import ffi +import uasyncio +import functools +import logging +from collections.deque import deque + + +class TimeoutError(Exception): + pass + + +class ATimeout: + def __init__(self, task, timeout_seconds, exc = TimeoutError): + self._task = task + self._timeout_task = self._fire(exc) + self._timeout = timeout_seconds + + def __enter__(self): + return self.start() + + def __exit__(self, type, value, tb): + self.cancel() + + def start(self): + uasyncio.get_event_loop().call_later(self._timeout, self._timeout_task) + return self + + def cancel(self): + self._timeout_task.close() + + async def run(self): + self.start() + res = await self._task + self.cancel() + return res + + async def _fire(self, exc): + self._task.throw(exc) + + +class AFuture: + def __init__(self, coro = None): + self._coro = coro + self.cancelled = False + self.closed = False + self._result = deque() + _in, _out = os.pipe() + self._in, self._out = open(_in, 'rb'), open(_out, 'wb') + self.done = self.result + self.waiting = 0 + self.pending_bytes = 0 + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.close() + + async def task(self): + try: + res = await self._coro + self.set_result(res) + except Exception as e: + self.set_result(e) + raise + + def cancel(self): + if self.closed: + if self.waiting: + raise RuntimeError('Canceling a closed future %s with %d waiting tasks', self, self.waiting) + return + if not 
self.cancelled: + self.cancelled = True + self._wake_waiting() + if self._coro: + uasyncio.get_event_loop().cancel(self._coro) + + def close(self): + if self.waiting: + logging.warning('Closing %s with %d waiting tasks', self, self.waiting) + if not self.closed: + self.cancel() + self.reset() + self._in.close() + self._out.close() + self.closed = True + + def has_result(self): + return len(self._result) > 0 + + def reset(self): + if self.closed: + raise ValueError('reset() on a closed future') + self._in.read(self.pending_bytes) + self.pending_bytes = 0 + self.cancelled = False + # No clear() in upy's ucollections implementation! + while len(self._result): + self._result.pop() + + def _wake_waiting(self): + self.pending_bytes += 1 + self._out.write('-') + + + def set_result(self, result): + #if self.cancelled or self.closed: + # raise RuntimeError('set_result() with cancelled %s and closed %s'.format(self.cancelled, self.closed)) + self._result.append(result) + # Signal result is available + self._wake_waiting() + + def result_nowait(self, consume=True, flush=False): + # No subscripting in upy's ucollections implementation, so need to pop + # then append :( + res = self._result.popleft() + if flush: + self.reset() + elif not consume: + self._result.appendleft(res) + return res + + async def result(self, consume=True, flush=False): + self.waiting += 1 + while not len(self._result) and not self.cancelled: + yield uasyncio.IORead(self._in) + if consume: + self._in.read(1) + self.pending_bytes -= 1 + self.waiting -= 1 + if self.cancelled: + raise uasyncio.CancelledError + return self.result_nowait(consume=consume, flush=flush) + + +class AsyncCallback(AFuture): + def __init__(self, _callable): + super().__init__() + self.cb = functools.partial(_callable, self) From 25fb042c3d0095d49e5aa12603f6bac09da641d7 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Fri, 16 Dec 2016 16:50:58 +0100 Subject: [PATCH 12/53] os.linux: add clock_gettimeofday() to time module --- 
os.linux/os/linux/time.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/os.linux/os/linux/time.py b/os.linux/os/linux/time.py index a8f8adefe..694bf2322 100644 --- a/os.linux/os/linux/time.py +++ b/os.linux/os/linux/time.py @@ -6,13 +6,26 @@ TIMESPEC_FMT = 'll' TIMESPEC_SIZE = ustruct.calcsize(TIMESPEC_FMT) +TIMEVAL_FMT = TIMESPEC_FMT +TIMEVAL_SIZE = ustruct.calcsize(TIMEVAL_FMT) CLOCK_MONOTONIC = 1 CLOCK_MONOTONIC_RAW = 4 librt = ffilib.open('librt') _clock_gettime = librt.func('i', 'clock_gettime', 'ip') +_gettimeofday = librt.func('i', 'gettimeofday', 'ip') _ts_buf = bytearray(TIMESPEC_SIZE) +_tv_buf = bytearray(TIMEVAL_SIZE) + +def clock_gettimeofday(clk_id): + e1 = _clock_gettime(clk_id, _ts_buf) + e2 = _gettimeofday(_tv_buf, None) + os.check_error(e1) + os.check_error(e2) + s, ns = ustruct.unpack(TIMESPEC_FMT, _ts_buf) + utc_s, utc_us = ustruct.unpack(TIMEVAL_FMT, _tv_buf) + return s, ns, utc_s, utc_us def clock_gettime(clk_id): e = _clock_gettime(clk_id, _ts_buf) From df2ccad599d3b0171f1be79d2ba75dff1c5ab4ac Mon Sep 17 00:00:00 2001 From: Paul Sokolovsky Date: Mon, 5 Feb 2018 21:07:23 +0200 Subject: [PATCH 13/53] uasyncio: benchmark/boom_uasyncio.py: More assert output. --- uasyncio/benchmark/boom_uasyncio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/uasyncio/benchmark/boom_uasyncio.py b/uasyncio/benchmark/boom_uasyncio.py index def5bfb19..9f2654aaf 100644 --- a/uasyncio/benchmark/boom_uasyncio.py +++ b/uasyncio/benchmark/boom_uasyncio.py @@ -19,7 +19,7 @@ def validate(resp): no = int(l.split()[1]) seen.append(no) c = t.count(l + "\r\n") - assert c == 400101 + assert c == 400101, str(c) assert t.endswith("=== END ===") cnt += 1 From 1b3be0f8a834cee3513c783912b24bf6eb702e29 Mon Sep 17 00:00:00 2001 From: Paul Sokolovsky Date: Wed, 7 Feb 2018 00:06:10 +0200 Subject: [PATCH 14/53] uasyncio.core: Switch to separate run and wait queues. 
Instead of using single priority queue for all tasks, split into using "run queue", which represents tasks not waiting until specific time, which should be run on every (well, next) loop iteration, and wait queue, still a priority queue. Run queue is a simple FIFO, implemented by ucollections.deque, recently introduced in pfalcon/micropython. Thus, there's minimal storage overhead and intrinsic scheduling fairness. Generally, run queue should hold both a callback/coro and its arguments, but as we don't feed any send args into coros still, it's optimized to hold just 1 items for coros, while 2 for callbacks. Introducing run queue will also allow to get rid of tie-breaking counter in utimeq implementation, which was introduced to enforce fair scheduling. It's no longer needed, as all tasks which should be run at given time are batch-removed from wait queue and batch-inserted into run queue. So, they may be executed not in the order scheduled (due to non-stable order of heap), but the whole batch will be executed "atomically", and any new schedulings from will be processed no earlier than next loop iteration. --- uasyncio.core/uasyncio/core.py | 101 ++++++++++++++++++++------------- 1 file changed, 62 insertions(+), 39 deletions(-) diff --git a/uasyncio.core/uasyncio/core.py b/uasyncio.core/uasyncio/core.py index 274883a7f..77fdb7a27 100644 --- a/uasyncio.core/uasyncio/core.py +++ b/uasyncio.core/uasyncio/core.py @@ -1,5 +1,6 @@ import utime as time import utimeq +import ucollections type_gen = type((lambda: (yield))()) @@ -25,8 +26,9 @@ class TimeoutError(CancelledError): class EventLoop: - def __init__(self, len=42): - self.q = utimeq.utimeq(len) + def __init__(self, runq_len=16, waitq_len=16): + self.runq = ucollections.deque((), runq_len, True) + self.waitq = utimeq.utimeq(waitq_len) # Current task being run. Task is a top-level coroutine scheduled # in the event loop (sub-coroutines executed transparently by # yield from/await, event loop "doesn't see" them). 
@@ -41,18 +43,24 @@ def create_task(self, coro): # CPython asyncio incompatibility: we don't return Task object def call_soon(self, callback, *args): - self.call_at_(self.time(), callback, args) + if __debug__ and DEBUG: + log.debug("Scheduling in runq: %s", (callback, args)) + self.runq.append(callback) + if not isinstance(callback, type_gen): + self.runq.append(args) def call_later(self, delay, callback, *args): self.call_at_(time.ticks_add(self.time(), int(delay * 1000)), callback, args) def call_later_ms(self, delay, callback, *args): + if not delay: + return self.call_soon(callback, *args) self.call_at_(time.ticks_add(self.time(), delay), callback, args) def call_at_(self, time, callback, args=()): if __debug__ and DEBUG: - log.debug("Scheduling %s", (time, callback, args)) - self.q.push(time, callback, args) + log.debug("Scheduling in waitq: %s", (time, callback, args)) + self.waitq.push(time, callback, args) def wait(self, delay): # Default wait implementation, to be overriden in subclasses @@ -64,45 +72,45 @@ def wait(self, delay): def run_forever(self): cur_task = [0, 0, 0] while True: - if self.q: - # wait() may finish prematurely due to I/O completion, - # and schedule new, earlier than before tasks to run. 
- while 1: - t = self.q.peektime() - tnow = self.time() - delay = time.ticks_diff(t, tnow) - if delay < 0: - delay = 0 - # Always call wait(), to give a chance to I/O scheduling - self.wait(delay) - if delay == 0: - break - - self.q.pop(cur_task) - t = cur_task[0] - cb = cur_task[1] - args = cur_task[2] + # Expire entries in waitq and move them to runq + tnow = self.time() + while self.waitq: + t = self.waitq.peektime() + delay = time.ticks_diff(t, tnow) + if delay > 0: + break + self.waitq.pop(cur_task) + if __debug__ and DEBUG: + log.debug("Moving from waitq to runq: %s", cur_task[1]) + self.call_soon(cur_task[1], *cur_task[2]) + + # Process runq + l = len(self.runq) + if __debug__ and DEBUG: + log.debug("Entries in runq: %d", l) + while l: + cb = self.runq.popleft() + l -= 1 + args = () + if not isinstance(cb, type_gen): + args = self.runq.popleft() + l -= 1 + if __debug__ and DEBUG: + log.info("Next callback to run: %s", (cb, args)) + cb(*args) + continue + if __debug__ and DEBUG: - log.debug("Next coroutine to run: %s", (t, cb, args)) + log.info("Next coroutine to run: %s", (cb, args)) self.cur_task = cb -# __main__.mem_info() - else: - self.wait(-1) - # Assuming IO completion scheduled some tasks - continue - if callable(cb): - cb(*args) - else: delay = 0 try: - if __debug__ and DEBUG: - log.debug("Coroutine %s send args: %s", cb, args) - if args == (): + if args is (): ret = next(cb) else: ret = cb.send(*args) if __debug__ and DEBUG: - log.debug("Coroutine %s yield result: %s", cb, ret) + log.info("Coroutine %s yield result: %s", cb, ret) if isinstance(ret, SysCall1): arg = ret.arg if isinstance(ret, SleepMs): @@ -147,7 +155,22 @@ def run_forever(self): # Currently all syscalls don't return anything, so we don't # need to feed anything to the next invocation of coroutine. # If that changes, need to pass that value below. 
- self.call_later_ms(delay, cb) + if delay: + self.call_later_ms(delay, cb) + else: + self.call_soon(cb) + + # Wait until next waitq task or I/O availability + delay = 0 + if not self.runq: + delay = -1 + if self.waitq: + tnow = self.time() + t = self.waitq.peektime() + delay = time.ticks_diff(t, tnow) + if delay < 0: + delay = 0 + self.wait(delay) def run_until_complete(self, coro): def _run_and_stop(): @@ -195,10 +218,10 @@ class IOWriteDone(SysCall1): _event_loop = None _event_loop_class = EventLoop -def get_event_loop(len=42): +def get_event_loop(runq_len=16, waitq_len=16): global _event_loop if _event_loop is None: - _event_loop = _event_loop_class(len) + _event_loop = _event_loop_class(runq_len, waitq_len) return _event_loop def sleep(secs): From 9fe2adb9402e56e088abb95d649bd8c0e6a1a310 Mon Sep 17 00:00:00 2001 From: Paul Sokolovsky Date: Wed, 7 Feb 2018 00:07:12 +0200 Subject: [PATCH 15/53] uasyncio.core: test_full_wait: Update for runq/waitq refactor. --- uasyncio.core/test_full_wait.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/uasyncio.core/test_full_wait.py b/uasyncio.core/test_full_wait.py index 8954a9151..17af6f26d 100644 --- a/uasyncio.core/test_full_wait.py +++ b/uasyncio.core/test_full_wait.py @@ -47,4 +47,4 @@ def cb_2nd(): print(loop.msgs) # .wait() is now called on each loop iteration, and for our mock case, it means that # at the time of running, self.time() will be skewed by 100 virtual time units. -assert loop.msgs == ['I should be run first, time: 200', 'I should be run second, time: 600'], str(loop.msgs) +assert loop.msgs == ['I should be run first, time: 100', 'I should be run second, time: 500'], str(loop.msgs) From 6722d725afad5035e7eeaaa94578086cd0bf38ba Mon Sep 17 00:00:00 2001 From: Paul Sokolovsky Date: Wed, 7 Feb 2018 00:11:31 +0200 Subject: [PATCH 16/53] uasyncio: Update __init__() to take runq_len & waitq_len params. 
--- uasyncio/uasyncio/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/uasyncio/uasyncio/__init__.py b/uasyncio/uasyncio/__init__.py index e26757a25..41fa57259 100644 --- a/uasyncio/uasyncio/__init__.py +++ b/uasyncio/uasyncio/__init__.py @@ -17,8 +17,8 @@ def set_debug(val): class PollEventLoop(EventLoop): - def __init__(self, len=42): - EventLoop.__init__(self, len) + def __init__(self, runq_len=16, waitq_len=16): + EventLoop.__init__(self, runq_len, waitq_len) self.poller = select.poll() self.objmap = {} From fb293feb5a663089f74ea15cf0ec18956ad65427 Mon Sep 17 00:00:00 2001 From: Paul Sokolovsky Date: Wed, 7 Feb 2018 00:16:03 +0200 Subject: [PATCH 17/53] uasyncio.core: Release 2.0. --- uasyncio.core/metadata.txt | 2 +- uasyncio.core/setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/uasyncio.core/metadata.txt b/uasyncio.core/metadata.txt index 6d40c7a33..21d581668 100644 --- a/uasyncio.core/metadata.txt +++ b/uasyncio.core/metadata.txt @@ -1,6 +1,6 @@ srctype = micropython-lib type = package -version = 1.7.2 +version = 2.0 author = Paul Sokolovsky desc = Lightweight asyncio-like library for MicroPython, built around native Python coroutines. (Core event loop). long_desc = Lightweight asyncio-like library for MicroPython, built around native Python coroutines. (Core event loop). diff --git a/uasyncio.core/setup.py b/uasyncio.core/setup.py index 3235a6dcc..d7c468099 100644 --- a/uasyncio.core/setup.py +++ b/uasyncio.core/setup.py @@ -7,7 +7,7 @@ import sdist_upip setup(name='micropython-uasyncio.core', - version='1.7.2', + version='2.0', description='Lightweight asyncio-like library for MicroPython, built around native Python coroutines. (Core event loop).', long_description='Lightweight asyncio-like library for MicroPython, built around native Python coroutines. 
(Core event loop).', url='https://github.com/micropython/micropython-lib', From 8079e827f7265a7ce2a381acd9436255676ae79c Mon Sep 17 00:00:00 2001 From: Paul Sokolovsky Date: Wed, 7 Feb 2018 00:18:20 +0200 Subject: [PATCH 18/53] uasyncio: Release 2.0. --- uasyncio/metadata.txt | 2 +- uasyncio/setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/uasyncio/metadata.txt b/uasyncio/metadata.txt index 68ceb4b92..c0cbd68bf 100644 --- a/uasyncio/metadata.txt +++ b/uasyncio/metadata.txt @@ -1,6 +1,6 @@ srctype = micropython-lib type = package -version = 1.4.2 +version = 2.0 author = Paul Sokolovsky desc = Lightweight asyncio-like library for MicroPython, built around native Python coroutines. long_desc = README.rst diff --git a/uasyncio/setup.py b/uasyncio/setup.py index 6dc73ce43..8bb6fa91b 100644 --- a/uasyncio/setup.py +++ b/uasyncio/setup.py @@ -7,7 +7,7 @@ import sdist_upip setup(name='micropython-uasyncio', - version='1.4.2', + version='2.0', description='Lightweight asyncio-like library for MicroPython, built around native Python coroutines.', long_description=open('README.rst').read(), url='https://github.com/micropython/micropython-lib', From 16273dfa0c16c83ee80617758ac4bbe6dbfb9366 Mon Sep 17 00:00:00 2001 From: Paul Sokolovsky Date: Thu, 22 Feb 2018 10:37:22 +0200 Subject: [PATCH 19/53] uasyncio.udp: Remove optional flags value in a call to usocket.sendto(). 
--- uasyncio.udp/uasyncio/udp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/uasyncio.udp/uasyncio/udp.py b/uasyncio.udp/uasyncio/udp.py index dfa4f879d..5987bf7d2 100644 --- a/uasyncio.udp/uasyncio/udp.py +++ b/uasyncio.udp/uasyncio/udp.py @@ -45,7 +45,7 @@ def recvfrom(s, n): def sendto(s, buf, addr=None): while 1: - res = s.sendto(buf, 0, addr) + res = s.sendto(buf, addr) #print("send res:", res) if res == len(buf): return From f1355c0472037fc9c40f22dca38e51baba28a1a8 Mon Sep 17 00:00:00 2001 From: Paul Sokolovsky Date: Thu, 22 Feb 2018 10:38:17 +0200 Subject: [PATCH 20/53] uasyncio.udp: Release 0.1.1. --- uasyncio.udp/metadata.txt | 2 +- uasyncio.udp/setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/uasyncio.udp/metadata.txt b/uasyncio.udp/metadata.txt index 0230fb707..c791cef1b 100644 --- a/uasyncio.udp/metadata.txt +++ b/uasyncio.udp/metadata.txt @@ -1,6 +1,6 @@ srctype = micropython-lib type = package -version = 0.1 +version = 0.1.1 author = Paul Sokolovsky desc = UDP support for MicroPython's uasyncio depends = uasyncio diff --git a/uasyncio.udp/setup.py b/uasyncio.udp/setup.py index 3d8335151..95ce6a027 100644 --- a/uasyncio.udp/setup.py +++ b/uasyncio.udp/setup.py @@ -7,7 +7,7 @@ import sdist_upip setup(name='micropython-uasyncio.udp', - version='0.1', + version='0.1.1', description="UDP support for MicroPython's uasyncio", long_description="This is a module reimplemented specifically for MicroPython standard library,\nwith efficient and lean design in mind. Note that this module is likely work\nin progress and likely supports just a subset of CPython's corresponding\nmodule. 
Please help with the development if you are interested in this\nmodule.", url='https://github.com/micropython/micropython-lib', From 39be24f6e65c6a7175be289ed8c3cff8ca80bd1d Mon Sep 17 00:00:00 2001 From: Konstantin Belyalov Date: Wed, 21 Feb 2018 19:20:12 -0800 Subject: [PATCH 21/53] unittest: Exit with non zero code in case of failures. Fixing #259 --- unittest/unittest.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/unittest/unittest.py b/unittest/unittest.py index 4d5109380..0361c8648 100644 --- a/unittest/unittest.py +++ b/unittest/unittest.py @@ -1,3 +1,6 @@ +import sys + + class SkipTest(Exception): pass @@ -217,3 +220,5 @@ def test_cases(m): suite.addTest(c) runner = TestRunner() result = runner.run(suite) + # Terminate with non zero return code in case of failures + sys.exit(result.failuresNum > 0) From 12c66ad0e55acbbc3d187451203b93aa1eb82fce Mon Sep 17 00:00:00 2001 From: Paul Sokolovsky Date: Sat, 24 Feb 2018 17:42:21 +0200 Subject: [PATCH 22/53] unittest: Release 0.3.2. --- unittest/metadata.txt | 2 +- unittest/setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/unittest/metadata.txt b/unittest/metadata.txt index 896a97819..f3c23ccee 100644 --- a/unittest/metadata.txt +++ b/unittest/metadata.txt @@ -1,3 +1,3 @@ srctype = micropython-lib type = module -version = 0.3.1 +version = 0.3.2 diff --git a/unittest/setup.py b/unittest/setup.py index f328b2899..2b9990eb6 100644 --- a/unittest/setup.py +++ b/unittest/setup.py @@ -7,7 +7,7 @@ import sdist_upip setup(name='micropython-unittest', - version='0.3.1', + version='0.3.2', description='unittest module for MicroPython', long_description="This is a module reimplemented specifically for MicroPython standard library,\nwith efficient and lean design in mind. Note that this module is likely work\nin progress and likely supports just a subset of CPython's corresponding\nmodule. 
Please help with the development if you are interested in this\nmodule.", url='https://github.com/micropython/micropython-lib', From 4a6377071fe8d121763ff1cbc9dee1edbd1531d1 Mon Sep 17 00:00:00 2001 From: Paul Sokolovsky Date: Sat, 24 Feb 2018 19:57:46 +0200 Subject: [PATCH 23/53] logging: Some performance and memory use optimizations. --- logging/logging.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/logging/logging.py b/logging/logging.py index 1c3ef0d84..1037194f2 100644 --- a/logging/logging.py +++ b/logging/logging.py @@ -19,18 +19,24 @@ class Logger: + level = NOTSET + def __init__(self, name): - self.level = NOTSET self.name = name def _level_str(self, level): - if level in _level_dict: - return _level_dict[level] - return "LVL" + str(level) + l = _level_dict.get(level) + if l is not None: + return l + return "LVL%s" % level def log(self, level, msg, *args): if level >= (self.level or _level): - print(("%s:%s:" + msg) % ((self._level_str(level), self.name) + args), file=_stream) + _stream.write("%s:%s:" % (self._level_str(level), self.name)) + if not args: + print(msg, file=_stream) + else: + print(msg % args, file=_stream) def debug(self, msg, *args): self.log(DEBUG, msg, *args) From c6441922b56a343a504e288f470b1f59c6ce2758 Mon Sep 17 00:00:00 2001 From: Paul Sokolovsky Date: Sat, 24 Feb 2018 20:10:28 +0200 Subject: [PATCH 24/53] logging: Implement isEnabledFor(level) method. 
--- logging/logging.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/logging/logging.py b/logging/logging.py index 1037194f2..8bb17298c 100644 --- a/logging/logging.py +++ b/logging/logging.py @@ -30,6 +30,9 @@ def _level_str(self, level): return l return "LVL%s" % level + def isEnabledFor(self, level): + return level >= (self.level or _level) + def log(self, level, msg, *args): if level >= (self.level or _level): _stream.write("%s:%s:" % (self._level_str(level), self.name)) From 5312e9ee0f05f0d17e1ca307ff047a847ebdbd38 Mon Sep 17 00:00:00 2001 From: Paul Sokolovsky Date: Sat, 24 Feb 2018 20:11:05 +0200 Subject: [PATCH 25/53] logging: example_logging: Add more testcases. --- logging/example_logging.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/logging/example_logging.py b/logging/example_logging.py index 0fefb8898..c1dabf31e 100644 --- a/logging/example_logging.py +++ b/logging/example_logging.py @@ -4,3 +4,7 @@ log = logging.getLogger("test") log.debug("Test message: %d(%s)", 100, "foobar") log.info("Test message2: %d(%s)", 100, "foobar") +log.warning("Test message3: %d(%s)") +log.error("Test message4") +log.critical("Test message5") +logging.info("Test message6") From 0bfed82a28b32240d1c81bdefe005f7595bc9c73 Mon Sep 17 00:00:00 2001 From: Paul Sokolovsky Date: Sat, 24 Feb 2018 20:12:17 +0200 Subject: [PATCH 26/53] logging: Release 0.2. 
--- logging/metadata.txt | 2 +- logging/setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/logging/metadata.txt b/logging/metadata.txt index c14869284..a984e65fe 100644 --- a/logging/metadata.txt +++ b/logging/metadata.txt @@ -1,3 +1,3 @@ srctype = micropython-lib type = module -version = 0.1.3 +version = 0.2 diff --git a/logging/setup.py b/logging/setup.py index 20da4635b..ea689d051 100644 --- a/logging/setup.py +++ b/logging/setup.py @@ -7,7 +7,7 @@ import sdist_upip setup(name='micropython-logging', - version='0.1.3', + version='0.2', description='logging module for MicroPython', long_description="This is a module reimplemented specifically for MicroPython standard library,\nwith efficient and lean design in mind. Note that this module is likely work\nin progress and likely supports just a subset of CPython's corresponding\nmodule. Please help with the development if you are interested in this\nmodule.", url='https://github.com/micropython/micropython-lib', From 55131db2c00a82c3c88bcc8bdf357a96cef8686c Mon Sep 17 00:00:00 2001 From: Paul Sokolovsky Date: Sun, 25 Feb 2018 10:37:33 +0200 Subject: [PATCH 27/53] logging: Add setLevel() method. --- logging/logging.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/logging/logging.py b/logging/logging.py index 8bb17298c..0e1b7f04e 100644 --- a/logging/logging.py +++ b/logging/logging.py @@ -30,6 +30,9 @@ def _level_str(self, level): return l return "LVL%s" % level + def setLevel(self, level): + self.level = level + def isEnabledFor(self, level): return level >= (self.level or _level) From a06b02d34806d22c118b1825e973a9719b3b9c1b Mon Sep 17 00:00:00 2001 From: Paul Sokolovsky Date: Sun, 25 Feb 2018 10:38:52 +0200 Subject: [PATCH 28/53] logging: Add exc() and exception() methods. Non-standard exc() method accepts exception instance to log as a parameter. exception() just uses sys.exc_info(). 
--- logging/logging.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/logging/logging.py b/logging/logging.py index 0e1b7f04e..cea2de031 100644 --- a/logging/logging.py +++ b/logging/logging.py @@ -59,6 +59,13 @@ def error(self, msg, *args): def critical(self, msg, *args): self.log(CRITICAL, msg, *args) + def exc(self, e, msg, *args): + self.log(ERROR, msg, *args) + sys.print_exception(e, _stream) + + def exception(self, msg, *args): + self.exc(sys.exc_info()[1], msg, *args) + _level = INFO _loggers = {} From b77c0dcec24100f246f61d4e526a7e29709f1ca5 Mon Sep 17 00:00:00 2001 From: Paul Sokolovsky Date: Sun, 25 Feb 2018 10:40:35 +0200 Subject: [PATCH 29/53] logging: example_logging: Add testcase for exception(). --- logging/example_logging.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/logging/example_logging.py b/logging/example_logging.py index c1dabf31e..2ba2f1a18 100644 --- a/logging/example_logging.py +++ b/logging/example_logging.py @@ -8,3 +8,8 @@ log.error("Test message4") log.critical("Test message5") logging.info("Test message6") + +try: + 1/0 +except: + log.exception("Some trouble (%s)", "expected") From 8273bb89eb3e6a917e750404868efaafe05e10f3 Mon Sep 17 00:00:00 2001 From: Paul Sokolovsky Date: Sun, 25 Feb 2018 10:43:04 +0200 Subject: [PATCH 30/53] logging: Release 0.3. 
--- logging/metadata.txt | 2 +- logging/setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/logging/metadata.txt b/logging/metadata.txt index a984e65fe..a1ff78f65 100644 --- a/logging/metadata.txt +++ b/logging/metadata.txt @@ -1,3 +1,3 @@ srctype = micropython-lib type = module -version = 0.2 +version = 0.3 diff --git a/logging/setup.py b/logging/setup.py index ea689d051..ea7f3eb44 100644 --- a/logging/setup.py +++ b/logging/setup.py @@ -7,7 +7,7 @@ import sdist_upip setup(name='micropython-logging', - version='0.2', + version='0.3', description='logging module for MicroPython', long_description="This is a module reimplemented specifically for MicroPython standard library,\nwith efficient and lean design in mind. Note that this module is likely work\nin progress and likely supports just a subset of CPython's corresponding\nmodule. Please help with the development if you are interested in this\nmodule.", url='https://github.com/micropython/micropython-lib', From a288c7d101e6f9da4143e5a64abba5864a2209d4 Mon Sep 17 00:00:00 2001 From: Dustin Ingram Date: Tue, 17 Apr 2018 15:34:03 -0500 Subject: [PATCH 31/53] upip: Use new JSON API pointing to pypi.org. So upip doesn't have to follow redirects. --- upip/upip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/upip/upip.py b/upip/upip.py index 329d3d2e2..a400c3174 100644 --- a/upip/upip.py +++ b/upip/upip.py @@ -156,7 +156,7 @@ def url_open(url): def get_pkg_metadata(name): - f = url_open("https://pypi.python.org/pypi/%s/json" % name) + f = url_open("https://pypi.org/pypi/%s/json" % name) try: return json.load(f) finally: From b7a3d263c148dac99ddbbf7e4f416171ceccbfb1 Mon Sep 17 00:00:00 2001 From: Dustin Ingram Date: Tue, 17 Apr 2018 15:34:46 -0500 Subject: [PATCH 32/53] upip: Fix upip bootstrap script to use pypi.org. 
--- upip/bootstrap_upip.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/upip/bootstrap_upip.sh b/upip/bootstrap_upip.sh index 35446b9f2..9692f450c 100755 --- a/upip/bootstrap_upip.sh +++ b/upip/bootstrap_upip.sh @@ -9,7 +9,7 @@ fi # Remove any stale old version rm -rf micropython-upip-* -wget -nd -r -l1 https://pypi.python.org/pypi/micropython-upip/ --accept-regex ".*pypi.python.org/packages/source/.*.gz" --reject=html +wget -nd -rH -l1 -D files.pythonhosted.org https://pypi.org/project/micropython-upip/ --reject=html tar xfz micropython-upip-*.tar.gz mkdir -p ~/.micropython/lib/ From a5eaaabcfc3e42c04f4466a0fceccb1f097486d0 Mon Sep 17 00:00:00 2001 From: Damien George Date: Mon, 23 Apr 2018 16:15:00 +1000 Subject: [PATCH 33/53] upip: Release 1.2.4. Change PyPI URL to pypi.org. --- upip/metadata.txt | 2 +- upip/setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/upip/metadata.txt b/upip/metadata.txt index 690ecf776..95d03c03d 100644 --- a/upip/metadata.txt +++ b/upip/metadata.txt @@ -1,6 +1,6 @@ srctype = micropython-lib type = module -version = 1.2.3 +version = 1.2.4 author = Paul Sokolovsky extra_modules = upip_utarfile desc = Simple package manager for MicroPython. diff --git a/upip/setup.py b/upip/setup.py index 59b8fdc8c..3fb55af9e 100644 --- a/upip/setup.py +++ b/upip/setup.py @@ -7,7 +7,7 @@ import sdist_upip setup(name='micropython-upip', - version='1.2.3', + version='1.2.4', description='Simple package manager for MicroPython.', long_description='Simple self-hosted package manager for MicroPython (requires usocket, ussl, uzlib, uctypes builtin modules). 
Compatible only with packages without custom setup.py code.', url='https://github.com/micropython/micropython-lib', From 110c57c9e5bc13c4b073295c41dd9fb45eb72957 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Wed, 2 May 2018 11:17:28 +0200 Subject: [PATCH 34/53] uasyncio: do not raise an exception when trying to remove a non-existing reader --- uasyncio/uasyncio/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/uasyncio/uasyncio/__init__.py b/uasyncio/uasyncio/__init__.py index 41fa57259..bcbb606f3 100644 --- a/uasyncio/uasyncio/__init__.py +++ b/uasyncio/uasyncio/__init__.py @@ -36,7 +36,7 @@ def remove_reader(self, sock): if DEBUG and __debug__: log.debug("remove_reader(%s)", sock) self.poller.unregister(sock) - del self.objmap[id(sock)] + self.objmap.pop(id(sock), None) def add_writer(self, sock, cb, *args): if DEBUG and __debug__: From 60b4417038527858ba2f9e33d4429f89f9bd15e1 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Tue, 17 Jul 2018 17:51:45 +0200 Subject: [PATCH 35/53] uasyncio: close server socket on server termination This is needed to support cancelling the server task --- uasyncio/uasyncio/__init__.py | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/uasyncio/uasyncio/__init__.py b/uasyncio/uasyncio/__init__.py index bcbb606f3..43ee5eaba 100644 --- a/uasyncio/uasyncio/__init__.py +++ b/uasyncio/uasyncio/__init__.py @@ -235,23 +235,26 @@ def start_server(client_coro, host, port, backlog=10): ai = _socket.getaddrinfo(host, port, 0, _socket.SOCK_STREAM) ai = ai[0] s = _socket.socket(ai[0], ai[1], ai[2]) - s.setblocking(False) + try: + s.setblocking(False) - s.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, 1) - s.bind(ai[-1]) - s.listen(backlog) - while True: - if DEBUG and __debug__: - log.debug("start_server: Before accept") - yield IORead(s) - if DEBUG and __debug__: - log.debug("start_server: After iowait") - s2, client_addr = s.accept() - 
s2.setblocking(False) - if DEBUG and __debug__: - log.debug("start_server: After accept: %s", s2) - extra = {"peername": client_addr} - yield client_coro(StreamReader(s2), StreamWriter(s2, extra)) + s.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, 1) + s.bind(ai[-1]) + s.listen(backlog) + while True: + if DEBUG and __debug__: + log.debug("start_server: Before accept") + yield IORead(s) + if DEBUG and __debug__: + log.debug("start_server: After iowait") + s2, client_addr = s.accept() + s2.setblocking(False) + if DEBUG and __debug__: + log.debug("start_server: After accept: %s", s2) + extra = {"peername": client_addr} + yield client_coro(StreamReader(s2), StreamWriter(s2, extra)) + finally: + s.close() import uasyncio.core From 4125dcd717dd2a3357c972cb9d4c6752e811efb4 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Wed, 27 Nov 2019 12:04:48 +0100 Subject: [PATCH 36/53] Revert "uasyncio: WIP lots of fixes and changes needs splitting and documenting" This reverts commit 08ccfa807e89db03c518e287ac91f8dd1ed4ef91. 
--- uasyncio.core/uasyncio/core.py | 33 +++++------- uasyncio/uasyncio/__init__.py | 96 +++++++++++++--------------------- 2 files changed, 48 insertions(+), 81 deletions(-) diff --git a/uasyncio.core/uasyncio/core.py b/uasyncio.core/uasyncio/core.py index 84994944b..236cdfc1f 100644 --- a/uasyncio.core/uasyncio/core.py +++ b/uasyncio.core/uasyncio/core.py @@ -36,7 +36,7 @@ def call_at(self, time, callback, *args, exc=None): # Including self.cnt is a workaround per heapq docs if __debug__: log.debug("Scheduling %s", (time, self.cnt, callback, args, exc)) - heapq.heappush(self.q, (time, self.cnt, callback, args, exc, False)) + heapq.heappush(self.q, (time, self.cnt, callback, args, exc)) # print(self.q) self.cnt += 1 @@ -50,30 +50,24 @@ def wait(self, delay): def cancel(self, callback, exc = CancelledError): _id = id(callback) for idx, item in enumerate(self.q): - t, cnt, cb, args, _exc = item + t, cnt, cb, args, exc = item if id(cb) != _id: continue - if __debug__: - log.debug("Setting discard flag on: %s at index %d", (t, cnt, cb, args, _exc), idx) - self.q[idx] = t, cnt, cb, args, _exc, True + del self.q[idx] + heapq.heapify(self.q) self.call_at(0, cb, *args, exc=exc) + return self.remove_polled_cb(callback) def run_forever(self): while True: if self.q: - tnow = self.time() - if __debug__: - log.debug('*'*20+' sched step start at %s, num tasks in queue %d', tnow, len(self.q)) - t, cnt, cb, args, exc, discard = heapq.heappop(self.q) - delay = t - tnow + t, cnt, cb, args, exc = heapq.heappop(self.q) if __debug__: - log.debug("Next coroutine to run in %s: %s", delay, (t, cnt, cb, args, exc)) - if discard: - if __debug__: - log.debug("Discarding: %s", (t, cnt, cb, args, exc, discard)) - continue + log.debug("Next coroutine to run: %s", (t, cnt, cb, args, exc)) # __main__.mem_info() + tnow = self.time() + delay = t - tnow if delay > 0 and not exc: self.call_at(t, cb, *args) self.wait(delay) @@ -83,9 +77,8 @@ def run_forever(self): # Assuming IO completion scheduled 
some tasks continue # cancelled callbacks aren't called and nor rescheduled - if callable(cb): - if not exc: - cb(*args) + if callable(cb) and not exc: + cb(*args) else: delay = 0 try: @@ -118,9 +111,9 @@ def run_forever(self): self.add_writer(arg.fileno(), cb) continue elif isinstance(ret, IOReadDone): - self.remove_reader(arg.fileno(), cb) + self.remove_reader(arg.fileno()) elif isinstance(ret, IOWriteDone): - self.remove_writer(arg.fileno(), cb) + self.remove_writer(arg.fileno()) elif isinstance(ret, StopLoop): return arg elif isinstance(ret, type_gen): diff --git a/uasyncio/uasyncio/__init__.py b/uasyncio/uasyncio/__init__.py index 39f13b45a..244598e1e 100644 --- a/uasyncio/uasyncio/__init__.py +++ b/uasyncio/uasyncio/__init__.py @@ -11,64 +11,55 @@ def __init__(self): self.poller = select.poll() self.objmap = {} - def _unregister_fd(self, fd): - self.objmap.pop(fd, None) - try: - self.poller.unregister(fd) - except OSError as e: - if e.args[0] != errno.ENOENT: - raise - - def remove_polled_cb(self, cb): - _id = id(cb) - for fd, cbs in self.objmap.items(): - cbs.pop(id(cb), None) - if not cbs: - self._unregister_fd(fd) + def remove_polled_cb(self, _id): + for fd, cb in self.objmap.items(): + if id(cb) == _id: + self.poller.unregister(fd) + break def add_reader(self, fd, cb, *args): if __debug__: log.debug("add_reader%s", (fd, cb, args)) - cbs = self.objmap.setdefault(fd, {}) - self.poller.register(fd, select.POLLIN) if args: - cbs[id(cb)] = (cb, args) + self.poller.register(fd, select.POLLIN) + self.objmap[fd] = (cb, args) else: - cbs[id(cb)] = (cb, None) + self.poller.register(fd, select.POLLIN) + self.objmap[fd] = cb - def remove_reader(self, fd, cb): + def remove_reader(self, fd): if __debug__: - log.debug("remove_reader(%s)", (fd, cb)) - cbs = self.objmap.get(fd, {}) - cbs.pop(id(cb), None) - if not cbs: - self._unregister_fd(fd) + log.debug("remove_reader(%s)", fd) + self.poller.unregister(fd) + del self.objmap[fd] def add_writer(self, fd, cb, *args): if 
__debug__: log.debug("add_writer%s", (fd, cb, args)) - cbs = self.objmap.setdefault(fd, {}) - self.poller.register(fd, select.POLLOUT) if args: - cbs[id(cb)] = (cb, args) + self.poller.register(fd, select.POLLOUT) + self.objmap[fd] = (cb, args) else: - cbs[id(cb)] = (cb, None) + self.poller.register(fd, select.POLLOUT) + self.objmap[fd] = cb - def remove_writer(self, fd, cb): + def remove_writer(self, fd): if __debug__: log.debug("remove_writer(%s)", fd) - cbs = self.objmap.get(fd, {}) - cbs.pop(id(cb), None) - if not cbs: - self._unregister_fd(fd) + try: + self.poller.unregister(fd) + self.objmap.pop(fd, None) + except OSError as e: + # StreamWriter.awrite() first tries to write to an fd, + # and if that succeeds, yield IOWrite may never be called + # for that fd, and it will never be added to poller. So, + # ignore such error. + if e.args[0] != errno.ENOENT: + raise def wait(self, delay): if __debug__: - log.debug("epoll.wait(%s)", delay) - for fd, cbs in self.objmap.items(): - for cb, args in cbs.values(): - log.debug("epoll.registered(%d) %s", fd, (cb, args)) - + log.debug("epoll.wait(%d)", delay) # We need one-shot behavior (second arg of 1 to .poll()) if delay == -1: res = self.poller.poll(-1, 1) @@ -76,30 +67,13 @@ def wait(self, delay): res = self.poller.poll(int(delay * 1000), 1) #log.debug("epoll result: %s", res) for fd, ev in res: - # Remove the registered callbacks dictionary from its parent - # so when callbacks are invoked they can add their registrations - # to a fresh dictionary. 
- cbs = self.objmap.pop(fd, {}) - if not cbs: - log.error("Event %d on fd %r but no callback registered", ev, fd) - continue + cb = self.objmap[fd] if __debug__: - s = '\n'.join(str(v) for v in cbs.values()) - log.debug("Matching IO callbacks for %r:\n%s", (fd, ev), s) - while cbs: - _id, data = cbs.popitem() - cb, args = data - if args is None: - if __debug__: - log.debug("Scheduling IO coro: %r", (fd, ev, cb)) - self.call_soon(cb) - else: - if __debug__: - log.debug("Calling IO callback: %r", (fd, ev, cb, args)) - cb(*args) - # If no callback registered an event for this fd unregister it - if not self.objmap.get(fd, None): - self._unregister_fd(fd) + log.debug("Calling IO callback: %r", cb) + if isinstance(cb, tuple): + cb[0](*cb[1]) + else: + self.call_soon(cb) class StreamReader: From 5675c06e41dcccdcb733b63c72c8b25a858fe1e0 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Wed, 27 Nov 2019 12:04:57 +0100 Subject: [PATCH 37/53] Revert "uasyncio: add support for task cancellation" This reverts commit fd1a13dc0a9b86dbdbdf4a03353a010c10fa1ba4. 
--- uasyncio.core/uasyncio/core.py | 39 ++++++++-------------------------- uasyncio/uasyncio/__init__.py | 6 ------ 2 files changed, 9 insertions(+), 36 deletions(-) diff --git a/uasyncio.core/uasyncio/core.py b/uasyncio.core/uasyncio/core.py index 236cdfc1f..2286383c7 100644 --- a/uasyncio.core/uasyncio/core.py +++ b/uasyncio.core/uasyncio/core.py @@ -5,8 +5,6 @@ import uheapq as heapq import logging -class CancelledError(Exception): - pass log = logging.getLogger("asyncio") @@ -32,11 +30,11 @@ def call_soon(self, callback, *args): def call_later(self, delay, callback, *args): self.call_at(self.time() + delay, callback, *args) - def call_at(self, time, callback, *args, exc=None): + def call_at(self, time, callback, *args): # Including self.cnt is a workaround per heapq docs if __debug__: - log.debug("Scheduling %s", (time, self.cnt, callback, args, exc)) - heapq.heappush(self.q, (time, self.cnt, callback, args, exc)) + log.debug("Scheduling %s", (time, self.cnt, callback, args)) + heapq.heappush(self.q, (time, self.cnt, callback, args)) # print(self.q) self.cnt += 1 @@ -47,28 +45,16 @@ def wait(self, delay): log.debug("Sleeping for: %s", delay) time.sleep(delay) - def cancel(self, callback, exc = CancelledError): - _id = id(callback) - for idx, item in enumerate(self.q): - t, cnt, cb, args, exc = item - if id(cb) != _id: - continue - del self.q[idx] - heapq.heapify(self.q) - self.call_at(0, cb, *args, exc=exc) - return - self.remove_polled_cb(callback) - def run_forever(self): while True: if self.q: - t, cnt, cb, args, exc = heapq.heappop(self.q) + t, cnt, cb, args = heapq.heappop(self.q) if __debug__: - log.debug("Next coroutine to run: %s", (t, cnt, cb, args, exc)) + log.debug("Next coroutine to run: %s", (t, cnt, cb, args)) # __main__.mem_info() tnow = self.time() delay = t - tnow - if delay > 0 and not exc: + if delay > 0: self.call_at(t, cb, *args) self.wait(delay) continue @@ -76,21 +62,14 @@ def run_forever(self): self.wait(-1) # Assuming IO completion 
scheduled some tasks continue - # cancelled callbacks aren't called and nor rescheduled - if callable(cb) and not exc: + if callable(cb): cb(*args) else: delay = 0 try: if __debug__: - log.debug("Coroutine %s send args: %s, %s", cb, args, exc) - if exc: - try: - ret = cb.throw(exc) - except exc: - # ret == None reschedules a canceled task, next round it should raise StopIteration - ret = None - elif args == (): + log.debug("Coroutine %s send args: %s", cb, args) + if args == (): ret = next(cb) else: ret = cb.send(*args) diff --git a/uasyncio/uasyncio/__init__.py b/uasyncio/uasyncio/__init__.py index 244598e1e..5d0c0e5c6 100644 --- a/uasyncio/uasyncio/__init__.py +++ b/uasyncio/uasyncio/__init__.py @@ -11,12 +11,6 @@ def __init__(self): self.poller = select.poll() self.objmap = {} - def remove_polled_cb(self, _id): - for fd, cb in self.objmap.items(): - if id(cb) == _id: - self.poller.unregister(fd) - break - def add_reader(self, fd, cb, *args): if __debug__: log.debug("add_reader%s", (fd, cb, args)) From e25fafe8a670065e18514b8116616a1669478d9f Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Wed, 27 Nov 2019 12:05:13 +0100 Subject: [PATCH 38/53] Revert "uasyncio.core: fix early return from wait()" This reverts commit 99bccc91d924fd3c271dc9e624a02f7b89ef0f4c. 
--- uasyncio.core/uasyncio/core.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/uasyncio.core/uasyncio/core.py b/uasyncio.core/uasyncio/core.py index 2286383c7..87d0e605b 100644 --- a/uasyncio.core/uasyncio/core.py +++ b/uasyncio.core/uasyncio/core.py @@ -55,9 +55,7 @@ def run_forever(self): tnow = self.time() delay = t - tnow if delay > 0: - self.call_at(t, cb, *args) self.wait(delay) - continue else: self.wait(-1) # Assuming IO completion scheduled some tasks From 45aebb1897af198d9de13e827162ca70efa87965 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Wed, 27 Nov 2019 12:05:33 +0100 Subject: [PATCH 39/53] Revert "uasyncio: fix calls to log.warning()" This reverts commit e79cfcba4d745b1881b96fb7eb149d77edee7f97. --- uasyncio/uasyncio/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/uasyncio/uasyncio/__init__.py b/uasyncio/uasyncio/__init__.py index 5d0c0e5c6..504f1e928 100644 --- a/uasyncio/uasyncio/__init__.py +++ b/uasyncio/uasyncio/__init__.py @@ -81,7 +81,7 @@ def read(self, n=-1): res = self.s.read(n) if res is not None: break - log.warning("Empty read") + log.warn("Empty read") if not res: yield IOReadDone(self.s) return res @@ -96,7 +96,7 @@ def readline(self): res = self.s.readline() if res is not None: break - log.warning("Empty read") + log.warn("Empty read") if not res: yield IOReadDone(self.s) if __debug__: From 8c73bf3c9f1a07d615bf7b1acd8fb101095b9e20 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Wed, 27 Nov 2019 12:06:45 +0100 Subject: [PATCH 40/53] Revert "itertools: add product() implementation" This reverts commit 2dbcd8cd64ba005420e73ab6c14e266ccdeda1cb. 
--- itertools/itertools.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/itertools/itertools.py b/itertools/itertools.py index eacad309b..5c00952ff 100644 --- a/itertools/itertools.py +++ b/itertools/itertools.py @@ -55,12 +55,3 @@ def tee(iterable, n=2): def starmap(function, iterable): for args in iterable: yield function(*args) - -def product(*args, repeat=1): - if not args: - yield () - else: - args = args*repeat - for a in args[0]: - for prod in product(*args[1:]): - yield (a,)+prod From 1f47a13aeb7d2d5b5082dd6bc662e049b3d94549 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Wed, 4 Dec 2019 17:55:30 +0100 Subject: [PATCH 41/53] itertools: add product() implementation --- itertools/itertools.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/itertools/itertools.py b/itertools/itertools.py index 2ff053472..41ceb39b0 100644 --- a/itertools/itertools.py +++ b/itertools/itertools.py @@ -66,3 +66,12 @@ def accumulate(iterable, func=lambda x, y: x + y): for element in it: acc = func(acc, element) yield acc + +def product(*args, repeat=1): + if not args: + yield () + else: + args = args*repeat + for a in args[0]: + for prod in product(*args[1:]): + yield (a,)+prod From 071e71987e28d12700f2e64008e4a07359c7e5d6 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Wed, 4 Dec 2019 23:49:45 +0100 Subject: [PATCH 42/53] uasyncio: restore uasyncio.__init__ and uasyncio.core from 25fb042 --- uasyncio.core/uasyncio/core.py | 282 ++++++++++-------------------- uasyncio/uasyncio/__init__.py | 309 +++++++++++++++------------------ 2 files changed, 232 insertions(+), 359 deletions(-) diff --git a/uasyncio.core/uasyncio/core.py b/uasyncio.core/uasyncio/core.py index 77fdb7a27..84994944b 100644 --- a/uasyncio.core/uasyncio/core.py +++ b/uasyncio.core/uasyncio/core.py @@ -1,176 +1,140 @@ -import utime as time -import utimeq -import ucollections - - -type_gen = type((lambda: (yield))()) - -DEBUG = 0 -log = None - -def set_debug(val): - global DEBUG, log - 
DEBUG = val - if val: - import logging - log = logging.getLogger("uasyncio.core") - +try: + import utime as time +except ImportError: + import time +import uheapq as heapq +import logging class CancelledError(Exception): pass +log = logging.getLogger("asyncio") -class TimeoutError(CancelledError): - pass - +type_gen = type((lambda: (yield))()) class EventLoop: - def __init__(self, runq_len=16, waitq_len=16): - self.runq = ucollections.deque((), runq_len, True) - self.waitq = utimeq.utimeq(waitq_len) - # Current task being run. Task is a top-level coroutine scheduled - # in the event loop (sub-coroutines executed transparently by - # yield from/await, event loop "doesn't see" them). - self.cur_task = None + def __init__(self): + self.q = [] + self.cnt = 0 def time(self): - return time.ticks_ms() + return time.time() def create_task(self, coro): # CPython 3.4.2 - self.call_later_ms(0, coro) + self.call_at(0, coro) # CPython asyncio incompatibility: we don't return Task object def call_soon(self, callback, *args): - if __debug__ and DEBUG: - log.debug("Scheduling in runq: %s", (callback, args)) - self.runq.append(callback) - if not isinstance(callback, type_gen): - self.runq.append(args) + self.call_at(self.time(), callback, *args) def call_later(self, delay, callback, *args): - self.call_at_(time.ticks_add(self.time(), int(delay * 1000)), callback, args) + self.call_at(self.time() + delay, callback, *args) - def call_later_ms(self, delay, callback, *args): - if not delay: - return self.call_soon(callback, *args) - self.call_at_(time.ticks_add(self.time(), delay), callback, args) - - def call_at_(self, time, callback, args=()): - if __debug__ and DEBUG: - log.debug("Scheduling in waitq: %s", (time, callback, args)) - self.waitq.push(time, callback, args) + def call_at(self, time, callback, *args, exc=None): + # Including self.cnt is a workaround per heapq docs + if __debug__: + log.debug("Scheduling %s", (time, self.cnt, callback, args, exc)) + heapq.heappush(self.q, 
(time, self.cnt, callback, args, exc, False)) +# print(self.q) + self.cnt += 1 def wait(self, delay): # Default wait implementation, to be overriden in subclasses # with IO scheduling - if __debug__ and DEBUG: + if __debug__: log.debug("Sleeping for: %s", delay) - time.sleep_ms(delay) + time.sleep(delay) + + def cancel(self, callback, exc = CancelledError): + _id = id(callback) + for idx, item in enumerate(self.q): + t, cnt, cb, args, _exc = item + if id(cb) != _id: + continue + if __debug__: + log.debug("Setting discard flag on: %s at index %d", (t, cnt, cb, args, _exc), idx) + self.q[idx] = t, cnt, cb, args, _exc, True + self.call_at(0, cb, *args, exc=exc) + self.remove_polled_cb(callback) def run_forever(self): - cur_task = [0, 0, 0] while True: - # Expire entries in waitq and move them to runq - tnow = self.time() - while self.waitq: - t = self.waitq.peektime() - delay = time.ticks_diff(t, tnow) - if delay > 0: - break - self.waitq.pop(cur_task) - if __debug__ and DEBUG: - log.debug("Moving from waitq to runq: %s", cur_task[1]) - self.call_soon(cur_task[1], *cur_task[2]) - - # Process runq - l = len(self.runq) - if __debug__ and DEBUG: - log.debug("Entries in runq: %d", l) - while l: - cb = self.runq.popleft() - l -= 1 - args = () - if not isinstance(cb, type_gen): - args = self.runq.popleft() - l -= 1 - if __debug__ and DEBUG: - log.info("Next callback to run: %s", (cb, args)) - cb(*args) + if self.q: + tnow = self.time() + if __debug__: + log.debug('*'*20+' sched step start at %s, num tasks in queue %d', tnow, len(self.q)) + t, cnt, cb, args, exc, discard = heapq.heappop(self.q) + delay = t - tnow + if __debug__: + log.debug("Next coroutine to run in %s: %s", delay, (t, cnt, cb, args, exc)) + if discard: + if __debug__: + log.debug("Discarding: %s", (t, cnt, cb, args, exc, discard)) continue - - if __debug__ and DEBUG: - log.info("Next coroutine to run: %s", (cb, args)) - self.cur_task = cb +# __main__.mem_info() + if delay > 0 and not exc: + self.call_at(t, 
cb, *args) + self.wait(delay) + continue + else: + self.wait(-1) + # Assuming IO completion scheduled some tasks + continue + # cancelled callbacks aren't called and nor rescheduled + if callable(cb): + if not exc: + cb(*args) + else: delay = 0 try: - if args is (): + if __debug__: + log.debug("Coroutine %s send args: %s, %s", cb, args, exc) + if exc: + try: + ret = cb.throw(exc) + except exc: + # ret == None reschedules a canceled task, next round it should raise StopIteration + ret = None + elif args == (): ret = next(cb) else: ret = cb.send(*args) - if __debug__ and DEBUG: - log.info("Coroutine %s yield result: %s", cb, ret) + if __debug__: + log.debug("Coroutine %s yield result: %s", cb, ret) if isinstance(ret, SysCall1): arg = ret.arg - if isinstance(ret, SleepMs): + if isinstance(ret, Sleep): delay = arg elif isinstance(ret, IORead): - cb.pend_throw(False) - self.add_reader(arg, cb) +# self.add_reader(ret.obj.fileno(), lambda self, c, f: self.call_soon(c, f), self, cb, ret.obj) +# self.add_reader(ret.obj.fileno(), lambda c, f: self.call_soon(c, f), cb, ret.obj) +# self.add_reader(arg.fileno(), lambda cb: self.call_soon(cb), cb) + self.add_reader(arg.fileno(), cb) continue elif isinstance(ret, IOWrite): - cb.pend_throw(False) - self.add_writer(arg, cb) +# self.add_writer(arg.fileno(), lambda cb: self.call_soon(cb), cb) + self.add_writer(arg.fileno(), cb) continue elif isinstance(ret, IOReadDone): - self.remove_reader(arg) + self.remove_reader(arg.fileno(), cb) elif isinstance(ret, IOWriteDone): - self.remove_writer(arg) + self.remove_writer(arg.fileno(), cb) elif isinstance(ret, StopLoop): return arg - else: - assert False, "Unknown syscall yielded: %r (of type %r)" % (ret, type(ret)) elif isinstance(ret, type_gen): self.call_soon(ret) - elif isinstance(ret, int): - # Delay - delay = ret elif ret is None: # Just reschedule pass - elif ret is False: - # Don't reschedule - continue else: assert False, "Unsupported coroutine yield value: %r (of type %r)" % (ret, 
type(ret)) except StopIteration as e: - if __debug__ and DEBUG: + if __debug__: log.debug("Coroutine finished: %s", cb) continue - except CancelledError as e: - if __debug__ and DEBUG: - log.debug("Coroutine cancelled: %s", cb) - continue - # Currently all syscalls don't return anything, so we don't - # need to feed anything to the next invocation of coroutine. - # If that changes, need to pass that value below. - if delay: - self.call_later_ms(delay, cb) - else: - self.call_soon(cb) - - # Wait until next waitq task or I/O availability - delay = 0 - if not self.runq: - delay = -1 - if self.waitq: - tnow = self.time() - t = self.waitq.peektime() - delay = time.ticks_diff(t, tnow) - if delay < 0: - delay = 0 - self.wait(delay) + self.call_later(delay, cb, *args) def run_until_complete(self, coro): def _run_and_stop(): @@ -179,9 +143,6 @@ def _run_and_stop(): self.call_soon(_run_and_stop()) self.run_forever() - def stop(self): - self.call_soon((lambda: (yield StopLoop(0)))()) - def close(self): pass @@ -200,6 +161,9 @@ class SysCall1(SysCall): def __init__(self, arg): self.arg = arg +class Sleep(SysCall1): + pass + class StopLoop(SysCall1): pass @@ -218,82 +182,14 @@ class IOWriteDone(SysCall1): _event_loop = None _event_loop_class = EventLoop -def get_event_loop(runq_len=16, waitq_len=16): +def get_event_loop(): global _event_loop if _event_loop is None: - _event_loop = _event_loop_class(runq_len, waitq_len) + _event_loop = _event_loop_class() return _event_loop def sleep(secs): - yield int(secs * 1000) - -# Implementation of sleep_ms awaitable with zero heap memory usage -class SleepMs(SysCall1): - - def __init__(self): - self.v = None - self.arg = None - - def __call__(self, arg): - self.v = arg - #print("__call__") - return self - - def __iter__(self): - #print("__iter__") - return self - - def __next__(self): - if self.v is not None: - #print("__next__ syscall enter") - self.arg = self.v - self.v = None - return self - #print("__next__ syscall exit") - 
_stop_iter.__traceback__ = None - raise _stop_iter - -_stop_iter = StopIteration() -sleep_ms = SleepMs() - - -def cancel(coro): - prev = coro.pend_throw(CancelledError()) - if prev is False: - _event_loop.call_soon(coro) - - -class TimeoutObj: - def __init__(self, coro): - self.coro = coro - - -def wait_for_ms(coro, timeout): - - def waiter(coro, timeout_obj): - res = yield from coro - if __debug__ and DEBUG: - log.debug("waiter: cancelling %s", timeout_obj) - timeout_obj.coro = None - return res - - def timeout_func(timeout_obj): - if timeout_obj.coro: - if __debug__ and DEBUG: - log.debug("timeout_func: cancelling %s", timeout_obj.coro) - prev = timeout_obj.coro.pend_throw(TimeoutError()) - #print("prev pend", prev) - if prev is False: - _event_loop.call_soon(timeout_obj.coro) - - timeout_obj = TimeoutObj(_event_loop.cur_task) - _event_loop.call_later_ms(timeout, timeout_func, timeout_obj) - return (yield from waiter(coro, timeout_obj)) - - -def wait_for(coro, timeout): - return wait_for_ms(coro, int(timeout * 1000)) - + yield Sleep(secs) def coroutine(f): return f diff --git a/uasyncio/uasyncio/__init__.py b/uasyncio/uasyncio/__init__.py index 43ee5eaba..39f13b45a 100644 --- a/uasyncio/uasyncio/__init__.py +++ b/uasyncio/uasyncio/__init__.py @@ -1,152 +1,146 @@ -import uerrno +import errno import uselect as select import usocket as _socket from uasyncio.core import * -DEBUG = 0 -log = None +class EpollEventLoop(EventLoop): -def set_debug(val): - global DEBUG, log - DEBUG = val - if val: - import logging - log = logging.getLogger("uasyncio") - - -class PollEventLoop(EventLoop): - - def __init__(self, runq_len=16, waitq_len=16): - EventLoop.__init__(self, runq_len, waitq_len) + def __init__(self): + EventLoop.__init__(self) self.poller = select.poll() self.objmap = {} - def add_reader(self, sock, cb, *args): - if DEBUG and __debug__: - log.debug("add_reader%s", (sock, cb, args)) + def _unregister_fd(self, fd): + self.objmap.pop(fd, None) + try: + 
self.poller.unregister(fd) + except OSError as e: + if e.args[0] != errno.ENOENT: + raise + + def remove_polled_cb(self, cb): + _id = id(cb) + for fd, cbs in self.objmap.items(): + cbs.pop(id(cb), None) + if not cbs: + self._unregister_fd(fd) + + def add_reader(self, fd, cb, *args): + if __debug__: + log.debug("add_reader%s", (fd, cb, args)) + cbs = self.objmap.setdefault(fd, {}) + self.poller.register(fd, select.POLLIN) if args: - self.poller.register(sock, select.POLLIN) - self.objmap[id(sock)] = (cb, args) + cbs[id(cb)] = (cb, args) else: - self.poller.register(sock, select.POLLIN) - self.objmap[id(sock)] = cb - - def remove_reader(self, sock): - if DEBUG and __debug__: - log.debug("remove_reader(%s)", sock) - self.poller.unregister(sock) - self.objmap.pop(id(sock), None) - - def add_writer(self, sock, cb, *args): - if DEBUG and __debug__: - log.debug("add_writer%s", (sock, cb, args)) + cbs[id(cb)] = (cb, None) + + def remove_reader(self, fd, cb): + if __debug__: + log.debug("remove_reader(%s)", (fd, cb)) + cbs = self.objmap.get(fd, {}) + cbs.pop(id(cb), None) + if not cbs: + self._unregister_fd(fd) + + def add_writer(self, fd, cb, *args): + if __debug__: + log.debug("add_writer%s", (fd, cb, args)) + cbs = self.objmap.setdefault(fd, {}) + self.poller.register(fd, select.POLLOUT) if args: - self.poller.register(sock, select.POLLOUT) - self.objmap[id(sock)] = (cb, args) + cbs[id(cb)] = (cb, args) else: - self.poller.register(sock, select.POLLOUT) - self.objmap[id(sock)] = cb + cbs[id(cb)] = (cb, None) - def remove_writer(self, sock): - if DEBUG and __debug__: - log.debug("remove_writer(%s)", sock) - try: - self.poller.unregister(sock) - self.objmap.pop(id(sock), None) - except OSError as e: - # StreamWriter.awrite() first tries to write to a socket, - # and if that succeeds, yield IOWrite may never be called - # for that socket, and it will never be added to poller. So, - # ignore such error. 
- if e.args[0] != uerrno.ENOENT: - raise + def remove_writer(self, fd, cb): + if __debug__: + log.debug("remove_writer(%s)", fd) + cbs = self.objmap.get(fd, {}) + cbs.pop(id(cb), None) + if not cbs: + self._unregister_fd(fd) def wait(self, delay): - if DEBUG and __debug__: - log.debug("poll.wait(%d)", delay) + if __debug__: + log.debug("epoll.wait(%s)", delay) + for fd, cbs in self.objmap.items(): + for cb, args in cbs.values(): + log.debug("epoll.registered(%d) %s", fd, (cb, args)) + # We need one-shot behavior (second arg of 1 to .poll()) - res = self.poller.ipoll(delay, 1) - #log.debug("poll result: %s", res) - # Remove "if res" workaround after - # https://github.com/micropython/micropython/issues/2716 fixed. - if res: - for sock, ev in res: - cb = self.objmap[id(sock)] - if ev & (select.POLLHUP | select.POLLERR): - # These events are returned even if not requested, and - # are sticky, i.e. will be returned again and again. - # If the caller doesn't do proper error handling and - # unregister this sock, we'll busy-loop on it, so we - # as well can unregister it now "just in case". - self.remove_reader(sock) - if DEBUG and __debug__: - log.debug("Calling IO callback: %r", cb) - if isinstance(cb, tuple): - cb[0](*cb[1]) - else: - cb.pend_throw(None) + if delay == -1: + res = self.poller.poll(-1, 1) + else: + res = self.poller.poll(int(delay * 1000), 1) + #log.debug("epoll result: %s", res) + for fd, ev in res: + # Remove the registered callbacks dictionary from its parent + # so when callbacks are invoked they can add their registrations + # to a fresh dictionary. 
+ cbs = self.objmap.pop(fd, {}) + if not cbs: + log.error("Event %d on fd %r but no callback registered", ev, fd) + continue + if __debug__: + s = '\n'.join(str(v) for v in cbs.values()) + log.debug("Matching IO callbacks for %r:\n%s", (fd, ev), s) + while cbs: + _id, data = cbs.popitem() + cb, args = data + if args is None: + if __debug__: + log.debug("Scheduling IO coro: %r", (fd, ev, cb)) self.call_soon(cb) + else: + if __debug__: + log.debug("Calling IO callback: %r", (fd, ev, cb, args)) + cb(*args) + # If no callback registered an event for this fd unregister it + if not self.objmap.get(fd, None): + self._unregister_fd(fd) class StreamReader: - def __init__(self, polls, ios=None): - if ios is None: - ios = polls - self.polls = polls - self.ios = ios + def __init__(self, s): + self.s = s def read(self, n=-1): + yield IORead(self.s) while True: - yield IORead(self.polls) - res = self.ios.read(n) + res = self.s.read(n) if res is not None: break - # This should not happen for real sockets, but can easily - # happen for stream wrappers (ssl, websockets, etc.) 
- #log.warn("Empty read") + log.warning("Empty read") if not res: - yield IOReadDone(self.polls) + yield IOReadDone(self.s) return res - def readexactly(self, n): - buf = b"" - while n: - yield IORead(self.polls) - res = self.ios.read(n) - assert res is not None - if not res: - yield IOReadDone(self.polls) - break - buf += res - n -= len(res) - return buf - def readline(self): - if DEBUG and __debug__: + if __debug__: log.debug("StreamReader.readline()") - buf = b"" + yield IORead(self.s) +# if __debug__: +# log.debug("StreamReader.readline(): after IORead: %s", s) while True: - yield IORead(self.polls) - res = self.ios.readline() - assert res is not None - if not res: - yield IOReadDone(self.polls) - break - buf += res - if buf[-1] == 0x0a: + res = self.s.readline() + if res is not None: break - if DEBUG and __debug__: - log.debug("StreamReader.readline(): %s", buf) - return buf + log.warning("Empty read") + if not res: + yield IOReadDone(self.s) + if __debug__: + log.debug("StreamReader.readline(): res: %s", res) + return res def aclose(self): - yield IOReadDone(self.polls) - self.ios.close() + yield IOReadDone(self.s) + self.s.close() def __repr__(self): - return "" % (self.polls, self.ios) + return "" % self.s class StreamWriter: @@ -155,40 +149,34 @@ def __init__(self, s, extra): self.s = s self.extra = extra - def awrite(self, buf, off=0, sz=-1): + def awrite(self, buf): # This method is called awrite (async write) to not proliferate # incompatibility with original asyncio. Unlike original asyncio # whose .write() method is both not a coroutine and guaranteed # to return immediately (which means it has to buffer all the # data), this method is a coroutine. 
- if sz == -1: - sz = len(buf) - off - if DEBUG and __debug__: + sz = len(buf) + if __debug__: log.debug("StreamWriter.awrite(): spooling %d bytes", sz) while True: - res = self.s.write(buf, off, sz) + res = self.s.write(buf) # If we spooled everything, return immediately if res == sz: - if DEBUG and __debug__: + if __debug__: log.debug("StreamWriter.awrite(): completed spooling %d bytes", res) return if res is None: res = 0 - if DEBUG and __debug__: + if __debug__: log.debug("StreamWriter.awrite(): spooled partial %d bytes", res) assert res < sz - off += res + buf = buf[res:] sz -= res yield IOWrite(self.s) #assert s2.fileno() == self.s.fileno() - if DEBUG and __debug__: + if __debug__: log.debug("StreamWriter.awrite(): can write more") - # Write piecewise content from iterable (usually, a generator) - def awriteiter(self, iterable): - for buf in iterable: - yield from self.awrite(buf) - def aclose(self): yield IOWriteDone(self.s) self.s.close() @@ -200,62 +188,51 @@ def __repr__(self): return "" % self.s -def open_connection(host, port, ssl=False): - if DEBUG and __debug__: +def open_connection(host, port): + if __debug__: log.debug("open_connection(%s, %s)", host, port) - ai = _socket.getaddrinfo(host, port, 0, _socket.SOCK_STREAM) - ai = ai[0] - s = _socket.socket(ai[0], ai[1], ai[2]) + s = _socket.socket() s.setblocking(False) + ai = _socket.getaddrinfo(host, port) + addr = ai[0][4] try: - s.connect(ai[-1]) + s.connect(addr) except OSError as e: - if e.args[0] != uerrno.EINPROGRESS: + if e.args[0] != errno.EINPROGRESS: raise - if DEBUG and __debug__: + if __debug__: log.debug("open_connection: After connect") yield IOWrite(s) # if __debug__: # assert s2.fileno() == s.fileno() - if DEBUG and __debug__: + if __debug__: log.debug("open_connection: After iowait: %s", s) - if ssl: - print("Warning: uasyncio SSL support is alpha") - import ussl - s.setblocking(True) - s2 = ussl.wrap_socket(s) - s.setblocking(False) - return StreamReader(s, s2), StreamWriter(s2, {}) 
return StreamReader(s), StreamWriter(s, {}) def start_server(client_coro, host, port, backlog=10): - if DEBUG and __debug__: - log.debug("start_server(%s, %s)", host, port) - ai = _socket.getaddrinfo(host, port, 0, _socket.SOCK_STREAM) - ai = ai[0] - s = _socket.socket(ai[0], ai[1], ai[2]) - try: - s.setblocking(False) + log.debug("start_server(%s, %s)", host, port) + s = _socket.socket() + s.setblocking(False) - s.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, 1) - s.bind(ai[-1]) - s.listen(backlog) - while True: - if DEBUG and __debug__: - log.debug("start_server: Before accept") - yield IORead(s) - if DEBUG and __debug__: - log.debug("start_server: After iowait") - s2, client_addr = s.accept() - s2.setblocking(False) - if DEBUG and __debug__: - log.debug("start_server: After accept: %s", s2) - extra = {"peername": client_addr} - yield client_coro(StreamReader(s2), StreamWriter(s2, extra)) - finally: - s.close() + ai = _socket.getaddrinfo(host, port) + addr = ai[0][4] + s.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, 1) + s.bind(addr) + s.listen(backlog) + while True: + if __debug__: + log.debug("start_server: Before accept") + yield IORead(s) + if __debug__: + log.debug("start_server: After iowait") + s2, client_addr = s.accept() + s2.setblocking(False) + if __debug__: + log.debug("start_server: After accept: %s", s2) + extra = {"peername": client_addr} + yield client_coro(StreamReader(s2), StreamWriter(s2, extra)) import uasyncio.core -uasyncio.core._event_loop_class = PollEventLoop +uasyncio.core._event_loop_class = EpollEventLoop From 04d215f484676109bbb957bb69c3002256787b70 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Thu, 5 Dec 2019 00:16:28 +0100 Subject: [PATCH 43/53] uasyncio: fix unpack operation in cancel() --- uasyncio.core/uasyncio/core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/uasyncio.core/uasyncio/core.py b/uasyncio.core/uasyncio/core.py index 84994944b..d62db976a 100644 --- 
a/uasyncio.core/uasyncio/core.py +++ b/uasyncio.core/uasyncio/core.py @@ -50,7 +50,7 @@ def wait(self, delay): def cancel(self, callback, exc = CancelledError): _id = id(callback) for idx, item in enumerate(self.q): - t, cnt, cb, args, _exc = item + t, cnt, cb, args, _exc, _discard = item if id(cb) != _id: continue if __debug__: From a8883969809688118ef0595f68e76eb1d5aa75f5 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Thu, 5 Dec 2019 13:32:37 +0100 Subject: [PATCH 44/53] Use newer event loop implementation (based on asi2700-master branch) --- uasyncio.core/uasyncio/core.py | 299 +++++++++++++++++++++++---------- uasyncio/uasyncio/__init__.py | 111 ++++++------ 2 files changed, 277 insertions(+), 133 deletions(-) diff --git a/uasyncio.core/uasyncio/core.py b/uasyncio.core/uasyncio/core.py index d62db976a..8103a2e31 100644 --- a/uasyncio.core/uasyncio/core.py +++ b/uasyncio.core/uasyncio/core.py @@ -1,120 +1,148 @@ -try: - import utime as time -except ImportError: - import time -import uheapq as heapq -import logging +import utime as time +import utimeq +import ucollections + + +type_gen = type((lambda: (yield))()) + +DEBUG = 0 +log = None + +def set_debug(val): + global DEBUG, log + DEBUG = val + if val: + import logging + log = logging.getLogger("uasyncio.core") + log.setLevel(val) + class CancelledError(Exception): pass -log = logging.getLogger("asyncio") -type_gen = type((lambda: (yield))()) +class TimeoutError(CancelledError): + pass + class EventLoop: - def __init__(self): - self.q = [] - self.cnt = 0 + def __init__(self, runq_len=16, waitq_len=16): + self.runq = ucollections.deque((), runq_len, True) + self.waitq = utimeq.utimeq(waitq_len) + # Current task being run. Task is a top-level coroutine scheduled + # in the event loop (sub-coroutines executed transparently by + # yield from/await, event loop "doesn't see" them). 
+ self.cur_task = None def time(self): - return time.time() + return time.ticks_ms() def create_task(self, coro): # CPython 3.4.2 - self.call_at(0, coro) + self.call_later_ms(0, coro) # CPython asyncio incompatibility: we don't return Task object def call_soon(self, callback, *args): - self.call_at(self.time(), callback, *args) + if __debug__ and DEBUG: + log.debug("Scheduling in runq: %s", (callback, args)) + self.runq.append(callback) + if not isinstance(callback, type_gen): + self.runq.append(args) def call_later(self, delay, callback, *args): - self.call_at(self.time() + delay, callback, *args) + self.call_at_(time.ticks_add(self.time(), int(delay * 1000)), callback, args) - def call_at(self, time, callback, *args, exc=None): - # Including self.cnt is a workaround per heapq docs - if __debug__: - log.debug("Scheduling %s", (time, self.cnt, callback, args, exc)) - heapq.heappush(self.q, (time, self.cnt, callback, args, exc, False)) -# print(self.q) - self.cnt += 1 + def call_later_ms(self, delay, callback, *args): + if not delay: + return self.call_soon(callback, *args) + self.call_at_(time.ticks_add(self.time(), delay), callback, args) + + def call_at_(self, time, callback, args=()): + if __debug__ and DEBUG: + log.debug("Scheduling in waitq: %s", (time, callback, args)) + self.waitq.push(time, callback, args) def wait(self, delay): # Default wait implementation, to be overriden in subclasses # with IO scheduling - if __debug__: + if __debug__ and DEBUG: log.debug("Sleeping for: %s", delay) - time.sleep(delay) - - def cancel(self, callback, exc = CancelledError): - _id = id(callback) - for idx, item in enumerate(self.q): - t, cnt, cb, args, _exc, _discard = item - if id(cb) != _id: - continue - if __debug__: - log.debug("Setting discard flag on: %s at index %d", (t, cnt, cb, args, _exc), idx) - self.q[idx] = t, cnt, cb, args, _exc, True - self.call_at(0, cb, *args, exc=exc) - self.remove_polled_cb(callback) + time.sleep_ms(delay) + + def cancel(self, coro, exc 
= CancelledError): + if isinstance(coro, type_gen): + try: + prev = coro.pend_throw(exc) + if __debug__ and DEBUG: + log.debug("Cancelling %s asynchronously", coro) + if prev is False: + _event_loop.remove_polled_cb(coro) + _event_loop.call_soon(coro) + except TypeError: + # .pend_throw() works only on started coroutines + # Kill the coro right here, right now + # No need to worry about IO because the coro cannot be registered + # because it didn't get a chance to run yet + try: + if __debug__ and DEBUG: + log.debug("Cancelling %s synchronously", coro) + coro.throw(exc) + except exc: + pass + else: + raise UnimplementedError('Cancelling a callback is not supported') def run_forever(self): + cur_task = [0, 0, 0] while True: - if self.q: - tnow = self.time() - if __debug__: - log.debug('*'*20+' sched step start at %s, num tasks in queue %d', tnow, len(self.q)) - t, cnt, cb, args, exc, discard = heapq.heappop(self.q) - delay = t - tnow - if __debug__: - log.debug("Next coroutine to run in %s: %s", delay, (t, cnt, cb, args, exc)) - if discard: - if __debug__: - log.debug("Discarding: %s", (t, cnt, cb, args, exc, discard)) - continue -# __main__.mem_info() - if delay > 0 and not exc: - self.call_at(t, cb, *args) - self.wait(delay) - continue - else: - self.wait(-1) - # Assuming IO completion scheduled some tasks - continue - # cancelled callbacks aren't called and nor rescheduled - if callable(cb): - if not exc: + # Expire entries in waitq and move them to runq + tnow = self.time() + while self.waitq: + t = self.waitq.peektime() + delay = time.ticks_diff(t, tnow) + if delay > 0: + break + self.waitq.pop(cur_task) + if __debug__ and DEBUG: + log.debug("Moving from waitq to runq: %s", cur_task[1]) + self.call_soon(cur_task[1], *cur_task[2]) + + # Process runq + l = len(self.runq) + if __debug__ and DEBUG: + log.debug("Entries in runq: %d", l) + while l: + cb = self.runq.popleft() + l -= 1 + args = () + if not isinstance(cb, type_gen): + args = self.runq.popleft() + l -= 1 
+ if __debug__ and DEBUG: + log.info("Next callback to run: %s", (cb, args)) cb(*args) - else: + continue + + if __debug__ and DEBUG: + log.info("Next coroutine to run: %s", (cb, args)) + self.cur_task = cb delay = 0 try: - if __debug__: - log.debug("Coroutine %s send args: %s, %s", cb, args, exc) - if exc: - try: - ret = cb.throw(exc) - except exc: - # ret == None reschedules a canceled task, next round it should raise StopIteration - ret = None - elif args == (): + if args is (): ret = next(cb) else: ret = cb.send(*args) - if __debug__: - log.debug("Coroutine %s yield result: %s", cb, ret) + if __debug__ and DEBUG: + log.info("Coroutine %s yield result: %s", cb, ret) if isinstance(ret, SysCall1): arg = ret.arg - if isinstance(ret, Sleep): + if isinstance(ret, SleepMs): delay = arg elif isinstance(ret, IORead): -# self.add_reader(ret.obj.fileno(), lambda self, c, f: self.call_soon(c, f), self, cb, ret.obj) -# self.add_reader(ret.obj.fileno(), lambda c, f: self.call_soon(c, f), cb, ret.obj) -# self.add_reader(arg.fileno(), lambda cb: self.call_soon(cb), cb) self.add_reader(arg.fileno(), cb) continue elif isinstance(ret, IOWrite): -# self.add_writer(arg.fileno(), lambda cb: self.call_soon(cb), cb) self.add_writer(arg.fileno(), cb) continue elif isinstance(ret, IOReadDone): @@ -123,18 +151,56 @@ def run_forever(self): self.remove_writer(arg.fileno(), cb) elif isinstance(ret, StopLoop): return arg + else: + assert False, "Unknown syscall yielded: %r (of type %r)" % (ret, type(ret)) elif isinstance(ret, type_gen): self.call_soon(ret) + elif isinstance(ret, int): + # Delay + delay = ret elif ret is None: # Just reschedule pass + elif ret is False: + # Don't reschedule + continue else: assert False, "Unsupported coroutine yield value: %r (of type %r)" % (ret, type(ret)) except StopIteration as e: - if __debug__: + if __debug__ and DEBUG: log.debug("Coroutine finished: %s", cb) + self.remove_polled_cb(cb) + continue + except CancelledError as e: + if __debug__ and DEBUG: 
+ log.debug("Coroutine cancelled: %s", cb) + self.remove_polled_cb(cb) continue - self.call_later(delay, cb, *args) + except Exception as e: + if log: + log.error("Coroutine %s exception: %s", cb, e) + # Currently all syscalls don't return anything, so we don't + # need to feed anything to the next invocation of coroutine. + # If that changes, need to pass that value below. + if delay: + self.call_later_ms(delay, cb) + else: + self.call_soon(cb) + + # Wait until next waitq task or I/O availability + delay = 0 + if not self.runq: + if self.waitq: + tnow = self.time() + t = self.waitq.peektime() + delay = time.ticks_diff(t, tnow) + if delay < 0: + delay = 0 + else: + if __debug__ and DEBUG: + log.info("No more tasks to execute, waiting forever") + delay = -1 + self.wait(delay) def run_until_complete(self, coro): def _run_and_stop(): @@ -143,6 +209,9 @@ def _run_and_stop(): self.call_soon(_run_and_stop()) self.run_forever() + def stop(self): + self.call_soon((lambda: (yield StopLoop(0)))()) + def close(self): pass @@ -161,9 +230,6 @@ class SysCall1(SysCall): def __init__(self, arg): self.arg = arg -class Sleep(SysCall1): - pass - class StopLoop(SysCall1): pass @@ -182,14 +248,77 @@ class IOWriteDone(SysCall1): _event_loop = None _event_loop_class = EventLoop -def get_event_loop(): +def get_event_loop(runq_len=16, waitq_len=16): global _event_loop if _event_loop is None: - _event_loop = _event_loop_class() + _event_loop = _event_loop_class(runq_len, waitq_len) return _event_loop def sleep(secs): - yield Sleep(secs) + yield int(secs * 1000) + +# Implementation of sleep_ms awaitable with zero heap memory usage +class SleepMs(SysCall1): + + def __init__(self): + self.v = None + self.arg = None + + def __call__(self, arg): + self.v = arg + #print("__call__") + return self + + def __iter__(self): + #print("__iter__") + return self + + def __next__(self): + if self.v is not None: + #print("__next__ syscall enter") + self.arg = self.v + self.v = None + return self + 
#print("__next__ syscall exit") + _stop_iter.__traceback__ = None + raise _stop_iter + +_stop_iter = StopIteration() +sleep_ms = SleepMs() + + +def cancel(coro, exc=CancelledError): + _event_loop.cancel(coro, exc=exc) + + +class TimeoutObj: + def __init__(self, coro): + self.coro = coro + + +def wait_for_ms(coro, timeout): + + def waiter(coro, timeout_obj): + res = yield from coro + if __debug__ and DEBUG: + log.debug("waiter: cancelling %s", timeout_obj) + timeout_obj.coro = None + return res + + def timeout_func(timeout_obj): + if timeout_obj.coro: + if __debug__ and DEBUG: + log.debug("timeout_func: cancelling %s", timeout_obj.coro) + cancel(timeout_obj.coro, exc=TimeoutError()) + + timeout_obj = TimeoutObj(_event_loop.cur_task) + _event_loop.call_later_ms(timeout, timeout_func, timeout_obj) + return (yield from waiter(coro, timeout_obj)) + + +def wait_for(coro, timeout): + return wait_for_ms(coro, int(timeout * 1000)) + def coroutine(f): return f diff --git a/uasyncio/uasyncio/__init__.py b/uasyncio/uasyncio/__init__.py index 39f13b45a..c48379172 100644 --- a/uasyncio/uasyncio/__init__.py +++ b/uasyncio/uasyncio/__init__.py @@ -1,13 +1,25 @@ -import errno +import uerrno as errno import uselect as select import usocket as _socket from uasyncio.core import * +DEBUG = 0 +log = None + +def set_debug(val): + global DEBUG, log + DEBUG = val + if val: + import logging + log = logging.getLogger("uasyncio") + log.setLevel(val) + + class EpollEventLoop(EventLoop): - def __init__(self): - EventLoop.__init__(self) + def __init__(self, runq_len=16, waitq_len=16): + EventLoop.__init__(self, runq_len, waitq_len) self.poller = select.poll() self.objmap = {} @@ -16,7 +28,10 @@ def _unregister_fd(self, fd): try: self.poller.unregister(fd) except OSError as e: - if e.args[0] != errno.ENOENT: + if e.args[0] == errno.ENOENT: + if __debug__ and DEBUG: + log.debug("_unregister_fd() attepted to remove unregistered FD %s", fd) + else: raise def remove_polled_cb(self, cb): @@ -27,7 
+42,7 @@ def remove_polled_cb(self, cb): self._unregister_fd(fd) def add_reader(self, fd, cb, *args): - if __debug__: + if __debug__ and DEBUG: log.debug("add_reader%s", (fd, cb, args)) cbs = self.objmap.setdefault(fd, {}) self.poller.register(fd, select.POLLIN) @@ -37,7 +52,7 @@ def add_reader(self, fd, cb, *args): cbs[id(cb)] = (cb, None) def remove_reader(self, fd, cb): - if __debug__: + if __debug__ and DEBUG: log.debug("remove_reader(%s)", (fd, cb)) cbs = self.objmap.get(fd, {}) cbs.pop(id(cb), None) @@ -45,7 +60,7 @@ def remove_reader(self, fd, cb): self._unregister_fd(fd) def add_writer(self, fd, cb, *args): - if __debug__: + if __debug__ and DEBUG: log.debug("add_writer%s", (fd, cb, args)) cbs = self.objmap.setdefault(fd, {}) self.poller.register(fd, select.POLLOUT) @@ -55,7 +70,7 @@ def add_writer(self, fd, cb, *args): cbs[id(cb)] = (cb, None) def remove_writer(self, fd, cb): - if __debug__: + if __debug__ and DEBUG: log.debug("remove_writer(%s)", fd) cbs = self.objmap.get(fd, {}) cbs.pop(id(cb), None) @@ -63,17 +78,14 @@ def remove_writer(self, fd, cb): self._unregister_fd(fd) def wait(self, delay): - if __debug__: + if __debug__ and DEBUG: log.debug("epoll.wait(%s)", delay) for fd, cbs in self.objmap.items(): for cb, args in cbs.values(): log.debug("epoll.registered(%d) %s", fd, (cb, args)) # We need one-shot behavior (second arg of 1 to .poll()) - if delay == -1: - res = self.poller.poll(-1, 1) - else: - res = self.poller.poll(int(delay * 1000), 1) + res = self.poller.poll(delay, 1) #log.debug("epoll result: %s", res) for fd, ev in res: # Remove the registered callbacks dictionary from its parent @@ -83,18 +95,18 @@ def wait(self, delay): if not cbs: log.error("Event %d on fd %r but no callback registered", ev, fd) continue - if __debug__: + if __debug__ and DEBUG: s = '\n'.join(str(v) for v in cbs.values()) log.debug("Matching IO callbacks for %r:\n%s", (fd, ev), s) while cbs: _id, data = cbs.popitem() cb, args = data if args is None: - if __debug__: + 
if __debug__ and DEBUG: log.debug("Scheduling IO coro: %r", (fd, ev, cb)) self.call_soon(cb) else: - if __debug__: + if __debug__ and DEBUG: log.debug("Calling IO callback: %r", (fd, ev, cb, args)) cb(*args) # If no callback registered an event for this fd unregister it @@ -119,10 +131,10 @@ def read(self, n=-1): return res def readline(self): - if __debug__: + if __debug__ and DEBUG: log.debug("StreamReader.readline()") yield IORead(self.s) -# if __debug__: +# if __debug__ and DEBUG: # log.debug("StreamReader.readline(): after IORead: %s", s) while True: res = self.s.readline() @@ -131,7 +143,7 @@ def readline(self): log.warning("Empty read") if not res: yield IOReadDone(self.s) - if __debug__: + if __debug__ and DEBUG: log.debug("StreamReader.readline(): res: %s", res) return res @@ -156,25 +168,25 @@ def awrite(self, buf): # to return immediately (which means it has to buffer all the # data), this method is a coroutine. sz = len(buf) - if __debug__: + if __debug__ and DEBUG: log.debug("StreamWriter.awrite(): spooling %d bytes", sz) while True: res = self.s.write(buf) # If we spooled everything, return immediately if res == sz: - if __debug__: + if __debug__ and DEBUG: log.debug("StreamWriter.awrite(): completed spooling %d bytes", res) return if res is None: res = 0 - if __debug__: + if __debug__ and DEBUG: log.debug("StreamWriter.awrite(): spooled partial %d bytes", res) assert res < sz buf = buf[res:] sz -= res yield IOWrite(self.s) #assert s2.fileno() == self.s.fileno() - if __debug__: + if __debug__ and DEBUG: log.debug("StreamWriter.awrite(): can write more") def aclose(self): @@ -189,7 +201,7 @@ def __repr__(self): def open_connection(host, port): - if __debug__: + if __debug__ and DEBUG: log.debug("open_connection(%s, %s)", host, port) s = _socket.socket() s.setblocking(False) @@ -200,39 +212,42 @@ def open_connection(host, port): except OSError as e: if e.args[0] != errno.EINPROGRESS: raise - if __debug__: + if __debug__ and DEBUG: 
log.debug("open_connection: After connect") yield IOWrite(s) -# if __debug__: +# if __debug__ and DEBUG: # assert s2.fileno() == s.fileno() - if __debug__: + if __debug__ and DEBUG: log.debug("open_connection: After iowait: %s", s) return StreamReader(s), StreamWriter(s, {}) def start_server(client_coro, host, port, backlog=10): - log.debug("start_server(%s, %s)", host, port) - s = _socket.socket() - s.setblocking(False) - - ai = _socket.getaddrinfo(host, port) - addr = ai[0][4] - s.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, 1) - s.bind(addr) - s.listen(backlog) - while True: - if __debug__: - log.debug("start_server: Before accept") - yield IORead(s) - if __debug__: - log.debug("start_server: After iowait") - s2, client_addr = s.accept() - s2.setblocking(False) - if __debug__: - log.debug("start_server: After accept: %s", s2) - extra = {"peername": client_addr} - yield client_coro(StreamReader(s2), StreamWriter(s2, extra)) + if DEBUG and __debug__: + log.debug("start_server(%s, %s)", host, port) + ai = _socket.getaddrinfo(host, port, 0, _socket.SOCK_STREAM) + ai = ai[0] + s = _socket.socket(ai[0], ai[1], ai[2]) + try: + s.setblocking(False) + s.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, 1) + s.bind(ai[-1]) + s.listen(backlog) + while True: + if DEBUG and __debug__: + log.debug("start_server: Before accept") + yield IORead(s) + if DEBUG and __debug__: + log.debug("start_server: After iowait") + s2, client_addr = s.accept() + s2.setblocking(False) + if DEBUG and __debug__: + log.debug("start_server: After accept: %s", s2) + extra = {"peername": client_addr} + yield client_coro(StreamReader(s2), StreamWriter(s2, extra)) + finally: + s.close() import uasyncio.core uasyncio.core._event_loop_class = EpollEventLoop From c4f27607df54ba3f8f769e98a92e508e509f7474 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Wed, 18 Dec 2019 17:23:36 +0100 Subject: [PATCH 45/53] os: fix walk() for versions of micropython using DTTOIF() --- os/os/__init__.py | 2 +- 
1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/os/os/__init__.py b/os/os/__init__.py index f941e7f54..54aa86359 100644 --- a/os/os/__init__.py +++ b/os/os/__init__.py @@ -148,7 +148,7 @@ def walk(top, topdown=True): files = [] dirs = [] for dirent in ilistdir(top): - mode = dirent[1] << 12 + mode = dirent[1] fname = fsdecode(dirent[0]) if stat_.S_ISDIR(mode): if fname != "." and fname != "..": From 1643e084993b1ad4cb482a5fd5645dc10b0f4351 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Tue, 31 Dec 2019 13:10:42 +0100 Subject: [PATCH 46/53] uasyncio: fix _unregister_fd() Do not remove items from loop.objmap while iterating it. --- uasyncio/uasyncio/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/uasyncio/uasyncio/__init__.py b/uasyncio/uasyncio/__init__.py index c48379172..0496e3129 100644 --- a/uasyncio/uasyncio/__init__.py +++ b/uasyncio/uasyncio/__init__.py @@ -24,7 +24,6 @@ def __init__(self, runq_len=16, waitq_len=16): self.objmap = {} def _unregister_fd(self, fd): - self.objmap.pop(fd, None) try: self.poller.unregister(fd) except OSError as e: From 089eb7a1f6209f74518ca5db072a8705213008cb Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Tue, 31 Dec 2019 13:11:59 +0100 Subject: [PATCH 47/53] uasyncio: fix loop.cancel() Schedule task for execution when pend_throw() returns None (and not False). None is the default value returned by pend_throw() when called the first time. 
--- uasyncio.core/uasyncio/core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/uasyncio.core/uasyncio/core.py b/uasyncio.core/uasyncio/core.py index 8103a2e31..e8fa57252 100644 --- a/uasyncio.core/uasyncio/core.py +++ b/uasyncio.core/uasyncio/core.py @@ -76,7 +76,7 @@ def cancel(self, coro, exc = CancelledError): prev = coro.pend_throw(exc) if __debug__ and DEBUG: log.debug("Cancelling %s asynchronously", coro) - if prev is False: + if prev is None: _event_loop.remove_polled_cb(coro) _event_loop.call_soon(coro) except TypeError: From 67028080474dd0501dcfea8466309310deeb15d3 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Mon, 1 Mar 2021 12:16:32 +0000 Subject: [PATCH 48/53] multiprocessing: open Connection() fd in binary mode --- multiprocessing/multiprocessing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/multiprocessing/multiprocessing.py b/multiprocessing/multiprocessing.py index 30c470546..d18cf8b21 100644 --- a/multiprocessing/multiprocessing.py +++ b/multiprocessing/multiprocessing.py @@ -39,7 +39,7 @@ class Connection: def __init__(self, fd): self.fd = fd - self.f = open(fd) + self.f = open(fd, 'b') def __repr__(self): return "<Connection %s>" % self.f From e6bae982626a25e3c9dbdfb855bd18a139997773 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Fri, 21 May 2021 07:09:43 +0000 Subject: [PATCH 49/53] fcntl: add fcntl command constants for linux --- fcntl/fcntl.py | 69 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/fcntl/fcntl.py b/fcntl/fcntl.py index 5917840cb..9caad77c3 100644 --- a/fcntl/fcntl.py +++ b/fcntl/fcntl.py @@ -2,6 +2,75 @@ import os import ffilib +DN_ACCESS = 1 +DN_ATTRIB = 32 +DN_CREATE = 4 +DN_DELETE = 8 +DN_MODIFY = 2 +DN_MULTISHOT = 2147483648 +DN_RENAME = 16 +FASYNC = 8192 +FD_CLOEXEC = 1 +F_DUPFD = 0 +F_EXLCK = 4 +F_GETFD = 1 +F_GETFL = 3 +F_GETLEASE = 1025 +F_GETLK = 5 +F_GETLK64 = 5 +F_GETOWN = 9 +F_GETSIG = 11 +F_NOTIFY = 1026 +F_RDLCK = 0 +F_SETFD = 
2 +F_SETFL = 4 +F_SETLEASE = 1024 +F_SETLK = 6 +F_SETLK64 = 6 +F_SETLKW = 7 +F_SETLKW64 = 7 +F_SETOWN = 8 +F_SETSIG = 10 +F_SHLCK = 8 +F_UNLCK = 2 +F_WRLCK = 1 +I_ATMARK = 21279 +I_CANPUT = 21282 +I_CKBAND = 21277 +I_FDINSERT = 21264 +I_FIND = 21259 +I_FLUSH = 21253 +I_FLUSHBAND = 21276 +I_GETBAND = 21278 +I_GETCLTIME = 21281 +I_GETSIG = 21258 +I_GRDOPT = 21255 +I_GWROPT = 21268 +I_LINK = 21260 +I_LIST = 21269 +I_LOOK = 21252 +I_NREAD = 21249 +I_PEEK = 21263 +I_PLINK = 21270 +I_POP = 21251 +I_PUNLINK = 21271 +I_PUSH = 21250 +I_RECVFD = 21262 +I_SENDFD = 21265 +I_SETCLTIME = 21280 +I_SETSIG = 21257 +I_SRDOPT = 21254 +I_STR = 21256 +I_SWROPT = 21267 +I_UNLINK = 21261 +LOCK_EX = 2 +LOCK_MAND = 32 +LOCK_NB = 4 +LOCK_READ = 64 +LOCK_RW = 192 +LOCK_SH = 1 +LOCK_UN = 8 +LOCK_WRITE = 128 libc = ffilib.libc() From 3b2ff6d0eb142d58d4e2856f613cd9f9ae415596 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Thu, 27 May 2021 09:22:16 +0000 Subject: [PATCH 50/53] os.linux: add reboot() func --- os.linux/os/linux/__init__.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/os.linux/os/linux/__init__.py b/os.linux/os/linux/__init__.py index 463c0c24b..9e2acbc80 100644 --- a/os.linux/os/linux/__init__.py +++ b/os.linux/os/linux/__init__.py @@ -8,6 +8,25 @@ _mount = libc.func('i', 'mount', 'sssLs') _umount = libc.func('i', 'umount', 's') _setenv = libc.func('i', 'setenv', 'ssi') +_reboot_syscall = libc.func('l', 'syscall', 'liiis') + + +LINUX_REBOOT_MAGIC1 = 0xfee1dead +LINUX_REBOOT_MAGIC2 = 672274793 +LINUX_REBOOT_CMD_RESTART = 0x01234567 +LINUX_REBOOT_CMD_HALT = 0xCDEF0123 +LINUX_REBOOT_CMD_CAD_ON = 0x89ABCDEF +LINUX_REBOOT_CMD_CAD_OFF = 0x00000000 +LINUX_REBOOT_CMD_POWER_OFF = 0x4321FEDC +LINUX_REBOOT_CMD_RESTART2 = 0xA1B2C3D4 +LINUX_REBOOT_CMD_SW_SUSPEND = 0xD000FCE2 +LINUX_REBOOT_CMD_KEXEC = 0x45584543 + + +def reboot(cmd, arg_str): + SYS_reboot = 142 + e = _reboot_syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, arg_str) + 
os.check_error(e) def mount(source, target, fstype, flags = 0, opts = None): From 656239efb2ab1c15b02ade639db2032e131b981d Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Tue, 22 Feb 2022 13:49:55 +0000 Subject: [PATCH 51/53] datetime: add __new__() to tzinfo class --- datetime/datetime.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/datetime/datetime.py b/datetime/datetime.py index 03b4e4cdd..9ef43605e 100644 --- a/datetime/datetime.py +++ b/datetime/datetime.py @@ -914,6 +914,11 @@ class tzinfo: Subclasses must override the name(), utcoffset() and dst() methods. """ __slots__ = () + + def __new__(self, *args, **kwargs): + """Constructor.""" + return object.__new__(self) + def tzname(self, dt): "datetime -> string name of time zone." raise NotImplementedError("tzinfo subclass must override tzname()") From 6b9b09c02c1bf125ac9d9ac75679d8e5ef126345 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Thu, 2 Feb 2023 11:21:25 +0000 Subject: [PATCH 52/53] os: add constants for WNOHANG, SIGTERM and SIGKILL --- os/os/__init__.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/os/os/__init__.py b/os/os/__init__.py index 54aa86359..091d5e592 100644 --- a/os/os/__init__.py +++ b/os/os/__init__.py @@ -21,6 +21,11 @@ O_APPEND = 0o0002000 O_NONBLOCK = 0o0004000 +WNOHANG = 0x00000001 + +SIGKILL = 9 +SIGTERM = 15 + error = OSError name = "posix" sep = "/" From 2a3bcc58624d43d0bf88179666606752de3f7773 Mon Sep 17 00:00:00 2001 From: Delio Brignoli Date: Thu, 2 Feb 2023 11:22:19 +0000 Subject: [PATCH 53/53] os: add setpgid() --- os/os/__init__.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/os/os/__init__.py b/os/os/__init__.py index 091d5e592..b88ee9f9c 100644 --- a/os/os/__init__.py +++ b/os/os/__init__.py @@ -60,6 +60,7 @@ execvp_ = libc.func("i", "execvp", "PP") kill_ = libc.func("i", "kill", "ii") getenv_ = libc.func("s", "getenv", "P") + setpgid_ = libc.func("i", "setpgid", "ii") @@ -237,6 +238,11 @@ def kill(pid, sig): r = kill_(pid, sig) 
check_error(r) +def setpgid(pid, pgid): + r = setpgid_(pid, pgid) + check_error(r) + return r + def system(command): r = system_(command) check_error(r)