diff --git a/DRIVERS.md b/DRIVERS.md deleted file mode 100644 index 2d7c9fb..0000000 --- a/DRIVERS.md +++ /dev/null @@ -1,188 +0,0 @@ -# 1. Introduction - -Drivers for switches and pushbuttons are provided, plus a retriggerable delay -class. The switch and button drivers support debouncing. The switch driver -provides for running a callback or launching a coroutine (coro) on contact -closure and/or opening. - -The pushbutton driver extends this to support long-press and double-click -events. - -# 2. Modules - - 1. `aledflash.py` Flashes the four Pyboard LED's asynchronously for 10s. The - simplest uasyncio demo. Import it to run. - 2. `aswitch.py` This provides classes for interfacing switches and pushbuttons - and also a software retriggerable delay object. Pushbuttons are a - generalisation of switches providing logical rather than physical status along - with double-clicked and long pressed events. - 3. `asyn.py` Provides synchronisation primitives. Required by `aswitch.py`. - 4. `astests.py` Test/demonstration programs for `aswitch.py`. - -# 3. Module aswitch.py - -This module provides the following classes: - - * `Switch` This supports debouncing a normally open switch connected between - a pin and ground. Can run callbacks or schedule coros on contact closure - and/or opening. - * `Pushbutton` A generalisation of `Switch` to support normally open or - normally closed switches connected to ground or 3V3. Can run callbacks or - schedule coros on double-click or long press events. - * `Delay_ms` A class providing a retriggerable delay measured in ms. Can be - used to run a callback or to schedule a coro. Its state can be tested by any - coro. - -The module `astests.py` provides examples of usage. - -## 3.1 Switch class - -This assumes a normally open switch connected between a pin and ground. The pin -should be initialised as an input with a pullup. Functions may be specified to -run on contact closure or opening. 
Functions can be callbacks or coroutines; -coroutines will be scheduled for execution and will run asynchronously. -Debouncing is implicit: contact bounce will not cause spurious execution of -these functions. - -Constructor argument (mandatory): - - 1. `pin` The initialised Pin instance. - -Methods: - - 1. `close_func` Args: `func` (mandatory) a function to run on contact - closure. `args` a tuple of arguments for the function (default `()`) - 2. `open_func` Args: `func` (mandatory) a function to run on contact open. - `args` a tuple of arguments for the function (default `()`) - 3. `__call__` Call syntax e.g. `myswitch()` returns the physical debounced - state of the switch i.e. 0 if grounded, 1 if connected to `3V3`. - -Methods 1 and 2 should be called before starting the scheduler. - -Class attribute: - 1. `debounce_ms` Debounce time in ms. Default 50. - -## 3.2 Pushbutton class - -This can support normally open or normally closed switches, connected to `gnd` -(with a pullup) or to `3V3` (with a pull-down). The `Pin` object should be -initialised appropriately. The assumption is that on initialisation the button -is not pressed. - -The Pushbutton class uses logical rather than physical state: a button's state -is considered `True` if pressed, otherwise `False` regardless of its -physical implementation. - -Functions may be specified to run on button press, release, double click or -long press events. Functions can be callbacks or coroutines; coroutines will be -scheduled for execution and will run asynchronously. - -Constructor argument (mandatory): - - 1. `pin` The initialised Pin instance. - -Methods: - - 1. `press_func` Args: `func` (mandatory) a function to run on button push. - `args` a tuple of arguments for the function (default `()`). - 2. `release_func` Args: `func` (mandatory) a function to run on button - release. `args` a tuple of arguments for the function (default `()`). - 3. 
`long_func` Args: `func` (mandatory) a function to run on long button - push. `args` a tuple of arguments for the function (default `()`). - 4. `double_func` Args: `func` (mandatory) a function to run on double - push. `args` a tuple of arguments for the function (default `()`). - 5. `__call__` Call syntax e.g. `mybutton()` Returns the logical debounced - state of the button (`True` corresponds to pressed). - 6. `rawstate()` Returns the logical instantaneous state of the button. There - is probably no reason to use this. - -Methods 1 - 4 should be called before starting the scheduler. - -Class attributes: - 1. `debounce_ms` Debounce time in ms. Default 50. - 2. `long_press_ms` Threshold time in ms for a long press. Default 1000. - 3. `double_click_ms` Threshold time in ms for a double click. Default 400. - -## 3.3 Delay_ms class - -This implements the software equivalent of a retriggerable monostable or a -watchdog timer. It has an internal boolean `running` state. When instantiated -the `Delay_ms` instance does nothing, with `running` `False` until triggered. -Then `running` becomes `True` and a timer is initiated. This can be prevented -from timing out by triggering it again (with a new timeout duration). So long -as it is triggered before the time specified in the preceding trigger it will -never time out. - -If it does time out the `running` state will revert to `False`. This can be -interrogated by the object's `running()` method. In addition a function can -be specified to the constructor. This will execute when a timeout occurs. The -function can be a callback or a coroutine; in the latter case it will be -scheduled for execution and will run asynchronously. - -Constructor arguments (defaults in brackets): - - 1. `func` The function to call on timeout (default `None`). - 2. `args` A tuple of arguments for the function (default `()`). - 3. `can_alloc` Boolean, default `True`. See below. - -Methods: - - 1. `trigger` mandatory argument `duration`. 
A timeout will occur after - `duration` ms unless retriggered. - 2. `stop` No argument. Cancels the timeout, setting the `running` status - `False`. The timer can be restarted by issuing `trigger` again. - 3. `running` No argument. Returns the running status of the object. - -If the `trigger` method is to be called from an interrupt service routine the -`can_alloc` constructor arg should be `False`. This causes the delay object -to use a slightly less efficient mode which avoids RAM allocation when -`trigger` runs. - -# 4. Module astests.py - -This provides demonstration/test functions for the `Switch` and `Pushbutton` -classes. They assume a switch or button wired between pin X1 and gnd. Tests may -be terminated by grounding X2. - -## 4.1 Function test_sw() - -This will flash the red LED on switch closure, and the green LED on opening -and demonstrates the scheduling of coroutines. See section 5 for a discussion -of its behaviour if the switch is toggled rapidly. - -## 4.2 Function test_swcb() - -Demonstrates the use of callbacks to toggle the red and green LED's. - -## 4.3 Function test_btn() - -This will flash the red LED on button push, and the green LED on release. A -long press will flash the blue LED and a double-press the yellow one. - -The note below on race conditions applies. - -## 4.4 Function test_btncb() - -Demonstrates the use of callbacks. Toggles the red, green, yellow and blue -LED's on press, release, double-press and long press respectively. - -# 5 Race conditions - -Note that in the tests such as test_sw() where coroutines are scheduled by -events and the switch is cycled rapidly the LED behaviour may seem surprising. -This is because each time the switch is closed a coro is launched to flash the -red LED; on each open event one is launched for the green LED. With rapid -cycling a new coro instance will commence while one is still running against -the same LED. 
This type of conflict over a resource is known as a race -condition: in this instance it leads to the LED behaving erratically. - -This is a hazard of asynchronous programming. In some situations it is -desirable to launch a new instance on each button press or switch closure, even -if other instances are still incomplete. In other cases it can lead to a race -condition, leading to the need to code an interlock to ensure that the desired -behaviour occurs. The programmer must define the desired behaviour. - -In the case of this test program it might be to ignore events while a similar -one is running, or to extend the timer to prolong the LED illumination. -Alternatively a subsequent button press might be required to terminate the -illumination. The "right" behaviour is application dependent. diff --git a/FASTPOLL.md b/FASTPOLL.md deleted file mode 100644 index 28cb42a..0000000 --- a/FASTPOLL.md +++ /dev/null @@ -1,446 +0,0 @@ -# A modified version of uasyncio - -This document describes a "priority" version of uasyncio. Its purpose is to -provide a simple priority mechanism to facilitate the design of applications -with improved millisecond-level timing accuracy and reduced scheduling latency. - -V0.3 Feb 2018. A single module designed to work with the official `uasyncio` -library. This requires `uasyncio` V2.0 which requires firmware dated -22nd Feb 2018 or later. - -**API CHANGES** -V2.0 of `uasyncio` changed the arguments to `get_event_loop` so this version -has corresponding changes. See [section 3](./FASTPOLL.md#3-a-solution). - -###### [Main README](./README.md) - -# Contents - - 1. [Installation](./FASTPOLL.md#1-installation) - - 1.1 [Benchmarks](./FASTPOLL.md#11-benchmarks) Benchmark and demo programs. - - 2. 
[Rationale](./FASTPOLL.md#2-rationale) - - 2.1 [Latency](./FASTPOLL.md#21-latency) - - 2.2 [Timing accuracy](./FASTPOLL.md#22-timing-accuracy) - - 2.3 [Polling in uasyncio](./FASTPOLL.md#23-polling-in-uasyncio) - - 2.4 [Background](./FASTPOLL.md#24-background) - - 3. [A solution](./FASTPOLL.md#3-a-solution) - - 3.1 [Low priority yield](./FASTPOLL.md#31-low-priority-yield) - - 3.1.1 [Task Cancellation and Timeouts](./FASTPOLL.md#311-task-cancellation-and-timeouts) - - 3.2 [Low priority callbacks](./FASTPOLL.md#32-low-priority-callbacks) - - 3.3 [High priority tasks](./FASTPOLL.md#33-high-priority-tasks) - - 4. [The asyn library](./FASTPOLL.md#4-the-asyn-library) - - 5. [Heartbeat](./FASTPOLL.md#5-heartbeat) - - 6. [ESP Platforms](./FASTPOLL.md#6-esp-platforms) - -# 1. Installation - -Install and test uasyncio on the target hardware. Copy `asyncio_priority.py` -to the target. Users of previous versions should update any of the benchmark -programs which are to be run. - -In MicroPython 1.9 `uasyncio` was implemented as a frozen module on the -ESP8266. This version is not compatible with `asyncio_priority.py`. Given the -limited resources of the ESP8266 `uasyncio` and `uasyncio_priority` should be -implemented as frozen bytecode. See -[ESP Platforms](./FASTPOLL.md#6-esp-platforms) for general comments on the -suitability of ESP platforms for systems requiring fast response. - -## 1.1 Benchmarks - -The benchmarks directory contains files demonstrating the performance gains -offered by prioritisation. They also offer illustrations of the use of these -features. Documentation is in the code. - - * `benchmarks/latency.py` Shows the effect on latency with and without low - priority usage. - * `benchmarks/timing.py` Shows the effect on timing with and without low - priority usage. - * `benchmarks/rate.py` Shows the frequency with which the official uasyncio - schedules minimal coroutines (coros). 
- * `benchmarks/rate_p.py` As above, but measures the overhead of the priority - extension. - * `benchmarks/call_lp.py` Demos low priority callbacks. - * `benchmarks/overdue.py` Demo of maximum overdue feature. - * `benchmarks/priority.py` Demo of high priority coro. - * `priority_test.py` Cancellation of low priority coros. - -With the exceptions of call_lp and priority.py, benchmarks can be run against -the official and priority versions of uasyncio. - -# 2. Rationale - -Applications may need to poll a hardware device or a flag set by an interrupt -service routine (ISR). An overrun may occur if the scheduling of the polling -coroutine (coro) is subject to excessive latency. - -Further, a coro issuing `await asyncio.sleep_ms(t)` may block for much longer -than `t` depending on the number and design of other coros which are pending -execution. - -This variant mitigates this by enabling coros to yield control in a way which -prevents them from competing with coros which are ready for execution. Coros -which have yielded in a low priority fashion will not be scheduled until all -"normal" coros are waiting on a nonzero timeout. The benchmarks show that the -improvement can exceed two orders of magnitude. - -It also provides for fast scheduling where a user supplied callback is tested -on every iteration of the scheduler. This minimises latency at some cost to -overall performance. - -## 2.1 Latency - -Coroutines in uasyncio which are pending execution are scheduled in a "fair" -round-robin fashion. Consider these functions: - -```python -async def foo(): - while True: - yield - # code which takes 4ms to complete - -async def handle_isr(): - global isr_has_run - while True: - if isr_has_run: - # read and process data - isr_has_run = False - yield -``` - -Assume a hardware interrupt handler sets the `isr_has_run` flag, and that we -have ten instances of `foo()` and one instance of `handle_isr()`. 
When -`handle_isr()` issues `yield`, its execution will pause for 40ms while each -instance of `foo()` is scheduled and performs one iteration. This may be -unacceptable: it may be necessary to poll and respond to the flag at a rate -adequate to avoid overruns. - -This version provides a mechanism for reducing this latency by enabling the -`foo()` instances to yield in a low priority manner. In the case where all -coros other than `handle_isr()` are low priority the latency is reduced to -250μs - a figure close to the inherent latency of uasyncio. - -The benchmark latency.py demonstrates this. Documentation is in the code; it -can be run against both official and priority versions. This measures scheduler -latency. Maximum application latency, measured relative to the incidence of an -asynchronous event, will be 250μs plus the worst-case delay between yields of -any one competing task. - -Where a coro must respond rapidly to an event, the scheduler can test a user -supplied callback on every iteration. See -[section 3.3](./FASTPOLL.md#33-high-priority-tasks). - -###### [Jump to Contents](./FASTPOLL.md#contents) - -## 2.2 Timing accuracy - -Consider these functions: - -```python -async def foo(): - while True: - await asyncio.sleep(0) - # code which takes 4ms to complete - -async def fast(): - while True: - # Code omitted - await asyncio.sleep_ms(15) - # Code omitted -``` - -Again assume ten instances of `foo()` and one of `fast()`. When `fast()` -issues `await asyncio.sleep_ms(15)` it will not see a 15ms delay. During the -15ms period `foo()` instances will be scheduled. When the delay elapses, -`fast()` will compete with pending `foo()` instances. - -This results in variable delays up to 55ms (10 tasks * 4ms + 15ms). The -priority version can improve this substantially. 
The degree of improvement -is dependent on other coros regularly yielding with low priority: if any coro -hogs execution for a substantial period that will inevitably contribute to -latency in a cooperative system. - -In the somewhat contrived example of 200 tasks each issuing a low priority -yield every 2ms, a 10ms nominal delay produced times in the range 9.8 to 10.8ms -contrasting to 407.9 to 410.9ms using normal scheduling. - -The benchmark timing.py demonstrates this. Documentation is in the code. It can -be run against the official and priority versions. - -###### [Jump to Contents](./FASTPOLL.md#contents) - -## 2.3 Polling in uasyncio - -The asyncio library provides various mechanisms for polling a device or flag. -Aside from a polling loop these include awaitable classes and asynchronous -iterators. It is important to appreciate that these mechanisms have the same -drawback as the polling loop: uasyncio schedules tasks by placing them on a -`utimeq` queue. This is a queue sorted by time-to-run. Tasks which are ready -to run are scheduled in "fair" round-robin fashion. This means that a task -waiting on a zero delay will be rescheduled only after the scheduling of all -other such tasks (including timed waits whose time has elapsed). - -A partial solution is to design the competing `foo()` tasks to minimise the -delay between yields to the scheduler. This can be difficult or impossible. -Further it is inefficient to reduce the delay much below 2ms as the scheduler -takes ~200μs to schedule a task. - -Practical cases exist where the `foo()` tasks are not time-critical: in such -cases the performance of time critical tasks may be enhanced by enabling -`foo()` to submit for rescheduling in a way which does not compete with tasks -requiring a fast response. In essence "slow" operations tolerate longer latency -and longer time delays so that fast operations meet their performance targets. -Examples are: - - * User interface code. 
A system with ten pushbuttons might have a coro running - on each. A GUI touch detector coro needs to check a touch against a sequence of - objects. Both may tolerate 100ms of latency before users notice any lag. - * Networking code: a latency of 100ms may be dwarfed by that of the network. - * Mathematical code: there are cases where time consuming calculations may - take place which are tolerant of delays. Examples are statistical analysis, - sensor fusion and astronomical calculations. - * Data logging. - -###### [Jump to Contents](./FASTPOLL.md#contents) - -## 2.4 Background - -This has been discussed in detail -[in issue 2989](https://github.com/micropython/micropython/issues/2989). - -A further discussion on the subject of using the ioread mechanism to achieve -fast scheduling took place -[in issue 2664](https://github.com/micropython/micropython/issues/2664). The final -comment to this issue suggests that it may never be done for drivers written in -Python. While a driver written in C is an undoubted solution, the purpose of -MicroPython is arguably to facilitate coding in Python where possible. - -It seems there is no plan to incorporate a priority mechanism in the official -version of uasyncio but I believe it confers significant advantages for the -reasons discussed above. Hence this variant. - -###### [Jump to Contents](./FASTPOLL.md#contents) - -# 3. A solution - -The module enables coroutines to yield to the scheduler with three levels of -priority, one with higher and one with lower priority than standard. It -provides a replacement for `uasyncio.get_event_loop()` enabling the queue -sizes to be set. - -`asyncio_priority.get_event_loop(runq_len, waitq_len, lpqlen)` -Arguments: - 1. `runq_len` Length of normal queue. Default 16 tasks. - 2. `waitq_len` Length of wait queue. Default 16. - 3. `lpqlen` Length of low priority queue. Default 16. 
- -The low priority solution is based on the notion of "after" implying a time -delay which can be expected to be less precise than the asyncio standard calls. -The optional high priority mechanism adds "when" implying scheduling when a -condition is met. The library adds the following awaitable instances: - - * `after(t)` Low priority version of `sleep(t)`. - * `after_ms(t)` LP version of `sleep_ms(t)`. - * `when(callback)` Re-schedules when the callback returns True. - -It adds the following event loop methods: - - * `loop.call_after(t, callback, *args)` - * `loop.call_after_ms(t, callback, *args)` - * `loop.max_overdue_ms(t=None)` This sets the maximum time a low priority task - will wait before being scheduled. A value of 0 corresponds to no limit. The - default arg `None` leaves the period unchanged. Always returns the period - value. - -See [Low priority callbacks](./FASTPOLL.md#32-low-priority-callbacks) - -## 3.1 Low priority yield - -Consider this code fragment: - -```python -import asyncio_priority as asyncio -loop = asyncio.get_event_loop() - -async def foo(): - while True: - # Do something - await asyncio.after(1.5) # Wait a minimum of 1.5s - # code - await asyncio.after_ms(20) # Wait a minimum of 20ms -``` - -These `await` statements cause the coro to suspend execution for the minimum -time specified. Low priority coros run in a mutually "fair" round-robin fashion. -By default the coro will only be rescheduled when all "normal" coros are waiting -on a nonzero time delay. A "normal" coro is one that has yielded by any other -means. - -This behaviour can be overridden to limit the degree to which they can become -overdue. For the reasoning behind this consider this code: - -```python -import asyncio_priority as asyncio - -async def foo(): - while True: - # Do something - await asyncio.after(0) -``` - -By default a coro yielding in this way will be re-scheduled only when there are -no "normal" coros ready for execution i.e. 
when all are waiting on a nonzero -delay. The implication of having this degree of control is that if a coro -issues: - -```python -while True: - await asyncio.sleep(0) - # Do something which does not yield to the scheduler -``` - -low priority tasks will never be executed. Normal coros must sometimes wait on -a non-zero delay to enable the low priority ones to be scheduled. This is -analogous to running an infinite loop without yielding. - -This behaviour can be modified by issuing: - -```python -loop = asyncio.get_event_loop(max_overdue_ms = 1000) -``` - -In this instance a task which has yielded in a low priority manner will be -rescheduled in the presence of pending "normal" tasks if they become overdue by -more than 1s. - -### 3.1.1 Task Cancellation and Timeouts - -Tasks which yield in a low priority manner may be subject to timeouts or be -cancelled in the same way as normal tasks. See [Task cancellation](./TUTORIAL.md#36-task-cancellation) -and [Coroutines with timeouts](./TUTORIAL.md#44-coroutines-with-timeouts). - -###### [Jump to Contents](./FASTPOLL.md#contents) - -## 3.2 Low priority callbacks - -The following `EventLoop` methods enable callback functions to be scheduled -to run when all normal coros are waiting on a delay or when `max_overdue_ms` -has elapsed: - -`call_after` Schedule a callback with low priority. Positional args: - 1. `delay` Minimum delay in seconds. May be a float or integer. - 2. `callback` The callback to run. - 3. `*args` Optional comma-separated positional args for the callback. - -The delay specifies a minimum period before the callback will run and may have -a value of 0. The period may be extended depending on other high and low -priority tasks which are pending execution. - -A simple demo of this is `benchmarks/call_lp.py`. Documentation is in the -code. - -`call_after_ms(delay, callback, *args)` Call with low priority. Positional -args: - 1. `delay` Integer. Minimum delay in millisecs before callback runs. - 2. 
`callback` The callback to run. - 3. `*args` Optional positional args for the callback. - -###### [Jump to Contents](./FASTPOLL.md#contents) - -## 3.3 High priority tasks - -Where latency must be reduced to the absolute minimum, a condition may be -tested on every iteration of the scheduler. This involves yielding a callback -function which returns a boolean. When a coro yields to the scheduler, each -pending callback is run until one returns `True` when that task is run. If -there are no pending callbacks which return `True` it will schedule other -tasks. - -This mechanism should be used only if the application demands it. Caution is -required since running the callbacks inevitably impacts the performance of -the scheduler. To minimise this callbacks should be short (typically returning -a boolean flag set by a hard interrupt handler) and the number of high priority -tasks should be small. - -The benchmark priority.py demonstrates and tests this mechanism. - -To yield at high priority issue - -```python -import asyncio_priority as asyncio - -async def foo(): - while True: - await asyncio.when(callback) # Pauses until callback returns True - # Code omitted - typically queue received data for processing - # by another coro -``` - -Pending callbacks are stored in a list which grows dynamically. An application -will typically have only one or two coroutines which wait on callbacks so the -list will never grow beyond this length. - -In the current implementation the callback takes no arguments. However it can -be a bound method, enabling it to access class and instance variables. - -No means of scheduling a high priority callback analogous to `call_soon` is -provided. If such a mechanism existed, the cb would run immediately the coro -yielded, with the coro being rescheduled once the cb returned `True`. This -behaviour can be achieved more efficiently by simply calling the function. - -###### [Jump to Contents](./FASTPOLL.md#contents) - -## 4. 
The asyn library - -This now uses the low priority (LP) mechanism if available and where -appropriate. It is employed as follows: - - * `Lock` class. Uses normal scheduling on the basis that locks should be - held for brief periods only. - * `Event` class. An optional boolean constructor arg, defaulting `False`, - specifies LP scheduling (if available). A `True` value provides for cases - where response to an event is not time-critical. - * `Barrier`, `Semaphore` and `BoundedSemaphore` classes use LP - scheduling if available. This is on the basis that typical code may wait on - these objects for some time. - -A coro waiting on a `Lock` or an `Event` which uses normal scheduling will -therefore prevent the execution of LP tasks for the duration. - -###### [Jump to Contents](./FASTPOLL.md#contents) - -# 5. Heartbeat - -I find it useful to run a "heartbeat" coro in development as a simple check -for code which has failed to yield. If the low priority mechanism is used this -can be extended to check that no coro loops indefinitely on a zero delay. - -```python -async def heartbeat(led): - while True: - led.toggle() - await after_ms(500) # Will hang while a coro loops on a zero delay -``` - -###### [Jump to Contents](./FASTPOLL.md#contents) - -# 6. ESP Platforms - -It should be noted that the response of the ESP8266 to hardware interrupts is -remarkably slow. This also appears to apply to ESP32 platforms. Consider -whether a response in the high hundreds of μs meets project requirements; also -whether a priority mechanism is needed on hardware with such poor realtime -performance. 
diff --git a/HD44780/alcdtest.py b/HD44780/alcdtest.py deleted file mode 100644 index 2899c31..0000000 --- a/HD44780/alcdtest.py +++ /dev/null @@ -1,19 +0,0 @@ -# alcdtest.py Test program for LCD class -# Author: Peter Hinch -# Copyright Peter Hinch 2017 Released under the MIT license -# runs for 20s -import uasyncio as asyncio -import utime as time -from alcd import LCD, PINLIST - -lcd = LCD(PINLIST, cols = 16) - -async def lcd_task(): - for secs in range(20, -1, -1): - lcd[0] = 'MicroPython {}'.format(secs) - lcd[1] = "{:11d}uS".format(time.ticks_us()) - await asyncio.sleep(1) - - -loop = asyncio.get_event_loop() -loop.run_until_complete(lcd_task()) diff --git a/PRIMITIVES.md b/PRIMITIVES.md deleted file mode 100644 index 1ebac96..0000000 --- a/PRIMITIVES.md +++ /dev/null @@ -1,690 +0,0 @@ -# 1. The asyn.py library - -This provides some simple synchronisation primitives, together with an API for -task monitoring and cancellation. Task cancellation requires uasyncio V 1.7.1 -or higher. At the time of writing (7th Jan 2018) it requires a daily build of -MicroPython firmware or one built from source. - -The library is too large to run on the ESP8266 except as frozen bytecode. An -obvious workaround is to produce a version with unused primitives removed. - -###### [Main README](./README.md) - -# Contents - - 1. [The asyn.py library](./PRIMITIVES.md#1-the-asyn.py-library) - - 1.1 [Synchronisation Primitives](./PRIMITIVES.md#11-synchronisation-primitives) - - 1.2 [Task control and monitoring](./PRIMITIVES.md#12-task-control-and-monitoring) - - 2. 
[Modules](./PRIMITIVES.md#2-modules) - - 3 [Synchronisation Primitives](./PRIMITIVES.md#3-synchronisation-primitives) - - 3.1 [Function launch](./PRIMITIVES.md#31-function-launch) - - 3.2 [Class Lock](./PRIMITIVES.md#32-class-lock) - - 3.2.1 [Definition](./PRIMITIVES.md#321-definition) - - 3.3 [Class Event](./PRIMITIVES.md#33-class-event) - - 3.3.1 [Definition](./PRIMITIVES.md#331-definition) - - 3.4 [Class Barrier](./PRIMITIVES.md#34-class-barrier) - - 3.5 [Class Semaphore](./PRIMITIVES.md#35-class-semaphore) - - 3.5.1 [Class BoundedSemaphore](./PRIMITIVES.md#351-class-boundedsemaphore) - - 3.6 [Class Condition](./PRIMITIVES.md#36-class-condition) - - 3.6.1 [Definition](./PRIMITIVES.md#361-definition) - - 3.7 [Class Gather](./PRIMITIVES.md#37-class-gather) - - 3.7.1 [Definition](./PRIMITIVES.md#371-definition) - - 4. [Task Cancellation](./PRIMITIVES.md#4-task-cancellation) - - 4.1 [Coro sleep](./PRIMITIVES.md#41-coro-sleep) - - 4.2 [Class Cancellable](./PRIMITIVES.md#42-class-cancellable) - - 4.2.1 [Groups](./PRIMITIVES.md#421-groups) - - 4.2.2 [Custom cleanup](./PRIMITIVES.md#422-custom-cleanup) - - 4.3 [Class NamedTask](./PRIMITIVES.md#43-class-namedtask) - - 4.3.1 [Latency and Barrier objects](./PRIMITIVES.md#431-latency-and-barrier-objects) - - 4.3.2 [Custom cleanup](./PRIMITIVES.md#432-custom-cleanup) - - 4.3.3 [Changes](./PRIMITIVES.md#433-changes) - -## 1.1 Synchronisation Primitives - -There is often a need to provide synchronisation between coros. A common -example is to avoid what are known as "race conditions" where multiple coros -compete to access a single resource. An example is provided in the `aswitch.py` -program and discussed in [the docs](./DRIVERS.md). Another hazard is the "deadly -embrace" where two coros wait on the other's completion. - -In simple applications these are often addressed with global flags. A more -elegant approach is to use synchronisation primitives. 
The module `asyn.py` -offers "micro" implementations of `Lock`, `Event`, `Barrier`, `Semaphore` and -`Condition` primitives, and a lightweight implementation of `asyncio.gather`. - -Another synchronisation issue arises with producer and consumer coros. The -producer generates data which the consumer uses. Asyncio provides the `Queue` -object. The producer puts data onto the queue while the consumer waits for its -arrival (with other coros getting scheduled for the duration). The `Queue` -guarantees that items are removed in the order in which they were received. As -this is a part of the uasyncio library its use is described in the [tutorial](./TUTORIAL.md). - -###### [Contents](./PRIMITIVES.md#contents) - -## 1.2 Task control and monitoring - -`uasyncio` does not implement the `Task` and `Future` classes of `asyncio`. -Instead it uses a 'micro' lightweight means of task cancellation. The `asyn.py` -module provides an API to simplify its use and to check on the running status -of coroutines which are subject to cancellation. - -# 2. Modules - -The following modules are provided: - * `asyn.py` The main library. - * `asyntest.py` Test/demo programs for the primitives. - * `asyn_demos.py` Minimal "get started" task cancellation demos. - * `cantest.py` Task cancellation tests. Examples of intercepting `StopTask`. - Intended to verify the library against future `uasyncio` changes. - -Import `asyn_demos.py` or `cantest.py` for a list of available tests. - -###### [Contents](./PRIMITIVES.md#contents) - -# 3. Synchronisation Primitives - -The primitives are intended for use only with `uasyncio`. They are `micro` in -design. They are not thread safe and hence are incompatible with the `_thread` -module and with interrupt handlers. - -## 3.1 Function launch - -This function accepts a function or coro as an argument, along with a tuple of -args. If the function is a callback it is executed with the supplied arguments. -If it is a coro, it is scheduled for execution. 
-
-args:
- * `func` Mandatory. A function or coro. These are provided 'as-is' i.e. not
- using function call syntax.
- * `tup_args` Optional. A tuple of arguments, default `()`. The args are
- unpacked when provided to the function.
-
-## 3.2 Class Lock
-
-This has now been superseded by the more efficient official version.
-
-At time of writing (18th Dec 2017) the official `Lock` class is not complete.
-If a coro is subject to a [timeout](./TUTORIAL.md#44-coroutines-with-timeouts)
-and the timeout is triggered while it is waiting on a lock, the timeout will be
-ineffective. It will not receive the `TimeoutError` until it has acquired the
-lock.
-
-The implementation in `asyn.py` avoids this limitation but at the cost of lower
-efficiency. The remainder of this section describes this version.
-
-A lock guarantees unique access to a shared resource. The preferred way to use it
-is via an asynchronous context manager. In the following code sample a `Lock`
-instance `lock` has been created and is passed to all coros wishing to access
-the shared resource. Each coro issues the following:
-
-```python
-async def bar(lock):
-    async with lock:
-        # Access resource
-```
-
-While the coro `bar` is accessing the resource, other coros will pause at the
-`async with lock` statement until the context manager in `bar()` is complete.
-
-Note that MicroPython has a bug in its implementation of asynchronous context
-managers: a `return` statement should not be issued in the `async with` block.
-See note at end of [this section](./TUTORIAL.md#43-asynchronous-context-managers).
-
-### 3.2.1 Definition
-
-Constructor: Optional argument `delay_ms` default 0. Sets a delay between
-attempts to acquire the lock. In applications with coros needing frequent
-scheduling a nonzero value will facilitate this at the expense of latency.
-Methods:
-
- * `locked` No args. Returns `True` if locked.
- * `release` No args. Releases the lock.
- * `acquire` No args. 
Coro which pauses until the lock has been acquired. Use
- by executing `await lock.acquire()`.
-
-###### [Contents](./PRIMITIVES.md#contents)
-
-## 3.3 Class Event
-
-This provides a way for one or more coros to pause until another one flags them
-to continue. An `Event` object is instantiated and passed to all coros using
-it. Coros waiting on the event issue `await event`. Execution pauses
-until a coro issues `event.set()`. `event.clear()` must then be issued. An
-optional data argument may be passed to `event.set()` and retrieved by
-`event.value()`.
-
-In the usual case where a single coro is awaiting the event this can be done
-immediately after it is received:
-
-```python
-async def eventwait(event):
-    await event
-    event.clear()
-```
-
-The coro raising the event may need to check that it has been serviced:
-
-```python
-async def foo(event):
-    while True:
-        # Acquire data from somewhere
-        while event.is_set():
-            await asyncio.sleep(1)  # Wait for coro to respond
-        event.set()
-```
-
-If multiple coros are to wait on a single event, consider using a `Barrier`
-object described below. This is because the coro which raised the event has no
-way to determine whether all others have received it; determining when to clear
-it down requires further synchronisation. One way to achieve this is with an
-acknowledge event:
-
-```python
-async def eventwait(event, ack_event):
-    await event
-    ack_event.set()
-```
-
-Examples of this are in `event_test` and `ack_test` in asyntest.py.
-
-### 3.3.1 Definition
-
-Constructor: takes one optional boolean argument, defaulting False.
- * `lp` If `True` and the experimental low priority core.py is installed,
- low priority scheduling will be used while awaiting the event. If the standard
- version of uasyncio is installed the arg will have no effect.
-
-Synchronous Methods:
- * `set` Initiates the event. Optional arg `data`: may be of any type,
- sets the event's value. Default `None`. May be called in an interrupt context. 
- * `clear` No args. Clears the event, sets the value to `None`.
- * `is_set` No args. Returns `True` if the event is set.
- * `value` No args. Returns the value passed to `set`.
-
-The optional data value may be used to compensate for the latency in awaiting
-the event by passing `loop.time()`.
-
-###### [Contents](./PRIMITIVES.md#contents)
-
-## 3.4 Class Barrier
-
-This enables multiple coros to rendezvous at a particular point. For example
-producer and consumer coros can synchronise at a point where the producer has
-data available and the consumer is ready to use it. At that point in time the
-`Barrier` can optionally run a callback before releasing the barrier and
-allowing all waiting coros to continue.
-
-Constructor.
-Mandatory arg:
-`participants` The number of coros which will use the barrier.
-Optional args:
-`func` Callback to run. Default `None`.
-`args` Tuple of args for the callback. Default `()`.
-
-Public synchronous methods:
- * `busy` No args. Returns `True` if at least one coro is waiting on the
- barrier, or if at least one non-waiting coro has not triggered it.
- * `trigger` No args. The barrier records that the coro has passed the critical
- point. Returns "immediately".
-
-The callback can be a function or a coro. In most applications a function will
-be used as this can be guaranteed to run to completion before the barrier is
-released.
-
-Participant coros issue `await my_barrier` whereupon execution pauses until all
-other participants are also waiting on it. At this point any callback will run
-and then each participant will re-commence execution. See `barrier_test` and
-`semaphore_test` in `asyntest.py` for example usage.
-
-A special case of `Barrier` usage is where some coros are allowed to pass the
-barrier, registering the fact that they have done so. At least one coro must
-wait on the barrier. That coro will pause until all non-waiting coros have
-passed the barrier, and all waiting coros have reached it. 
At that point all
-waiting coros will resume. A non-waiting coro issues `barrier.trigger()` to
-indicate that it has passed the critical point.
-
-This mechanism is used in the `Cancellable` and `NamedTask` classes to register
-the fact that a coro has responded to cancellation. Using a non-waiting barrier
-in a looping construct carries a fairly obvious hazard and is normally to be
-avoided.
-
-###### [Contents](./PRIMITIVES.md#contents)
-
-## 3.5 Class Semaphore
-
-A semaphore limits the number of coros which can access a resource. It can be
-used to limit the number of instances of a particular coro which can run
-concurrently. It performs this using an access counter which is initialised by
-the constructor and decremented each time a coro acquires the semaphore.
-
-Constructor: Optional arg `value` default 1. Number of permitted concurrent
-accesses.
-
-Synchronous method:
- * `release` No args. Increments the access counter.
-
-Asynchronous method:
- * `acquire` No args. If the access counter is greater than 0, decrements it
- and terminates. Otherwise waits for it to become greater than 0 before
- decrementing it and terminating.
-
-The easiest way to use it is with a context manager:
-
-```python
-async def foo(sema):
-    async with sema:
-        # Limited access here
-```
-
-There is a difference between a `Semaphore` and a `Lock`. A `Lock`
-instance is owned by the coro which locked it: only that coro can release it. A
-`Semaphore` can be released by any coro which acquired it.
-
-### 3.5.1 Class BoundedSemaphore
-
-This works identically to the `Semaphore` class except that if the `release`
-method causes the access counter to exceed its initial value, a `ValueError`
-is raised.
-
-###### [Contents](./PRIMITIVES.md#contents)
-
-## 3.6 Class Condition
-
-A `Condition` instance enables controlled access to a shared resource. In
-typical applications a number of tasks wait for the resource to be available. 
-Once this occurs access can be controlled both by the number of tasks and by
-means of a `Lock`.
-
-A task waiting on a `Condition` instance will pause until another task issues
-`condition.notify(n)` or `condition.notify_all()`. If the number of tasks
-waiting on the condition exceeds `n`, only `n` tasks will resume. A `Condition`
-instance has a `Lock` as a member. A task will only resume when it has acquired
-the lock. User code may release the lock as required by the application logic.
-
-Typical use of the class is in a synchronous context manager:
-
-```python
-    with await cond:
-        cond.notify(2)  # Notify 2 tasks
-```
-
-```python
-    with await cond:
-        await cond.wait()
-        # Has been notified and has access to the locked resource
-        # Resource has been unlocked by context manager
-```
-### 3.6.1 Definition
-
-Constructor: Optional arg `lock=None`. A `Lock` instance may be specified,
-otherwise the `Condition` instantiates its own.
-
-Synchronous methods:
- * `locked` No args. Returns the state of the `Lock` instance.
- * `release` No args. Release the `Lock`. A `RuntimeError` will occur if the
- `Lock` is not locked.
- * `notify` Arg `n=1`. Notify `n` tasks. The `Lock` must be acquired before
- issuing `notify` otherwise a `RuntimeError` will occur.
- * `notify_all` No args. Notify all tasks. The `Lock` must be acquired before
- issuing `notify_all` otherwise a `RuntimeError` will occur.
-
-Asynchronous methods:
- * `acquire` No args. Pause until the `Lock` is acquired.
- * `wait` No args. Await notification and the `Lock`. The `Lock` must be
- acquired before issuing `wait` otherwise a `RuntimeError` will occur. The
- sequence is as follows:
- The `Lock` is released.
- The task pauses until another task issues `notify`.
- It continues to pause until the `Lock` has been re-acquired when execution
- resumes.
- * `wait_for` Arg: `predicate` a callback returning a `bool`. 
The task pauses - until a notification is received and an immediate test of `predicate()` - returns `True`. - -###### [Contents](./PRIMITIVES.md#contents) - -## 3.7 Class Gather - -This aims to replicate some of the functionality of `asyncio.gather` in a -'micro' form. The user creates a list of `Gatherable` tasks and then awaits a -`Gather` object. When the last task to complete terminates, this will return a -list of results returned by the tasks. Timeouts may be assigned to individual -tasks. - -```python -async def bar(x, y, rats): # Example coro: note arg passing - await asyncio.sleep(1) - return x * y * rats - -gatherables = [asyn.Gatherable(foo, n) for n in range(4)] -gatherables.append(asyn.Gatherable(bar, 7, 8, rats=77)) -gatherables.append(asyn.Gatherable(rats, 0, timeout=5)) -res = await asyn.Gather(gatherables) -``` - -The result `res` is a 6 element list containing the result of each of the 6 -coros. These are ordered by the position of the coro in the `gatherables` list. -This is as per `asyncio.gather()`. - -See `asyntest.py` function `gather_test()`. - -### 3.7.1 Definition - -The `Gatherable` class has no user methods. The constructor takes a coro by -name followed by any positional or keyword arguments for the coro. If an arg -`timeout` is provided it should have an integer or float value: this is taken -to be the timeout for the coro in seconds. Note that timeout is subject to the -latency discussed in [Coroutines with timeouts](./TUTORIAL.md#44-coroutines-with-timeouts). -A way to reduce this is to use `asyn.sleep()` in such coros. - -The `Gather` class has no user methods. The constructor takes one mandatory -arg: a list of `Gatherable` instances. - -`Gather` instances are awaitable. An `await` on an instance will terminate when -the last member task completes or times out. It returns a list whose length -matches the length of the list of `Gatherable` instances. Each element contains -the return value of the corresponding `Gatherable` instance. 
Each return value -may be of any type. - -###### [Contents](./PRIMITIVES.md#contents) - -# 4. Task Cancellation - -This has been under active development. Existing users please see -[Changes](./PRIMITIVES.md#433-changes) for recent API changes. - -`uasyncio` now provides a `cancel(coro)` function. This works by throwing an -exception to the coro in a special way: cancellation is deferred until the coro -is next scheduled. This mechanism works with nested coros. However there is a -limitation. If a coro issues `await uasyncio.sleep(secs)` or -`await uasyncio.sleep_ms(ms)` scheduling will not occur until the time has -elapsed. This introduces latency into cancellation which matters in some -use-cases. Other potential sources of latency take the form of slow code. -`uasyncio` has no mechanism for verifying when cancellation has actually -occurred. The `asyn.py` library provides solutions in the form of two classes. - -These are `Cancellable` and `NamedTask`. The `Cancellable` class allows the -creation of named groups of tasks which may be cancelled as a group; this -awaits completion of cancellation of all tasks in the group. - -The `NamedTask` class enables a task to be associated with a user supplied -name, enabling it to be cancelled and its status checked. Cancellation -optionally awaits confirmation of completion. - -For cases where cancellation latency is of concern `asyn.py` offers a `sleep` -function which provides a delay with reduced latency. - -## 4.1 Coro sleep - -Pause for a period as per `uasyncio.sleep` but with reduced exception handling -latency. - -The asynchronous `sleep` function takes two args: - * `t` Mandatory. Time in seconds. May be integer or float. - * `granularity` Optional integer >= 0, units ms. Default 100ms. Defines the - maximum latency. Small values reduce latency at cost of increased scheduler - workload. - -This repeatedly issues `uasyncio.sleep_ms(t)` where t <= `granularity`. 
-
-## 4.2 Class Cancellable
-
-This class provides for cancellation of one or more tasks where it is necessary
-to await confirmation that cancellation is complete. `Cancellable` instances
-are anonymous coros which are members of a named group. They are capable of
-being cancelled as a group. A typical use-case might take this form:
-
-```python
-async def comms():  # Perform some communications task
-    while True:
-        await initialise_link()
-        try:
-            await do_communications()  # Launches Cancellable tasks
-        except CommsError:
-            await Cancellable.cancel_all()
-        # All sub-tasks are now known to be stopped. They can be re-started
-        # with known initial state on next pass.
-```
-
-A `Cancellable` task is declared with the `@cancellable` decorator:
-
-```python
-@cancellable
-async def print_nums(num):
-    while True:
-        print(num)
-        num += 1
-        await sleep(1)  # asyn.sleep() allows fast response to exception
-```
-
-Positional or keyword arguments for the task are passed to the `Cancellable`
-constructor as below. Note that the coro is passed not using function call
-syntax. `Cancellable` tasks may be awaited or placed on the event loop:
-
-```python
-await Cancellable(print_nums, 5)  # single arg to print_nums.
-loop = asyncio.get_event_loop()
-loop.create_task(Cancellable(print_nums, 42)())  # Note () syntax.
-```
-
-The following will cancel any tasks still running, pausing until cancellation
-is complete:
-
-```python
-await Cancellable.cancel_all()
-```
-
-Constructor mandatory args:
- * `task` A coro passed by name i.e. not using function call syntax.
-
-Constructor optional positional args:
- * Any further positional args are passed to the coro.
-
-Constructor optional keyword args:
- * `group` Any Python object, typically integer or string. Default 0. See
- Groups below.
- * Further keyword args are passed to the coro.
-
-Public class method:
- * `cancel_all` Asynchronous.
- Optional args `group` default 0, `nowait` default `False`. 
- The `nowait` arg is for use by the `NamedTask` derived class. The default
- value is assumed below.
- The method cancels all instances in the specified group and awaits completion.
- See Groups below.
- The `cancel_all` method will complete when all `Cancellable` instances have
- been cancelled or terminated naturally before `cancel_all` was launched.
- Each coro will receive a `StopTask` exception when it is next scheduled. If
- the coro is written using the `@cancellable` decorator this is handled
- automatically.
- It is possible to trap the `StopTask` exception: see 'Custom cleanup' below.
-
-Public bound method:
- * `__call__` This returns the coro and is used to schedule the task using the
- event loop `create_task()` method using function call syntax.
-
-The `StopTask` exception is an alias for `uasyncio.CancelledError`. In my view
-the name is more descriptive of its function.
-
-### 4.2.1 Groups
-
-`Cancellable` tasks may be assigned to groups, identified by a user supplied
-Python object, typically an integer or string. By default tasks are assigned to
-group 0. The `cancel_all` class method cancels all tasks in the specified
-group. The 0 default ensures that this facility can be ignored if not required,
-with `cancel_all` cancelling all `Cancellable` tasks.
-
-### 4.2.2 Custom cleanup
-
-A task created with the `cancellable` decorator can intercept the `StopTask`
-exception to perform custom cleanup operations. This may be done as below:
-
-```python
-@cancellable
-async def foo():
-    while True:
-        try:
-            await sleep(1)  # Main body of task
-        except StopTask:
-            # perform custom cleanup
-            return  # Respond by quitting
-```
-
-The following example returns `True` if it ends normally or `False` if
-cancelled. 
-
-```python
-@cancellable
-async def bar():
-    try:
-        await sleep(1)  # Main body of task
-    except StopTask:
-        return False
-    else:
-        return True
-```
-
-###### [Contents](./PRIMITIVES.md#contents)
-
-## 4.3 Class NamedTask
-
-A `NamedTask` instance is associated with a user-defined name such that the
-name may outlive the task: a coro may end but the class enables its state to be
-checked. It is a subclass of `Cancellable` and its constructor disallows
-duplicate names: each instance of a coro must be assigned a unique name.
-
-A `NamedTask` coro is defined with the `@cancellable` decorator.
-
-```python
-@cancellable
-async def foo(arg1, arg2):
-    await asyn.sleep(1)
-    print('Task foo has ended.', arg1, arg2)
-```
-
-The `NamedTask` constructor takes the name, the coro, plus any user positional
-or keyword args. The resultant instance can be scheduled in the usual ways:
-
-```python
-await NamedTask('my foo', foo, 1, 2)  # Pause until complete or killed
-loop = asyncio.get_event_loop()  # Or schedule and continue:
-loop.create_task(NamedTask('my nums', foo, 10, 11)())  # Note () syntax.
-```
-
-Cancellation is performed with:
-
-```python
-await NamedTask.cancel('my foo')
-```
-
-When cancelling a task there is no need to check if the task is still running:
-if it has already completed, cancellation will have no effect.
-
-NamedTask Constructor.
-Mandatory args:
- * `name` Names may be any immutable type capable of being a dictionary index
- e.g. integer or string. A `ValueError` will be raised if the name is already
- assigned by a running coro. If multiple instances of a coro are to run
- concurrently, each should be assigned a different name.
- * `task` A coro passed by name i.e. not using function call syntax.
-
- Optional positional args:
- * Any further positional args are passed to the coro.
-
- Optional keyword only args:
- * `barrier` A `Barrier` instance may be passed. See below.
- * Further keyword args are passed to the coro. 
- -Public class methods: - * `cancel` Asynchronous. - Mandatory arg: a coro name. - Optional boolean arg `nowait` default `True` - By default it will return soon. If `nowait` is `False` it will pause until the - coro has completed cancellation. - The named coro will receive a `StopTask` exception the next time it is - scheduled. If the `@namedtask` decorator is used this is transparent to the - user but the exception may be trapped for custom cleanup (see below). - `cancel` will return `True` if the coro was cancelled. It will return `False` - if the coro has already ended or been cancelled. - * `is_running` Synchronous. Arg: A coro name. Returns `True` if coro is queued - for scheduling, `False` if it has ended or been cancelled. - -Public bound method: - * `__call__` This returns the coro and is used to schedule the task using the - event loop `create_task()` method using function call syntax. - -### 4.3.1 Latency and Barrier objects - -It is possible to get confirmation of cancellation of an arbitrary set of -`NamedTask` instances by instantiating a `Barrier` and passing it to the -constructor of each member. This enables more complex synchronisation cases -than the normal method of using a group of `Cancellable` tasks. The approach is -described below. - -If a `Barrier` instance is passed to the `NamedTask` constructor, a task -performing cancellation can pause until a set of cancelled tasks have -terminated. The `Barrier` is constructed with the number of dependent tasks -plus one (the task which is to wait on it). It is passed to the constructor of -each dependent task and the cancelling task waits on it after cancelling all -dependent tasks. Each task being cancelled terminates 'immediately' subject -to latency. - -See examples in `cantest.py` e.g. `cancel_test2()`. - -### 4.3.2 Custom cleanup - -A coroutine to be used as a `NamedTask` can intercept the `StopTask` exception -if necessary. This might be done for cleanup or to return a 'cancelled' status. 
-The coro should have the following form: - -```python -@cancellable -async def foo(): - try: - await asyncio.sleep(1) # User code here - except StopTask: - return False # Cleanup code - else: - return True # Normal exit -``` - -### 4.3.3 Changes - -The `NamedTask` class has been rewritten as a subclass of `Cancellable`. This -is to simplify the code and to ensure accuracy of the `is_running` method. The -latest API changes are: - * `Cancellable.stopped()` is no longer a public method. - * `NamedTask.cancel()` is now asynchronous. - * `NamedTask` and `Cancellable` coros no longer receive a `TaskId` instance as - their 1st arg. - * `@namedtask` still works but is now an alias for `@cancellable`. - -The drive to simplify code comes from the fact that `uasyncio` is itself under -development. Tracking changes is an inevitable headache. - -###### [Contents](./PRIMITIVES.md#contents) diff --git a/README.md b/README.md index 337798a..31e643c 100644 --- a/README.md +++ b/README.md @@ -1,107 +1,24 @@ -# 1. The MicroPython uasyncio library +# Asynchronous programming in MicroPython -This GitHub repository consists of the following parts: - * [A tutorial](./TUTORIAL.md) An introductory tutorial on asynchronous - programming and the use of the uasyncio library is offered. This is a work in - progress, not least because uasyncio is not yet complete. - * [Asynchronous device drivers](./DRIVERS.md). A module providing drivers for - devices such as switches and pushbuttons. - * [Synchronisation primitives](./PRIMITIVES.md). Provides commonly used - synchronisation primitives plus an API for task cancellation and monitoring. - * [A driver for an IR remote control](./nec_ir/README.md) This is intended as - an example of an asynchronous device driver. It decodes signals received from - infra red remote controls using the popular NEC protocol. - * [A driver for the HTU21D](./htu21d/README.md) temperature and humidity - sensor. 
This is intended to be portable across platforms and is another - example of an asynchronous device driver. - * [A modified uasyncio](./FASTPOLL.md) This incorporates a simple priority - mechanism. With suitable application design this improves the rate at which - devices can be polled and improves the accuracy of time delays. Also provides - for low priority tasks which are only scheduled when normal tasks are paused. - NOTE: this requires uasyncio V2.0. - * [Communication between devices](./syncom_as/README.md) Enables MicroPython - boards to communicate without using a UART. Primarily intended to enable a - a Pyboard-like device to achieve bidirectional communication with an ESP8266. +CPython supports asynchronous programming via the `asyncio` library. +MicroPython provides `asyncio` which is a subset of this, optimised for small +code size and high performance on bare metal targets. This repository provides +documentation, tutorial material and code to aid in its effective use. -# 2. Version and installation of uasyncio +# asyncio version 3 -The documentation and code in this repository are based on `uasyncio` version -2.0, which is the version on PyPi. This requires firmware dated 22nd Feb 2018 -or later. +Damien has completely rewritten `asyncio` which was released as V3.0. This is +incorporated in all recent firmware builds. The resources in this repo may be found in the +`v3` directory. These include a tutorial, synchronisation primitives, drivers, +applications and demos. -Version 2.0 brings only one API change over V1.7.1, namely the arguments to -`get_event_loop()`. Unless using the priority version all test programs and -code samples use default args so will work under either version. The priority -version requires the later version and firmware. +# Concurrency -[Paul Sokolovsky's library](https://github.com/pfalcon/micropython-lib) has the -latest `uasyncio` code. 
At the time of writing (Feb 27th 2018) the version in -[micropython-lib](https://github.com/micropython/micropython-lib) is 1.7.1. +Other documents provide hints on asynchronous programming techniques including +threading and multi-core coding. -See [tutorial](./TUTORIAL.md#installing-uasyncio-on-bare-metal) for -installation instructions. +### [Go to V3 docs](./v3/README.md) -# 3. uasyncio development state +# uasyncio version 2 -These notes are intended for users familiar with `asyncio` under CPython. - -The MicroPython language is based on CPython 3.4. The `uasyncio` library -supports a subset of the CPython 3.4 `asyncio` library with some V3.5 -extensions. In addition there are nonstandard extensions to optimise services -such as millisecond level timing and task cancellation. Its design focus is on -high performance and scheduling is performed without RAM allocation. - -The `uasyncio` library supports the following Python 3.5 features: - - * `async def` and `await` syntax. - * Awaitable classes (using `__iter__` rather than `__await__`). - * Asynchronous context managers. - * Asynchronous iterators. - * Event loop methods `call_soon` and `call_later`. - * `sleep(seconds)`. - -It supports millisecond level timing with the following: - - * Event loop method `call_later_ms` - * uasyncio `sleep_ms(time)` - -As of `uasyncio.core` V1.7.1 (7th Jan 2018) it supports coroutine timeouts and -cancellation. - - * `wait_for(coro, t_secs)` runs `coro` with a timeout. - * `cancel(coro)` tags `coro` for cancellation when it is next scheduled. - -Classes `Task` and `Future` are not supported. - -## 3.1 Asynchronous I/O - -Asynchronous I/O works with devices whose drivers support streaming, such as -UARTs. - -## 3.2 Time values - -For timing asyncio uses floating point values of seconds. The `uasyncio.sleep` -method accepts floats (including sub-second values) or integers. Note that in -MicroPython the use of floats implies RAM allocation which incurs a performance -penalty. 
The design of `uasyncio` enables allocation-free scheduling. In -applications where performance is an issue, integers should be used and the -millisecond level functions (with integer arguments) employed where necessary. - -The `loop.time` method returns an integer number of milliseconds whereas -CPython returns a floating point number of seconds. `call_at` follows the -same convention. - -# 4. The asyn.py library - -This library ([docs](./PRIMITIVES.md)) provides 'micro' implementations of the -`asyncio` synchronisation primitives. -[CPython docs](https://docs.python.org/3/library/asyncio-sync.html) - -It also supports a `Barrier` class to facilitate coroutine synchronisation. - -Coroutine cancellation is performed in an efficient manner in `uasyncio`. The -`asyn` library uses this, further enabling the cancelling coro to pause until -cancellation is complete. It also provides a means of checking the 'running' -status of individual coroutines. - -A lightweight implementation of `asyncio.gather` is provided. +This is obsolete: code and docs have been removed. diff --git a/TUTORIAL.md b/TUTORIAL.md deleted file mode 100644 index 7b6416f..0000000 --- a/TUTORIAL.md +++ /dev/null @@ -1,1642 +0,0 @@ -# Application of uasyncio to hardware interfaces - -Most of this document assumes some familiarity with asynchronous programming. -For those new to it an introduction may be found -[here](./TUTORIAL.md#7-notes-for-beginners). - -The MicroPython `uasyncio` library comprises a subset of Python's `asyncio` -library. It is designed for use on microcontrollers. As such it has a small RAM -footprint and fast context switching with zero RAM allocation. This document -describes its use with a focus on interfacing hardware devices. The aim is to -design drivers in such a way that the application continues to run while the -driver is awaiting a response from the hardware. The application remains -responsive to events and to user interaction. 
-
-Another major application area for asyncio is in network programming: many
-guides to this may be found online.
-
-Note that MicroPython is based on Python 3.4 with minimal Python 3.5 additions.
-Except where detailed below, `asyncio` features of versions >3.4 are
-unsupported. As stated above it is a subset; this document identifies supported
-features.
-
-# Installing uasyncio on bare metal
-
-MicroPython libraries are located on [PyPi](https://pypi.python.org/pypi).
-Libraries to be installed are:
-
- * micropython-uasyncio
- * micropython-uasyncio.queues
- * micropython-uasyncio.synchro
-
-The `queues` and `synchro` modules are optional, but are required to run all
-the examples below.
-
-The official approach is to use the `upip` utility as described
-[here](https://github.com/micropython/micropython-lib). Network enabled
-hardware has this included in the firmware so it can be run locally. This is
-the preferred approach.
-
-On non-networked hardware there are two options. One is to use `upip` under a
-Linux real or virtual machine. This involves installing and building the Unix
-version of MicroPython, using `upip` to install to a directory on the PC, and
-then copying the library to the target.
-
-The need for Linux and the Unix build may be avoided by using
-[micropip.py](https://github.com/peterhinch/micropython-samples/tree/master/micropip).
-This runs under Python 3.2 or above. Create a temporary directory on your PC
-and install to that. Then copy the contents of the temporary directory to the
-device. The following assume Linux and a temporary directory named `~/syn` -
-adapt to suit your OS. The first option requires that `micropip.py` has
-executable permission. 
- -``` -$ ./micropip.py install -p ~/syn micropython-uasyncio -$ python3 -m micropip.py install -p ~/syn micropython-uasyncio -``` - -The `uasyncio` modules may be frozen as bytecode in the usual way, by placing -the `uasyncio` and `collections` directories in the port's `modules` directory -and rebuilding. - -###### [Main README](./README.md) - -# Contents - - 1. [Cooperative scheduling](./TUTORIAL.md#1-cooperative-scheduling) - - 1.1 [Modules](./TUTORIAL.md#11-modules) - - 2. [uasyncio](./TUTORIAL.md#2-uasyncio) - - 2.1 [Program structure: the event loop](./TUTORIAL.md#21-program-structure-the-event-loop) - - 2.2 [Coroutines (coros)](./TUTORIAL.md#22-coroutines-coros) - - 2.2.1 [Queueing a coro for scheduling](./TUTORIAL.md#221-queueing-a-coro-for-scheduling) - - 2.2.2 [Running a callback function](./TUTORIAL.md#222-running-a-callback-function) - - 2.2.3 [Notes](./TUTORIAL.md#223-notes) Coros as bound methods. Returning values. - - 2.3 [Delays](./TUTORIAL.md#23-delays) - - 3. [Synchronisation](./TUTORIAL.md#3-synchronisation) - - 3.1 [Lock](./TUTORIAL.md#31-lock) - - 3.1.1 [Locks and timeouts](./TUTORIAL.md#311-locks-and-timeouts) - - 3.2 [Event](./TUTORIAL.md#32-event) - - 3.2.1 [The event's value](./TUTORIAL.md#321-the-events-value) - - 3.3 [Barrier](./TUTORIAL.md#33-barrier) - - 3.4 [Semaphore](./TUTORIAL.md#34-semaphore) - - 3.4.1 [BoundedSemaphore](./TUTORIAL.md#341-boundedsemaphore) - - 3.5 [Queue](./TUTORIAL.md#35-queue) - - 3.6 [Task cancellation](./TUTORIAL.md#36-task-cancellation) - - 3.7 [Other synchronisation primitives](./TUTORIAL.md#37-other-synchronisation-primitives) - - 4. 
[Designing classes for asyncio](./TUTORIAL.md#4-designing-classes-for-asyncio) - - 4.1 [Awaitable classes](./TUTORIAL.md#41-awaitable-classes) - - 4.1.1 [Use in context managers](./TUTORIAL.md#411-use-in-context-managers) - - 4.1.2 [Awaiting a coro](./TUTORIAL.md#412-awaiting-a-coro) - - 4.2 [Asynchronous iterators](./TUTORIAL.md#42-asynchronous-iterators) - - 4.3 [Asynchronous context managers](./TUTORIAL.md#43-asynchronous-context-managers) - - 4.4 [Coroutines with timeouts](./TUTORIAL.md#44-coroutines-with-timeouts) - - 4.5 [Exceptions](./TUTORIAL.md#45-exceptions) - - 5. [Device driver examples](./TUTORIAL.md#5-device-driver-examples) - - 5.1 [The IORead mechnaism](./TUTORIAL.md#51-the-ioread-mechanism) - - 5.2 [Using a coro to poll hardware](./TUTORIAL.md#52-using-a-coro-to-poll-hardware) - - 5.3 [Using IORead to poll hardware](./TUTORIAL.md#53-using-ioread-to-poll-hardware) - - 5.4 [A complete example: aremote.py](./TUTORIAL.md#54-a-complete-example-aremotepy) - A driver for an IR remote control receiver. - - 5.5 [Driver for HTU21D](./TUTORIAL.md#55-htu21d-environment-sensor) A - temperature and humidity sensor. - - 6. [Hints and tips](./TUTORIAL.md#6-hints-and-tips) - - 6.1 [Coroutines are generators](./TUTORIAL.md#61-coroutines-are-generators) - - 6.2 [Program hangs](./TUTORIAL.md#62-program-hangs) - - 6.3 [uasyncio retains state](./TUTORIAL.md#63-uasyncio-retains-state) - - 6.4 [Garbage Collection](./TUTORIAL.md#64-garbage-collection) - - 6.5 [Testing](./TUTORIAL.md#65-testing) - - 6.6 [A common hard to find error](./TUTORIAL.md#66-a-common-error) - - 6.7 [Socket programming](./TUTORIAL.md#67-socket-programming) - - 7. 
[Notes for beginners](./TUTORIAL.md#7-notes-for-beginners) - - 7.1 [Problem 1: event loops](./TUTORIAL.md#71-problem-1:-event-loops) - - 7.2 [Problem 2: blocking methods](./TUTORIAL.md#7-problem-2:-blocking-methods) - - 7.3 [The uasyncio approach](./TUTORIAL.md#73-the-uasyncio-approach) - - 7.4 [Scheduling in uasyncio](./TUTORIAL.md#74-scheduling-in-uasyncio) - - 7.5 [Why cooperative rather than pre-emptive?](./TUTORIAL.md#75-why-cooperative-rather-than-pre-emptive) - - 7.6 [Communication](./TUTORIAL.md#76-communication) - - 7.7 [Polling](./TUTORIAL.md#77-polling) - - 8. [Modifying uasyncio](./TUTORIAL.md#8-modifying-uasyncio) - -# 1. Cooperative scheduling - -The technique of cooperative multi-tasking is widely used in embedded systems. -It offers lower overheads than pre-emptive scheduling and avoids many of the -pitfalls associated with truly asynchronous threads of execution. - -###### [Contents](./TUTORIAL.md#contents) - -## 1.1 Modules - -The following modules are provided which may be copied to the target hardware. - -**Libraries** - - 1. `asyn.py` Provides synchronisation primitives `Lock`, `Event`, `Barrier`, - `Semaphore` and `BoundedSemaphore`. Provides support for task cancellation via - `NamedTask` and `Cancellable` classes. - 2. `aswitch.py` This provides classes for interfacing switches and - pushbuttons and also a software retriggerable delay object. Pushbuttons are a - generalisation of switches providing logical rather than physical status along - with double-clicked and long pressed events. - 3. `asyncio_priority.py` An experimental version of uasyncio with a simple - priority mechanism. See [this doc](./FASTPOLL.md). Note that this does not yet - support `uasyncio` V2.0. - -**Demo Programs** - -The first two are the most immediately rewarding as they produce visible -results by accessing Pyboard hardware. - - 1. `aledflash.py` Flashes the four Pyboard LED's asynchronously for 10s. The - simplest uasyncio demo. Import it to run. - 2. 
`apoll.py` A device driver for the Pyboard accelerometer. Demonstrates - the use of a coroutine to poll a device. Runs for 20s. Import it to run. - 3. `astests.py` Test/demonstration programs for the `aswitch` module. - 4. `asyn_demos.py` Simple task cancellation demos. - 5. `roundrobin.py` Demo of round-robin scheduling. Also a benchmark of - scheduling performance. - 6. `awaitable.py` Demo of an awaitable class. One way of implementing a - device driver which polls an interface. - 7. `chain.py` Copied from the Python docs. Demo of chaining coroutines. - 8. `aqtest.py` Demo of uasyncio `Queue` class. - 9. `aremote.py` Example device driver for NEC protocol IR remote control. - 10. `auart.py` Demo of streaming I/O via a Pyboard UART. - -**Test Programs** - - 1. `asyntest.py` Tests for the synchronisation primitives in `asyn.py`. - 2. `cantest.py` Task cancellation tests. - -**Utility** - - 1. `check_async_code.py` A Python3 utility to locate a particular coding - error which can be hard to find. See [this para](./TUTORIAL.md#65-a-common-error). - -**Benchmarks** - -The `benchmarks` directory contains scripts to test and characterise the -uasyncio scheduler. See [this doc](./FASTPOLL.md). - -###### [Contents](./TUTORIAL.md#contents) - -# 2. uasyncio - -The asyncio concept is of cooperative multi-tasking based on coroutines, -referred to in this document as coros or tasks. - -###### [Contents](./TUTORIAL.md#contents) - -## 2.1 Program structure: the event loop - -Consider the following example: - -```python -import uasyncio as asyncio -loop = asyncio.get_event_loop() -async def bar(): - count = 0 - while True: - count += 1 - print(count) - await asyncio.sleep(1) # Pause 1s - -loop.create_task(bar()) # Schedule ASAP -loop.run_forever() -``` - -Program execution proceeds normally until the call to `loop.run_forever`. At -this point execution is controlled by the scheduler. A line after -`loop.run_forever` would never be executed. 
The scheduler runs `bar` -because this has been placed on the scheduler's queue by `loop.create_task`. -In this trivial example there is only one coro: `bar`. If there were others, -the scheduler would schedule them in periods when `bar` was paused. - -Many embedded applications have an event loop which runs continuously. The event -loop can also be started in a way which permits termination, by using the event -loop's `run_until_complete` method. Examples of this may be found in the -`astests.py` module. - -The event loop instance is a singleton, instantiated by a program's first call -to `asyncio.get_event_loop()`. This takes an optional integer arg being the -length of the coro queue - i.e. the maximum number of concurrent coros allowed. -The default of 42 is likely to be adequate for most purposes. If a coro needs -to call an event loop method, calling `asyncio.get_event_loop()` (without -args) will efficiently return it. - -###### [Contents](./TUTORIAL.md#contents) - -## 2.2 Coroutines (coros) - -A coro is instantiated as follows: - -```python -async def foo(delay_secs): - await asyncio.sleep(delay_secs) - print('Hello') -``` - -A coro can allow other coroutines to run by means of the `await coro` -statement. A coro must contain at least one `await` statement. This causes -`coro` to run to completion before execution passes to the next instruction. -Consider these lines of code: - -```python -await asyncio.sleep(delay_secs) -await asyncio.sleep(0) -``` - -The first causes the code to pause for the duration of the delay, with other -coros being scheduled for the duration. A delay of 0 causes any pending coros -to be scheduled in round-robin fashion before the following line is run. See -the `roundrobin.py` example. - -###### [Contents](./TUTORIAL.md#contents) - -### 2.2.1 Queueing a coro for scheduling - - * `EventLoop.create_task` Arg: the coro to run. The scheduler queues the - coro to run ASAP. The `create_task` call returns immediately. 
The coro - arg is specified with function call syntax with any required arguments passed. - * `EventLoop.run_until_complete` Arg: the coro to run. The scheduler queues - the coro to run ASAP. The coro arg is specified with function call syntax with - any required arguments passed. The `run_until_complete` call returns when - the coro terminates: this method provides a way of quitting the scheduler. - * `await` Arg: the coro to run, specified with function call syntax. Starts - the coro ASAP and blocks until it has run to completion. - -The above are compatible with CPython. Additional uasyncio methods are -discussed in 2.2.3 below. - -###### [Contents](./TUTORIAL.md#contents) - -### 2.2.2 Running a callback function - -Callbacks should be Python functions designed to complete in a short period of -time. This is because coroutines will have no opportunity to run for the -duration. - -The following `EventLoop` methods schedule callbacks: - - 1. `call_soon` Call as soon as possible. Args: `callback` the callback to - run, `*args` any positional args may follow separated by commas. - 2. `call_later` Call after a delay in secs. Args: `delay`, `callback`, - `*args` - 3. `call_later_ms` Call after a delay in ms. Args: `delay`, `callback`, - `*args`. - -```python -loop = asyncio.get_event_loop() -loop.call_soon(foo, 5) # Schedule callback 'foo' ASAP with an arg of 5. -loop.call_later(2, foo, 5) # Schedule after 2 seconds. -loop.call_later_ms(50, foo, 5) # Schedule after 50ms. -loop.run_forever() -``` - -###### [Contents](./TUTORIAL.md#contents) - -### 2.2.3 Notes - -A coro can contain a `return` statement with arbitrary return values. To -retrieve them issue: - -```python -result = await my_coro() -``` - -Coros may be bound methods. A coro must contain at least one `await` statement. - -###### [Contents](./TUTORIAL.md#contents) - -## 2.3 Delays - -Where a delay is required in a coro there are two options. 
For longer delays and -those where the duration need not be precise, the following should be used: - -```python -async def foo(delay_secs, delay_ms): - await asyncio.sleep(delay_secs) - print('Hello') - await asyncio.sleep_ms(delay_ms) -``` - -While these delays are in progress the scheduler will schedule other coros. -This is generally highly desirable, but it does introduce uncertainty in the -timing as the calling routine will only be rescheduled when the one running at -the appropriate time has yielded. The amount of latency depends on the design -of the application, but is likely to be on the order of tens or hundreds of ms; -this is discussed further in [Section 5](./TUTORIAL.md#5-device-driver-examples). - -Very precise delays may be issued by using the `utime` functions `sleep_ms` -and `sleep_us`. These are best suited for short delays as the scheduler will -be unable to schedule other coros while the delay is in progress. - -###### [Contents](./TUTORIAL.md#contents) - -# 3 Synchronisation - -There is often a need to provide synchronisation between coros. A common -example is to avoid what are known as "race conditions" where multiple coros -compete to access a single resource. An example is provided in the `astests.py` -program and discussed in [the docs](./DRIVERS.md). Another hazard is the "deadly -embrace" where two coros each wait on the other's completion. - -In simple applications communication may be achieved with global flags. A more -elegant approach is to use synchronisation primitives. The module -[asyn.py](https://github.com/peterhinch/micropython-async/blob/master/asyn.py) -offers "micro" implementations of `Event`, `Barrier`, `Semaphore` and -`Condition` primitives. These are for use only with asyncio. They are not -thread safe and should not be used with the `_thread` module or from an -interrupt handler except where mentioned. A `Lock` primitive is provided which -is partially superseded by an official implementation. 
- -Another synchronisation issue arises with producer and consumer coros. The -producer generates data which the consumer uses. Asyncio provides the `Queue` -object. The producer puts data onto the queue while the consumer waits for its -arrival (with other coros getting scheduled for the duration). The `Queue` -guarantees that items are removed in the order in which they were received. -Alternatively a `Barrier` instance can be used if the producer must wait -until the consumer is ready to access the data. - -The following provides a brief overview of the primitives. Full documentation -may be found [here](./PRIMITIVES.md). - -###### [Contents](./TUTORIAL.md#contents) - -## 3.1 Lock - -This describes the use of the official `Lock` primitive. - -This guarantees unique access to a shared resource. In the following code -sample a `Lock` instance `lock` has been created and is passed to all coros -wishing to access the shared resource. Each coro attempts to acquire the lock, -pausing execution until it succeeds. - -```python -import uasyncio as asyncio -from uasyncio.synchro import Lock - -async def task(i, lock): - while 1: - await lock.acquire() - print("Acquired lock in task", i) - await asyncio.sleep(0.5) - lock.release() - -async def killer(): - await asyncio.sleep(10) - -loop = asyncio.get_event_loop() - -lock = Lock() # The global Lock instance - -loop.create_task(task(1, lock)) -loop.create_task(task(2, lock)) -loop.create_task(task(3, lock)) - -loop.run_until_complete(killer()) # Run for 10s -``` - -### 3.1.1 Locks and timeouts - -At time of writing (5th Jan 2018) the official `Lock` class is not complete. -If a coro is subject to a [timeout](./TUTORIAL.md#44-coroutines-with-timeouts) -and the timeout is triggered while it is waiting on a lock, the timeout will be -ineffective. It will not receive the `TimeoutError` until it has acquired the -lock. The same observation applies to task cancellation. 
- -The module `asyn.py` offers a `Lock` class which works in these situations -[full details](./PRIMITIVES.md#32-class-lock). It is significantly less -efficient than the official class but supports additional interfaces as per the -CPython version including context manager usage. - -###### [Contents](./TUTORIAL.md#contents) - -## 3.2 Event - -This provides a way for one or more coros to pause until another flags them to -continue. An `Event` object is instantiated and made accessible to all coros -using it: - -```python -import asyn -event = asyn.Event() -``` - -Coros waiting on the event issue `await event` whereupon execution pauses until -another issues `event.set()`. [Full details.](./PRIMITIVES.md#33-class-event) - -This presents a problem if `event.set()` is issued in a looping construct; the -code must wait until the event has been accessed by all waiting coros before -setting it again. In the case where a single coro is awaiting the event this -can be achieved by the receiving coro clearing the event: - -```python -async def eventwait(event): - await event - event.clear() -``` - -The coro raising the event checks that it has been serviced: - -```python -async def foo(event): - while True: - # Acquire data from somewhere - while event.is_set(): - await asyncio.sleep(1) # Wait for coro to respond - event.set() -``` - -Where multiple coros wait on a single event synchronisationcan be achieved by -means of an acknowledge event. Each coro needs a separate event. - -```python -async def eventwait(event, ack_event): - await event - ack_event.set() -``` - -An example of this is provided in the `event_test` function in `asyntest.py`. -This is cumbersome. In most cases - even those with a single waiting coro - the -Barrier class below offers a simpler approach. - -An Event can also provide a means of communication between an interrupt handler -and a coro. The handler services the hardware and sets an event which is tested -in slow time by the coro. 
- -###### [Contents](./TUTORIAL.md#contents) - -### 3.2.1 The event's value - -The `event.set()` method can accept an optional data value of any type. A -coro waiting on the event can retrieve it by means of `event.value()`. Note -that `event.clear()` will set the value to `None`. A typical use for this -is for the coro setting the event to issue `event.set(utime.ticks_ms())`. Any -coro waiting on the event can determine the latency incurred, for example to -perform compensation for this. - -###### [Contents](./TUTORIAL.md#contents) - -## 3.3 Barrier - -This has two uses. Firstly it can cause a coro to pause until one or more other -coros have terminated. - -Secondly it enables multiple coros to rendezvous at a particular point. For -example producer and consumer coros can synchronise at a point where the -producer has data available and the consumer is ready to use it. At that point -in time the `Barrier` can run an optional callback before the barrier is -released and all waiting coros can continue. [Full details.](./PRIMITIVES.md#34-class-barrier) - -The callback can be a function or a coro. In most applications a function is -likely to be used: this can be guaranteed to run to completion before the -barrier is released. - -An example is the `barrier_test` function in `asyntest.py`. In the code -fragment from that program: - -```python -import asyn - -def callback(text): - print(text) - -barrier = asyn.Barrier(3, callback, ('Synch',)) - -async def report(): - for i in range(5): - print('{} '.format(i), end='') - await barrier -``` - -multiple instances of `report` print their result and pause until the other -instances are also complete and waiting on `barrier`. At that point the -callback runs. On its completion the coros resume. - -###### [Contents](./TUTORIAL.md#contents) - -## 3.4 Semaphore - -A semaphore limits the number of coros which can access a resource. It can be -used to limit the number of instances of a particular coro which can run -concurrently. 
It performs this using an access counter which is initialised by -the constructor and decremented each time a coro acquires the semaphore. -[Full details.](./PRIMITIVES.md#35-class-semaphore) - -The easiest way to use it is with a context manager: - -```python -import asyn -sema = asyn.Semaphore(3) -async def foo(sema): - async with sema: - # Limited access here -``` -An example is the `semaphore_test` function in `asyntest.py`. - -###### [Contents](./TUTORIAL.md#contents) - -### 3.4.1 BoundedSemaphore - -This works identically to the `Semaphore` class except that if the `release` -method causes the access counter to exceed its initial value, a `ValueError` -is raised. [Full details.](./PRIMITIVES.md#351-class-boundedsemaphore) - -###### [Contents](./TUTORIAL.md#contents) - -## 3.5 Queue - -The `Queue` class is officially supported and the sample program `aqtest.py` -demonstrates its use. A queue is instantiated as follows: - -```python -from uasyncio.queues import Queue -q = Queue() -``` - -A typical producer coro might work as follows: - -```python -async def producer(q): - while True: - result = await slow_process() # somehow get some data - await q.put(result) # may pause if a size limited queue fills -``` - -and the consumer works along these lines: - -```python -async def consumer(q): - while True: - result = await(q.get()) # Will pause if q is empty - print('Result was {}'.format(result)) -``` - -The `Queue` class provides significant additional functionality in that the -size of queues may be limited and the status may be interrogated. The behaviour -on empty status and (where size is limited) the behaviour on full status may be -controlled. Documentation of this is in the code. - -###### [Contents](./TUTORIAL.md#contents) - -## 3.6 Task cancellation - -This requires `uasyncio` V1.7.1 or later, with suitably recent firmware. - -`uasyncio` now provides a `cancel(coro)` function. 
This works by throwing an -exception to the coro in a special way: cancellation is deferred until the coro -is next scheduled. This mechanism works with nested coros. However there is a -limitation. If a coro issues `await uasyncio.sleep(secs)` or -`uasyncio.sleep_ms(ms)` scheduling will not occur until the time has elapsed. -This introduces latency into cancellation which matters in some use-cases. -Other potential sources of latency take the form of slow code. `uasyncio` has -no mechanism for verifying when cancellation has actually occurred. The `asyn` -library provides solutions via the following classes: - - 1. `Cancellable` This allows one or more tasks to be assigned to a group. A - coro can cancel all tasks in the group, pausing until this has been acheived. - Documentation may be found [here](./PRIMITIVES.md#42-class-cancellable). - 2. `NamedTask` This enables a coro to be associated with a user-defined name. - The running status of named coros may be checked. For advanced usage more - complex groupings of tasks can be created. Documentation may be found - [here](./PRIMITIVES.md#43-class-namedtask). - -A typical use-case is as follows: - -```python -async def comms(): # Perform some communications task - while True: - await initialise_link() - try: - await do_communications() # Launches Cancellable tasks - except CommsError: - await Cancellable.cancel_all() - # All sub-tasks are now known to be stopped. They can be re-started - # with known initial state on next pass. -``` - -Examples of the usage of these classes may be found in `asyn_demos.py`. 
For an -illustration of the mechanism a cancellable task is defined as below: - -```python -@asyn.cancellable -async def print_nums(_, num): - while True: - print(num) - num += 1 - await asyn.sleep(1) -``` - -It is launched and cancelled with: - -```python -async def foo(): - loop = asyncio.get_event_loop() - loop.create_task(asyn.Cancellable(print_nums, 42)()) - await asyn.sleep(7.5) - await asyn.Cancellable.cancel_all() - print('Done') -``` - -###### [Contents](./TUTORIAL.md#contents) - -## 3.7 Other synchronisation primitives - -The `asyn.py` library provides 'micro' implementations of CPython capabilities, -namely the [Condition class](./PRIMITIVES.md#36-class-condition) and the -[gather](./PRIMITIVES.md#37-class-gather) method. - -The `Condition` class enables a coro to notify other coros which are waiting on -a locked resource. Once notified they will access the resource and release the -lock in turn. The notifying coro can limit the number of coros to be notified. - -The CPython `gather` method enables a list of coros to be launched. When the -last has completed a list of results is returned. This 'micro' implementation -uses different syntax. Timeouts may be applied to any of the coros. - -###### [Contents](./TUTORIAL.md#contents) - -# 4 Designing classes for asyncio - -In the context of device drivers the aim is to ensure nonblocking operation. -The design should ensure that other coros get scheduled in periods while the -driver is waiting for the hardware. For example a task awaiting data arriving -on a UART or a user pressing a button should allow other coros to be scheduled -until the event occurs.. - -###### [Contents](./TUTORIAL.md#contents) - -## 4.1 Awaitable classes - -A coro can pause execution by waiting on an `awaitable` object. Under CPython -a custom class is made `awaitable` by implementing an `__await__` special -method. This returns a generator. 
An `awaitable` class is used as follows: - -```python -import uasyncio as asyncio - -class Foo(): - def __await__(self): - for n in range(5): - print('__await__ called') - yield from asyncio.sleep(1) # Other coros get scheduled here - return 42 - - __iter__ = __await__ # See note below - -async def bar(): - foo = Foo() # Foo is an awaitable class - print('waiting for foo') - res = await foo # Retrieve result - print('done', res) - -loop = asyncio.get_event_loop() -loop.run_until_complete(bar()) -``` - -Currently MicroPython doesn't support `__await__` (issue #2678) and -`__iter__` must be used. The line `__iter__ = __await__` enables portability -between CPython and MicroPython. Example code may be found in the `Event`, -`Barrier`, `Cancellable` and `Condition` classes in asyn.py. - -### 4.1.1 Use in context managers - -Awaitable objects can be used in synchronous or asynchronous CM's by providing -the necessary special methods. The syntax is: - -```python -with await awaitable as a: # The 'as' clause is optional - # code omitted -async with awaitable as a: # Asynchronous CM (see below) - # do something -``` - -To achieve this the `__await__` generator should return `self`. This is passed -to any variable in an `as` clause and also enables the special methods to work. -See `asyn.Condition` and `asyntest.condition_test`, where the `Condition` class -is awaitable and may be used in a synchronous CM. - -###### [Contents](./TUTORIAL.md#contents) - -### 4.1.2 Awaiting a coro - -The Python language requires that `__await__` is a generator function. In -MicroPython generators and coroutines are identical, so the solution is to use -`yield from coro(args)`. - -This tutorial aims to offer code portable to CPython 3.5 or above. In CPython -coroutines and generators are distinct. CPython coros have an `__await__` -special method which retrieves a generator. This is portable: - -```python -up = False # Running under MicroPython? 
-try: - import uasyncio as asyncio - up = True # Or can use sys.implementation.name -except ImportError: - import asyncio - -async def times_two(n): # Coro to await - await asyncio.sleep(1) - return 2 * n - -class Foo(): - def __await__(self): - res = 1 - for n in range(5): - print('__await__ called') - if up: # MicroPython - res = yield from times_two(res) - else: # CPython - res = yield from times_two(res).__await__() - return res - - __iter__ = __await__ - -async def bar(): - foo = Foo() # foo is awaitable - print('waiting for foo') - res = await foo # Retrieve value - print('done', res) - -loop = asyncio.get_event_loop() -loop.run_until_complete(bar()) -``` - -Note that, in `__await__`, `yield from asyncio.sleep(1)` is allowed by CPython. -I haven't yet established how this is achieved. - -###### [Contents](./TUTORIAL.md#contents) - -## 4.2 Asynchronous iterators - -These provide a means of returning a finite or infinite sequence of values -and could be used as a means of retrieving successive data items as they arrive -from a read-only device. An asynchronous iterable calls asynchronous code in -its `next` method. The class must conform to the following requirements: - - * It has an `__aiter__` method defined with `async def`and returning the - asynchronous iterator. - * It has an ` __anext__` method which is a coro - i.e. defined with - `async def` and containing at least one `await` statement. To stop - iteration it must raise a `StopAsyncIteration` exception. 
- -Successive values are retrieved with `async for` as below: - -```python -class AsyncIterable: - def __init__(self): - self.data = (1, 2, 3, 4, 5) - self.index = 0 - - async def __aiter__(self): - return self - - async def __anext__(self): - data = await self.fetch_data() - if data: - return data - else: - raise StopAsyncIteration - - async def fetch_data(self): - await asyncio.sleep(0.1) # Other coros get to run - if self.index >= len(self.data): - return None - x = self.data[self.index] - self.index += 1 - return x - -async def run(): - ai = AsyncIterable() - async for x in ai: - print(x) -``` - -###### [Contents](./TUTORIAL.md#contents) - -## 4.3 Asynchronous context managers - -Classes can be designed to support asynchronous context managers. These are CM's -having enter and exit procedures which are coros. An example is the `Lock` -class described above. This has an `__aenter__` coro which is logically -required to run asynchronously. To support the asynchronous CM protocol its -`__aexit__` method also must be a coro, achieved by including -`await asyncio.sleep(0)`. Such classes are accessed from within a coro with -the following syntax: - -```python -async def bar(lock): - async with lock: - print('bar acquired lock') -``` - -As with normal context managers an exit method is guaranteed to be called when -the context manager terminates, whether normally or via an exception. To -achieve this the special methods `__aenter__` and `__aexit__` must be -defined, both being coros waiting on a coro or `awaitable` object. This example -comes from the `Lock` class: - -```python - async def __aenter__(self): - await self.acquire() # a coro defined with async def - return self - - async def __aexit__(self, *args): - self.release() # A conventional method - await asyncio.sleep_ms(0) -``` - -If the `async with` has an `as variable` clause the variable receives the -value returned by `__aenter__`. 
- -Note there is currently a bug in the implementation whereby if an explicit -`return` is issued within an `async with` block, the `__aexit__` method -is not called. The solution is to design the code so that in all cases it runs -to completion. The error appears to be in [PEP492](https://www.python.org/dev/peps/pep-0492/). -See [this issue](https://github.com/micropython/micropython/issues/3153). - -###### [Contents](./TUTORIAL.md#contents) - -## 4.4 Coroutines with timeouts - -This requires uasyncio.core V1.7 which was released on 16th Dec 2017, with -firmware of that date or later. - -Timeouts are implemented by means of `uasyncio.wait_for()`. This takes as -arguments a coroutine and a timeout in seconds. If the timeout expires a -`TimeoutError` will be thrown to the coro. The next time the coro is scheduled -for execution the exception will be raised: the coro should trap this and quit. - -```python -import uasyncio as asyncio - -async def forever(): - print('Starting') - try: - while True: - await asyncio.sleep_ms(300) - print('Got here') - except asyncio.TimeoutError: - print('Got timeout') - -async def foo(): - await asyncio.wait_for(forever(), 5) - await asyncio.sleep(2) - -loop = asyncio.get_event_loop() -loop.run_until_complete(foo()) -``` - -Note that if the coro awaits a long delay, it will not be rescheduled until the -time has elapsed. The `TimeoutError` will occur as soon as the coro is -scheduled. But in real time and from the point of view of the calling coro, its -response to the `TimeoutError` will correspondingly be delayed. - -If this matters to the application, create a long delay by awaiting a short one -in a loop. The coro `asyn.sleep` [supports this](./PRIMITIVES.md#41-coro-sleep). - -## 4.5 Exceptions - -Where an exception occurs in a coro, it should be trapped either in that coro -or in a coro which is awaiting its completion. This ensures that the exception -is not propagated to the scheduler. 
If this occurred it would stop running, -passing the exception to the code which started the scheduler. - -Using `throw` to throw an exception to a coro is unwise. It subverts the design -of `uasyncio` by forcing the coro to run, and possibly terminate, when it is -still queued for execution. I haven't entirely thought through the implications -of this, but it's a thoroughly bad idea. - -###### [Contents](./TUTORIAL.md#contents) - -# 5 Device driver examples - -Many devices such as sensors are read-only in nature and need to be polled to -acquire data. In the case of a driver written in Python this must be done by -having a coro which does this periodically. This may present problems if there -is a requirement for rapid polling owing to the round-robin nature of uasyncio -scheduling: the coro will compete for execution with others. There are two -solutions to this. One is to use the experimental version of uasyncio presented -[here](./FASTPOLL.md). - -The other potential solution is to delegate the polling to the scheduler using -the IORead mechanism. This is unsupported for Python drivers: see section 5.3. - -Note that where a very repeatable polling interval is required, it should be -done using a hardware timer with a hard interrupt callback. For "very" -repeatable read microsecond level (depending on platform). - -In many cases less precise timing is acceptable. The definition of "less" is -application dependent but the latency associated with scheduling the coro which -is performing the polling may be variable on the order of tens or hundreds of -milliseconds. Latency is determined as follows. When `await asyncio.sleep(0)` -is issued all other pending coros will be scheduled in "fair round-robin" -fashion before it is re-scheduled. Thus its worst-case latency may be -calculated by summing, for every other coro, the worst-case execution time -between yielding to the scheduler. 
- -If `await asyncio.sleep_ms(t)` is issued where t > 0 the coro is guaranteed not -to be rescheduled until t has elapsed. If, at that time, all other coros are -waiting on nonzero delays, it will immediately be scheduled. But if other coros -are pending execution (either because they issued a zero delay or because their -time has elapsed) they may be scheduled first. This introduces a timing -uncertainty into the `sleep()` and `sleep_ms()` functions. The worst-case value -for this may be calculated as above. - -[This document](./FASTPOLL.md) describes an experimental version of uasyncio -which offers a means of reducing this latency for critical tasks. - -###### [Contents](./TUTORIAL.md#contents) - -## 5.1 The IORead Mechanism - -This can be illustrated using a Pyboard UART. The following code sample -demonstrates concurrent I/O on one UART. To run, link Pyboard pins X1 and X2 -(UART Txd and Rxd). - -```python -import uasyncio as asyncio -from pyb import UART -uart = UART(4, 9600) - -async def sender(): - swriter = asyncio.StreamWriter(uart, {}) - while True: - await swriter.awrite('Hello uart\n') - await asyncio.sleep(2) - -async def receiver(): - sreader = asyncio.StreamReader(uart) - while True: - res = await sreader.readline() - print('Recieved', res) - -loop = asyncio.get_event_loop() -loop.create_task(sender()) -loop.create_task(receiver()) -loop.run_forever() -``` - -The supporting code may be found in `__init__.py` in the uasyncio library. -The mechanism works because the device driver (written in C) implements the -following methods: `ioctl`, `read`, `write`, `readline` and `close`. See -section 5.3 for further discussion. - -###### [Contents](./TUTORIAL.md#contents) - -## 5.2 Using a coro to poll hardware - -This is a simple approach, but is only appropriate to hardware which is to be -polled at a relatively low rate. This is for two reasons. 
Firstly the variable -latency caused by the execution of other coros will result in variable polling -intervals - this may or may not matter depending on the device and application. -Secondly, attempting to poll with a short polling interval may cause the coro -to consume more processor time than is desirable. - -The example `apoll.py` demonstrates this approach by polling the Pyboard -accelerometer at 100ms intervals. It performs some simple filtering to ignore -noisy samples and prints a message every two seconds if the board is not moved. - -Further examples may be found in `aswitch.py` which provides drivers for -switch and pushbutton devices. - -An example of a driver for a device capable of reading and writing is shown -below. For ease of testing Pyboard UART 4 emulates the notional device. The -driver implements a `RecordOrientedUart` class, where data is supplied in -variable length records consisting of bytes instances. The object appends a -delimiter before sending and buffers incoming data until the delimiter is -received. This is a demo and is an inefficient way to use a UART compared to -IORead. - -For the purpose of demonstrating asynchronous transmission we assume the -device being emulated has a means of checking that transmission is complete -and that the application requires that we wait on this. Neither assumption is -true in this example but the code fakes it with `await asyncio.sleep(0.1)`. - -Link pins X1 and X2 to run. 
- -```python -import uasyncio as asyncio -from pyb import UART - -class RecordOrientedUart(): - DELIMITER = b'\0' - def __init__(self): - self.uart = UART(4, 9600) - self.data = b'' - - def __await__(self): - data = b'' - while not data.endswith(self.DELIMITER): - yield from asyncio.sleep(0) # Neccessary because: - while not self.uart.any(): - yield from asyncio.sleep(0) # timing may mean this is never called - data = b''.join((data, self.uart.read(self.uart.any()))) - self.data = data - - __iter__ = __await__ # workround for issue #2678 - - async def send_record(self, data): - data = b''.join((data, self.DELIMITER)) - self.uart.write(data) - await self._send_complete() - - # In a real device driver we would poll the hardware - # for completion in a loop with await asyncio.sleep(0) - async def _send_complete(self): - await asyncio.sleep(0.1) - - def read_record(self): # Synchronous: await the object before calling - return self.data[0:-1] # Discard delimiter - -async def run(): - foo = RecordOrientedUart() - rx_data = b'' - await foo.send_record(b'A line of text.') - for _ in range(20): - await foo # Other coros are scheduled while we wait - rx_data = foo.read_record() - print('Got: {}'.format(rx_data)) - await foo.send_record(rx_data) - rx_data = b'' - -loop = asyncio.get_event_loop() -loop.run_until_complete(run()) -``` - -###### [Contents](./TUTORIAL.md#contents) - -## 5.3 Using IORead to poll hardware - -The uasyncio `IORead` class is provided to support IO to stream devices. It -may be employed by drivers of devices which need to be polled: the polling will -be delegated to the scheduler which uses `select` to schedule the first -stream or device driver to be ready. This is more efficient, and offers lower -latency, than running multiple coros each polling a device. 
- -At the time of writing firmware support for using this mechanism in device -drivers written in Python has not been implemented, and the final comment to -[this](https://github.com/micropython/micropython/issues/2664) issue suggests -that it may never be done. So streaming device drivers must be written in C. - -###### [Contents](./TUTORIAL.md#contents) - -## 5.4 A complete example: aremote.py - -This may be found in the `nec_ir` directory. Its use is documented -[here](./nec_ir/README.md). The demo provides a complete device driver example: -a receiver/decoder for an infra red remote controller. The following notes are -salient points regarding its asyncio usage. - -A pin interrupt records the time of a state change (in us) and sets an event, -passing the time when the first state change occurred. A coro waits on the -event, yields for the duration of a data burst, then decodes the stored data -before calling a user-specified callback. - -Passing the time to the `Event` instance enables the coro to compensate for -any asyncio latency when setting its delay period. - -###### [Contents](./TUTORIAL.md#contents) - -## 5.5 HTU21D environment sensor - -This chip provides accurate measurements of temperature and humidity. The -driver is documented [here](./htu21d/README.md). It has a continuously running -task which updates `temperature` and `humidity` bound variables which may be -accessed "instantly". - -The chip takes on the order of 120ms to acquire both data items. The driver -works asynchronously by triggering the acquisition and using -`await asyncio.sleep(t)` prior to reading the data. This allows other coros to -run while acquisition is in progress. - -# 6 Hints and tips - -## 6.1 Coroutines are generators - -In MicroPython coroutines are generators. This is not the case in CPython. -Issuing `yield` in a coro will provoke a syntax error in CPython, whereas in -MicroPython it has the same effect as `await asyncio.sleep(0)`. 
The surest way
-to write error free code is to use CPython conventions and assume that coros
-are not generators.
-
-The following will work. If you use them, be prepared to test your code against
-each uasyncio release because the behaviour is not necessarily guaranteed.
-
-```python
-yield from coro # Equivalent to await coro: continue when coro terminates.
-yield # Reschedule current coro in round-robin fashion.
-yield 100 # Pause 100ms - equivalent to above
-```
-
-Issuing `yield` or `yield 100` is slightly faster than the equivalent `await`
-statements.
-
-###### [Contents](./TUTORIAL.md#contents)
-
-## 6.1 Program hangs
-
-Hanging usually occurs because a task has blocked without yielding: this will
-hang the entire system. When developing it is useful to have a coro which
-periodically toggles an onboard LED. This provides confirmation that the
-scheduler is running.
-
-###### [Contents](./TUTORIAL.md#contents)
-
-## 6.2 uasyncio retains state
-
-When running programs using `uasyncio` at the REPL, issue a soft reset
-(ctrl-D) between runs. This is because `uasyncio` retains state between runs
-which can lead to confusing behaviour.
-
-###### [Contents](./TUTORIAL.md#contents)
-
-## 6.3 Garbage Collection
-
-You may want to consider running a coro which issues:
-
-```python
-    gc.collect()
-    gc.threshold(gc.mem_free() // 4 + gc.mem_alloc())
-```
-
-This assumes `import gc` has been issued. The purpose of this is discussed
-[here](http://docs.micropython.org/en/latest/pyboard/reference/constrained.html)
-in the section on the heap.
-
-###### [Contents](./TUTORIAL.md#contents)
-
-## 6.4 Testing
-
-It's advisable to test that a device driver yields control when you intend it
-to. 
This can be done by running one or more instances of a dummy coro which -runs a loop printing a message, and checking that it runs in the periods when -the driver is blocking: - -```python -async def rr(n): - while True: - print('Roundrobin ', n) - await asyncio.sleep(0) -``` - -As an example of the type of hazard which can occur, in the `RecordOrientedUart` -example above the `__await__` method was originally written as: - -```python - def __await__(self): - data = b'' - while not data.endswith(self.DELIMITER): - while not self.uart.any(): - yield from asyncio.sleep(0) - data = b''.join((data, self.uart.read(self.uart.any()))) - self.data = data -``` - -In testing this hogged execution until an entire record was received. This was -because `uart.any()` always returned a nonzero quantity. By the time it was -called, characters had been received. The solution was to yield execution in -the outer loop: - -```python - def __await__(self): - data = b'' - while not data.endswith(self.DELIMITER): - yield from asyncio.sleep(0) # Neccessary because: - while not self.uart.any(): - yield from asyncio.sleep(0) # timing may mean this is never called - data = b''.join((data, self.uart.read(self.uart.any()))) - self.data = data -``` - -It is perhaps worth noting that this error would not have been apparent had -data been sent to the UART at a slow rate rather than via a loopback test. - -###### [Contents](./TUTORIAL.md#contents) - -## 6.5 A common error - -If a function or method is defined with `async def` and subsequently called as -if it were a regular (synchronous) callable, MicroPython does not issue an -error message. This is [by design](https://github.com/micropython/micropython/issues/3241). -It typically leads to a program silently failing to run correctly. - -The script `check_async_code.py` attempts to locate instances of questionable -use of coros. It is intended to be run on a PC and uses Python3. 
It takes a -single argument, a path to a MicroPython sourcefile (or `--help`). It is -designed for use on scripts written according to the guidelines in this -tutorial, with coros declared using `async def`. - -Note it is somewhat crude and intended to be used on a syntactically correct -file which is silently failing to run. Use a tool such as pylint for general -syntax checking (pylint currently misses this error). - -The script produces false positives. This is by design: coros are first class -objects; you can pass them to functions and can store them in data structures. -Depending on the program logic you may intend to store the function or the -outcome of its execution. The script can't deduce the intent. It aims to ignore -cases which appear correct while identifying other instances for review. -Assume `foo` is a coro declared with `async def`: - -```python -loop.run_until_complete(foo()) # No warning -bar(foo) # These lines will warn but may or may not be correct -bar(foo()) -z = (foo,) -z = (foo(),) -``` - -I find it useful as-is but improvements are always welcome. - -## 6.7 Socket programming - -The use of nonblocking sockets requires some attention to detail. If a -nonblocking read is performed, because of server latency, there is no guarantee -that all (or any) of the requested data is returned. Likewise writes may not -proceed to completion. - -Hence asynchronous read and write methods need to iteratively perform the -nonblocking operation until the required data has been read or written. In -practice a timeout is likely to be required to cope with server outages. - -A further complication is that, at the time of writing, the ESP32 port has -issues which require rather unpleasant hacks for error-free operation. - -The file `sock_nonblock.py` illustrates the sort of techniques required. It is -not a working demo, and solutions are likely to be application dependent. 
- -###### [Contents](./TUTORIAL.md#contents) - -# 7 Notes for beginners - -These notes are intended for those new to asynchronous code. They start by -outlining the problems which schedulers seek to solve, and give an overview of -the `uasyncio` approach to a solution. - -[Section 7.5](./TUTORIAL.md#75-why-cooperative-rather-than-pre-emptive) -discusses the relative merits of `uasyncio` and the `_thread` module and why -you may prefer use cooperative (`uasyncio`) over pre-emptive (`_thread`) -scheduling. - -###### [Contents](./TUTORIAL.md#contents) - -## 7.1 Problem 1: event loops - -A typical firmware application runs continuously and is required to respond to -external events. These might include a voltage change on an ADC, the arrival of -a hard interrupt, a character arriving on a UART, or data being available on a -socket. These events occur asynchronously and the code must be able to respond -regardless of the order in which they occur. Further the application may be -required to perform time-dependent tasks such as flashing LED's. - -The obvious way to do this is with an event loop. The following is not -practical code but serves to illustrate the general form of an event loop. - -```python -def event_loop(): - led_1_time = 0 - led_2_time = 0 - switch_state = switch.state() # Current state of a switch - while True: - time_now = utime.time() - if time_now >= led_1_time: # Flash LED #1 - led1.toggle() - led_1_time = time_now + led_1_period - if time_now >= led_2_time: # Flash LED #2 - led2.toggle() - led_2_time = time_now + led_2_period - # Handle LEDs 3 upwards - - if switch.value() != switch_state: - switch_state = switch.value() - # do something - if uart.any(): - # handle UART input -``` - -This works for simple examples but event loops rapidly become unweildy as the -number of events increases. 
They also violate the principles of object oriented -programming by lumping much of the program logic in one place rather than -associating code with the object being controlled. We want to design a class -for an LED capable of flashing which could be put in a module and imported. An -OOP approach to flashing an LED might look like this: - -```python -import pyb -class LED_flashable(): - def __init__(self, led_no): - self.led = pyb.LED(led_no) - - def flash(self, period): - while True: - self.led.toggle() - # somehow wait for period but allow other - # things to happen at the same time -``` - -A cooperative scheduler such as `uasyncio` enables classes such as this to be -created. - -###### [Contents](./TUTORIAL.md#contents) - -## 7.2 Problem 2: blocking methods - -Assume you need to read a number of bytes from a socket. If you call -`socket.read(n)` with a default blocking socket it will "block" (i.e. fail to -return) until `n` bytes have been received. During this period the application -will be unresponsive to other events. - -With `uasyncio` and a non-blocking socket you can write an asynchronous read -method. The task requiring the data will (necessarily) block until it is -received but during that period other tasks will be scheduled enabling the -application to remain responsive. - -## 7.3 The uasyncio approach - -The following class provides for an LED which can be turned on and off, and -which can also be made to flash at an arbitrary rate. A `LED_async` instance -has a `run` method which can be considered to run continuously. The LED's -behaviour can be controlled by methods `on()`, `off()` and `flash(secs)`. 
- -```python -import pyb -import uasyncio as asyncio - -class LED_async(): - def __init__(self, led_no): - self.led = pyb.LED(led_no) - self.rate = 0 - loop = asyncio.get_event_loop() - loop.create_task(self.run()) - - async def run(self): - while True: - if self.rate <= 0: - await asyncio.sleep_ms(200) - else: - self.led.toggle() - await asyncio.sleep_ms(int(500 / self.rate)) - - def flash(self, rate): - self.rate = rate - - def on(self): - self.led.on() - self.rate = 0 - - def off(self): - self.led.off() - self.rate = 0 -``` - -Note that `on()`, `off()` and `flash()` are conventional synchronous methods. -They change the behaviour of the LED but return immediately. The flashing -occurs "in the background". This is explained in detail in the next section. - -The class conforms with the OOP principle of keeping the logic associated with -the device within the class. Further, the way `uasyncio` works ensures that -while the LED is flashing the application can respond to other events. The -example below flashes the four Pyboard LED's at different rates while also -responding to the USR button which terminates the program. - -```python -import pyb -import uasyncio as asyncio -from led_async import LED_async # Class as listed above - -async def killer(): - sw = pyb.Switch() - while not sw.value(): - await asyncio.sleep_ms(100) - -leds = [LED_async(n) for n in range(1, 4)] -for n, led in enumerate(leds): - led.flash(0.7 + n/4) -loop = asyncio.get_event_loop() -loop.run_until_complete(killer()) -``` - -In contrast to the event loop example the logic associated with the switch is -in a function separate from the LED functionality. Note the code used to start -the scheduler: - -```python -loop = asyncio.get_event_loop() -loop.run_until_complete(killer()) # Execution passes to coroutines. - # It only continues here once killer() terminates, when the - # scheduler has stopped. 
-``` - -###### [Contents](./TUTORIAL.md#contents) - -## 7.4 Scheduling in uasyncio - -Python 3.5 and MicroPython support the notion of an asynchronous function, -also known as a coroutine (coro) or task. A coro must include at least one -`await` statement. - -```python -async def hello(): - for _ in range(10): - print('Hello world.') - await asyncio.sleep(1) -``` - -This function prints the message ten times at one second intervals. While the -function is paused pending the time delay asyncio will schedule other tasks, -providing an illusion of concurrency. - -When a coro issues `await asyncio.sleep_ms()` or `await asyncio.sleep()` the -current task pauses: it is placed on a queue which is ordered on time due, and -execution passes to the task at the top of the queue. The queue is designed so -that even if the specified sleep is zero other due tasks will run before the -current one is resumed. This is "fair round-robin" scheduling. It is common -practice to issue `await asyncio.sleep(0)` in loops to ensure a task doesn't -hog execution. The following shows a busy-wait loop which waits for another -task to set the global `flag`. Alas it monopolises the CPU preventing other -coros from running: - -```python -async def bad_code(): - global flag - while not flag: - pass - flag = False - # code omitted -``` - -The problem here is that while the `flag` is `False` the loop never yields to -the scheduler so no other task will get to run. The correct approach is: - -```python -async def good_code(): - global flag - while not flag: - await asyncio.sleep(0) - flag = False - # code omitted -``` - -For the same reason it's bad practice to issue delays like `utime.sleep(1)` -because that will lock out other tasks for 1s; use `await asyncio.sleep(1)`. -Note that the delays implied by `uasyncio` methods `sleep` and `sleep_ms` can -overrun the specified time. This is because while the delay is in progress -other tasks will run. 
When the delay period completes, execution will not
-resume until the running task issues `await` or terminates. A well-behaved coro
-will always issue `await` at regular intervals. Where a precise delay is
-required, especially one below a few ms, it may be necessary to use
-`utime.sleep_us(us)`.
-
-###### [Contents](./TUTORIAL.md#contents)
-
-## 7.5 Why cooperative rather than pre-emptive?
-
-The initial reaction of beginners to the idea of cooperative multi-tasking is
-often one of disappointment. Surely pre-emptive is better? Why should I have to
-explicitly yield control when the Python virtual machine can do it for me?
-
-When it comes to embedded systems the cooperative model has two advantages.
-Firstly, it is lightweight. It is possible to have large numbers of coroutines
-because unlike descheduled threads, paused coroutines contain little state.
-Secondly it avoids some of the subtle problems associated with pre-emptive
-scheduling. In practice cooperative multi-tasking is widely used, notably in
-user interface applications.
-
-To make a case for the defence a pre-emptive model has one advantage: if
-someone writes
-
-```python
-for x in range(1000000):
-    # do something time consuming
-```
-
-it won't lock out other threads. Under cooperative schedulers the loop must
-explicitly yield control every so many iterations e.g. by putting the code in
-a coro and periodically issuing `await asyncio.sleep(0)`.
-
-Alas this benefit of pre-emption pales into insignificance compared to the
-drawbacks. Some of these are covered in the documentation on writing
-[interrupt handlers](http://docs.micropython.org/en/latest/reference/isr_rules.html).
-In a pre-emptive model every thread can interrupt every other thread, changing
-data which might be used in other threads. 
It is generally much easier to find -and fix a lockup resulting from a coro which fails to yield than locating the -sometimes deeply subtle and rarely occurring bugs which can occur in -pre-emptive code. - -To put this in simple terms, if you write a MicroPython coroutine, you can be -sure that variables won't suddenly be changed by another coro: your coro has -complete control until it issues `await asyncio.sleep(0)`. - -Bear in mind that interrupt handlers are pre-emptive. This applies to both hard -and soft interrupts, either of which can occur at any point in your code. - -An eloquent discussion of the evils of threading may be found -[in threads are bad](https://glyph.twistedmatrix.com/2014/02/unyielding.html). - -###### [Contents](./TUTORIAL.md#contents) - -## 7.6 Communication - -In non-trivial applications coroutines need to communicate. Conventional Python -techniques can be employed. These include the use of global variables or -declaring coros as object methods: these can then share instance variables. -Alternatively a mutable object may be passed as a coro argument. - -Pre-emptive systems mandate specialist classes to achieve "thread safe" -communications; in a cooperative system these are seldom required. - -###### [Contents](./TUTORIAL.md#contents) - -## 7.7 Polling - -Some hardware devices such as the Pyboard accelerometer don't support -interrupts, and therefore must be polled (i.e. checked periodically). Polling -can also be used in conjunction with interrupt handlers: the interrupt handler -services the hardware and sets a flag. A coro polls the flag: if it's set it -handles the data and clears the flag. A better approach is to use an `Event`. - -###### [Contents](./TUTORIAL.md#contents) - -# 8 Modifying uasyncio - -The library is designed to be extensible, an example being the -`asyncio_priority` module. 
By following the following guidelines a module can -be constructed which alters the functionality of asyncio without the need to -change the official library. Such a module may be used where `uasyncio` is -implemented as frozen bytecode. - -Assume that the aim is to alter the event loop. The module should issue - -```python -from uasyncio import * -``` - -The event loop should be subclassed from `PollEventLoop` (defined in -`__init__.py`). - -The event loop is instantiated by the first call to `get_event_loop()`: this -creates a singleton instance. This is returned by every call to -`get_event_loop()`. On the assumption that the constructor arguments for the -new class differ from those of the base class, the module will need to redefine -`get_event_loop()` along the following lines: - -```python -_event_loop = None # The singleton instance -_event_loop_class = MyNewEventLoopClass # The class, not an instance -def get_event_loop(args): - global _event_loop - if _event_loop is None: - _event_loop = _event_loop_class(args) # Instantiate once only - return _event_loop -``` diff --git a/aledflash.py b/aledflash.py deleted file mode 100644 index ea40854..0000000 --- a/aledflash.py +++ /dev/null @@ -1,35 +0,0 @@ -# aledflash.py Demo/test program for MicroPython asyncio -# Author: Peter Hinch -# Copyright Peter Hinch 2017 Released under the MIT license -# Flashes the onboard LED's each at a different rate. Stops after ten seconds. 
-# Run on MicroPython board bare hardware - -import pyb -try: - import asyncio_priority as asyncio -except ImportError: - import uasyncio as asyncio - -async def killer(duration): - await asyncio.sleep(duration) - -async def toggle(objLED, time_ms): - while True: - await asyncio.sleep_ms(time_ms) - objLED.toggle() - -# TEST FUNCTION - -def test(duration): - loop = asyncio.get_event_loop() - duration = int(duration) - if duration > 0: - print("Flash LED's for {:3d} seconds".format(duration)) - leds = [pyb.LED(x) for x in range(1,5)] # Initialise all four on board LED's - for x, led in enumerate(leds): # Create a coroutine for each LED - t = int((0.2 + x/2) * 1000) - loop.create_task(toggle(leds[x], t)) - loop.run_until_complete(killer(duration)) - loop.close() - -test(10) diff --git a/apoll.py b/apoll.py deleted file mode 100644 index 963c8c0..0000000 --- a/apoll.py +++ /dev/null @@ -1,67 +0,0 @@ -# Demonstration of a device driver using a coroutine to poll a dvice. -# Runs on Pyboard: displays results from the onboard accelerometer. -# Uses crude filtering to discard noisy data. - -# Author: Peter Hinch -# Copyright Peter Hinch 2017 Released under the MIT license - -try: - import asyncio_priority as asyncio -except ImportError: - import uasyncio as asyncio -import pyb -import utime as time - -class Accelerometer(object): - threshold_squared = 16 - def __init__(self, accelhw, timeout): - self.loop = asyncio.get_event_loop() - self.accelhw = accelhw - self.timeout = timeout - self.last_change = self.loop.time() - self.coords = [accelhw.x(), accelhw.y(), accelhw.z()] - - def dsquared(self, xyz): # Return the square of the distance between this and a passed - return sum(map(lambda p, q : (p-q)**2, self.coords, xyz)) # acceleration vector - - def poll(self): # Device is noisy. 
Only update if change exceeds a threshold - xyz = [self.accelhw.x(), self.accelhw.y(), self.accelhw.z()] - if self.dsquared(xyz) > Accelerometer.threshold_squared: - self.coords = xyz - self.last_change = self.loop.time() - return 0 - return time.ticks_diff(self.loop.time(), self.last_change) - - def vector(self): - return self.coords - - def timed_out(self): # Time since last change or last timeout report - if time.ticks_diff(self.loop.time(), self.last_change) > self.timeout: - self.last_change = self.loop.time() - return True - return False - -async def accel_coro(timeout = 2000): - loop = asyncio.get_event_loop() - accelhw = pyb.Accel() # Instantiate accelerometer hardware - await asyncio.sleep_ms(30) # Allow it to settle - accel = Accelerometer(accelhw, timeout) - while True: - result = accel.poll() - if result == 0: # Value has changed - x, y, z = accel.vector() - print("Value x:{:3d} y:{:3d} z:{:3d}".format(x, y, z)) - elif accel.timed_out(): # Report every 2 secs - print("Timeout waiting for accelerometer change") - await asyncio.sleep_ms(100) # Poll every 100ms - - -async def main(delay): - print('Testing accelerometer for {} secs. 
Move the Pyboard!'.format(delay)) - print('Test runs for 20s.') - await asyncio.sleep(delay) - print('Test complete!') - -loop = asyncio.get_event_loop() -loop.create_task(accel_coro()) -loop.run_until_complete(main(20)) diff --git a/aqtest.py b/aqtest.py deleted file mode 100644 index 2216f09..0000000 --- a/aqtest.py +++ /dev/null @@ -1,38 +0,0 @@ -# aqtest.py Demo/test program for MicroPython library micropython-uasyncio.queues -# Author: Peter Hinch -# Copyright Peter Hinch 2017 Released under the MIT license - -try: - import asyncio_priority as asyncio -except ImportError: - import uasyncio as asyncio - -from uasyncio.queues import Queue - -q = Queue() - -async def slow_process(): - await asyncio.sleep(2) - return 42 - -async def bar(): - print('Waiting for slow process.') - result = await slow_process() - print('Putting result onto queue') - await q.put(result) # Put result on q - -async def foo(): - print("Running foo()") - result = await(q.get()) - print('Result was {}'.format(result)) - -async def main(delay): - await asyncio.sleep(delay) - print("I've seen starships burn off the shoulder of Orion...") - print("Time to die...") - -print('Test takes 3 secs') -loop = asyncio.get_event_loop() -loop.create_task(foo()) -loop.create_task(bar()) -loop.run_until_complete(main(3)) diff --git a/astests.py b/astests.py deleted file mode 100644 index 94396a7..0000000 --- a/astests.py +++ /dev/null @@ -1,98 +0,0 @@ -# Test/demo programs for the aswitch module. -# Tested on Pyboard but should run on other microcontroller platforms -# running MicroPython with uasyncio library. -# Author: Peter Hinch. -# Copyright Peter Hinch 2017 Released under the MIT license. - -from machine import Pin -from pyb import LED -from aswitch import Switch, Pushbutton -# Verify it works under standard and priority version -try: - import asyncio_priority as asyncio -except ImportError: - import uasyncio as asyncio - -helptext = ''' -Test using switch or pushbutton between X1 and gnd. 
-Ground pin X2 to terminate test. -Soft reset (ctrl-D) after each test. -''' - -# Pulse an LED (coroutine) -async def pulse(led, ms): - led.on() - await asyncio.sleep_ms(ms) - led.off() - -# Toggle an LED (callback) -def toggle(led): - led.toggle() - -# Quit test by connecting X2 to ground -async def killer(): - pin = Pin('X2', Pin.IN, Pin.PULL_UP) - while pin.value(): - await asyncio.sleep_ms(50) - -# Test for the Switch class passing coros -def test_sw(): - print('Test of switch scheduling coroutines.') - print(helptext) - pin = Pin('X1', Pin.IN, Pin.PULL_UP) - red = LED(1) - green = LED(2) - sw = Switch(pin) - # Register coros to launch on contact close and open - sw.close_func(pulse, (green, 1000)) - sw.open_func(pulse, (red, 1000)) - loop = asyncio.get_event_loop() - loop.run_until_complete(killer()) - -# Test for the switch class with a callback -def test_swcb(): - print('Test of switch executing callbacks.') - print(helptext) - pin = Pin('X1', Pin.IN, Pin.PULL_UP) - red = LED(1) - green = LED(2) - sw = Switch(pin) - # Register a coro to launch on contact close - sw.close_func(toggle, (red,)) - sw.open_func(toggle, (green,)) - loop = asyncio.get_event_loop() - loop.run_until_complete(killer()) - -# Test for the Pushbutton class (coroutines) -def test_btn(): - print('Test of pushbutton scheduling coroutines.') - print(helptext) - pin = Pin('X1', Pin.IN, Pin.PULL_UP) - red = LED(1) - green = LED(2) - yellow = LED(3) - blue = LED(4) - pb = Pushbutton(pin) - pb.press_func(pulse, (red, 1000)) - pb.release_func(pulse, (green, 1000)) - pb.double_func(pulse, (yellow, 1000)) - pb.long_func(pulse, (blue, 1000)) - loop = asyncio.get_event_loop() - loop.run_until_complete(killer()) - -# Test for the Pushbutton class (callbacks) -def test_btncb(): - print('Test of pushbutton executing callbacks.') - print(helptext) - pin = Pin('X1', Pin.IN, Pin.PULL_UP) - red = LED(1) - green = LED(2) - yellow = LED(3) - blue = LED(4) - pb = Pushbutton(pin) - pb.press_func(toggle, (red,)) 
- pb.release_func(toggle, (green,)) - pb.double_func(toggle, (yellow,)) - pb.long_func(toggle, (blue,)) - loop = asyncio.get_event_loop() - loop.run_until_complete(killer()) diff --git a/aswitch.py b/aswitch.py deleted file mode 100644 index 80ec4cf..0000000 --- a/aswitch.py +++ /dev/null @@ -1,188 +0,0 @@ -# aswitch.py Switch and pushbutton classes for asyncio -# Delay_ms A retriggerable delay class. Can schedule a coro on timeout. -# Switch Simple debounced switch class for normally open grounded switch. -# Pushbutton extend the above to support logical state, long press and -# double-click events -# Tested on Pyboard but should run on other microcontroller platforms -# running MicroPython and uasyncio. - -# The MIT License (MIT) -# -# Copyright (c) 2017 Peter Hinch -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. 
- -try: - import asyncio_priority as asyncio -except ImportError: - import uasyncio as asyncio -import utime as time -from asyn import launch -# launch: run a callback or initiate a coroutine depending on which is passed. - - -class Delay_ms(object): - def __init__(self, func=None, args=(), can_alloc=True): - self.func = func - self.args = args - self.can_alloc = can_alloc - self.tstop = None # Not running - self.loop = asyncio.get_event_loop() - if not can_alloc: - self.loop.create_task(self._run()) - - async def _run(self): - while True: - if self.tstop is None: # Not running - await asyncio.sleep_ms(0) - else: - await self.killer() - - def stop(self): - self.tstop = None - - def trigger(self, duration): # Update end time - if self.can_alloc and self.tstop is None: - self.tstop = time.ticks_add(time.ticks_ms(), duration) - # Start a task which stops the delay after its period has elapsed - self.loop.create_task(self.killer()) - self.tstop = time.ticks_add(time.ticks_ms(), duration) - - def running(self): - return self.tstop is not None - - async def killer(self): - twait = time.ticks_diff(self.tstop, time.ticks_ms()) - while twait > 0: # Must loop here: might be retriggered - await asyncio.sleep_ms(twait) - if self.tstop is None: - break # Return if stop() called during wait - twait = time.ticks_diff(self.tstop, time.ticks_ms()) - if self.tstop is not None and self.func is not None: - launch(self.func, self.args) # Timed out: execute callback - self.tstop = None # Not running - -class Switch(object): - debounce_ms = 50 - def __init__(self, pin): - self.pin = pin # Should be initialised for input with pullup - self._open_func = False - self._close_func = False - self.switchstate = self.pin.value() # Get initial state - loop = asyncio.get_event_loop() - loop.create_task(self.switchcheck()) # Thread runs forever - - def open_func(self, func, args=()): - self._open_func = func - self._open_args = args - - def close_func(self, func, args=()): - self._close_func = func 
- self._close_args = args - - # Return current state of switch (0 = pressed) - def __call__(self): - return self.switchstate - - async def switchcheck(self): - loop = asyncio.get_event_loop() - while True: - state = self.pin.value() - if state != self.switchstate: - # State has changed: act on it now. - self.switchstate = state - if state == 0 and self._close_func: - launch(self._close_func, self._close_args) - elif state == 1 and self._open_func: - launch(self._open_func, self._open_args) - # Ignore further state changes until switch has settled - await asyncio.sleep_ms(Switch.debounce_ms) - -class Pushbutton(object): - debounce_ms = 50 - long_press_ms = 1000 - double_click_ms = 400 - def __init__(self, pin): - self.pin = pin # Initialise for input - self._true_func = False - self._false_func = False - self._double_func = False - self._long_func = False - self.sense = pin.value() # Convert from electrical to logical value - self.buttonstate = self.rawstate() # Initial state - loop = asyncio.get_event_loop() - loop.create_task(self.buttoncheck()) # Thread runs forever - - def press_func(self, func, args=()): - self._true_func = func - self._true_args = args - - def release_func(self, func, args=()): - self._false_func = func - self._false_args = args - - def double_func(self, func, args=()): - self._double_func = func - self._double_args = args - - def long_func(self, func, args=()): - self._long_func = func - self._long_args = args - - # Current non-debounced logical button state: True == pressed - def rawstate(self): - return bool(self.pin.value() ^ self.sense) - - # Current debounced state of button (True == pressed) - def __call__(self): - return self.buttonstate - - async def buttoncheck(self): - loop = asyncio.get_event_loop() - if self._long_func: - longdelay = Delay_ms(self._long_func, self._long_args) - if self._double_func: - doubledelay = Delay_ms() - while True: - state = self.rawstate() - # State has changed: act on it now. 
- if state != self.buttonstate: - self.buttonstate = state - if state: - # Button is pressed - if self._long_func and not longdelay.running(): - # Start long press delay - longdelay.trigger(Pushbutton.long_press_ms) - if self._double_func: - if doubledelay.running(): - launch(self._double_func, self._double_args) - else: - # First click: start doubleclick timer - doubledelay.trigger(Pushbutton.double_click_ms) - if self._true_func: - launch(self._true_func, self._true_args) - else: - # Button release - if self._long_func and longdelay.running(): - # Avoid interpreting a second click as a long push - longdelay.stop() - if self._false_func: - launch(self._false_func, self._false_args) - # Ignore state changes until switch has settled - await asyncio.sleep_ms(Pushbutton.debounce_ms) diff --git a/asyn.py b/asyn.py deleted file mode 100644 index 68c1d10..0000000 --- a/asyn.py +++ /dev/null @@ -1,478 +0,0 @@ -# asyn.py 'micro' synchronisation primitives for uasyncio -# Test/demo programs asyntest.py, barrier_test.py -# Provides Lock, Event, Barrier, Semaphore, BoundedSemaphore, Condition, -# NamedTask and Cancellable classes, also sleep coro. -# Uses low_priority where available and appropriate. -# Updated 31 Dec 2017 for uasyncio.core V1.6 and to provide task cancellation. - -# The MIT License (MIT) -# -# Copyright (c) 2017 Peter Hinch -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. 
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. - -# CPython 3.5 compatibility -# (ignore RuntimeWarning: coroutine '_g' was never awaited) - -# Check availability of 'priority' version -try: - import asyncio_priority as asyncio - p_version = True -except ImportError: - p_version = False - try: - import uasyncio as asyncio - except ImportError: - import asyncio - -after = asyncio.after if p_version else asyncio.sleep - -async def _g(): - pass -type_coro = type(_g()) - -# If a callback is passed, run it and return. -# If a coro is passed initiate it and return. -# coros are passed by name i.e. not using function call syntax. -def launch(func, tup_args): - res = func(*tup_args) - if isinstance(res, type_coro): - loop = asyncio.get_event_loop() - loop.create_task(res) - - -# To access a lockable resource a coro should issue -# async with lock_instance: -# access the locked resource - -# Alternatively: -# await lock.acquire() -# try: -# do stuff with locked resource -# finally: -# lock.release -# Uses normal scheduling on assumption that locks are held briefly. 
-class Lock(): - def __init__(self, delay_ms=0): - self._locked = False - self.delay_ms = delay_ms - - def locked(self): - return self._locked - - async def __aenter__(self): - await self.acquire() - return self - - async def __aexit__(self, *args): - self.release() - await asyncio.sleep(0) - - async def acquire(self): - while True: - if self._locked: - await asyncio.sleep_ms(self.delay_ms) - else: - self._locked = True - break - - def release(self): - if not self._locked: - raise RuntimeError('Attempt to release a lock which has not been set') - self._locked = False - - -# A coro waiting on an event issues await event -# A coro rasing the event issues event.set() -# When all waiting coros have run -# event.clear() should be issued -# Use of low_priority may be specified in the constructor -# when it will be used if available. -class Event(): - def __init__(self, lp=False): - self.after = after if (p_version and lp) else asyncio.sleep - self.clear() - - def clear(self): - self._flag = False - self._data = None - - def __await__(self): - while not self._flag: - yield from self.after(0) - - __iter__ = __await__ - - def is_set(self): - return self._flag - - def set(self, data=None): - self._flag = True - self._data = data - - def value(self): - return self._data - -# A Barrier synchronises N coros. Each issues await barrier. -# Execution pauses until all other participant coros are waiting on it. -# At that point the callback is executed. Then the barrier is 'opened' and -# execution of all participants resumes. - -# The nowait arg is to support task cancellation. It enables usage where one or -# more coros can register that they have reached the barrier without waiting -# for it. Any coros waiting normally on the barrier will pause until all -# non-waiting coros have passed the barrier and all waiting ones have reached -# it. The use of nowait promotes efficiency by enabling tasks which have been -# cancelled to leave the task queue as soon as possible. 
- -# Uses low_priority if available - -class Barrier(): - def __init__(self, participants, func=None, args=()): - self._participants = participants - self._func = func - self._args = args - self._reset(True) - - def __await__(self): - self._update() - if self._at_limit(): # All other threads are also at limit - if self._func is not None: - launch(self._func, self._args) - self._reset(not self._down) # Toggle direction to release others - return - - direction = self._down - while True: # Wait until last waiting thread changes the direction - if direction != self._down: - return - yield from after(0) - - __iter__ = __await__ - - def trigger(self): - self._update() - if self._at_limit(): # All other threads are also at limit - if self._func is not None: - launch(self._func, self._args) - self._reset(not self._down) # Toggle direction to release others - - def _reset(self, down): - self._down = down - self._count = self._participants if down else 0 - - def busy(self): - if self._down: - done = self._count == self._participants - else: - done = self._count == 0 - return not done - - def _at_limit(self): # Has count reached up or down limit? - limit = 0 if self._down else self._participants - return self._count == limit - - def _update(self): - self._count += -1 if self._down else 1 - if self._count < 0 or self._count > self._participants: - raise ValueError('Too many tasks accessing Barrier') - -# A Semaphore is typically used to limit the number of coros running a -# particular piece of code at once. The number is defined in the constructor. 
-class Semaphore(): - def __init__(self, value=1): - self._count = value - - async def __aenter__(self): - await self.acquire() - return self - - async def __aexit__(self, *args): - self.release() - await asyncio.sleep(0) - - async def acquire(self): - while self._count == 0: - await after(0) - self._count -= 1 - - def release(self): - self._count += 1 - -class BoundedSemaphore(Semaphore): - def __init__(self, value=1): - super().__init__(value) - self._initial_value = value - - def release(self): - if self._count < self._initial_value: - self._count += 1 - else: - raise ValueError('Semaphore released more than acquired') - -# Task Cancellation -try: - StopTask = asyncio.CancelledError # More descriptive name -except AttributeError: - raise OSError('asyn.py requires uasyncio V1.7.1 or above.') - -class TaskId(): - def __init__(self, taskid): - self.taskid = taskid - - def __call__(self): - return self.taskid - -# Sleep coro breaks up a sleep into shorter intervals to ensure a rapid -# response to StopTask exceptions -async def sleep(t, granularity=100): # 100ms default - if granularity <= 0: - raise ValueError('sleep granularity must be > 0') - t = int(t * 1000) # ms - if t <= granularity: - await asyncio.sleep_ms(t) - else: - n, rem = divmod(t, granularity) - for _ in range(n): - await asyncio.sleep_ms(granularity) - await asyncio.sleep_ms(rem) - -# Anonymous cancellable tasks. These are members of a group which is identified -# by a user supplied name/number (default 0). Class method cancel_all() cancels -# all tasks in a group and awaits confirmation. Confirmation of ending (whether -# normally or by cancellation) is signalled by a task calling the _stopped() -# class method. Handled by the @cancellable decorator. 
- - -class Cancellable(): - task_no = 0 # Generated task ID, index of tasks dict - tasks = {} # Value is [coro, group, barrier] indexed by integer task_no - - @classmethod - def _cancel(cls, task_no): - task = cls.tasks[task_no][0] - asyncio.cancel(task) - - @classmethod - async def cancel_all(cls, group=0, nowait=False): - tokill = cls._get_task_nos(group) - barrier = Barrier(len(tokill) + 1) # Include this task - for task_no in tokill: - cls.tasks[task_no][2] = barrier - cls._cancel(task_no) - if nowait: - barrier.trigger() - else: - await barrier - - @classmethod - def _is_running(cls, group=0): - tasks = cls._get_task_nos(group) - if tasks == []: - return False - for task_no in tasks: - barrier = cls.tasks[task_no][2] - if barrier is None: # Running, not yet cancelled - return True - if barrier.busy(): - return True - return False - - @classmethod - def _get_task_nos(cls, group): # Return task nos in a group - return [task_no for task_no in cls.tasks if cls.tasks[task_no][1] == group] - - @classmethod - def _get_group(cls, task_no): # Return group given a task_no - return cls.tasks[task_no][1] - - @classmethod - def _stopped(cls, task_no): - if task_no in cls.tasks: - barrier = cls.tasks[task_no][2] - if barrier is not None: # Cancellation in progress - barrier.trigger() - del cls.tasks[task_no] - - def __init__(self, gf, *args, group=0, **kwargs): - task = gf(TaskId(Cancellable.task_no), *args, **kwargs) - if task in self.tasks: - raise ValueError('Task already exists.') - self.tasks[Cancellable.task_no] = [task, group, None] - self.task_no = Cancellable.task_no # For subclass - Cancellable.task_no += 1 - self.task = task - - def __call__(self): - return self.task - - def __await__(self): # Return any value returned by task. 
- return (yield from self.task) - - __iter__ = __await__ - - -# @cancellable decorator - -def cancellable(f): - def new_gen(*args, **kwargs): - if isinstance(args[0], TaskId): # Not a bound method - task_id = args[0] - g = f(*args[1:], **kwargs) - else: # Task ID is args[1] if a bound method - task_id = args[1] - args = (args[0],) + args[2:] - g = f(*args, **kwargs) - try: - res = await g - return res - finally: - NamedTask._stopped(task_id) - return new_gen - -# The NamedTask class enables a coro to be identified by a user defined name. -# It constrains Cancellable to allow groups of one coro only. -# It maintains a dict of barriers indexed by name. -class NamedTask(Cancellable): - instances = {} - - @classmethod - async def cancel(cls, name, nowait=True): - if name in cls.instances: - await cls.cancel_all(group=name, nowait=nowait) - return True - return False - - @classmethod - def is_running(cls, name): - return cls._is_running(group=name) - - @classmethod - def _stopped(cls, task_id): # On completion remove it - name = cls._get_group(task_id()) # Convert task_id to task_no - if name in cls.instances: - instance = cls.instances[name] - barrier = instance.barrier - if barrier is not None: - barrier.trigger() - del cls.instances[name] - Cancellable._stopped(task_id()) - - def __init__(self, name, gf, *args, barrier=None, **kwargs): - if name in self.instances: - raise ValueError('Task name "{}" already exists.'.format(name)) - super().__init__(gf, *args, group=name, **kwargs) - self.barrier = barrier - self.instances[name] = self - - -# @namedtask -namedtask = cancellable # compatibility with old code - -# Condition class - -class Condition(): - def __init__(self, lock=None): - self.lock = Lock() if lock is None else lock - self.events = [] - - async def acquire(self): - await self.lock.acquire() - -# enable this syntax: -# with await condition [as cond]: - def __await__(self): - yield from self.lock.acquire() - return self - - __iter__ = __await__ - - def 
__enter__(self): - return self - - def __exit__(self, *_): - self.lock.release() - - def locked(self): - return self.lock.locked() - - def release(self): - self.lock.release() # Will raise RuntimeError if not locked - - def notify(self, n=1): # Caller controls lock - if not self.lock.locked(): - raise RuntimeError('Condition notify with lock not acquired.') - for _ in range(min(n, len(self.events))): - ev = self.events.pop() - ev.set() - - def notify_all(self): - self.notify(len(self.events)) - - async def wait(self): - if not self.lock.locked(): - raise RuntimeError('Condition wait with lock not acquired.') - ev = Event() - self.events.append(ev) - self.lock.release() - await ev - await self.lock.acquire() - assert ev not in self.events, 'condition wait assertion fail' - return True # CPython compatibility - - async def wait_for(self, predicate): - result = predicate() - while not result: - await self.wait() - result = predicate() - return result - -# Provide functionality similar to asyncio.gather() - -class Gather(): - def __init__(self, gatherables): - ncoros = len(gatherables) - self.barrier = Barrier(ncoros + 1) - self.results = [None] * ncoros - loop = asyncio.get_event_loop() - for n, gatherable in enumerate(gatherables): - loop.create_task(self.wrap(gatherable, n)()) - - def __iter__(self): - yield from self.barrier.__await__() - return self.results - - def wrap(self, gatherable, idx): - async def wrapped(): - coro, args, kwargs = gatherable() - try: - tim = kwargs.pop('timeout') - except KeyError: - self.results[idx] = await coro(*args, **kwargs) - else: - self.results[idx] = await asyncio.wait_for(coro(*args, **kwargs), tim) - self.barrier.trigger() - return wrapped - -class Gatherable(): - def __init__(self, coro, *args, **kwargs): - self.arguments = coro, args, kwargs - - def __call__(self): - return self.arguments diff --git a/asyn_demos.py b/asyn_demos.py deleted file mode 100644 index e4781b1..0000000 --- a/asyn_demos.py +++ /dev/null @@ -1,131 +0,0 
@@ -# asyn_demos.py Simple demos of task cancellation -# Test/demo of official asyncio library and official Lock class - -# The MIT License (MIT) -# -# Copyright (c) 2017 Peter Hinch -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. - -import uasyncio as asyncio -import asyn - -def print_tests(): - st = '''Minimal demo programs of uasyncio task cancellation. -Issue ctrl-D to soft reset the board between test runs. -Available demos: -cancel_test() Demo of Cancellable tasks. -named_test() Demo of NamedTask. -method_test() Cancellable and NamedTask coros as bound methods. 
-''' - print('\x1b[32m') - print(st) - print('\x1b[39m') - -print_tests() - -# Cancellable task minimal example -@asyn.cancellable -async def print_nums(num): - while True: - print(num) - num += 1 - await asyn.sleep(1) - -@asyn.cancellable -async def add_one(num): - num += 1 - await asyn.sleep(1) - return num - -async def run_cancel_test(loop): - res = await asyn.Cancellable(add_one, 41) - print('Result: ', res) - loop.create_task(asyn.Cancellable(print_nums, res)()) - await asyn.sleep(7.5) - # Cancel any cancellable tasks still running - await asyn.Cancellable.cancel_all() - print('Done') - -def cancel_test(): - loop = asyncio.get_event_loop() - loop.run_until_complete(run_cancel_test(loop)) - -# NamedTask minimal example - -@asyn.cancellable -async def print_nums_named(num): - while True: - print(num) - num += 1 - await asyn.sleep(1) - -@asyn.cancellable -async def add_one_named(num): - num += 1 - await asyn.sleep(1) - return num - -async def run_named_test(loop): - res = await asyn.NamedTask('not cancelled', add_one_named, 99) - print('Result: ', res) - loop.create_task(asyn.NamedTask('print nums', print_nums_named, res)()) - await asyn.sleep(7.5) - asyn.NamedTask.cancel('not cancelled') # Nothing to do: task has finished - asyn.NamedTask.cancel('print nums') # Stop the continuously running task - print('Done') - -def named_test(): - loop = asyncio.get_event_loop() - loop.run_until_complete(run_named_test(loop)) - -# Tasks as bound methods - -class CanDemo(): - async def start(self, loop): - loop.create_task(asyn.Cancellable(self.foo, 1)()) # 3 instances in default group 0 - loop.create_task(asyn.Cancellable(self.foo, 2)()) - loop.create_task(asyn.Cancellable(self.foo, 3)()) - loop.create_task(asyn.NamedTask('my bar', self.bar, 4)()) - print('bar running status is', asyn.NamedTask.is_running('my bar')) - await asyncio.sleep(4.5) - await asyn.NamedTask.cancel('my bar') - print('bar instance scheduled for cancellation.') - await asyn.Cancellable.cancel_all() - 
print('foo instances have been cancelled.') - await asyncio.sleep(0.2) # Allow for 100ms latency in bar() - print('bar running status is', asyn.NamedTask.is_running('my bar')) - print('Done') - - @asyn.cancellable - async def foo(self, arg): - while True: - await asyn.sleep(1) - print('foo running, arg', arg) - - @asyn.cancellable - async def bar(self, arg): - while True: - await asyn.sleep(1) - print('bar running, arg', arg) - -def method_test(): - cantest = CanDemo() - loop = asyncio.get_event_loop() - loop.run_until_complete(cantest.start(loop)) diff --git a/asyncio_priority.py b/asyncio_priority.py deleted file mode 100644 index 08d6939..0000000 --- a/asyncio_priority.py +++ /dev/null @@ -1,244 +0,0 @@ -# asyncio_priority.py Modified version of uasyncio with priority mechanism. - -# Updated 18th Dec 2017 for uasyncio.core V1.6 -# New low priority algorithm reduces differences in run_forever compared to -# standard uasyncio. - -# The MIT License (MIT) -# -# Copyright (c) 2017 Peter Hinch -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. - -import utime as time -import utimeq -import ucollections -from uasyncio import * - -class PriorityEventLoop(PollEventLoop): - def __init__(self, runq_len=16, waitq_len=16, lpqlen=42): - super().__init__(runq_len, waitq_len) - self._max_overdue_ms = 0 - self.lpq = utimeq.utimeq(lpqlen) - self.hp_tasks = [] - - def max_overdue_ms(self, t=None): - if t is not None: - self._max_overdue_ms = int(t) - return self._max_overdue_ms - - # Low priority versions of call_later() call_later_ms() and call_at_() - def call_after_ms(self, delay, callback, *args): - self.call_at_lp_(time.ticks_add(self.time(), delay), callback, *args) - - def call_after(self, delay, callback, *args): - self.call_at_lp_(time.ticks_add(self.time(), int(delay * 1000)), callback, *args) - - def call_at_lp_(self, time, callback, *args): - self.lpq.push(time, callback, args) - - def _schedule_hp(self, func, callback, *args): - # If there's an empty slot, assign without allocation - for entry in self.hp_tasks: # O(N) search - but N is typically 1 or 2... - if not entry[0]: - entry[0] = func - entry[1] = callback - entry[2] = args - break - else: - self.hp_tasks.append([func, callback, args]) - -# Low priority (LP) scheduling. -# Schedule a single low priority task if one is ready or overdue. -# The most overdue task is scheduled even if normal tasks are pending. -# The most due task is scheduled only if no normal tasks are pending. 
- - def run_forever(self): - cur_task = [0, 0, 0] - while True: - tnow = self.time() - if self.lpq: - # Schedule a LP task if overdue or if no normal task is ready - to_run = False # Assume no LP task is to run - t = self.lpq.peektime() - tim = time.ticks_diff(t, tnow) - to_run = self._max_overdue_ms > 0 and tim < -self._max_overdue_ms - if not (to_run or self.runq): # No overdue LP task or task on runq - # zero delay tasks go straight to runq. So don't schedule LP if runq - to_run = tim <= 0 # True if LP task is due - if to_run and self.waitq: # Set False if normal tasks due. - t = self.waitq.peektime() - to_run = time.ticks_diff(t, tnow) > 0 # No normal task is ready - if to_run: - self.lpq.pop(cur_task) - self.call_soon(cur_task[1], *cur_task[2]) - - # Expire entries in waitq and move them to runq - while self.waitq: - t = self.waitq.peektime() - delay = time.ticks_diff(t, tnow) - if delay > 0: - break - self.waitq.pop(cur_task) - if __debug__ and DEBUG: - log.debug("Moving from waitq to runq: %s", cur_task[1]) - self.call_soon(cur_task[1], *cur_task[2]) - - # Process runq - l = len(self.runq) - if __debug__ and DEBUG: - log.debug("Entries in runq: %d", l) - while l: - # Check list of high priority tasks - cb = None - for entry in self.hp_tasks: - if entry[0] and entry[0](): # Ready to run - entry[0] = 0 - cb = entry[1] - args = entry[2] - break - - if cb is None: - cb = self.runq.popleft() - l -= 1 - args = () - if not isinstance(cb, type_gen): - args = self.runq.popleft() - l -= 1 - if __debug__ and DEBUG: - log.info("Next callback to run: %s", (cb, args)) - cb(*args) - continue - - if __debug__ and DEBUG: - log.info("Next coroutine to run: %s", (cb, args)) - self.cur_task = cb - delay = 0 - func = None - low_priority = False # Assume normal priority - try: - if args is (): - ret = next(cb) - else: - ret = cb.send(*args) - if __debug__ and DEBUG: - log.info("Coroutine %s yield result: %s", cb, ret) - if isinstance(ret, SysCall1): - arg = ret.arg - if 
isinstance(ret, SleepMs): - delay = arg - if isinstance(ret, AfterMs): - low_priority = True - if isinstance(ret, After): - delay = int(delay*1000) - elif isinstance(ret, When): - if callable(arg): - func = arg - else: - assert False, "Argument to 'when' must be a function or method." - elif isinstance(ret, IORead): - cb.pend_throw(False) - self.add_reader(arg, cb) - continue - elif isinstance(ret, IOWrite): - cb.pend_throw(False) - self.add_writer(arg, cb) - continue - elif isinstance(ret, IOReadDone): - self.remove_reader(arg) - elif isinstance(ret, IOWriteDone): - self.remove_writer(arg) - elif isinstance(ret, StopLoop): - return arg - else: - assert False, "Unknown syscall yielded: %r (of type %r)" % (ret, type(ret)) - elif isinstance(ret, type_gen): - self.call_soon(ret) - elif isinstance(ret, int): - # Delay - delay = ret - elif ret is None: - # Just reschedule - pass - elif ret is False: - # Don't reschedule - continue - else: - assert False, "Unsupported coroutine yield value: %r (of type %r)" % (ret, type(ret)) - except StopIteration as e: - if __debug__ and DEBUG: - log.debug("Coroutine finished: %s", cb) - continue - except CancelledError as e: - if __debug__ and DEBUG: - log.debug("Coroutine cancelled: %s", cb) - continue - if func is not None: - self._schedule_hp(func, cb) - continue - # Currently all syscalls don't return anything, so we don't - # need to feed anything to the next invocation of coroutine. - # If that changes, need to pass that value below. 
- if low_priority: - self.call_after_ms(delay, cb) # Put on lpq - elif delay: - self.call_later_ms(delay, cb) # waitq - else: - self.call_soon(cb) # runq - - # Wait until next waitq task or I/O availability - delay = 0 - if not self.runq: - delay = -1 - tnow = self.time() - if self.waitq: - t = self.waitq.peektime() - delay = time.ticks_diff(t, tnow) - if delay < 0: - delay = 0 - if self.lpq: - t = self.lpq.peektime() - lpdelay = time.ticks_diff(t, tnow) - if lpdelay < 0: - lpdelay = 0 - if lpdelay < delay or delay < 0: - delay = lpdelay - self.wait(delay) - -# Low priority -class AfterMs(SleepMs): - pass - -class After(AfterMs): - pass - -# High Priority -class When(SleepMs): - pass - -after_ms = AfterMs() -after = After() -when = When() - -import uasyncio.core -uasyncio.core._event_loop_class = PriorityEventLoop -def get_event_loop(runq_len=16, waitq_len=16, lpqlen=16): - if uasyncio.core._event_loop is None: # Add a q entry for lp_monitor() - uasyncio.core._event_loop = uasyncio.core._event_loop_class(runq_len, waitq_len, lpqlen) - return uasyncio.core._event_loop diff --git a/asyntest.py b/asyntest.py deleted file mode 100644 index 070c5d8..0000000 --- a/asyntest.py +++ /dev/null @@ -1,404 +0,0 @@ -# asyntest.py Test/demo of the 'micro' Event, Barrier and Semaphore classes -# Test/demo of official asyncio library and official Lock class - -# The MIT License (MIT) -# -# Copyright (c) 2017-2018 Peter Hinch -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial 
portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. - -# CPython 3.5 compatibility -# (ignore RuntimeWarning: coroutine '_g' was never awaited) - -try: - import uasyncio as asyncio -except ImportError: - import asyncio - -import asyn - -def print_tests(): - st = '''Available functions: -print_tests() Print this list. -ack_test() Test event acknowledge. -event_test(lp=True) Test Event and Lock objects. If lp use low priority mechanism. -barrier_test() Test the Barrier class. -semaphore_test(bounded=False) Test Semaphore or BoundedSemaphore. -condition_test() Test the Condition class. -gather_test() Test the Gather class - -Recommended to issue ctrl-D after running each test. 
-''' - print('\x1b[32m') - print(st) - print('\x1b[39m') - -print_tests() - -def printexp(exp, runtime=0): - print('Expected output:') - print('\x1b[32m') - print(exp) - print('\x1b[39m') - if runtime: - print('Running (runtime = {}s):'.format(runtime)) - else: - print('Running (runtime < 1s):') - -# ************ Test Event class ************ -# Demo use of acknowledge event - -async def event_wait(event, ack_event, n): - await event - print('Eventwait {} got event with value {}'.format(n, event.value())) - ack_event.set() - -async def run_ack(): - loop = asyncio.get_event_loop() - event = asyn.Event() - ack1 = asyn.Event() - ack2 = asyn.Event() - count = 0 - while True: - loop.create_task(event_wait(event, ack1, 1)) - loop.create_task(event_wait(event, ack2, 2)) - event.set(count) - count += 1 - print('event was set') - await ack1 - ack1.clear() - print('Cleared ack1') - await ack2 - ack2.clear() - print('Cleared ack2') - event.clear() - print('Cleared event') - await asyncio.sleep(1) - -async def ack_coro(delay): - await asyncio.sleep(delay) - print("I've seen attack ships burn on the shoulder of Orion...") - print("Time to die...") - -def ack_test(): - printexp('''event was set -Eventwait 1 got event with value 0 -Eventwait 2 got event with value 0 -Cleared ack1 -Cleared ack2 -Cleared event -event was set -Eventwait 1 got event with value 1 -Eventwait 2 got event with value 1 - -... text omitted ... - -Eventwait 1 got event with value 9 -Eventwait 2 got event with value 9 -Cleared ack1 -Cleared ack2 -Cleared event -I've seen attack ships burn on the shoulder of Orion... -Time to die... 
-''', 10) - loop = asyncio.get_event_loop() - loop.create_task(run_ack()) - loop.run_until_complete(ack_coro(10)) - -# ************ Test Lock and Event classes ************ - -async def run_lock(n, lock): - print('run_lock {} waiting for lock'.format(n)) - await lock.acquire() - print('run_lock {} acquired lock'.format(n)) - await asyncio.sleep(1) # Delay to demo other coros waiting for lock - lock.release() - print('run_lock {} released lock'.format(n)) - -async def eventset(event): - print('Waiting 5 secs before setting event') - await asyncio.sleep(5) - event.set() - print('event was set') - -async def eventwait(event): - print('waiting for event') - await event - print('got event') - event.clear() - -async def run_event_test(lp): - print('Test Lock class') - loop = asyncio.get_event_loop() - lock = asyn.Lock() - loop.create_task(run_lock(1, lock)) - loop.create_task(run_lock(2, lock)) - loop.create_task(run_lock(3, lock)) - print('Test Event class') - event = asyn.Event(lp) - loop.create_task(eventset(event)) - await eventwait(event) # run_event_test runs fast until this point - print('Event status {}'.format('Incorrect' if event.is_set() else 'OK')) - print('Tasks complete') - -def event_test(lp=True): # Option to use low priority scheduling - printexp('''Test Lock class -Test Event class -waiting for event -run_lock 1 waiting for lock -run_lock 1 acquired lock -run_lock 2 waiting for lock -run_lock 3 waiting for lock -Waiting 5 secs before setting event -run_lock 1 released lock -run_lock 2 acquired lock -run_lock 2 released lock -run_lock 3 acquired lock -run_lock 3 released lock -event was set -got event -Event status OK -Tasks complete -''', 5) - loop = asyncio.get_event_loop() - loop.run_until_complete(run_event_test(lp)) - -# ************ Barrier test ************ - -async def killer(duration): - await asyncio.sleep(duration) - -def callback(text): - print(text) - -async def report(barrier): - for i in range(5): - print('{} '.format(i), end='') - await 
barrier - -def barrier_test(): - printexp('''0 0 0 Synch -1 1 1 Synch -2 2 2 Synch -3 3 3 Synch -4 4 4 Synch -''') - barrier = asyn.Barrier(3, callback, ('Synch',)) - loop = asyncio.get_event_loop() - for _ in range(3): - loop.create_task(report(barrier)) - loop.run_until_complete(killer(2)) - loop.close() - -# ************ Semaphore test ************ - -async def run_sema(n, sema, barrier): - print('run_sema {} trying to access semaphore'.format(n)) - async with sema: - print('run_sema {} acquired semaphore'.format(n)) - # Delay demonstrates other coros waiting for semaphore - await asyncio.sleep(1 + n/10) # n/10 ensures deterministic printout - print('run_sema {} has released semaphore'.format(n)) - barrier.trigger() - -async def run_sema_test(bounded): - num_coros = 5 - loop = asyncio.get_event_loop() - barrier = asyn.Barrier(num_coros + 1) - if bounded: - semaphore = asyn.BoundedSemaphore(3) - else: - semaphore = asyn.Semaphore(3) - for n in range(num_coros): - loop.create_task(run_sema(n, semaphore, barrier)) - await barrier # Quit when all coros complete - try: - semaphore.release() - except ValueError: - print('Bounded semaphore exception test OK') - -def semaphore_test(bounded=False): - if bounded: - exp = '''run_sema 0 trying to access semaphore -run_sema 0 acquired semaphore -run_sema 1 trying to access semaphore -run_sema 1 acquired semaphore -run_sema 2 trying to access semaphore -run_sema 2 acquired semaphore -run_sema 3 trying to access semaphore -run_sema 4 trying to access semaphore -run_sema 0 has released semaphore -run_sema 4 acquired semaphore -run_sema 1 has released semaphore -run_sema 3 acquired semaphore -run_sema 2 has released semaphore -run_sema 4 has released semaphore -run_sema 3 has released semaphore -Bounded semaphore exception test OK - -Exact sequence of acquisition may vary when 3 and 4 compete for semaphore.''' - else: - exp = '''run_sema 0 trying to access semaphore -run_sema 0 acquired semaphore -run_sema 1 trying to access 
semaphore -run_sema 1 acquired semaphore -run_sema 2 trying to access semaphore -run_sema 2 acquired semaphore -run_sema 3 trying to access semaphore -run_sema 4 trying to access semaphore -run_sema 0 has released semaphore -run_sema 3 acquired semaphore -run_sema 1 has released semaphore -run_sema 4 acquired semaphore -run_sema 2 has released semaphore -run_sema 3 has released semaphore -run_sema 4 has released semaphore - -Exact sequence of acquisition may vary when 3 and 4 compete for semaphore.''' - printexp(exp, 3) - loop = asyncio.get_event_loop() - loop.run_until_complete(run_sema_test(bounded)) - -# ************ Condition test ************ - -cond = asyn.Condition() -tim = 0 - -@asyn.cancellable -async def cond01(): - while True: - await asyncio.sleep(2) - with await cond: - cond.notify(2) # Notify 2 tasks - -async def cond02(n, barrier): - with await cond: - print('cond02', n, 'Awaiting notification.') - await cond.wait() - print('cond02', n, 'triggered. tim =', tim) - barrier.trigger() - -@asyn.cancellable -async def cond03(): # Maintain a count of seconds - global tim - await asyncio.sleep(0.5) - while True: - await asyncio.sleep(1) - tim += 1 - -def predicate(): - return tim >= 12 - -async def cond04(n, barrier): - with await cond: - print('cond04', n, 'Awaiting notification and predicate.') - await cond.wait_for(predicate) - print('cond04', n, 'triggered. tim =', tim) - barrier.trigger() - -async def cond_go(loop): - ntasks = 7 - barrier = asyn.Barrier(ntasks + 1) - loop.create_task(asyn.Cancellable(cond01)()) - loop.create_task(asyn.Cancellable(cond03)()) - for n in range(ntasks): - loop.create_task(cond02(n, barrier)) - await barrier # All instances of cond02 have completed - # Test wait_for - barrier = asyn.Barrier(2) - loop.create_task(cond04(99, barrier)) - await barrier - # cancel continuously running coros. - await asyn.Cancellable.cancel_all() - print('Done.') - -def condition_test(): - printexp('''cond02 0 Awaiting notification. 
-cond02 1 Awaiting notification. -cond02 2 Awaiting notification. -cond02 3 Awaiting notification. -cond02 4 Awaiting notification. -cond02 5 Awaiting notification. -cond02 6 Awaiting notification. -cond02 5 triggered. tim = 1 -cond02 6 triggered. tim = 1 -cond02 3 triggered. tim = 3 -cond02 4 triggered. tim = 3 -cond02 1 triggered. tim = 5 -cond02 2 triggered. tim = 5 -cond02 0 triggered. tim = 7 -cond04 99 Awaiting notification and predicate. -cond04 99 triggered. tim = 13 -Done. -''', 16) - loop = asyncio.get_event_loop() - loop.run_until_complete(cond_go(loop)) - -# ************ Gather test ************ - -# Task with one positional arg. Demonstrate that result order depends on -# original list order not termination order. -async def gath01(n): - print('gath01', n, 'started') - await asyncio.sleep(3 - n/10) - print('gath01', n, 'done') - return n - -# Takes kwarg. This is last to terminate. -async def gath02(x, y, rats): - print('gath02 started') - await asyncio.sleep(7) - print('gath02 done') - return x * y, rats - -# Only quits on timeout -async def gath03(n): - print('gath03 started') - try: - while True: - await asyncio.sleep(1) - n += 1 - except asyncio.TimeoutError: - print('gath03 timeout') - return n - -async def gath_go(): - gatherables = [asyn.Gatherable(gath01, n) for n in range(4)] - gatherables.append(asyn.Gatherable(gath02, 7, 8, rats=77)) - gatherables.append(asyn.Gatherable(gath03, 0, timeout=5)) - res = await asyn.Gather(gatherables) - print(res) - -def gather_test(): - printexp('''gath01 0 started -gath01 1 started -gath01 2 started -gath01 3 started -gath02 started -gath03 started -gath01 3 done -gath01 2 done -gath01 1 done -gath01 0 done -gath03 timeout -gath02 done -[0, 1, 2, 3, (56, 77), 4] -''', 7) - loop = asyncio.get_event_loop() - loop.run_until_complete(gath_go()) diff --git a/auart.py b/auart.py deleted file mode 100644 index 8600529..0000000 --- a/auart.py +++ /dev/null @@ -1,25 +0,0 @@ -# Test of uasyncio stream I/O using UART -# 
Author: Peter Hinch -# Copyright Peter Hinch 2017 Released under the MIT license -# Link X1 and X2 to test. - -import uasyncio as asyncio -from pyb import UART -uart = UART(4, 9600) - -async def sender(): - swriter = asyncio.StreamWriter(uart, {}) - while True: - await swriter.awrite('Hello uart\n') - await asyncio.sleep(2) - -async def receiver(): - sreader = asyncio.StreamReader(uart) - while True: - res = await sreader.readline() - print('Recieved', res) - -loop = asyncio.get_event_loop() -loop.create_task(sender()) -loop.create_task(receiver()) -loop.run_forever() diff --git a/awaitable.py b/awaitable.py deleted file mode 100644 index c0fe9d6..0000000 --- a/awaitable.py +++ /dev/null @@ -1,35 +0,0 @@ -# awaitable.py Demo of an awaitable class -# Author: Peter Hinch -# Copyright Peter Hinch 2017 Released under the MIT license -# runs in CPython and MicroPython -# Trivial fix for MicroPython issue #2678 - -try: - import asyncio_priority as asyncio -except ImportError: - try: - import uasyncio as asyncio - except ImportError: - import asyncio - -class Hardware(object): - def __init__(self, count): - self.count = count - - def __await__(self): # Typical use, loop until an interface becomes ready. - while self.count: - print(self.count) - yield - self.count -= 1 - - __iter__ = __await__ # issue #2678 - -loop = asyncio.get_event_loop() - -hardware = Hardware(10) - -async def run(): - await hardware - print('Done') - -loop.run_until_complete(run()) diff --git a/benchmarks/call_lp.py b/benchmarks/call_lp.py deleted file mode 100644 index 57110b5..0000000 --- a/benchmarks/call_lp.py +++ /dev/null @@ -1,39 +0,0 @@ -# call_lp.py Demo of low priority callback. Author Peter Hinch April 2017. -# Requires experimental version of core.py - -try: - import asyncio_priority as asyncio -except ImportError: - print('This demo requires asyncio_priority.py') -import pyb - -count = 0 -numbers = 0 - -async def report(): - await asyncio.after(2) - print('Callback executed {} times. 
Expected count 2000/20 = 100 times.'.format(count)) - print('Avg. of {} random numbers in range 0 to 1023 was {}'.format(count, numbers // count)) - -def callback(num): - global count, numbers - count += 1 - numbers += num // 2**20 # range 0 to 1023 - -def cb(arg): - print(arg) - -async def run_test(): - loop = asyncio.get_event_loop() - loop.call_after(1, cb, 'One second has elapsed.') # Test args - loop.call_after_ms(500, cb, '500ms has elapsed.') - print('Callbacks scheduled.') - while True: - loop.call_after(0, callback, pyb.rng()) # demo use of args - yield 20 # 20ms - -print('Test runs for 2 seconds') -loop = asyncio.get_event_loop() -loop.create_task(run_test()) -loop.run_until_complete(report()) - diff --git a/benchmarks/latency.py b/benchmarks/latency.py deleted file mode 100644 index 5fec4eb..0000000 --- a/benchmarks/latency.py +++ /dev/null @@ -1,122 +0,0 @@ -# latency.py Benchmark for uasyncio. Author Peter Hinch May 2017. - -# This measures the scheduling latency of a notional device driver running in the -# presence of other coros. This can test asyncio_priority.py which incorporates -# the priority mechanism. (In the home directory of this repo). - -# When running the test that uses the priority mechanism the latency is 300us which -# is determined by the time it takes uasyncio to schedule a coro (see rate.py). -# This is because, when the priority() coro issues await device it is the only coro -# on the normal queue and it therefore is immediately scheduled. - -# When running the test without the priority mechanism, the latency is D*Nms where N -# is the number of instances of the foo() coro and D is the processing period of -# foo() in ms (2). This is because priority() will only be rescheduled after every -# foo() instance has run. - -# For compute-intensive tasks a yield every 2ms is reasonably efficient. A shorter -# period implies a significant proportion of CPU cycles being taken up in scheduling. 
- -try: - import asyncio_priority as asyncio - lp_version = True -except ImportError: - import uasyncio as asyncio - lp_version = False - -import pyb -import utime as time -import gc - -num_coros = (5, 10, 100, 200) -duration = 2 # Time to run for each number of coros -done = False - -tmax = 0 -tmin = 1000000 -dtotal = 0 -count = 0 -lst_tmax = [tmax] * len(num_coros) # Max, min and avg error values -lst_tmin = [tmin] * len(num_coros) -lst_sd = [0] * len(num_coros) - -class DummyDeviceDriver(): - def __iter__(self): - yield - -async def report(): - # Don't compromise results by executing too soon. Time round loop is duration + 1 - await after(1 + len(num_coros) * (duration + 1)) - print('Awaiting result...') - while not done: - await after_ms(1000) - s = 'Coros {:4d} Latency = {:6.2f}ms min. {:6.2f}ms max. {:6.2f}ms avg.' - for x, n in enumerate(num_coros): - print(s.format(n, lst_tmin[x] / 1000, lst_tmax[x] /1000, lst_sd[x] / 1000)) - -async def lp_task(delay): - await after_ms(0) # If running low priority get on LP queue ASAP - while True: - time.sleep_ms(delay) # Simulate processing - await after_ms(0) - -async def priority(): - global tmax, tmin, dtotal, count - device = DummyDeviceDriver() - while True: - await after(0) # Ensure low priority coros get to run - tstart = time.ticks_us() - await device # Measure the latency - delta = time.ticks_diff(time.ticks_us(), tstart) - tmax = max(tmax, delta) - tmin = min(tmin, delta) - dtotal += delta - count += 1 - -async def run_test(delay): - global done, tmax, tmin, dtotal, count - loop.create_task(priority()) - old_n = 0 - for n, n_coros in enumerate(num_coros): - print('{:4d} coros. 
Test for {}s'.format(n_coros, duration)) - for _ in range(n_coros - old_n): - loop.create_task(lp_task(delay)) - await asyncio.sleep(1) # ensure tasks are all on LP queue before we measure - gc.collect() # ensure gc doesn't cloud the issue - old_n = n_coros - tmax = 0 - tmin = 1000000 - dtotal = 0 - count = 0 - await asyncio.sleep(duration) - lst_tmin[n] = tmin - lst_tmax[n] = tmax - lst_sd[n] = dtotal / count - done = True - -def test(use_priority=True): - global after, after_ms, loop, lp_version - processing_delay = 2 # Processing time in low priority task (ms) - if use_priority and not lp_version: - print('To test priority mechanism you must use asyncio_priority.py') - else: - ntasks = max(num_coros) + 10 #4 - if use_priority: - loop = asyncio.get_event_loop(ntasks, ntasks, ntasks) - after = asyncio.after - after_ms = asyncio.after_ms - else: - lp_version = False - after = asyncio.sleep - after_ms = asyncio.sleep_ms - loop = asyncio.get_event_loop(ntasks, ntasks) - s = 'Testing latency of priority task with coros blocking for {}ms.' - print(s.format(processing_delay)) - if lp_version: - print('Using priority mechanism.') - else: - print('Not using priority mechanism.') - loop.create_task(run_test(processing_delay)) - loop.run_until_complete(report()) - -print('Issue latency.test() to test priority mechanism, latency.test(False) to test standard algo.') diff --git a/benchmarks/overdue.py b/benchmarks/overdue.py deleted file mode 100644 index 068f785..0000000 --- a/benchmarks/overdue.py +++ /dev/null @@ -1,37 +0,0 @@ -# overdue.py Test for "low priority" uasyncio. Author Peter Hinch April 2017. 
-try: - import asyncio_priority as asyncio -except ImportError: - print('This demo requires asyncio_priority.py') - -ntimes = 0 - -async def lp_task(): - global ntimes - while True: - await asyncio.after_ms(100) - print('LP task runs.') - ntimes += 1 - -async def hp_task(): # Hog the scheduler - while True: - await asyncio.sleep_ms(0) - -async def report(): - global ntimes - loop.max_overdue_ms(1000) - loop.create_task(hp_task()) - loop.create_task(lp_task()) - print('First test runs for 10 secs. Max overdue time = 1s.') - await asyncio.sleep(10) - print('Low priority coro was scheduled {} times: (should be 9).'.format(ntimes)) - loop.max_overdue_ms(0) - ntimes = 0 - print('Second test runs for 10 secs. Default scheduling.') - print('Low priority coro should not be scheduled.') - await asyncio.sleep(10) - print('Low priority coro was scheduled {} times: (should be 0).'.format(ntimes)) - -loop = asyncio.get_event_loop() -loop.run_until_complete(report()) - diff --git a/benchmarks/priority.py b/benchmarks/priority.py deleted file mode 100644 index 57b3da3..0000000 --- a/benchmarks/priority.py +++ /dev/null @@ -1,111 +0,0 @@ -# priority.py Demonstrate high priority scheduling in modified uasyncio. -# Author Peter Hinch May 2017. - -# Measures the maximum latency of a high priority task. This tests a flag set -# by a timer interrupt to ensure a realistic measurement. The "obvious" way, -# using a coro to set the flag, produces unrealistically optimistic results -# because the scheduler is started immediately after the flag is set. 
- -try: - import asyncio_priority as asyncio -except ImportError: - print('This demo requires asyncio_priority.py') -import pyb -import utime as time -import gc -import micropython -micropython.alloc_emergency_exception_buf(100) - -n_hp_tasks = 2 # Number of high priority tasks -n_tasks = 4 # Number of normal priority tasks - -max_latency = 0 # Results: max latency of priority task -tmax = 0 # Latency of normal task -tmin = 1000000 - -class DummyDeviceDriver(): - def __iter__(self): - yield - -# boolean flag records time between setting and clearing it. -class Flag(): - def __init__(self): - self.flag = False - self.time_us = 0 - - def __call__(self): - return self.flag - - def set_(self): - self.flag = True - self.time_us = time.ticks_us() - - def clear(self): - self.flag = False - return time.ticks_diff(time.ticks_us(), self.time_us) - -# Instantiate a flag for each priority task -flags = [Flag() for _ in range(n_hp_tasks)] - -# Wait for a flag then clear it, updating global max_latency. -async def urgent(n): - global max_latency - flag = flags[n] - while True: - # Pause until flag is set. The callback is the bound method flag.__call__() - await asyncio.when(flag) # callback is passed not using function call syntax - latency = flag.clear() # Timer ISR has set the flag. Clear it. - max_latency = max(max_latency, latency) - -# Timer callback: hard IRQ which sets a flag to be tested by a priority coro, -# set each flag in turn -nflag = 0 -def trig(t): - global nflag - flags[nflag].set_() - nflag += 1 - nflag %= n_hp_tasks - -tim = pyb.Timer(4) - - -# Have a number of normal tasks each using some CPU time -async def normal_task(delay): - while True: - time.sleep_ms(delay) # Simulate processing - await asyncio.sleep_ms(0) - -# Measure the scheduling latency of a normal task which waits on an event. -# In this instance the driver returns immediately emulating an event which has -# already occurred - so we measure the scheduling latency. 
-async def norm_latency(): - global tmax, tmin - device = DummyDeviceDriver() - while True: - await asyncio.sleep_ms(100) - gc.collect() # For precise timing - tstart = time.ticks_us() - await device # Measure the latency - delta = time.ticks_diff(time.ticks_us(), tstart) - tmax = max(tmax, delta) - tmin = min(tmin, delta) - -# Ensure coros are running before we start the timer and measurement. -async def report(): - await asyncio.sleep_ms(100) - tim.init(freq=10) - tim.callback(trig) - await asyncio.sleep(2) - print('Max latency of urgent tasks: {}us'.format(max_latency)) - print('Latency of normal tasks: {:6.2f}ms max {:6.2f}ms min.'.format(tmax / 1000, tmin / 1000)) - tim.deinit() - -print('Test runs for two seconds.') -loop = asyncio.get_event_loop() -#loop.allocate_hpq(n_hp_tasks) # Allocate a (small) high priority queue -loop.create_task(norm_latency()) # Measure latency of a normal task -for _ in range(n_tasks): - loop.create_task(normal_task(1)) # Hog CPU for 1ms -for n in range(n_hp_tasks): - loop.create_task(urgent(n)) -loop.run_until_complete(report()) diff --git a/benchmarks/rate.py b/benchmarks/rate.py deleted file mode 100644 index a366bca..0000000 --- a/benchmarks/rate.py +++ /dev/null @@ -1,46 +0,0 @@ -# rate.py Benchmark for uasyncio. Author Peter Hinch Feb 2018. -# Benchmark uasyncio round-robin scheduling performance -# This measures the rate at which uasyncio can schedule a minimal coro which -# mereley increments a global. 
- -# Outcome: minimal coros are scheduled at an interval of ~150us - -import uasyncio as asyncio - -num_coros = (100, 200, 500, 1000) -iterations = [0, 0, 0, 0] -duration = 2 # Time to run for each number of coros -count = 0 -done = False - -async def report(): - while not done: - await asyncio.sleep(1) - for x, n in enumerate(num_coros): - print('Coros {:4d} Iterations/sec {:5d} Duration {:3d}us'.format( - n, int(iterations[x]/duration), int(duration*1000000/iterations[x]))) - -async def foo(): - global count - while True: - yield - count += 1 - -async def test(): - global count, done - old_n = 0 - for n, n_coros in enumerate(num_coros): - print('Testing {} coros for {}secs'.format(n_coros, duration)) - count = 0 - for _ in range(n_coros - old_n): - loop.create_task(foo()) - old_n = n_coros - await asyncio.sleep(duration) - iterations[n] = count - done = True - -ntasks = max(num_coros) + 2 -loop = asyncio.get_event_loop(ntasks, ntasks) -loop.create_task(test()) -loop.run_until_complete(report()) - diff --git a/benchmarks/rate_p.py b/benchmarks/rate_p.py deleted file mode 100644 index ba5e31e..0000000 --- a/benchmarks/rate_p.py +++ /dev/null @@ -1,53 +0,0 @@ -# rate_p.py Benchmark for asyncio_priority.py aiming to measure overhead of -# this version. Compare results with those from rate.py which uses the official -# version. -# Author Peter Hinch Feb 2018. -# Benchmark uasyncio round-robin scheduling performance -# This measures the rate at which uasyncio can schedule a minimal coro which -# mereley increments a global. - -# Outcome: minimal coros are scheduled at an interval of ~190us, independent of -# the number of instances. Overhead relative to official version ~25%. 
- -try: - import asyncio_priority as asyncio -except ImportError: - print('This demo requires asyncio_priority.py') - -num_coros = (100, 200, 500, 1000) -iterations = [0, 0, 0, 0] -duration = 2 # Time to run for each number of coros -count = 0 -done = False - -async def report(): - while not done: - await asyncio.sleep(1) - for x, n in enumerate(num_coros): - print('Coros {:4d} Iterations/sec {:5d} Duration {:3d}us'.format( - n, int(iterations[x]/duration), int(duration*1000000/iterations[x]))) - -async def foo(): - global count - while True: - yield - count += 1 - -async def test(): - global count, done - old_n = 0 - for n, n_coros in enumerate(num_coros): - print('Testing {} coros for {}secs'.format(n_coros, duration)) - count = 0 - for _ in range(n_coros - old_n): - loop.create_task(foo()) - old_n = n_coros - await asyncio.sleep(duration) - iterations[n] = count - done = True - -ntasks = max(num_coros) + 3 -loop = asyncio.get_event_loop(ntasks, ntasks) -loop.create_task(test()) -loop.run_until_complete(report()) - diff --git a/benchmarks/timing.py b/benchmarks/timing.py deleted file mode 100644 index 8578bc3..0000000 --- a/benchmarks/timing.py +++ /dev/null @@ -1,111 +0,0 @@ -# timing.py Benchmark for uasyncio. Author Peter Hinch May 2017. - -# This measures the accuracy of uasyncio.sleep_ms() in the presence of a number of -# other coros. This can test asyncio_priority.py which incorporates the priority -# mechanism. (In the home directory of this repo). - -# Outcome: when the priority mechanism is used the worst-case 10ms delay was 11.0ms -# With the normal algorithm the 10ms delay takes ~N*Dms where N is the number of -# lp_task() instances and D is the lp_task() processing delay (2ms). -# So for 200 coros the 10ms delay takes up to 411ms. 
- - -try: - import asyncio_priority as asyncio - lp_version = True -except ImportError: - import uasyncio as asyncio - lp_version = False - -import pyb -import utime as time -import gc - -num_coros = (5, 10, 100, 200) -duration = 2 # Time to run for each number of coros -done = False - -tmax = 0 -tmin = 1000000 -dtotal = 0 -count = 0 -lst_tmax = [tmax] * len(num_coros) -lst_tmin = [tmin] * len(num_coros) -lst_sd = [0] * len(num_coros) - -async def report(target_delay): - # Don't compromise results by executing too soon. Time round loop is duration + 1 - await after(1 + len(num_coros) * (duration + 1)) - print('Awaiting result...') - while not done: - await after_ms(1000) - print('Nominal delay of priority task was {}ms.'.format(target_delay)) - s = 'Coros {:4d} Actual delay = {:6.2f}ms min. {:6.2f}ms max. {:6.2f}ms avg.' - for x, n in enumerate(num_coros): - print(s.format(n, lst_tmin[x] / 1000, lst_tmax[x] /1000, lst_sd[x] / 1000)) - -async def lp_task(delay): - await after_ms(0) # If running low priority get on LP queue ASAP - while True: - time.sleep_ms(delay) # Simulate processing - await after_ms(0) # LP yield - -async def priority(ms): - global tmax, tmin, dtotal, count - while True: - gc.collect() # GC was affecting result - tstart = time.ticks_us() - await asyncio.sleep_ms(ms) # Measure the actual delay - delta = time.ticks_diff(time.ticks_us(), tstart) - tmax = max(tmax, delta) - tmin = min(tmin, delta) - dtotal += delta - count += 1 - -async def run_test(delay, ms_delay): - global done, tmax, tmin, dtotal, count - loop.create_task(priority(ms_delay)) - old_n = 0 - for n, n_coros in enumerate(num_coros): - print('{:4d} coros. 
Test for {}s'.format(n_coros, duration)) - for _ in range(n_coros - old_n): - loop.create_task(lp_task(delay)) - await asyncio.sleep(1) # ensure tasks are all on LP queue before we measure - old_n = n_coros - tmax = 0 - tmin = 1000000 - dtotal = 0 - count = 0 - await asyncio.sleep(duration) - lst_tmin[n] = tmin - lst_tmax[n] = tmax - lst_sd[n] = dtotal / count - done = True - -def test(use_priority=True): - global after, after_ms, lp_version, loop - target_delay = 10 # Nominal delay in priority task (ms) - processing_delay = 2 # Processing time in low priority task (ms) - if use_priority and not lp_version: - print('To test priority mechanism you must use asyncio_priority.py') - else: - ntasks = max(num_coros) + 4 - if use_priority: - loop = asyncio.get_event_loop(ntasks, ntasks, ntasks) - after = asyncio.after - after_ms = asyncio.after_ms - else: - lp_version = False - after = asyncio.sleep - after_ms = asyncio.sleep_ms - loop = asyncio.get_event_loop(ntasks, ntasks) - s = 'Testing accuracy of {}ms nominal delay with coros blocking for {}ms.' 
- print(s.format(target_delay, processing_delay)) - if lp_version: - print('Using priority mechanism.') - else: - print('Not using priority mechanism.') - loop.create_task(run_test(processing_delay, target_delay)) - loop.run_until_complete(report(target_delay)) - -print('Issue timing.test() to test priority mechanism, timing.test(False) to test standard algo.') diff --git a/cantest.py b/cantest.py deleted file mode 100644 index e6cc43f..0000000 --- a/cantest.py +++ /dev/null @@ -1,422 +0,0 @@ -# cantest.py Tests of task cancellation - -# The MIT License (MIT) -# -# Copyright (c) 2017-2018 Peter Hinch -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. - - -import uasyncio as asyncio -import asyn -import utime as time - -def print_tests(): - st = '''Available functions: -test1() Basic NamedTask cancellation. -test2() Use of Barrier to synchronise NamedTask cancellation. Demo of latency. -test3() Cancellation of a NamedTask which has run to completion. 
-test4() Test of Cancellable class. -test5() Cancellable and NamedTask instances as bound methods. -test6() Test of NamedTask.is_running() and awaiting NamedTask cancellation. -Recommended to issue ctrl-D after running each test. -''' - print('\x1b[32m') - print(st) - print('\x1b[39m') - -print_tests() - -def printexp(exp, runtime=0): - print('Expected output:') - print('\x1b[32m') - print(exp) - print('\x1b[39m') - if runtime: - print('Running (runtime = {}s):'.format(runtime)) - else: - print('Running (runtime < 1s):') - -# cancel_test1() - -@asyn.cancellable -async def foo(num): - try: - await asyncio.sleep(4) - except asyn.StopTask: - print('foo was cancelled.') - return -1 - else: - return num + 42 - -async def kill(task_name): - res = await asyn.NamedTask.cancel(task_name) - if res: - print(task_name, 'will be cancelled when next scheduled') - else: - print(task_name, 'was not cancellable.') - -# Example of a task which cancels another -async def bar(): - await asyncio.sleep(1) - await kill('foo') - await kill('not me') # Will fail because not yet scheduled - -async def run_cancel_test1(): - loop = asyncio.get_event_loop() - loop.create_task(bar()) - res = await asyn.NamedTask('foo', foo, 5) - print(res, asyn.NamedTask.is_running('foo')) - res = await asyn.NamedTask('not me', foo, 0) # Runs to completion - print(res, asyn.NamedTask.is_running('not me')) - -def test1(): - printexp('''foo will be cancelled when next scheduled -not me was not cancellable. -foo was cancelled. --1 False -42 False -''', 8) - loop = asyncio.get_event_loop() - loop.run_until_complete(run_cancel_test1()) - -# test2() -# This test uses a barrier so that cancelling task pauses until cancelled tasks -# have actually terminated. Also tests the propagation of the thrown exception -# to the awaiting coro. - -async def forever(n): - print('Started forever() instance', n) - while True: - await asyncio.sleep(7 + n) - print('Running instance', n) - -# Intercepting the StopTask exception. 
-@asyn.cancellable -async def rats(n): - try: - await forever(n) - except asyn.StopTask: - print('Instance', n, 'was cancelled') - -async def run_cancel_test2(): - barrier = asyn.Barrier(3) - loop = asyncio.get_event_loop() - loop.create_task(asyn.NamedTask('rats_1', rats, 1, barrier=barrier)()) - loop.create_task(asyn.NamedTask('rats_2', rats, 2, barrier=barrier)()) - print('Running two tasks') - await asyncio.sleep(10) - print('About to cancel tasks') - await asyn.NamedTask.cancel('rats_1') # These will stop when their wait is complete - await asyn.NamedTask.cancel('rats_2') - await barrier # So wait for that to occur. - print('tasks were cancelled') - -def test2(): - printexp('''Running two tasks -Started forever() instance 1 -Started forever() instance 2 -Running instance 1 -Running instance 2 -About to cancel tasks -Instance 1 was cancelled -Instance 2 was cancelled -tasks were cancelled -''', 20) - loop = asyncio.get_event_loop() - loop.run_until_complete(run_cancel_test2()) - -# test3() -# Test of cancelling a task which has already terminated - -# Intercepting the StopTask exception. -@asyn.cancellable -async def cant3(): - try: - await asyncio.sleep(1) - print('Task cant3 has ended.') - except asyn.StopTask: - print('Task cant3 was cancelled') - -async def run_cancel_test3(): - barrier = asyn.Barrier(2) - loop = asyncio.get_event_loop() - loop.create_task(asyn.NamedTask('cant3', cant3, barrier=barrier)()) - print('Task cant3 running status', asyn.NamedTask.is_running('cant3')) - await asyncio.sleep(3) - print('Task cant3 running status', asyn.NamedTask.is_running('cant3')) - print('About to cancel task') - await asyn.NamedTask.cancel('cant3') - print('Cancelled') - print('Task cant3 running status', asyn.NamedTask.is_running('cant3')) - await barrier - print('tasks were cancelled') - -def test3(): - printexp('''Task cant3 running status True -Task cant3 has ended. 
-Task cant3 running status False -About to cancel task -Cancelled -Task cant3 running status False -tasks were cancelled -''', 3) - loop = asyncio.get_event_loop() - loop.run_until_complete(run_cancel_test3()) - -# test4() -# Test of cancelling a task which has already terminated - -# Cancellable coros can trap the StopTask. They are passed the -# task_id automatically - -@asyn.cancellable -async def cant40(num): - while True: - try: - await asyn.sleep(1) - print('Task cant40 no. {} running.'.format(num)) - except asyn.StopTask: - print('Task cant40 no. {} was cancelled'.format(num)) - return - -@asyn.cancellable -async def cant41(num, arg=0): - try: - await asyn.sleep(1) - print('Task cant41 no. {} running, arg {}.'.format(num, arg)) - except asyn.StopTask: - print('Task cant41 no. {} was cancelled.'.format(num)) - return - else: - print('Task cant41 no. {} ended.'.format(num)) - -async def cant42(num): - while True: - print('Task cant42 no. {} running'.format(num)) - await asyn.sleep(1.2) - -# Test await syntax and throwing exception to subtask -@asyn.cancellable -async def chained(num, x, y, *, red, blue): - print('Args:', x, y, red, blue) # Test args and kwargs - try: - await cant42(num) - except asyn.StopTask: - print('Task chained no. 
{} was cancelled'.format(num)) - -async def run_cancel_test4(): - await asyn.Cancellable(cant41, 0, 5) - loop = asyncio.get_event_loop() - loop.create_task(asyn.Cancellable(cant40, 1)()) # 3 instances in default group 0 - loop.create_task(asyn.Cancellable(cant40, 2)()) - loop.create_task(asyn.Cancellable(cant40, 3)()) - loop.create_task(asyn.Cancellable(chained, 4, 1, 2, red=3, blue=4, group=1)()) - loop.create_task(asyn.Cancellable(cant41, 5)()) # Runs to completion - print('Running tasks') - await asyncio.sleep(3) - print('About to cancel group 0 tasks') - await asyn.Cancellable.cancel_all() # All in default group 0 - print('Group 0 tasks were cancelled') - await asyncio.sleep(1) # Demo chained still running - print('About to cancel group 1 tasks') - await asyn.Cancellable.cancel_all(1) # Group 1 - print('Group 1 tasks were cancelled') - await asyncio.sleep(1) - -def test4(): - printexp('''Task cant41 no. 0 running, arg 5. -Task cant41 no. 0 ended. -Running tasks -Args: 1 2 3 4 -Task cant42 no. 4 running -Task cant40 no. 1 running. -Task cant40 no. 2 running. -Task cant40 no. 3 running. -Task cant41 no. 5 running, arg 0. -Task cant41 no. 5 ended. -Task cant42 no. 4 running -Task cant40 no. 1 running. -Task cant40 no. 2 running. -Task cant40 no. 3 running. -Task cant42 no. 4 running -About to cancel group 0 tasks -Task cant40 no. 1 was cancelled -Task cant40 no. 2 was cancelled -Task cant40 no. 3 was cancelled -Group 0 tasks were cancelled -Task cant42 no. 4 running -About to cancel group 1 tasks -Task chained no. 
4 was cancelled -Group 1 tasks were cancelled -''', 6) - loop = asyncio.get_event_loop() - loop.run_until_complete(run_cancel_test4()) - -# test5 -# Test of task cancellation where tasks are bound methods - -class CanTest(): - async def start(self, loop): - loop.create_task(asyn.Cancellable(self.foo, 1)()) # 3 instances in default group 0 - loop.create_task(asyn.Cancellable(self.foo, 2)()) - loop.create_task(asyn.Cancellable(self.foo, 3)()) - loop.create_task(asyn.NamedTask('my bar', self.bar, 4, y=42)()) - await asyncio.sleep(4.5) - await asyn.NamedTask.cancel('my bar') - await asyn.Cancellable.cancel_all() - await asyncio.sleep(1) - print('Done') - - @asyn.cancellable - async def foo(self, arg): - try: - while True: - await asyn.sleep(1) - print('foo running, arg', arg) - except asyn.StopTask: - print('foo was cancelled') - - @asyn.cancellable - async def bar(self, arg, *, x=1, y=2): - try: - while True: - await asyn.sleep(1) - print('bar running, arg', arg, x, y) - except asyn.StopTask: - print('bar was cancelled') - -def test5(): - printexp('''foo running, arg 1 -foo running, arg 2 -foo running, arg 3 -bar running, arg 4 1 42 -foo running, arg 1 -foo running, arg 2 -foo running, arg 3 -bar running, arg 4 1 42 -foo running, arg 1 -foo running, arg 2 -foo running, arg 3 -bar running, arg 4 1 42 -foo running, arg 1 -foo running, arg 2 -foo running, arg 3 -bar running, arg 4 1 42 -foo was cancelled -foo was cancelled -foo was cancelled -bar was cancelled -Done -''', 6) - cantest = CanTest() - loop = asyncio.get_event_loop() - loop.run_until_complete(cantest.start(loop)) - -# test 6: test NamedTask.is_running() -@asyn.cancellable -async def cant60(name): - print('Task cant60 name \"{}\" running.'.format(name)) - try: - for _ in range(5): - await asyncio.sleep(2) # 2 secs latency. 
- except asyn.StopTask: - print('Task cant60 name \"{}\" was cancelled.'.format(name)) - return - else: - print('Task cant60 name \"{}\" ended.'.format(name)) - -@asyn.cancellable -async def cant61(): - try: - while True: - for name in ('complete', 'cancel me'): - res = asyn.NamedTask.is_running(name) - print('Task \"{}\" running: {}'.format(name, res)) - await asyncio.sleep(1) - except asyn.StopTask: - print('Task cant61 cancelled.') - -async def run_cancel_test6(loop): - for name in ('complete', 'cancel me'): - loop.create_task(asyn.NamedTask(name, cant60, name)()) - loop.create_task(asyn.Cancellable(cant61)()) - await asyncio.sleep(4.5) - print('Cancelling task \"{}\". 1.5 secs latency.'.format(name)) - await asyn.NamedTask.cancel(name) - await asyncio.sleep(7) - name = 'cancel wait' - loop.create_task(asyn.NamedTask(name, cant60, name)()) - await asyncio.sleep(0.5) - print('Cancelling task \"{}\". 1.5 secs latency.'.format(name)) - t = time.ticks_ms() - await asyn.NamedTask.cancel('cancel wait', nowait=False) - print('Was cancelled in {} ms'.format(time.ticks_diff(time.ticks_ms(), t))) - print('Cancelling cant61') - await asyn.Cancellable.cancel_all() - print('Done') - - -def test6(): - printexp('''Task cant60 name "complete" running. -Task cant60 name "cancel me" running. -Task "complete" running: True -Task "cancel me" running: True -Task "complete" running: True -Task "cancel me" running: True -Task "complete" running: True -Task "cancel me" running: True -Task "complete" running: True -Task "cancel me" running: True -Task "complete" running: True -Task "cancel me" running: True -Cancelling task "cancel me". 1.5 secs latency. -Task "complete" running: True -Task "cancel me" running: True -Task cant60 name "cancel me" was cancelled. 
-Task "complete" running: True -Task "cancel me" running: False -Task "complete" running: True -Task "cancel me" running: False -Task "complete" running: True -Task "cancel me" running: False -Task "complete" running: True -Task "cancel me" running: False -Task cant60 name "complete" ended. -Task "complete" running: False -Task "cancel me" running: False -Task "complete" running: False -Task "cancel me" running: False -Task cant60 name "cancel wait" running. -Cancelling task "cancel wait". 1.5 secs latency. -Task "complete" running: False -Task "cancel me" running: False -Task "complete" running: False -Task "cancel me" running: False -Task cant60 name "cancel wait" was cancelled. -Was cancelled in 1503 ms -Cancelling cant61 -Task cant61 cancelled. -Done - - -[Duration of cancel wait may vary depending on platform 1500 <= range <= 1600ms] -''', 14) - loop = asyncio.get_event_loop() - loop.run_until_complete(run_cancel_test6(loop)) diff --git a/chain.py b/chain.py deleted file mode 100644 index 38e6f1a..0000000 --- a/chain.py +++ /dev/null @@ -1,20 +0,0 @@ -# chain.py Demo of chained coros under MicroPython uasyncio -# Author: Peter Hinch -# Copyright Peter Hinch 2017 Released under the MIT license -try: - import uasyncio as asyncio -except ImportError: - import asyncio - -async def compute(x, y): - print("Compute %s + %s ..." % (x, y)) - await asyncio.sleep(1.0) - return x + y - -async def print_sum(x, y): - result = await compute(x, y) - print("%s + %s = %s" % (x, y, result)) - -loop = asyncio.get_event_loop() -loop.run_until_complete(print_sum(1, 2)) -loop.close() diff --git a/check_async_code.py b/check_async_code.py deleted file mode 100755 index f7907a7..0000000 --- a/check_async_code.py +++ /dev/null @@ -1,206 +0,0 @@ -#! /usr/bin/python3 -# -*- coding: utf-8 -*- -# check_async_code.py -# A simple script to identify a common error which causes silent failures under -# MicroPython (issue #3241). 
-# This is where a task is declared with async def and then called as if it were -# a regular function. -# Copyright Peter Hinch 2017 -# Issued under the MIT licence - -import sys -import re - -tasks = set() -mismatch = False - -def pass1(part, lnum): - global mismatch - opart = part - sysnames = ('__aenter__', '__aexit__', '__aiter__', '__anext__') - # These are the commonest system functions declared with async def. - # Mimimise spurious duplicate function definition error messages. - good = True - if not part.startswith('#'): - mismatch = False - part = stripquotes(part, lnum) # Remove quoted strings (which might contain code) - good &= not mismatch - if part.startswith('async'): - pos = part.find('def') - if pos >= 0: - part = part[pos + 3:] - part = part.lstrip() - pos = part.find('(') - if pos >= 0: - fname = part[:pos].strip() - if fname in tasks and fname not in sysnames: - # Note this gives a false positive if a method of the same name - # exists in more than one class. - print('Duplicate function declaration "{}" in line {}'.format(fname, lnum)) - print(opart) - print() - good = False - else: - tasks.add(fname) - return good - -# Strip quoted strings (which may contain code) -def stripquotes(part, lnum=0): - global mismatch - for qchar in ('"', "'"): - pos = part.find(qchar) - if pos >= 0: - part = part[:pos] + part[pos + 1:] # strip 1st qchar - pos1 = part.find(qchar) - if pos > 0: - part = part[:pos] + part[pos1+1:] # Strip whole quoted string - part = stripquotes(part, lnum) - else: - print('Mismatched quotes in line', lnum) - mismatch = True - return part # for what it's worth - return part - -def pass2(part, lnum): - global mismatch - opart = part - good = True - if not part.startswith('#') and not part.startswith('async'): - mismatch = False - part = stripquotes(part, lnum) # Remove quoted strings (which might contain code) - good &= not mismatch - for task in tasks: - sstr = ''.join((task, r'\w*')) - match = re.search(sstr, part) - if match is 
None: # No match - continue - if match.group(0) != task: # No exact match - continue - # Accept await task, await task(args), a = await task(args) - sstr = ''.join((r'.*await[ \t]+', task)) - if re.search(sstr, part): - continue - # Accept await obj.task, await obj.task(args), a = await obj.task(args) - sstr = ''.join((r'.*await[ \t]+\w+\.', task)) - if re.search(sstr, part): - continue - # Accept assignments e.g. a = mytask or - # after = asyncio.after if p_version else asyncio.sleep - # or comparisons thistask == thattask - sstr = ''.join((r'=[ \t]*', task, r'[ \t]*[^(]')) - if re.search(sstr, part): - continue - # Not awaited but could be passed to function e.g. - # run_until_complete(mytask(args)) - sstr = ''.join((r'.*\w+[ \t]*\([ \t]*', task, r'[ \t]*\(')) - if re.search(sstr, part): - sstr = r'run_until_complete|run_forever|create_task|NamedTask' - if re.search(sstr, part): - continue - print('Please review line {}: async function "{}" is passed to a function.'.format(lnum, task)) - print(opart) - print() - good = False - continue - # func(mytask, more_args) may or may not be an error - sstr = ''.join((r'.*\w+[ \t]*\([ \t]*', task, r'[ \t]*[^\(]')) - if re.search(sstr, part): - print('Please review line {}: async function "{}" is passed to a function.'.format(lnum, task)) - print(opart) - print() - good = False - continue - - # Might be a method. Discard object. - sstr = ''.join((r'.*\w+[ \t]*\([ \t]*\w+\.', task)) - if re.search(sstr, part): - continue - print('Please review line {}: async function "{}" is not awaited.'.format(lnum, task)) - print(opart) - print() - good = False - return good - -txt = '''check_async_code.py -usage: check_async_code.py sourcefile.py - -This rather crude script is designed to locate a single type of coding error -which leads to silent runtime failure and hence can be hard to locate. - -It is intended to be used on otherwise correct source files and is not robust -in the face of syntax errors. 
Use pylint or other tools for general syntax -checking. - -It assumes code is written in the style advocated in the tutorial where coros -are declared with "async def". - -Under certain circumstances it can produce false positives. In some cases this -is by design. Given an asynchronous function foo the following is correct: -loop.run_until_complete(foo()) -The following line may or may not be an error depending on the design of bar() -bar(foo, args) -Likewise asynchronous functions can be put into objects such as dicts, lists or -sets. You may wish to review such lines to check that the intention was to put -the function rather than its result into the object. - -A false positive which is a consequence of the hacky nature of this script is -where a task has the same name as a synchronous bound method of some class. A -call to the bound method will produce an erroneous warning. This is because the -code does not parse class definitions. - -In practice the odd false positive is easily spotted in the code. -''' - -def usage(code=0): - print(txt) - sys.exit(code) - -# Process a line -in_triple_quote = False -def do_line(line, passn, lnum): - global in_triple_quote - ignore = False - good = True - # TODO The following isn't strictly correct. A line might be of the form - # erroneous Python ; ''' start of string - # It could therefore miss the error. 
- if re.search(r'[^"]*"""|[^\']*\'\'\'', line): - if in_triple_quote: - # Discard rest of line which terminates triple quote - ignore = True - in_triple_quote = not in_triple_quote - if not in_triple_quote and not ignore: - parts = line.split(';') - for part in parts: - # discard comments and whitespace at start and end - part = part.split('#')[0].strip() - if part: - good &= passn(part, lnum) - return good - -def main(fn): - global in_triple_quote - good = True - try: - with open(fn, 'r') as f: - for passn in (pass1, pass2): - in_triple_quote = False - lnum = 1 - for line in f: - good &= do_line(line, passn, lnum) - lnum += 1 - f.seek(0) - - except FileNotFoundError: - print('File {} does not exist.'.format(fn)) - return - if good: - print('No errors found!') - -if __name__ == "__main__": - if len(sys.argv) !=2: - usage(1) - arg = sys.argv[1].strip() - if arg == '--help' or arg == '-h': - usage() - main(arg) diff --git a/exit_gate_test.py b/exit_gate_test.py deleted file mode 100644 index c30ad31..0000000 --- a/exit_gate_test.py +++ /dev/null @@ -1,30 +0,0 @@ -# exit_gate_test.py Test/demo of the ExitGate class -# Author: Peter Hinch -# Copyright Peter Hinch 2017 Released under the MIT license -import uasyncio as asyncio -from utime import ticks_ms, ticks_diff -from asyn import ExitGate - -async def bar(exit_gate, t): - async with exit_gate: - result = 'normal' if await exit_gate.sleep(t) else 'abort' - tim = ticks_diff(ticks_ms(), tstart) / 1000 - print('{:5.2f} bar() with time value {} completed. 
Result {}.'.format(tim, t, result)) - -async def foo(): - exit_gate = ExitGate() - loop = asyncio.get_event_loop() - for t in range(1, 10): - loop.create_task(bar(exit_gate, t)) - print('Task queue length = ', len(loop.q)) - await asyncio.sleep(3) - print('Task queue length = ', len(loop.q)) - print('Now foo is causing tasks to terminate.') - await exit_gate - print('foo() complete.') - print('Task queue length = ', len(loop.q)) - - -tstart = ticks_ms() -loop = asyncio.get_event_loop() -loop.run_until_complete(foo()) diff --git a/htu21d/README.md b/htu21d/README.md deleted file mode 100644 index 947a679..0000000 --- a/htu21d/README.md +++ /dev/null @@ -1,50 +0,0 @@ -# The HTU21D temperature/humidity sensor. - -A breakout board is available from -[Sparkfun](https://www.sparkfun.com/products/12064). - -This driver was derived from the synchronous Pyboard-specific driver -[here](https://github.com/manitou48/pyboard/blob/master/htu21d.py). It is -designed to be multi-platform and uses `uasyncio` to achieve asynchronous (non- -blocking) operation. The driver maintains `temperature` and `humidity` bound -variables as a non-blocking background task. Consequently reading the values is -effectively instantaneous. - -###### [Main README](../README.md) - -# Files - - 1. `htu21d_mc.py` The asynchronous driver. - 2. `htu_test.py` Test/demo program. - -# The driver - -This provides a single class `HTU21D`. - -Constructor. -This takes two args, `i2c` (mandatory) and an optional `read_delay=10`. The -former must be an initialised I2C bus instance. The `read_delay` (secs) -determines how frequently the data values are updated. - -Public bound values - 1. `temperature` Latest value in Celcius. - 2. `humidity` Latest value of relative humidity (%). - -Initial readings will not be complete until about 120ms after the class is -instantiated. Prior to this the values will be `None`. To avoid such invalid -readings the class is awaitable and may be used as follows. 
- -```python -async def show_values(): - htu = htu21d_mc.HTU21D(i2c) - await htu # Will pause ~120ms - # Data is now valid - while True: - fstr = 'Temp {:5.1f} Humidity {:5.1f}' - print(fstr.format(htu.temperature, htu.humidity)) - await asyncio.sleep(5) -``` - -Thermal inertia of the chip packaging means that there is a lag between the -occurrence of a temperature change and the availability of accurate readings. -There is therefore little practical benefit in reducing the `read_delay`. diff --git a/io.py b/io.py deleted file mode 100644 index 348f1a3..0000000 --- a/io.py +++ /dev/null @@ -1,39 +0,0 @@ -# io.py Failed attempt to use uasyncio IORead mechanism in a custom class. -# It turns out that the necessary support has not been implemented, and -# it is unlikely that this will occur. -import uasyncio as asyncio - -MP_STREAM_POLL_RD = 1 -MP_STREAM_POLL = 3 - -import uasyncio as asyncio -class Device(): - def __init__(self): - self.ready = False - - def fileno(self): - return 999 - - def ioctl(self, cmd, flags): - res = 0 - print('Got here') - if cmd == MP_STREAM_POLL and (flags & MP_STREAM_POLL_RD): - if self.ready: - res = MP_STREAM_POLL_RD - return res - - def read(self): - return - def write(self): - return - - async def readloop(self): - while True: - print('About to yield') - yield asyncio.IORead(self) - print('Should never happen') - -loop = asyncio.get_event_loop() -device = Device() -loop.create_task(device.readloop()) -loop.run_forever() diff --git a/nec_ir/art.py b/nec_ir/art.py deleted file mode 100644 index 2174372..0000000 --- a/nec_ir/art.py +++ /dev/null @@ -1,43 +0,0 @@ -# art.py Test program for IR remote control decoder aremote.py -# Supports Pyboard and ESP8266 - -# Author: Peter Hinch -# Copyright Peter Hinch 2017 Released under the MIT license - -# Run this to characterise a remote. 
- -from sys import platform -import uasyncio as asyncio -if platform == 'pyboard': - from pyb import Pin -elif platform == 'esp8266': - from machine import Pin, freq -else: - print('Unsupported platform', platform) - -from aremote import * - -errors = {BADSTART : 'Invalid start pulse', BADBLOCK : 'Error: bad block', - BADREP : 'Error: repeat', OVERRUN : 'Error: overrun', - BADDATA : 'Error: invalid data', BADADDR : 'Error: invalid address'} - -def cb(data, addr): - if data == REPEAT: - print('Repeat') - elif data >= 0: - print(hex(data), hex(addr)) - else: - print('{} Address: {}'.format(errors[data], hex(addr))) - -def test(): - print('Test for IR receiver. Assumes NEC protocol.') - if platform == 'pyboard': - p = Pin('X3', Pin.IN) - elif platform == 'esp8266': - freq(160000000) - p = Pin(13, Pin.IN) - ir = NEC_IR(p, cb, True) # Assume r/c uses extended addressing - loop = asyncio.get_event_loop() - loop.run_forever() - -test() diff --git a/priority_test.py b/priority_test.py deleted file mode 100644 index eb580eb..0000000 --- a/priority_test.py +++ /dev/null @@ -1,79 +0,0 @@ -# priority_test.py -# Test/demo of task cancellation of low priority tasks -# Author: Peter Hinch -# Copyright Peter Hinch 2018 Released under the MIT license - -# Check availability of 'priority' version -try: - import asyncio_priority as asyncio - p_version = True -except ImportError: - p_version = False - -if not p_version: - print('This program tests and therefore requires asyncio_priority.') - -import asyn - -def printexp(exp, runtime=0): - print('Expected output:') - print('\x1b[32m') - print(exp) - print('\x1b[39m') - if runtime: - print('Running (runtime = {}s):'.format(runtime)) - else: - print('Running (runtime < 1s):') - -@asyn.cancellable -async def foo(num): - print('Starting foo', num) - try: - await asyncio.after(1) - print('foo', num, 'ran to completion.') - except asyn.StopTask: - print('foo', num, 'was cancelled.') - -async def kill(task_name): - if await 
asyn.NamedTask.cancel(task_name): - print(task_name, 'will be cancelled when next scheduled') - else: - print(task_name, 'was not cancellable.') - -# Example of a task which cancels another -async def bar(): - await asyncio.sleep(1) - await kill('foo 0') # Will fail because it has completed - await kill('foo 1') - await kill('foo 3') # Will fail because not yet scheduled - -async def run_cancel_test(): - loop = asyncio.get_event_loop() - await asyn.NamedTask('foo 0', foo, 0) - loop.create_task(asyn.NamedTask('foo 1', foo, 1)()) - loop.create_task(bar()) - await asyncio.sleep(5) - await asyn.NamedTask('foo 2', foo, 2) - await asyn.NamedTask('foo 4', foo, 4) - loop.create_task(asyn.NamedTask('foo 3', foo, 3)()) - await asyncio.sleep(5) - -def test(): - printexp('''Starting foo 0 -foo 0 ran to completion. -Starting foo 1 -foo 0 was not cancellable. -foo 1 will be cancelled when next scheduled -foo 3 was not cancellable. -foo 1 was cancelled. -Starting foo 2 -foo 2 ran to completion. -Starting foo 4 -foo 4 ran to completion. -Starting foo 3 -foo 3 ran to completion. -''', 14) - loop = asyncio.get_event_loop() - loop.run_until_complete(run_cancel_test()) - -test() diff --git a/roundrobin.py b/roundrobin.py deleted file mode 100644 index 096b397..0000000 --- a/roundrobin.py +++ /dev/null @@ -1,40 +0,0 @@ -# roundrobin.py Test/demo of round-robin scheduling -# Author: Peter Hinch -# Copyright Peter Hinch 2017 Released under the MIT license - -# Result on Pyboard with print('Foo', n) commented out -# executions/second: -# Using yield: 4249 -# Using sleep_ms(0) 2750 -# Note using yield in a coro is "unofficial" and may not -# work in future uasyncio revisions. 
- -try: - import asyncio_priority as asyncio -except ImportError: - import uasyncio as asyncio - -count = 0 -period = 5 - - -async def foo(n): - global count - while True: -# yield - await asyncio.sleep_ms(0) - count += 1 - print('Foo', n) - - -async def main(delay): - print('Testing for {} seconds'.format(period)) - await asyncio.sleep(delay) - - -loop = asyncio.get_event_loop() -loop.create_task(foo(1)) -loop.create_task(foo(2)) -loop.create_task(foo(3)) -loop.run_until_complete(main(period)) -print('Coro executions per sec =', count/period) diff --git a/sock_nonblock.py b/sock_nonblock.py deleted file mode 100644 index 2f44464..0000000 --- a/sock_nonblock.py +++ /dev/null @@ -1,110 +0,0 @@ -# sock_nonblock.py Illustration of the type of code required to use nonblocking -# sockets. It is not a working demo and probably has silly errors. -# It is intended as an outline of requirements and also to illustrate some of the -# nasty hacks required on current builds of ESP32 firmware. Platform detection is -# done at runtime. -# If running on ESP8266 these hacks can be eliminated. -# Working implementations may be found in the asynchronous MQTT library. -# https://github.com/peterhinch/micropython-mqtt - -# Author: Peter Hinch -# Copyright Peter Hinch 2018 Released under the MIT license - -import usocket as socket -import network -import machine -import sys -from micropython import const -from uerrno import EINPROGRESS, ETIMEDOUT -from utime import ticks_ms, ticks_diff, sleep_ms - -ESP32 = sys.platform == 'esp32' - -BUSY_ERRORS = [EINPROGRESS, ETIMEDOUT] - -# ESP32. It is not enough to regularly yield to RTOS with machine.idle(). There are -# two cases where an explicit sleep() is required. Where data has been written to the -# socket and a response is awaited, a timeout may occur without a >= 20ms sleep. -# Secondly during WiFi connection sleeps are required to prevent hangs. 
-if ESP32: - # https://forum.micropython.org/viewtopic.php?f=16&t=3608&p=20942#p20942 - BUSY_ERRORS += [118, 119] # Add in weird ESP32 errors - # 20ms seems about the minimum before we miss data read from a socket. - def esp32_pause(): # https://github.com/micropython/micropython-esp32/issues/167 - sleep_ms(20) # This is horrible. -else: - esp32_pause = lambda *_ : None # Do nothing on sane platforms - -# How long to delay between polls. Too long affects throughput, too short can -# starve other coroutines. -_SOCKET_POLL_DELAY = const(5) # ms -_RESPONSE_TIME = const(30000) # ms. max server latency before timeout - -class FOO: - def __init__(self, server, port): - # On ESP32 need to submit WiFi credentials - self._sta_if = network.WLAN(network.STA_IF) - self._sta_if.active(True) - # Note that the following blocks, potentially for seconds, owing to DNS lookup - self._addr = socket.getaddrinfo(server, port)[0][-1] - self._sock = socket.socket() - self._sock.setblocking(False) - try: - self._sock.connect(addr) - except OSError as e: - if e.args[0] not in BUSY_ERRORS: - raise - if ESP32: # Revolting kludge :-( - loop = asyncio.get_event_loop() - loop.create_task(self._idle_task()) - - def _timeout(self, t): - return ticks_diff(ticks_ms(), t) > _RESPONSE_TIME - - # Read and return n bytes. Raise OSError on timeout ( caught by superclass). - async def _as_read(self, n): - sock = self._sock - data = b'' - t = ticks_ms() - while len(data) < n: - esp32_pause() # Necessary on ESP32 or we can time out. - if self._timeout(t) or not self._sta_if.isconnected(): - raise OSError(-1) - try: - msg = sock.read(n - len(data)) - except OSError as e: # ESP32 issues weird 119 errors here - msg = None - if e.args[0] not in BUSY_ERRORS: - raise - if msg == b'': # Connection closed by host (?) 
- raise OSError(-1) - if msg is not None: # data received - data = b''.join((data, msg)) - t = ticks_ms() # reset timeout - await asyncio.sleep_ms(_SOCKET_POLL_DELAY) - return data - - # Write a buffer - async def _as_write(self, bytes_wr): - sock = self._sock - t = ticks_ms() - while bytes_wr: - if self._timeout(t) or not self._sta_if.isconnected(): - raise OSError(-1) - try: - n = sock.write(bytes_wr) - except OSError as e: # ESP32 issues weird 119 errors here - n = 0 - if e.args[0] not in BUSY_ERRORS: - raise - if n: # Bytes still to write - t = ticks_ms() # Something was written: reset t/o - bytes_wr = bytes_wr[n:] - esp32_pause() # Precaution. How to prove whether it's necessary? - await asyncio.sleep_ms(_SOCKET_POLL_DELAY) - - # ESP32 kludge :-( - async def _idle_task(self): - while True: - await asyncio.sleep_ms(10) - machine.idle() # Yield to underlying RTOS diff --git a/v3/README.md b/v3/README.md new file mode 100644 index 0000000..95d7344 --- /dev/null +++ b/v3/README.md @@ -0,0 +1,155 @@ +# 1. Guide to asyncio + +MicroPython's `asyncio` is pre-installed on all platforms except severely +constrained ones such as the 1MB ESP8266. It supports CPython 3.8 syntax and +aims to be a compatible subset of `asyncio`. The current version is 3.0.0. + +## 1.1 Documents + +[asyncio official docs](http://docs.micropython.org/en/latest/library/asyncio.html) + +[Tutorial](./docs/TUTORIAL.md) Intended for users with all levels of experience +of asynchronous programming, including beginners. + +[Drivers](https://github.com/peterhinch/micropython-async/blob/master/v3/docs/DRIVERS.md) +describes device drivers for switches, pushbuttons, ESP32 touch buttons, ADC's +and incremental encoders. + +[Interrupts](https://github.com/peterhinch/micropython-async/blob/master/v3/docs/INTERRUPTS.md) +is a guide to interfacing interrupts to `asyncio`. 
+ +[Event-based programming](./docs/EVENTS.md) is a guide to a way of writing +applications and device drivers which largely does away with callbacks. The doc +assumes some knowledge of `asyncio`. + +[Threading](./docs/THREADING.md) is a guide to the use of multi-threaded and +multi-core programming. Code is offered to enable a `asyncio` application to +deal with blocking functions. + +## 1.2 Debugging tools + +[aiorepl](https://github.com/micropython/micropython-lib/tree/master/micropython/aiorepl) +This official tool enables an application to launch a REPL which is active +while the application is running. From this you can modify and query the +application and run `asyncio` scripts concurrently with the running +application. Author Jim Mussared @jimmo. + +[aioprof](https://gitlab.com/alelec/aioprof/-/tree/main) A profiler for +`asyncio` applications: show the number of calls and the total time used by +each task. Author Andrew Leech @andrewleech. + +[monitor](https://github.com/peterhinch/micropython-monitor) enables a running +`asyncio` application to be monitored using a Pi Pico, ideally with a scope or +logic analyser. Normally requires only one GPIO pin on the target. + +![Image](https://github.com/peterhinch/micropython-monitor/raw/master/images/monitor.jpg) + +## 1.3 Resources in this repo + +### 1.3.1 Test/demo scripts + +Documented in the [tutorial](./docs/TUTORIAL.md). + +### 1.3.2 Synchronisation primitives + +Documented in the [tutorial](./docs/TUTORIAL.md). Comprises: + * Implementations of unsupported CPython primitives including `barrier`, + `queue` and others. + * A software retriggerable monostable timer class `Delay_ms`, similar to a + watchdog. + * Two primitives enabling waiting on groups of `Event` instances. + +### 1.3.3 Threadsafe primitives + +[This doc](https://github.com/peterhinch/micropython-async/blob/master/v3/docs/THREADING.md) +describes issues linking `asyncio` code with code running on other cores or in +other threads. 
The `threadsafe` directory provides: + + * A threadsafe primitive `Message`. + * `ThreadSafeQueue` + * `ThreadSafeEvent` Extends `ThreadsafeFlag`. + +The doc also provides code to enable `asyncio` to handle blocking functions +using threading. + +### 1.3.4 Asynchronous device drivers + +These are documented +[here](https://github.com/peterhinch/micropython-async/blob/master/v3/docs/DRIVERS.md): + * Classes for interfacing switches, pushbuttons and ESP32 touch buttons. + * Drivers for ADC's + * Drivers for incremental encoders. + +### 1.3.5 A scheduler + +This [lightweight scheduler](./docs/SCHEDULE.md) enables tasks to be scheduled +at future times. These can be assigned in a flexible way: a task might run at +4.10am on Monday and Friday if there's no "r" in the month. + +### 1.3.6 Asynchronous interfaces + +These device drivers are intended as examples of asynchronous code which are +useful in their own right: + + * [GPS driver](./docs/GPS.md) Includes various GPS utilities. + * [HTU21D](./docs/HTU21D.md) Temperature and humidity sensor. + * [I2C](./docs/I2C.md) Use Pyboard I2C slave mode to implement a UART-like + asynchronous stream interface. Uses: communication with ESP8266, or (with + coding) to interface a Pyboard to I2C masters. + * [NEC IR](./docs/NEC_IR.md) A receiver for signals from IR remote controls + using the popular NEC protocol. + * [HD44780](./docs/hd44780.md) Driver for common character based LCD displays + based on the Hitachi HD44780 controller. + +# 2. V3 Overview + +These notes are intended for users familiar with `asyncio` under CPython. + +The MicroPython language is based on CPython 3.4. The `asyncio` library now +supports a subset of the CPython 3.8 `asyncio` library. There are non-standard +extensions to optimise services such as millisecond level timing. Its design +focus is on high performance. Scheduling runs without RAM allocation. + +The `asyncio` library supports the following features: + + * `async def` and `await` syntax. 
+ * Awaitable classes (using `__iter__` rather than `__await__`). + * Asynchronous context managers. + * Asynchronous iterators. + * `asyncio.sleep(seconds)`. + * Timeouts (`asyncio.wait_for`). + * Task cancellation (`Task.cancel`). + * Gather. + +It supports millisecond level timing with the following: + * `asyncio.sleep_ms(time)` + +It includes the following CPython compatible synchronisation primitives: + * `Event`. + * `Lock`. + * `gather`. + +This repo includes code for the CPython primitives which are not yet officially +supported. + +The `Future` class is not supported, nor are the `event_loop` methods +`call_soon`, `call_later`, `call_at`. + +## 2.1 Outstanding issues with V3 + +V3 is still a work in progress. The following is a list of issues which I hope +will be addressed in due course. + +### 2.1.1 Fast I/O scheduling + +There is currently no support for this: I/O is scheduled in round robin fashion +with other tasks. There are situations where this is too slow and the scheduler +should be able to poll I/O whenever it gains control. + +### 2.1.2 Synchronisation primitives + +These CPython primitives are outstanding: + * `Semaphore`. + * `BoundedSemaphore`. + * `Condition`. + * `Queue`. diff --git a/v3/__init__.py b/v3/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/v3/as_demos/__init__.py b/v3/as_demos/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/v3/as_demos/aledflash.py b/v3/as_demos/aledflash.py new file mode 100644 index 0000000..6e8fb12 --- /dev/null +++ b/v3/as_demos/aledflash.py @@ -0,0 +1,39 @@ +# aledflash.py Demo/test program for MicroPython asyncio +# Author: Peter Hinch +# Copyright Peter Hinch 2020 Released under the MIT license +# Flashes the onboard LED's each at a different rate. Stops after ten seconds. 
+# Run on MicroPython board bare hardware + +import pyb +import asyncio + + +async def toggle(objLED, time_ms): + while True: + await asyncio.sleep_ms(time_ms) + objLED.toggle() + + +# TEST FUNCTION + + +async def main(duration): + print("Flash LED's for {} seconds".format(duration)) + leds = [pyb.LED(x) for x in range(1, 4)] # Initialise three on board LED's + for x, led in enumerate(leds): # Create a task for each LED + t = int((0.2 + x / 2) * 1000) + asyncio.create_task(toggle(leds[x], t)) + await asyncio.sleep(duration) + + +def test(duration=10): + try: + asyncio.run(main(duration)) + except KeyboardInterrupt: + print("Interrupted") + finally: + asyncio.new_event_loop() + print("as_demos.aledflash.test() to run again.") + + +test() diff --git a/v3/as_demos/apoll.py b/v3/as_demos/apoll.py new file mode 100644 index 0000000..abb609d --- /dev/null +++ b/v3/as_demos/apoll.py @@ -0,0 +1,65 @@ +# Demonstration of a device driver using a coroutine to poll a device. +# Runs on Pyboard: displays results from the onboard accelerometer. +# Uses crude filtering to discard noisy data. + +# Author: Peter Hinch +# Copyright Peter Hinch 2017 Released under the MIT license + +import asyncio +import pyb +import utime as time + + +class Accelerometer(object): + threshold_squared = 16 + + def __init__(self, accelhw, timeout): + self.accelhw = accelhw + self.timeout = timeout + self.last_change = time.ticks_ms() + self.coords = [accelhw.x(), accelhw.y(), accelhw.z()] + + def dsquared(self, xyz): # Return the square of the distance between this and a passed + return sum(map(lambda p, q: (p - q) ** 2, self.coords, xyz)) # acceleration vector + + def poll(self): # Device is noisy. 
Only update if change exceeds a threshold + xyz = [self.accelhw.x(), self.accelhw.y(), self.accelhw.z()] + if self.dsquared(xyz) > Accelerometer.threshold_squared: + self.coords = xyz + self.last_change = time.ticks_ms() + return 0 + return time.ticks_diff(time.ticks_ms(), self.last_change) + + def vector(self): + return self.coords + + def timed_out(self): # Time since last change or last timeout report + if time.ticks_diff(time.ticks_ms(), self.last_change) > self.timeout: + self.last_change = time.ticks_ms() + return True + return False + + +async def accel_coro(timeout=2000): + accelhw = pyb.Accel() # Instantiate accelerometer hardware + await asyncio.sleep_ms(30) # Allow it to settle + accel = Accelerometer(accelhw, timeout) + while True: + result = accel.poll() + if result == 0: # Value has changed + x, y, z = accel.vector() + print("Value x:{:3d} y:{:3d} z:{:3d}".format(x, y, z)) + elif accel.timed_out(): # Report every 2 secs + print("Timeout waiting for accelerometer change") + await asyncio.sleep_ms(100) # Poll every 100ms + + +async def main(delay): + print("Testing accelerometer for {} secs. Move the Pyboard!".format(delay)) + print("Test runs for {}s.".format(delay)) + asyncio.create_task(accel_coro()) + await asyncio.sleep(delay) + print("Test complete!") + + +asyncio.run(main(20)) diff --git a/v3/as_demos/auart.py b/v3/as_demos/auart.py new file mode 100644 index 0000000..1a312b0 --- /dev/null +++ b/v3/as_demos/auart.py @@ -0,0 +1,45 @@ +# Test of uasyncio stream I/O using UART +# Author: Peter Hinch +# Copyright Peter Hinch 2017-2022 Released under the MIT license +# Link X1 and X2 to test. + +# We run with no UART timeout: UART read never blocks. 
+import asyncio +from machine import UART + +uart = UART(4, 9600, timeout=0) + + +async def sender(): + swriter = asyncio.StreamWriter(uart, {}) + while True: + swriter.write("Hello uart\n") + await swriter.drain() + await asyncio.sleep(2) + + +async def receiver(): + sreader = asyncio.StreamReader(uart) + while True: + res = await sreader.readline() + print("Received", res) + + +async def main(): + asyncio.create_task(sender()) + asyncio.create_task(receiver()) + while True: + await asyncio.sleep(1) + + +def test(): + try: + asyncio.run(main()) + except KeyboardInterrupt: + print("Interrupted") + finally: + asyncio.new_event_loop() + print("as_demos.auart.test() to run again.") + + +test() diff --git a/v3/as_demos/auart_hd.py b/v3/as_demos/auart_hd.py new file mode 100644 index 0000000..5a6783f --- /dev/null +++ b/v3/as_demos/auart_hd.py @@ -0,0 +1,119 @@ +# auart_hd.py +# Author: Peter Hinch +# Copyright Peter Hinch 2018-2020 Released under the MIT license + +# Demo of running a half-duplex protocol to a device. The device never sends +# unsolicited messages. An example is a communications device which responds +# to AT commands. +# The master sends a message to the device, which may respond with one or more +# lines of data. The master assumes that the device has sent all its data when +# a timeout has elapsed. + +# In this test a physical device is emulated by the Device class +# To test link X1-X4 and X2-X3 + +from pyb import UART +import asyncio +from primitives.delay_ms import Delay_ms + +# Dummy device waits for any incoming line and responds with 4 lines at 1 second +# intervals. 
+class Device:
+    def __init__(self, uart_no=4):
+        self.uart = UART(uart_no, 9600)
+        self.swriter = asyncio.StreamWriter(self.uart, {})
+        self.sreader = asyncio.StreamReader(self.uart)
+        asyncio.create_task(self._run())
+
+    async def _run(self):
+        responses = ["Line 1", "Line 2", "Line 3", "Goodbye"]
+        while True:
+            res = await self.sreader.readline()
+            for response in responses:
+                await self.swriter.awrite("{}\r\n".format(response))
+                # Demo the fact that the master tolerates slow response.
+                await asyncio.sleep_ms(300)
+
+
+# The master's send_command() method sends a command and waits for a number of
+# lines from the device. The end of the process is signified by a timeout, when
+# a list of lines is returned. This allows line-by-line processing.
+# A special test mode demonstrates the behaviour with a non-responding device. If
+# None is passed, no command is sent. The master waits for a response which never
+# arrives and returns an empty list.
+class Master:
+    def __init__(self, uart_no=2, timeout=4000):
+        self.uart = UART(uart_no, 9600)
+        self.timeout = timeout
+        self.swriter = asyncio.StreamWriter(self.uart, {})
+        self.sreader = asyncio.StreamReader(self.uart)
+        self.delay = Delay_ms()
+        self.response = []
+        asyncio.create_task(self._recv())
+
+    async def _recv(self):
+        while True:
+            res = await self.sreader.readline()
+            self.response.append(res)  # Append to list of lines
+            self.delay.trigger(self.timeout)  # Got something, retrigger timer
+
+    async def send_command(self, command):
+        self.response = []  # Discard any pending messages
+        if command is None:
+            print("Timeout test.")
+        else:
+            await self.swriter.awrite("{}\r\n".format(command))
+            print("Command sent:", command)
+        self.delay.trigger(self.timeout)  # Re-initialise timer
+        while self.delay.running():
+            await asyncio.sleep(1)  # Wait for 4s after last msg received
+        return self.response
+
+
+async def main():
+    print("This test takes 10s to complete.")
+    master = Master()
+    device = Device()
+    for cmd
in ["Run", None]: + print() + res = await master.send_command(cmd) + # can use b''.join(res) if a single string is required. + if res: + print("Result is:") + for line in res: + print(line.decode("UTF8"), end="") + else: + print("Timed out waiting for result.") + + +def printexp(): + st = """Expected output: +This test takes 10s to complete. + +Command sent: Run +Result is: +Line 1 +Line 2 +Line 3 +Goodbye + +Timeout test. +Timed out waiting for result. +""" + print("\x1b[32m") + print(st) + print("\x1b[39m") + + +def test(): + printexp() + try: + asyncio.run(main()) + except KeyboardInterrupt: + print("Interrupted") + finally: + asyncio.new_event_loop() + print("as_demos.auart_hd.test() to run again.") + + +test() diff --git a/v3/as_demos/gather.py b/v3/as_demos/gather.py new file mode 100644 index 0000000..45205f9 --- /dev/null +++ b/v3/as_demos/gather.py @@ -0,0 +1,108 @@ +# gather.py Demo of Gatherable coroutines. Includes 3 cases: +# 1. A normal coro +# 2. A coro with a timeout +# 3. 
A cancellable coro + +import asyncio + + +async def barking(n): + print("Start normal coro barking()") + for _ in range(6): + await asyncio.sleep(1) + print("Done barking.") + return 2 * n + + +async def foo(n): + print("Start timeout coro foo()") + try: + while True: + await asyncio.sleep(1) + n += 1 + except asyncio.CancelledError: + print("Trapped foo timeout.") + raise + return n + + +async def bar(n): + print("Start cancellable bar()") + try: + while True: + await asyncio.sleep(1) + n += 1 + except asyncio.CancelledError: # Demo of trapping + print("Trapped bar cancellation.") + raise + return n + + +async def do_cancel(task): + await asyncio.sleep(5) + print("About to cancel bar") + task.cancel() + + +async def main(rex): + bar_task = asyncio.create_task(bar(70)) # Note args here + tasks = [] + tasks.append(barking(21)) + tasks.append(asyncio.wait_for(foo(10), 7)) + asyncio.create_task(do_cancel(bar_task)) + try: + res = await asyncio.gather(*tasks, return_exceptions=rex) + except asyncio.TimeoutError: + print("foo timed out.") + res = "No result" + print("Result: ", res) + + +exp_false = """Test runs for 10s. Expected output: + +Start cancellable bar() +Start normal coro barking() +Start timeout coro foo() +About to cancel bar +Trapped bar cancellation. +Done barking. +Trapped foo timeout. +foo timed out. +Result: No result + +""" +exp_true = """Test runs for 10s. Expected output: + +Start cancellable bar() +Start normal coro barking() +Start timeout coro foo() +About to cancel bar +Trapped bar cancellation. +Done barking. +Trapped foo timeout. 
+Result: [42, TimeoutError()] + +""" + + +def printexp(st): + print("\x1b[32m") + print(st) + print("\x1b[39m") + + +def test(rex): + st = exp_true if rex else exp_false + printexp(st) + try: + asyncio.run(main(rex)) + except KeyboardInterrupt: + print("Interrupted") + finally: + asyncio.new_event_loop() + print() + print("as_demos.gather.test() to run again.") + print("as_demos.gather.test(True) to see effect of return_exceptions.") + + +test(rex=False) diff --git a/v3/as_demos/iorw.py b/v3/as_demos/iorw.py new file mode 100644 index 0000000..8d91f5d --- /dev/null +++ b/v3/as_demos/iorw.py @@ -0,0 +1,124 @@ +# iorw.py Emulate a device which can read and write one character at a time. + +# Slow hardware is emulated using timers. +# MyIO.write() ouputs a single character and sets the hardware not ready. +# MyIO.readline() returns a single character and sets the hardware not ready. +# Timers asynchronously set the hardware ready. + +import io, pyb +import asyncio +import micropython + +micropython.alloc_emergency_exception_buf(100) + +MP_STREAM_POLL_RD = const(1) +MP_STREAM_POLL_WR = const(4) +MP_STREAM_POLL = const(3) +MP_STREAM_ERROR = const(-1) + + +def printbuf(this_io): + print(bytes(this_io.wbuf[: this_io.wprint_len]).decode(), end="") + + +class MyIO(io.IOBase): + def __init__(self, read=False, write=False): + self.ready_rd = False # Read and write not ready + self.rbuf = b"ready\n" # Read buffer + self.ridx = 0 + pyb.Timer(4, freq=5, callback=self.do_input) + self.wch = b"" + self.wbuf = bytearray(100) # Write buffer + self.wprint_len = 0 + self.widx = 0 + pyb.Timer(5, freq=10, callback=self.do_output) + + # Read callback: emulate asynchronous input from hardware. + # Typically would put bytes into a ring buffer and set .ready_rd. + def do_input(self, t): + self.ready_rd = True # Data is ready to read + + # Write timer callback. 
Emulate hardware: if there's data in the buffer + # write some or all of it + def do_output(self, t): + if self.wch: + self.wbuf[self.widx] = self.wch + self.widx += 1 + if self.wch == ord("\n"): + self.wprint_len = self.widx # Save for schedule + micropython.schedule(printbuf, self) + self.widx = 0 + self.wch = b"" + + def ioctl(self, req, arg): # see ports/stm32/uart.c + ret = MP_STREAM_ERROR + if req == MP_STREAM_POLL: + ret = 0 + if arg & MP_STREAM_POLL_RD: + if self.ready_rd: + ret |= MP_STREAM_POLL_RD + if arg & MP_STREAM_POLL_WR: + if not self.wch: + ret |= MP_STREAM_POLL_WR # Ready if no char pending + return ret + + # Test of device that produces one character at a time + def readline(self): + self.ready_rd = False # Set by timer cb do_input + ch = self.rbuf[self.ridx] + if ch == ord("\n"): + self.ridx = 0 + else: + self.ridx += 1 + return chr(ch) + + # Emulate unbuffered hardware which writes one character: uasyncio waits + # until hardware is ready for the next. Hardware ready is emulated by write + # timer callback. + def write(self, buf, off=0, sz=0): + self.wch = buf[off] # Hardware starts to write a char + return 1 # 1 byte written. uasyncio waits on ioctl write ready + + +async def receiver(myior): + sreader = asyncio.StreamReader(myior) + while True: + res = await sreader.readline() + print("Received", res) + + +async def sender(myiow): + swriter = asyncio.StreamWriter(myiow, {}) + await asyncio.sleep(5) + count = 0 + while True: + count += 1 + tosend = "Wrote Hello MyIO {}\n".format(count) + await swriter.awrite(tosend.encode("UTF8")) + await asyncio.sleep(2) + + +def printexp(): + st = """Received b'ready\\n' +Received b'ready\\n' +Received b'ready\\n' +Received b'ready\\n' +Received b'ready\\n' +Wrote Hello MyIO 1 +Received b'ready\\n' +Received b'ready\\n' +Received b'ready\\n' +Wrote Hello MyIO 2 +Received b'ready\\n' +... +Runs until interrupted (ctrl-c). 
+"""
+    print("\x1b[32m")
+    print(st)
+    print("\x1b[39m")
+
+
+printexp()
+myio = MyIO()
+asyncio.create_task(receiver(myio))
+asyncio.run(sender(myio))
diff --git a/v3/as_demos/monitor/README.md b/v3/as_demos/monitor/README.md
new file mode 100644
index 0000000..9595dc7
--- /dev/null
+++ b/v3/as_demos/monitor/README.md
@@ -0,0 +1,3 @@
+# This repo has moved
+
+[new location](https://github.com/peterhinch/micropython-monitor)
diff --git a/v3/as_demos/rate.py b/v3/as_demos/rate.py
new file mode 100644
index 0000000..46cb5b2
--- /dev/null
+++ b/v3/as_demos/rate.py
@@ -0,0 +1,62 @@
+# rate.py Benchmark for uasyncio. Author Peter Hinch Feb 2018-Apr 2020.
+# Benchmark uasyncio round-robin scheduling performance
+# This measures the rate at which uasyncio can schedule a minimal coro which
+# merely increments a global.
+
+# Outcome on a Pyboard 1.1
+# 100 minimal coros are scheduled at an interval of 195μs on uasyncio V3
+# Compares with ~156μs on official uasyncio V2.
+
+# Results for 100 coros on other platforms at standard clock rate:
+# Pyboard D SF2W 124μs
+# Pico 481μs
+# ESP32 322μs
+# ESP8266 1495μs (could not run 500 or 1000 coros)
+
+# Note that ESP32 benchmarks are notoriously fickle. Above figure was for
+# the reference board running MP V1.18.
Results may vary with firmware +# depending on the layout of code in RAM/IRAM + +import asyncio + +num_coros = (100, 200, 500, 1000) +iterations = [0, 0, 0, 0] +duration = 2 # Time to run for each number of coros +count = 0 +done = False + + +async def foo(): + global count + while True: + await asyncio.sleep_ms(0) + count += 1 + + +async def test(): + global count, done + old_n = 0 + for n, n_coros in enumerate(num_coros): + print("Testing {} coros for {}secs".format(n_coros, duration)) + count = 0 + for _ in range(n_coros - old_n): + asyncio.create_task(foo()) + old_n = n_coros + await asyncio.sleep(duration) + iterations[n] = count + done = True + + +async def report(): + asyncio.create_task(test()) + while not done: + await asyncio.sleep(1) + for x, n in enumerate(num_coros): + print( + "Coros {:4d} Iterations/sec {:5d} Duration {:3d}us".format( + n, int(iterations[x] / duration), int(duration * 1000000 / iterations[x]) + ) + ) + + +asyncio.run(report()) diff --git a/v3/as_demos/roundrobin.py b/v3/as_demos/roundrobin.py new file mode 100644 index 0000000..79bc60d --- /dev/null +++ b/v3/as_demos/roundrobin.py @@ -0,0 +1,34 @@ +# roundrobin.py Test/demo of round-robin scheduling +# Author: Peter Hinch +# Copyright Peter Hinch 2017-2020 Released under the MIT license + +# Result on Pyboard 1.1 with print('Foo', n) commented out +# executions/second 5575.6 on uasyncio V3 + +# uasyncio V2 produced the following results +# 4249 - with a hack where sleep_ms(0) was replaced with yield +# Using sleep_ms(0) 2750 + +import asyncio + +count = 0 +period = 5 + + +async def foo(n): + global count + while True: + await asyncio.sleep_ms(0) + count += 1 + print("Foo", n) + + +async def main(delay): + for n in range(1, 4): + asyncio.create_task(foo(n)) + print("Testing for {:d} seconds".format(delay)) + await asyncio.sleep(delay) + + +asyncio.run(main(period)) +print("Coro executions per sec =", count / period) diff --git a/v3/as_demos/stream_to.py b/v3/as_demos/stream_to.py new 
file mode 100644 index 0000000..a0d7267 --- /dev/null +++ b/v3/as_demos/stream_to.py @@ -0,0 +1,74 @@ +# stream_to.py Demo of StreamReader with timeout. +# Hardware: Pico or Pico W with pin GPIO0 linked to GPIO1 +# Copyright Peter Hinch 2024 Released under the MIT license + +import asyncio +from primitives import Delay_ms +from machine import UART + +_uart = UART(0, 115200, tx=0, rx=1, timeout=0) # Adapt for other hardware + +# Class extends StreamReader to enable read with timeout +class StreamReaderTo(asyncio.StreamReader): + def __init__(self, source): + super().__init__(source) + self._delay_ms = Delay_ms() # Allocate once only + + # Task cancels itself if timeout elapses without a byte being received + async def readintotim(self, buf: bytearray, toms: int) -> int: # toms: timeout in ms + mvb = memoryview(buf) + timer = self._delay_ms + timer.callback(asyncio.current_task().cancel) + timer.trigger(toms) # Start cancellation timer + n = 0 + nbytes = len(buf) + try: + while n < nbytes: + n += await super().readinto(mvb[n:]) + timer.trigger(toms) # Retrigger when bytes received + except asyncio.CancelledError: + pass + timer.stop() + return n + + +# Simple demo +EOT = b"QUIT" # End of transmission + + +async def sender(writer): + s = "The quick brown fox jumps over the lazy dog!" + for _ in range(2): + writer.write(s) + writer.drain() + await asyncio.sleep(1) # < reader timeout + writer.write(s) + writer.drain() + await asyncio.sleep(4) # > reader timeout + writer.write(EOT) + writer.drain() + + +async def receiver(reader): + buf = bytearray(16) # Read in blocks of 16 cbytes + print("Receiving. 
Demo runs for ~15s...") + while not buf.startswith(EOT): + n = await reader.readintotim(buf, 3000) + if n < len(buf): + print("Timeout: ", end="") + print(bytes(buf[:n])) + if n < len(buf): + print("") + print("Demo complete.") + + +async def main(): + reader = StreamReaderTo(_uart) + writer = asyncio.StreamWriter(_uart, {}) + await asyncio.gather(sender(writer), receiver(reader)) + + +try: + asyncio.run(main()) +finally: + _ = asyncio.new_event_loop() diff --git a/v3/as_drivers/as_GPS/__init__.py b/v3/as_drivers/as_GPS/__init__.py new file mode 100644 index 0000000..e7979ed --- /dev/null +++ b/v3/as_drivers/as_GPS/__init__.py @@ -0,0 +1 @@ +from .as_GPS import * diff --git a/v3/as_drivers/as_GPS/as_GPS.py b/v3/as_drivers/as_GPS/as_GPS.py new file mode 100644 index 0000000..f1f553c --- /dev/null +++ b/v3/as_drivers/as_GPS/as_GPS.py @@ -0,0 +1,626 @@ +# as_GPS.py Asynchronous device driver for GPS devices using a UART. +# Sentence parsing based on MicropyGPS by Michael Calvin McCoy +# https://github.com/inmcm/micropyGPS +# http://www.gpsinformation.org/dale/nmea.htm +# Docstrings removed because of question marks over their use in resource +# constrained systems e.g. https://github.com/micropython/micropython/pull/3748 + +# Copyright (c) 2018-2020 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +# astests.py runs under CPython but not MicroPython because mktime is missing +# from Unix build of utime + +# Ported to uasyncio V3 OK. 
+
+import asyncio
+
+try:
+    from micropython import const
+except ImportError:
+    const = lambda x: x
+
+from math import modf
+
+# Float conversion tolerant of empty field
+# gfloat = lambda x : float(x) if x else 0.0
+
+# Angle formats
+DD = const(1)
+DMS = const(2)
+DM = const(3)
+KML = const(4)
+# Speed units
+KPH = const(10)
+MPH = const(11)
+KNOT = const(12)
+# Date formats
+MDY = const(20)
+DMY = const(21)
+LONG = const(22)
+
+# Sentence types
+RMC = const(1)
+GLL = const(2)
+VTG = const(4)
+GGA = const(8)
+GSA = const(16)
+GSV = const(32)
+# Messages carrying data
+POSITION = const(RMC | GLL | GGA)
+ALTITUDE = const(GGA)
+DATE = const(RMC)
+COURSE = const(RMC | VTG)
+
+
+class AS_GPS(object):
+    # Can omit time consuming checks: CRC 6ms Bad char and line length 9ms
+    FULL_CHECK = True
+    _SENTENCE_LIMIT = 76  # Max sentence length (based on GGA sentence)
+    _NO_FIX = 1
+
+    # Return day of week from date. Pyboard RTC format: 1-7 for Monday through Sunday.
+    # https://stackoverflow.com/questions/9847213/how-do-i-get-the-day-of-week-given-a-date-in-python?noredirect=1&lq=1
+    # Adapted for Python 3 and Pyboard RTC format.
+    @staticmethod
+    def _week_day(
+        year, month, day, offset=[0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]
+    ):
+        aux = year - 1700 - (1 if month <= 2 else 0)
+        # day_of_week for 1700/1/1 = 5, Friday
+        day_of_week = 5
+        # partial sum of days between current date and 1700/1/1
+        day_of_week += (aux + (1 if month <= 2 else 0)) * 365
+        # leap year correction
+        day_of_week += aux // 4 - aux // 100 + (aux + 100) // 400
+        # sum monthly and day offsets
+        day_of_week += offset[month - 1] + (day - 1)
+        day_of_week %= 7
+        day_of_week = day_of_week if day_of_week else 7
+        return day_of_week
+
+    # 8-bit xor of characters between "$" and "*". Takes 6ms on Pyboard!
+ @staticmethod + def _crc_check(res, ascii_crc): + try: + crc = int(ascii_crc, 16) + except ValueError: + return False + x = 1 + crc_xor = 0 + while res[x] != "*": + crc_xor ^= ord(res[x]) + x += 1 + return crc_xor == crc + + def __init__( + self, sreader, local_offset=0, fix_cb=lambda *_: None, cb_mask=RMC, fix_cb_args=() + ): + self._sreader = sreader # If None testing: update is called with simulated data + self._fix_cb = fix_cb + self.cb_mask = cb_mask + self._fix_cb_args = fix_cb_args + self.battery = False # Assume no backup battery + + # CPython compatibility. Import utime or time for fix time handling. + try: + import utime + + self._get_time = utime.ticks_ms + self._time_diff = utime.ticks_diff + self._localtime = utime.localtime + self._mktime = utime.mktime + except ImportError: + # Otherwise default to time module for non-embedded implementations + # Should still support millisecond resolution. + import time + + self._get_time = time.time + self._time_diff = lambda start, end: 1000 * (start - end) + self._localtime = time.localtime + self._mktime = time.mktime + + # Key: currently supported NMEA sentences. Value: parse method. + self.supported_sentences = { + "RMC": self._gprmc, + "GGA": self._gpgga, + "VTG": self._gpvtg, + "GSA": self._gpgsa, + "GSV": self._gpgsv, + "GLL": self._gpgll, + } + + ##################### + # Object Status Flags + self._fix_time = None + + ##################### + # Sentence Statistics + self.crc_fails = 0 + self.clean_sentences = 0 + self.parsed_sentences = 0 + self.unsupported_sentences = 0 + + ##################### + # Data From Sentences + # Time. http://www.gpsinformation.org/dale/nmea.htm indicates seconds + # is an integer. However hardware returns a float, but the fractional + # part is always zero. So treat seconds value as an integer. For + # precise timing use PPS signal and as_tGPS library. 
+ self.local_offset = local_offset # hrs + self.epoch_time = 0 # Integer secs since epoch (Y2K under MicroPython) + # Add ms if supplied by device. Only used by timing drivers. + self.msecs = 0 + + # Position/Motion + self._latitude = [0, 0.0, "N"] # (°, mins, N/S) + self._longitude = [0, 0.0, "W"] # (°, mins, E/W) + self._speed = 0.0 # Knot + self.course = 0.0 # ° clockwise from N + self.altitude = 0.0 # Metres + self.geoid_height = 0.0 # Metres + self.magvar = 0.0 # Magnetic variation (°, -ve == west) + + # State variables + self._last_sv_sentence = 0 # for GSV parsing + self._total_sv_sentences = 0 + self._satellite_data = dict() # for get_satellite_data() + self._update_ms = 1000 # Update rate for timing drivers. Default 1 sec. + + # GPS Info + self.satellites_in_view = 0 + self.satellites_in_use = 0 + self.satellites_used = [] + self.hdop = 0.0 + self.pdop = 0.0 + self.vdop = 0.0 + + # Received status + self._valid = 0 # Bitfield of received sentences + if sreader is not None: # Running with UART data + asyncio.create_task(self._run()) + + ########################################## + # Data Stream Handler Functions + ########################################## + + async def _run(self): + while True: + res = await self._sreader.readline() + try: + res = res.decode("utf8") + except UnicodeError: # Garbage: can happen e.g. 
on baudrate change + continue + asyncio.create_task(self._update(res)) + await asyncio.sleep(0) # Ensure task runs and res is copied + + # Update takes a line of text + async def _update(self, line): + line = line.rstrip() # Copy line + # Basic integrity check: may have received partial line e.g on power up + if not line.startswith("$") or not "*" in line or len(line) > self._SENTENCE_LIMIT: + return + # 2.4ms on Pyboard: + if self.FULL_CHECK and not all(10 <= ord(c) <= 126 for c in line): + return # Bad character received + + a = line.split(",") + segs = a[:-1] + a[-1].split("*") + await asyncio.sleep(0) + + if self.FULL_CHECK: # 6ms on Pyboard + if not self._crc_check(line, segs[-1]): + self.crc_fails += 1 # Update statistics + return + await asyncio.sleep(0) + + self.clean_sentences += 1 # Sentence is good but unparsed. + segs[0] = segs[0][1:] # discard $ + segs = segs[:-1] # and checksum + seg0 = segs[0] # e.g. GPGLL + segx = seg0[2:] # e.g. GLL + if seg0.startswith("G") and segx in self.supported_sentences: + try: + s_type = self.supported_sentences[segx](segs) # Parse + except ValueError: + s_type = False + await asyncio.sleep(0) + if isinstance(s_type, int) and (s_type & self.cb_mask): + # Successfully parsed, data was valid and mask matches sentence type + self._fix_cb(self, s_type, *self._fix_cb_args) # Run the callback + if s_type: # Successfully parsed + if self.reparse(segs): # Subclass hook + self.parsed_sentences += 1 + return seg0 # For test programs + else: + if self.parse(segs): # Subclass hook + self.parsed_sentences += 1 + self.unsupported_sentences += 1 + return seg0 # For test programs + + # Optional hooks for subclass + def parse(self, segs): # Parse unsupported sentences + return True + + def reparse(self, segs): # Re-parse supported sentences + return True + + ######################################## + # Fix and Time Functions + ######################################## + + # Caller traps ValueError + def _fix(self, gps_segments, idx_lat, 
idx_long): + # Latitude + l_string = gps_segments[idx_lat] + lat_degs = int(l_string[0:2]) + lat_mins = float(l_string[2:]) + lat_hemi = gps_segments[idx_lat + 1] + # Longitude + l_string = gps_segments[idx_long] + lon_degs = int(l_string[0:3]) + lon_mins = float(l_string[3:]) + lon_hemi = gps_segments[idx_long + 1] + + if lat_hemi not in "NS" or lon_hemi not in "EW": + raise ValueError + self._latitude[0] = lat_degs # In-place to avoid allocation + self._latitude[1] = lat_mins + self._latitude[2] = lat_hemi + self._longitude[0] = lon_degs + self._longitude[1] = lon_mins + self._longitude[2] = lon_hemi + self._fix_time = self._get_time() + + def _dtset(self, _): # For subclass + pass + + # A local offset may exist so check for date rollover. Local offsets can + # include fractions of an hour but not seconds (AFAIK). + # Caller traps ValueError + def _set_date_time(self, utc_string, date_string): + if not date_string or not utc_string: + raise ValueError + hrs = int(utc_string[0:2]) # h + mins = int(utc_string[2:4]) # mins + # Secs from MTK3339 chip is a float but others may return only 2 chars + # for integer secs. If a float keep epoch as integer seconds and store + # the fractional part as integer ms (ms since midnight fits 32 bits). + fss, fsecs = modf(float(utc_string[4:])) + secs = int(fsecs) + self.msecs = int(fss * 1000) + d = int(date_string[0:2]) # day + m = int(date_string[2:4]) # month + y = int(date_string[4:6]) + 2000 # year + wday = self._week_day(y, m, d) + t = int(self._mktime((y, m, d, hrs, mins, int(secs), wday - 1, 0, 0))) + self.epoch_time = t # This is the fundamental datetime reference. + self._dtset(wday) # Subclass may override + + ######################################## + # Sentence Parsers + ######################################## + + # For all parsers: + # Initially the ._valid bit for the sentence type is cleared. + # On error a ValueError is raised: trapped by the caller. + # On successful parsing the ._valid bit is set. 
+ # The ._valid mechanism enables the data_received coro to determine what + # sentence types have been received. + + # Chip sends rubbish RMC messages before first PPS pulse, but these have + # data valid set to 'V' (void) + def _gprmc(self, gps_segments): # Parse RMC sentence + self._valid &= ~RMC + # Check Receiver Data Valid Flag ('A' active) + if not self.battery: + if gps_segments[2] != "A": + raise ValueError + + # UTC Timestamp and date. Can raise ValueError. + self._set_date_time(gps_segments[1], gps_segments[9]) + # Check Receiver Data Valid Flag ('A' active) + if gps_segments[2] != "A": + raise ValueError + + # Data from Receiver is Valid/Has Fix. Longitude / Latitude + # Can raise ValueError. + self._fix(gps_segments, 3, 5) + # Speed + spd_knt = float(gps_segments[7]) + # Course: adapt for Ublox ZED-F9P + course = float(gps_segments[8]) if gps_segments[8] else 0.0 + # Add Magnetic Variation if firmware supplies it + if gps_segments[10]: + mv = float( + gps_segments[10] + ) # Float conversions can throw ValueError, caught by caller. + if gps_segments[11] not in ("EW"): + raise ValueError + self.magvar = mv if gps_segments[11] == "E" else -mv + # Update Object Data + self._speed = spd_knt + self.course = course + self._valid |= RMC + return RMC + + def _gpgll(self, gps_segments): # Parse GLL sentence + self._valid &= ~GLL + # Check Receiver Data Valid Flag + if gps_segments[6] != "A": # Invalid. Don't update data + raise ValueError + + # Data from Receiver is Valid/Has Fix. Longitude / Latitude + self._fix(gps_segments, 1, 3) + # Update Last Fix Time + self._valid |= GLL + return GLL + + # Chip sends VTG messages with meaningless data before getting a fix. 
+ def _gpvtg(self, gps_segments): # Parse VTG sentence + self._valid &= ~VTG + course = float(gps_segments[1]) + spd_knt = float(gps_segments[5]) + self._speed = spd_knt + self.course = course + self._valid |= VTG + return VTG + + def _gpgga(self, gps_segments): # Parse GGA sentence + self._valid &= ~GGA + # Number of Satellites in Use + satellites_in_use = int(gps_segments[7]) + # Horizontal Dilution of Precision + hdop = float(gps_segments[8]) + # Get Fix Status + fix_stat = int(gps_segments[6]) + + # Process Location and Altitude if Fix is GOOD + if fix_stat: + # Longitude / Latitude + self._fix(gps_segments, 2, 4) + # Altitude / Height Above Geoid + altitude = float(gps_segments[9]) + geoid_height = float(gps_segments[11]) + # Update Object Data + self.altitude = altitude + self.geoid_height = geoid_height + self._valid |= GGA + + # Update Object Data + self.satellites_in_use = satellites_in_use + self.hdop = hdop + return GGA + + def _gpgsa(self, gps_segments): # Parse GSA sentence + self._valid &= ~GSA + # Fix Type (None,2D or 3D) + fix_type = int(gps_segments[2]) + # Read All (up to 12) Available PRN Satellite Numbers + sats_used = [] + for sats in range(12): + sat_number_str = gps_segments[3 + sats] + if sat_number_str: + sat_number = int(sat_number_str) + sats_used.append(sat_number) + else: + break + # PDOP,HDOP,VDOP + pdop = float(gps_segments[15]) + hdop = float(gps_segments[16]) + vdop = float(gps_segments[17]) + + # If Fix is GOOD, update fix timestamp + if fix_type <= self._NO_FIX: # Deviation from Michael McCoy's logic. Is this right? + raise ValueError + self.satellites_used = sats_used + self.hdop = hdop + self.vdop = vdop + self.pdop = pdop + self._valid |= GSA + return GSA + + def _gpgsv(self, gps_segments): + # Parse Satellites in View (GSV) sentence. Updates no. of SV sentences, + # the no. of the last SV sentence parsed, and data on each satellite + # present in the sentence. 
+ self._valid &= ~GSV + num_sv_sentences = int(gps_segments[1]) + current_sv_sentence = int(gps_segments[2]) + sats_in_view = int(gps_segments[3]) + + # Create a blank dict to store all the satellite data from this sentence in: + # satellite PRN is key, tuple containing telemetry is value + satellite_dict = dict() + + # Calculate Number of Satelites to pull data for and thus how many segment positions to read + if num_sv_sentences == current_sv_sentence: + sat_segment_limit = ( + (sats_in_view % 4) * 4 + ) + 4 # Last sentence may have 1-4 satellites + else: + sat_segment_limit = ( + 20 # Non-last sentences have 4 satellites and thus read up to position 20 + ) + + # Try to recover data for up to 4 satellites in sentence + for sats in range(4, sat_segment_limit, 4): + + # If a PRN is present, grab satellite data + if gps_segments[sats]: + try: + sat_id = int(gps_segments[sats]) + except IndexError: + raise ValueError # Abandon + + try: # elevation can be null (no value) when not tracking + elevation = int(gps_segments[sats + 1]) + except (ValueError, IndexError): + elevation = None + + try: # azimuth can be null (no value) when not tracking + azimuth = int(gps_segments[sats + 2]) + except (ValueError, IndexError): + azimuth = None + + try: # SNR can be null (no value) when not tracking + snr = int(gps_segments[sats + 3]) + except (ValueError, IndexError): + snr = None + # If no PRN is found, then the sentence has no more satellites to read + else: + break + + # Add Satellite Data to Sentence Dict + satellite_dict[sat_id] = (elevation, azimuth, snr) + + # Update Object Data + self._total_sv_sentences = num_sv_sentences + self._last_sv_sentence = current_sv_sentence + self.satellites_in_view = sats_in_view + + # For a new set of sentences, we either clear out the existing sat data or + # update it as additional SV sentences are parsed + if current_sv_sentence == 1: + self._satellite_data = satellite_dict + else: + self._satellite_data.update(satellite_dict) + # Flag 
that a msg has been received. Does not mean a full set of data is ready. + self._valid |= GSV + return GSV + + ######################################### + # User Interface Methods + ######################################### + + # Data Validity. On startup data may be invalid. During an outage it will be absent. + async def data_received(self, position=False, course=False, date=False, altitude=False): + self._valid = 0 # Assume no messages at start + result = False + while not result: + result = True + await asyncio.sleep(1) # Successfully parsed messages set ._valid bits + if position and not self._valid & POSITION: + result = False + if date and not self._valid & DATE: + result = False + # After a hard reset the chip sends course messages even though no fix + # was received. Ignore this garbage until a fix is received. + if course: + if self._valid & COURSE: + if not self._valid & POSITION: + result = False + else: + result = False + if altitude and not self._valid & ALTITUDE: + result = False + + def latitude(self, coord_format=DD): + # Format Latitude Data Correctly + if coord_format == DD: + decimal_degrees = self._latitude[0] + (self._latitude[1] / 60) + return [decimal_degrees, self._latitude[2]] + elif coord_format == DMS: + mins = int(self._latitude[1]) + seconds = round((self._latitude[1] - mins) * 60) + return [self._latitude[0], mins, seconds, self._latitude[2]] + elif coord_format == DM: + return self._latitude + raise ValueError("Unknown latitude format.") + + def longitude(self, coord_format=DD): + # Format Longitude Data Correctly + if coord_format == DD: + decimal_degrees = self._longitude[0] + (self._longitude[1] / 60) + return [decimal_degrees, self._longitude[2]] + elif coord_format == DMS: + mins = int(self._longitude[1]) + seconds = round((self._longitude[1] - mins) * 60) + return [self._longitude[0], mins, seconds, self._longitude[2]] + elif coord_format == DM: + return self._longitude + raise ValueError("Unknown longitude format.") + + def 
speed(self, units=KNOT): + if units == KNOT: + return self._speed + if units == KPH: + return self._speed * 1.852 + if units == MPH: + return self._speed * 1.151 + raise ValueError("Unknown speed units.") + + async def get_satellite_data(self): + self._total_sv_sentences = 0 + while self._total_sv_sentences == 0: + await asyncio.sleep(0) + while self._total_sv_sentences > self._last_sv_sentence: + await asyncio.sleep(0) + return self._satellite_data + + def time_since_fix(self): # ms since last valid fix + if self._fix_time is None: + return -1 # No fix yet found + return self._time_diff(self._get_time(), self._fix_time) + + def compass_direction(self): # Return cardinal point as string. + from .as_GPS_utils import compass_direction + + return compass_direction(self) + + def latitude_string(self, coord_format=DM): + if coord_format == DD: + return "{:3.6f}° {:s}".format(*self.latitude(DD)) + if coord_format == DMS: + return """{:3d}° {:2d}' {:2d}" {:s}""".format(*self.latitude(DMS)) + if coord_format == KML: + form_lat = self.latitude(DD) + return "{:4.6f}".format(form_lat[0] if form_lat[1] == "N" else -form_lat[0]) + return "{:3d}° {:3.4f}' {:s}".format(*self.latitude(coord_format)) + + def longitude_string(self, coord_format=DM): + if coord_format == DD: + return "{:3.6f}° {:s}".format(*self.longitude(DD)) + if coord_format == DMS: + return """{:3d}° {:2d}' {:2d}" {:s}""".format(*self.longitude(DMS)) + if coord_format == KML: + form_long = self.longitude(DD) + return "{:4.6f}".format(form_long[0] if form_long[1] == "E" else -form_long[0]) + return "{:3d}° {:3.4f}' {:s}".format(*self.longitude(coord_format)) + + def speed_string(self, unit=KPH): + sform = "{:3.2f} {:s}" + speed = self.speed(unit) + if unit == MPH: + return sform.format(speed, "mph") + elif unit == KNOT: + return sform.format(speed, "knots") + return sform.format(speed, "km/h") + + # Return local time (hrs: int, mins: int, secs:float) + @property + def local_time(self): + t = self.epoch_time + 
int(3600 * self.local_offset) + _, _, _, hrs, mins, secs, *_ = self._localtime(t) + return hrs, mins, secs + + @property + def date(self): + t = self.epoch_time + int(3600 * self.local_offset) + y, m, d, *_ = self._localtime(t) + return d, m, y - 2000 + + @property + def utc(self): + t = self.epoch_time + _, _, _, hrs, mins, secs, *_ = self._localtime(t) + return hrs, mins, secs + + def time_string(self, local=True): + hrs, mins, secs = self.local_time if local else self.utc + return "{:02d}:{:02d}:{:02d}".format(hrs, mins, secs) + + def date_string(self, formatting=MDY): + from .as_GPS_utils import date_string + + return date_string(self, formatting) diff --git a/v3/as_drivers/as_GPS/as_GPS_time.py b/v3/as_drivers/as_GPS/as_GPS_time.py new file mode 100644 index 0000000..5742c33 --- /dev/null +++ b/v3/as_drivers/as_GPS/as_GPS_time.py @@ -0,0 +1,192 @@ +# as_GPS_time.py Test scripts for as_tGPS.py read-only driver. +# Using GPS for precision timing and for calibrating Pyboard RTC + +# This is STM-specific: requires pyb module. + +# Copyright (c) 2018-2020 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +import asyncio +import pyb +import utime +import math +from .as_tGPS import GPS_Timer +from threadsafe.message import Message + +# Hardware assumptions. Change as required. +PPS_PIN = pyb.Pin.board.X3 +UART_ID = 4 + +print("Available tests:") +print("calibrate(minutes=5) Set and calibrate the RTC.") +print("drift(minutes=5) Repeatedly print the difference between RTC and GPS time.") +print("time(minutes=1) Print get_ms() and get_t_split values.") +print("usec(minutes=1) Measure accuracy of usec timer.") +print("Press ctrl-d to reboot after each test.") + +# Setup for tests. Red LED toggles on fix, green on PPS interrupt. 
+async def setup(): + red = pyb.LED(1) + green = pyb.LED(2) + uart = pyb.UART(UART_ID, 9600, read_buf_len=200) + sreader = asyncio.StreamReader(uart) + pps_pin = pyb.Pin(PPS_PIN, pyb.Pin.IN) + return GPS_Timer( + sreader, + pps_pin, + local_offset=1, + fix_cb=lambda *_: red.toggle(), + pps_cb=lambda *_: green.toggle(), + ) + + +# Test terminator: task sets the passed event after the passed time. +async def killer(end_event, minutes): + print("Will run for {} minutes.".format(minutes)) + await asyncio.sleep(minutes * 60) + end_event.set() + + +# ******** Calibrate and set the Pyboard RTC ******** +async def do_cal(minutes): + gps = await setup() + await gps.calibrate(minutes) + gps.close() + + +def calibrate(minutes=5): + asyncio.run(do_cal(minutes)) + + +# ******** Drift test ******** +# Every 10s print the difference between GPS time and RTC time +async def drift_test(terminate, gps): + dstart = await gps.delta() + while not terminate.is_set(): + dt = await gps.delta() + print("{} Delta {}μs".format(gps.time_string(), dt)) + await asyncio.sleep(10) + return dt - dstart + + +async def do_drift(minutes): + print("Setting up GPS.") + gps = await setup() + print("Waiting for time data.") + await gps.ready() + terminate = asyncio.Event() + asyncio.create_task(killer(terminate, minutes)) + print("Setting RTC.") + await gps.set_rtc() + print("Measuring drift.") + change = await drift_test(terminate, gps) + ush = int(60 * change / minutes) + spa = int(ush * 365 * 24 / 1000000) + print("Rate of change {}μs/hr {}secs/year".format(ush, spa)) + gps.close() + + +def drift(minutes=5): + asyncio.run(do_drift(minutes)) + + +# ******** Time printing demo ******** +# Every 10s print the difference between GPS time and RTC time +async def do_time(minutes): + fstr = "{}ms Time: {:02d}:{:02d}:{:02d}:{:06d}" + print("Setting up GPS.") + gps = await setup() + print("Waiting for time data.") + await gps.ready() + print("Setting RTC.") + await gps.set_rtc() + terminate = asyncio.Event() + 
asyncio.create_task(killer(terminate, minutes)) + while not terminate.is_set(): + await asyncio.sleep(1) + # In a precision app, get the time list without allocation: + t = gps.get_t_split() + print(fstr.format(gps.get_ms(), t[0], t[1], t[2], t[3])) + gps.close() + + +def time(minutes=1): + asyncio.run(do_time(minutes)) + + +# ******** Measure accracy of μs clock ******** +# At 9600 baud see occasional lag of up to 3ms followed by similar lead. +# This implies that the ISR is being disabled for that period (~3 chars). +# SD 584μs typical. +# Test produces better numbers at 57600 baud (SD 112μs) +# and better still at 10Hz update rate (SD 34μs). Why?? +# Unsure why. Setting of .FULL_CHECK has no effect (as expected). + +# Callback occurs in interrupt context +us_acquired = None + + +def us_cb(my_gps, tick, led): + global us_acquired # Time of previous PPS edge in ticks_us() + if us_acquired is not None: + # Trigger Message. Pass time between PPS measured by utime.ticks_us() + tick.set(utime.ticks_diff(my_gps.acquired, us_acquired)) + us_acquired = my_gps.acquired + led.toggle() + + +# Setup initialises with above callback +async def us_setup(tick): + red = pyb.LED(1) + yellow = pyb.LED(3) + uart = pyb.UART(UART_ID, 9600, read_buf_len=200) + sreader = asyncio.StreamReader(uart) + pps_pin = pyb.Pin(PPS_PIN, pyb.Pin.IN) + return GPS_Timer( + sreader, + pps_pin, + local_offset=1, + fix_cb=lambda *_: red.toggle(), + pps_cb=us_cb, + pps_cb_args=(tick, yellow), + ) + + +async def do_usec(minutes): + tick = Message() + print("Setting up GPS.") + gps = await us_setup(tick) + print("Waiting for time data.") + await gps.ready() + max_us = 0 + min_us = 0 + sd = 0 + nsamples = 0 + count = 0 + terminate = asyncio.Event() + asyncio.create_task(killer(terminate, minutes)) + while not terminate.is_set(): + await tick.wait() + usecs = tick.value() + tick.clear() + err = 1000000 - usecs + count += 1 + print("Timing discrepancy is {:4d}μs {}".format(err, "(skipped)" if count < 3 else 
"")) + if count < 3: # Discard 1st two samples from statistics + continue # as these can be unrepresentative + max_us = max(max_us, err) + min_us = min(min_us, err) + sd += err * err + nsamples += 1 + # SD: apply Bessel's correction for infinite population + sd = int(math.sqrt(sd / (nsamples - 1))) + print( + "Timing discrepancy is: {:5d}μs max {:5d}μs min. Standard deviation {:4d}μs".format( + max_us, min_us, sd + ) + ) + gps.close() + + +def usec(minutes=1): + asyncio.run(do_usec(minutes)) diff --git a/v3/as_drivers/as_GPS/as_GPS_utils.py b/v3/as_drivers/as_GPS/as_GPS_utils.py new file mode 100644 index 0000000..6993baf --- /dev/null +++ b/v3/as_drivers/as_GPS/as_GPS_utils.py @@ -0,0 +1,48 @@ +# as_GPS_utils.py Extra functionality for as_GPS.py +# Put in separate file to minimise size of as_GPS.py for resource constrained +# systems. + +# Copyright (c) 2018 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file +from .as_GPS import MDY, DMY, LONG + +_DIRECTIONS = ('N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE', 'S', 'SSW', + 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW') + +def compass_direction(gps): # Return cardinal point as string. 
+ # Calculate the offset for a rotated compass + if gps.course >= 348.75: + offset_course = 360 - gps.course + else: + offset_course = gps.course + 11.25 + # Each compass point is separated by 22.5°, divide to find lookup value + return _DIRECTIONS[int(offset_course // 22.5)] + +_MONTHS = ('January', 'February', 'March', 'April', 'May', + 'June', 'July', 'August', 'September', 'October', + 'November', 'December') + +def date_string(gps, formatting=MDY): + day, month, year = gps.date + # Long Format January 1st, 2014 + if formatting == LONG: + dform = '{:s} {:2d}{:s}, 20{:2d}' + # Retrieve Month string from private set + month = _MONTHS[month - 1] + # Determine Date Suffix + if day in (1, 21, 31): + suffix = 'st' + elif day in (2, 22): + suffix = 'nd' + elif day in (3, 23): + suffix = 'rd' + else: + suffix = 'th' + return dform.format(month, day, suffix, year) + + dform = '{:02d}/{:02d}/{:02d}' + if formatting == DMY: + return dform.format(day, month, year) + elif formatting == MDY: # Default date format + return dform.format(month, day, year) + raise ValueError('Unknown date format.') diff --git a/v3/as_drivers/as_GPS/as_rwGPS.py b/v3/as_drivers/as_GPS/as_rwGPS.py new file mode 100644 index 0000000..d93ec71 --- /dev/null +++ b/v3/as_drivers/as_GPS/as_rwGPS.py @@ -0,0 +1,118 @@ +# as_rwGPS.py Asynchronous device driver for GPS devices using a UART. +# Supports a limited subset of the PMTK command packets employed by the +# widely used MTK3329/MTK3339 chip. 
+# Sentence parsing based on MicropyGPS by Michael Calvin McCoy +# https://github.com/inmcm/micropyGPS + +# Copyright (c) 2018 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +import as_drivers.as_GPS as as_GPS +try: + from micropython import const +except ImportError: + const = lambda x : x + +HOT_START = const(1) +WARM_START = const(2) +COLD_START = const(3) +FULL_COLD_START = const(4) +STANDBY = const(5) +DEFAULT_SENTENCES = const(6) +VERSION = const(7) +ENABLE = const(8) +ANTENNA = const(9) +NO_ANTENNA = const(10) + +# Return CRC of a bytearray. +def _crc(sentence): + x = 1 + crc = 0 + while sentence[x] != ord('*'): + crc ^= sentence[x] + x += 1 + return crc # integer + + +class GPS(as_GPS.AS_GPS): + fixed_commands = {HOT_START: b'$PMTK101*32\r\n', + WARM_START: b'$PMTK102*31\r\n', + COLD_START: b'$PMTK103*30\r\n', + FULL_COLD_START: b'$PMTK104*37\r\n', + STANDBY: b'$PMTK161,0*28\r\n', + DEFAULT_SENTENCES: b'$PMTK314,-1*04\r\n', + VERSION: b'$PMTK605*31\r\n', + ENABLE: b'$PMTK414*33\r\n', + ANTENNA: b'$PGCMD,33,1*6C', + NO_ANTENNA: b'$PGCMD,33,0*6D', + } + + def __init__(self, sreader, swriter, local_offset=0, + fix_cb=lambda *_ : None, cb_mask=as_GPS.RMC, fix_cb_args=(), + msg_cb=lambda *_ : None, msg_cb_args=()): + super().__init__(sreader, local_offset, fix_cb, cb_mask, fix_cb_args) + self._swriter = swriter + self.version = None # Response to VERSION query + self.enabled = None # Response to ENABLE query + self.antenna = 0 # Response to ANTENNA. 
+ self._msg_cb = msg_cb + self._msg_cb_args = msg_cb_args + + async def _send(self, sentence): + # Create a bytes object containing hex CRC + bcrc = '{:2x}'.format(_crc(sentence)).encode() + sentence[-4] = bcrc[0] # Fix up CRC bytes + sentence[-3] = bcrc[1] + await self._swriter.awrite(sentence) + + async def baudrate(self, value=9600): + if value not in (4800,9600,14400,19200,38400,57600,115200): + raise ValueError('Invalid baudrate {:d}.'.format(value)) + + sentence = bytearray('$PMTK251,{:d}*00\r\n'.format(value).encode()) + await self._send(sentence) + + async def update_interval(self, ms=1000): + if ms < 100 or ms > 10000: + raise ValueError('Invalid update interval {:d}ms.'.format(ms)) + sentence = bytearray('$PMTK220,{:d}*00\r\n'.format(ms).encode()) + await self._send(sentence) + self._update_ms = ms # Save for timing driver + + async def enable(self, *, gll=0, rmc=1, vtg=1, gga=1, gsa=1, gsv=5, chan=0): + fstr = '$PMTK314,{:d},{:d},{:d},{:d},{:d},{:d},0,0,0,0,0,0,0,0,0,0,0,0,{:d}*00\r\n' + sentence = bytearray(fstr.format(gll, rmc, vtg, gga, gsa, gsv, chan).encode()) + await self._send(sentence) + + async def command(self, cmd): + if cmd not in self.fixed_commands: + raise ValueError('Invalid command {:s}.'.format(cmd)) + await self._swriter.awrite(self.fixed_commands[cmd]) + + # Should get 705 from VERSION 514 from ENABLE + def parse(self, segs): + if segs[0] == 'PMTK705': # Version response + self.version = segs[1:] + segs[0] = 'version' + self._msg_cb(self, segs, *self._msg_cb_args) + return True + + if segs[0] == 'PMTK514': + print('enabled segs', segs) + self.enabled = {'gll': segs[1], 'rmc': segs[2], 'vtg': segs[3], + 'gga': segs[4], 'gsa': segs[5], 'gsv': segs[6], + 'chan': segs[19]} + segs = ['enabled', self.enabled] + self._msg_cb(self, segs, *self._msg_cb_args) + return True + + if segs[0] == 'PGTOP': + self.antenna = segs[2] + segs = ['antenna', self.antenna] + self._msg_cb(self, segs, *self._msg_cb_args) + return True + + if segs[0][:4] == 
'PMTK': + self._msg_cb(self, segs, *self._msg_cb_args) + return True + return False diff --git a/v3/as_drivers/as_GPS/as_rwGPS_time.py b/v3/as_drivers/as_GPS/as_rwGPS_time.py new file mode 100644 index 0000000..5cf94cd --- /dev/null +++ b/v3/as_drivers/as_GPS/as_rwGPS_time.py @@ -0,0 +1,261 @@ +# as_rwGPS_time.py Test scripts for as_tGPS read-write driver. +# Using GPS for precision timing and for calibrating Pyboard RTC +# This is STM-specific: requires pyb module. + +# Copyright (c) 2018-2020 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +# See README.md notes re setting baudrates. In particular 9600 does not work. +# So these tests issue a factory reset on completion to restore the baudrate. + +# String sent for 9600: $PMTK251,9600*17\r\n +# Data has (for 38400): $PMTK251,38400*27 +# Sending: $PMTK251,38400*27\r\n' + +import asyncio +from uasyncio import Event +from threadsafe.message import Message +import pyb +import utime +import math +from .as_tGPS import GPS_RWTimer +from .as_rwGPS import FULL_COLD_START + +# Hardware assumptions. Change as required. +PPS_PIN = pyb.Pin.board.X3 +UART_ID = 4 + +BAUDRATE = 57600 +UPDATE_INTERVAL = 100 +READ_BUF_LEN = 200 + +print("Available tests:") +print("calibrate(minutes=5) Set and calibrate the RTC.") +print("drift(minutes=5) Repeatedly print the difference between RTC and GPS time.") +print("time(minutes=1) Print get_ms() and get_t_split values.") +print("usec(minutes=1) Measure accuracy of usec timer.") +print("Press ctrl-d to reboot after each test.") + +# Initially use factory baudrate +uart = pyb.UART(UART_ID, 9600, read_buf_len=READ_BUF_LEN) + + +async def shutdown(): + global gps + # Normally UART is already at BAUDRATE. But if last session didn't restore + # factory baudrate we can restore connectivity in the subsequent stuck + # session with ctrl-c. 
+ uart.init(BAUDRATE) + await asyncio.sleep(0.5) + await gps.command(FULL_COLD_START) + print("Factory reset") + gps.close() # Stop ISR + # print('Restoring default baudrate (9600).') + # await gps.baudrate(9600) + # uart.init(9600) + # gps.close() # Stop ISR + # print('Restoring default 1s update rate.') + # await asyncio.sleep(0.5) + # await gps.update_interval(1000) # 1s update rate + # print('Restoring satellite data.') + # await gps.command(as_rwGPS.DEFAULT_SENTENCES) # Restore satellite data + + +# Setup for tests. Red LED toggles on fix, blue on PPS interrupt. +async def setup(): + global uart, gps # For shutdown + red = pyb.LED(1) + blue = pyb.LED(4) + sreader = asyncio.StreamReader(uart) + swriter = asyncio.StreamWriter(uart, {}) + pps_pin = pyb.Pin(PPS_PIN, pyb.Pin.IN) + gps = GPS_RWTimer( + sreader, + swriter, + pps_pin, + local_offset=1, + fix_cb=lambda *_: red.toggle(), + pps_cb=lambda *_: blue.toggle(), + ) + gps.FULL_CHECK = False + await asyncio.sleep(2) + await gps.baudrate(BAUDRATE) + uart.init(BAUDRATE) + await asyncio.sleep(1) + await gps.enable(gsa=0, gsv=0) # Disable satellite data + await gps.update_interval(UPDATE_INTERVAL) + pstr = "Baudrate {} update interval {}ms satellite messages disabled." + print(pstr.format(BAUDRATE, UPDATE_INTERVAL)) + return gps + + +# Test terminator: task sets the passed event after the passed time. 
+async def killer(end_event, minutes): + print("Will run for {} minutes.".format(minutes)) + await asyncio.sleep(minutes * 60) + end_event.set() + + +# ******** Calibrate and set the Pyboard RTC ******** +async def do_cal(minutes): + gps = await setup() + await gps.calibrate(minutes) + + +def calibrate(minutes=5): + try: + asyncio.run(do_cal(minutes)) + finally: + asyncio.run(shutdown()) + + +# ******** Drift test ******** +# Every 10s print the difference between GPS time and RTC time +async def drift_test(terminate, gps): + dstart = await gps.delta() + while not terminate.is_set(): + dt = await gps.delta() + print("{} Delta {}μs".format(gps.time_string(), dt)) + await asyncio.sleep(10) + return dt - dstart + + +async def do_drift(minutes): + global gps + print("Setting up GPS.") + gps = await setup() + print("Waiting for time data.") + await gps.ready() + print("Setting RTC.") + await gps.set_rtc() + print("Measuring drift.") + terminate = Event() + asyncio.create_task(killer(terminate, minutes)) + change = await drift_test(terminate, gps) + ush = int(60 * change / minutes) + spa = int(ush * 365 * 24 / 1000000) + print("Rate of change {}μs/hr {}secs/year".format(ush, spa)) + + +def drift(minutes=5): + try: + asyncio.run(do_drift(minutes)) + finally: + asyncio.run(shutdown()) + + +# ******** Time printing demo ******** +# Every 10s print the difference between GPS time and RTC time +async def do_time(minutes): + global gps + fstr = "{}ms Time: {:02d}:{:02d}:{:02d}:{:06d}" + print("Setting up GPS.") + gps = await setup() + print("Waiting for time data.") + await gps.ready() + print("Setting RTC.") + await gps.set_rtc() + print("RTC is set.") + terminate = Event() + asyncio.create_task(killer(terminate, minutes)) + while not terminate.is_set(): + await asyncio.sleep(1) + # In a precision app, get the time list without allocation: + t = gps.get_t_split() + print(fstr.format(gps.get_ms(), t[0], t[1], t[2], t[3])) + + +def time(minutes=1): + try: + 
asyncio.run(do_time(minutes)) + finally: + asyncio.run(shutdown()) + + +# ******** Measure accracy of μs clock ******** +# Test produces better numbers at 57600 baud (SD 112μs) +# and better still at 10Hz update rate (SD 34μs). +# Unsure why. + +# Callback occurs in interrupt context +us_acquired = None # Time of previous PPS edge in ticks_us() + + +def us_cb(my_gps, tick, led): + global us_acquired + if us_acquired is not None: + # Trigger event. Pass time between PPS measured by utime.ticks_us() + tick.set(utime.ticks_diff(my_gps.acquired, us_acquired)) + us_acquired = my_gps.acquired + led.toggle() + + +# Setup initialises with above callback +async def us_setup(tick): + global uart, gps # For shutdown + red = pyb.LED(1) + blue = pyb.LED(4) + sreader = asyncio.StreamReader(uart) + swriter = asyncio.StreamWriter(uart, {}) + pps_pin = pyb.Pin(PPS_PIN, pyb.Pin.IN) + gps = GPS_RWTimer( + sreader, + swriter, + pps_pin, + local_offset=1, + fix_cb=lambda *_: red.toggle(), + pps_cb=us_cb, + pps_cb_args=(tick, blue), + ) + gps.FULL_CHECK = False + await asyncio.sleep(2) + await gps.baudrate(BAUDRATE) + uart.init(BAUDRATE) + await asyncio.sleep(1) + await gps.enable(gsa=0, gsv=0) # Disable satellite data + await gps.update_interval(UPDATE_INTERVAL) + pstr = "Baudrate {} update interval {}ms satellite messages disabled." 
+ print(pstr.format(BAUDRATE, UPDATE_INTERVAL)) + + +async def do_usec(minutes): + global gps + tick = Message() + print("Setting up GPS.") + await us_setup(tick) + print("Waiting for time data.") + await gps.ready() + max_us = 0 + min_us = 0 + sd = 0 + nsamples = 0 + count = 0 + terminate = Event() + asyncio.create_task(killer(terminate, minutes)) + while not terminate.is_set(): + await tick + usecs = tick.value() + tick.clear() + err = 1000000 - usecs + count += 1 + print("Timing discrepancy is {:4d}μs {}".format(err, "(skipped)" if count < 3 else "")) + if count < 3: # Discard 1st two samples from statistics + continue # as these can be unrepresentative + max_us = max(max_us, err) + min_us = min(min_us, err) + sd += err * err + nsamples += 1 + # SD: apply Bessel's correction for infinite population + sd = int(math.sqrt(sd / (nsamples - 1))) + print( + "Timing discrepancy is: {:5d}μs max {:5d}μs min. Standard deviation {:4d}μs".format( + max_us, min_us, sd + ) + ) + + +def usec(minutes=1): + try: + asyncio.run(do_usec(minutes)) + finally: + asyncio.run(shutdown()) diff --git a/v3/as_drivers/as_GPS/as_tGPS.py b/v3/as_drivers/as_GPS/as_tGPS.py new file mode 100644 index 0000000..78bebf3 --- /dev/null +++ b/v3/as_drivers/as_GPS/as_tGPS.py @@ -0,0 +1,263 @@ +# as_tGPS.py Using GPS for precision timing and for calibrating Pyboard RTC + +# Copyright (c) 2018-2020 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file +# TODO Test machine version. Replace LED with callback. Update tests and doc. + +import asyncio +import machine + +try: + import pyb + + on_pyboard = True + rtc = pyb.RTC() +except ImportError: + on_pyboard = False +import utime +import micropython +import gc +from .as_GPS import RMC, AS_GPS +from .as_rwGPS import GPS + +micropython.alloc_emergency_exception_buf(100) + +# Convenience function. 
Return RTC seconds since midnight as float +def rtc_secs(): + if not on_pyboard: + raise OSError("Only available on STM targets.") + dt = rtc.datetime() + return 3600 * dt[4] + 60 * dt[5] + dt[6] + (255 - dt[7]) / 256 + + +# Constructor for GPS_Timer class +def gps_ro_t_init( + self, + sreader, + pps_pin, + local_offset=0, + fix_cb=lambda *_: None, + cb_mask=RMC, + fix_cb_args=(), + pps_cb=lambda *_: None, + pps_cb_args=(), +): + AS_GPS.__init__(self, sreader, local_offset, fix_cb, cb_mask, fix_cb_args) + self.setup(pps_pin, pps_cb, pps_cb_args) + + +# Constructor for GPS_RWTimer class +def gps_rw_t_init( + self, + sreader, + swriter, + pps_pin, + local_offset=0, + fix_cb=lambda *_: None, + cb_mask=RMC, + fix_cb_args=(), + msg_cb=lambda *_: None, + msg_cb_args=(), + pps_cb=lambda *_: None, + pps_cb_args=(), +): + GPS.__init__( + self, sreader, swriter, local_offset, fix_cb, cb_mask, fix_cb_args, msg_cb, msg_cb_args + ) + self.setup(pps_pin, pps_cb, pps_cb_args) + + +class GPS_Tbase: + def setup(self, pps_pin, pps_cb, pps_cb_args): + self._pps_pin = pps_pin + self._pps_cb = pps_cb + self._pps_cb_args = pps_cb_args + self.msecs = None # Integer time in ms since midnight at last PPS + self.t_ms = 0 # ms since midnight + self.acquired = None # Value of ticks_us at edge of PPS + self._rtc_set = False # Set RTC flag + self._rtcbuf = [0] * 8 # Buffer for RTC setting + self._time = [0] * 4 # get_t_split() time buffer. + asyncio.create_task(self._start()) + + async def _start(self): + await self.data_received(date=True) + self._pps_pin.irq(self._isr, trigger=machine.Pin.IRQ_RISING) + + def close(self): + self._pps_pin.irq(None) + + # If update rate > 1Hz, when PPS edge occurs the last RMC message will have + # a nonzero ms value. Need to set RTC to 1 sec after the last 1 second boundary + def _isr(self, _): + acquired = utime.ticks_us() # Save time of PPS + # Time in last NMEA sentence was time of last PPS. + # Reduce to integer secs since midnight local time. 
+ isecs = (self.epoch_time + int(3600 * self.local_offset)) % 86400 + # ms since midnight (28 bits). Add in any ms in RMC data + msecs = isecs * 1000 + self.msecs + # This PPS is presumed to be one update later + msecs += self._update_ms + if msecs >= 86400000: # Next PPS will deal with rollover + return + if self.t_ms == msecs: # No RMC message has arrived: nothing to do + return + self.t_ms = msecs # Current time in ms past midnight + self.acquired = acquired + # Set RTC if required and if last RMC indicated a 1 second boundary + if self._rtc_set: + # Time as int(seconds) in last NMEA sentence. Earlier test ensures + # no rollover when we add 1. + self._rtcbuf[6] = (isecs + 1) % 60 + rtc.datetime(self._rtcbuf) + self._rtc_set = False + # Could be an outage here, so PPS arrives many secs after last sentence + # Is this right? Does PPS continue during outage? + self._pps_cb(self, *self._pps_cb_args) + + # Called when base class updates the epoch_time. + # Need local time for setting Pyboard RTC in interrupt context + def _dtset(self, wday): + t = self.epoch_time + int(3600 * self.local_offset) + y, m, d, hrs, mins, secs, *_ = self._localtime(t) + self._rtcbuf[0] = y + self._rtcbuf[1] = m + self._rtcbuf[2] = d + self._rtcbuf[3] = wday + self._rtcbuf[4] = hrs + self._rtcbuf[5] = mins + self._rtcbuf[6] = secs + + # Subsecs register is read-only. So need to set RTC on PPS leading edge. + # Set flag and let ISR set the RTC. Pause until done. + async def set_rtc(self): + if not on_pyboard: + raise OSError("Only available on STM targets.") + self._rtc_set = True + while self._rtc_set: + await asyncio.sleep_ms(250) + + # Value of RTC time at current instant. This is a notional arbitrary + # precision integer in μs since Y2K. Notional because RTC is set to + # local time. 
    # Read the RTC and return a notional arbitrary-precision integer count of
    # μs since Y2K ("notional" because the RTC holds local time). The
    # subseconds register counts down from 255 in 1/256 s units, hence the
    # (255 - subsecs) correction, scaled to μs by the >> 8.
    def _get_rtc_usecs(self):
        y, m, d, weekday, hrs, mins, secs, subsecs = rtc.datetime()
        # mktime expects weekday 0..6; the RTC supplies 1..7.
        tim = 1000000 * utime.mktime((y, m, d, hrs, mins, secs, weekday - 1, 0))
        return tim + ((1000000 * (255 - subsecs)) >> 8)

    # Return no. of μs RTC leads GPS. Done by comparing times at the instant of
    # PPS leading edge.
    async def delta(self):
        if not on_pyboard:
            raise OSError("Only available on STM targets.")
        rtc_time, gps_time = await self._await_pps()  # μs since Y2K at time of latest PPS
        return rtc_time - gps_time

    # Pause until PPS interrupt occurs. Then wait for an RTC subsecond change.
    # Read the RTC time in μs since Y2K and adjust to give the time the RTC
    # (notionally) would have read at the PPS leading edge.
    async def _await_pps(self):
        t0 = self.acquired
        while self.acquired == t0:  # Busy-wait on PPS interrupt: not time-critical
            await asyncio.sleep_ms(0)  # because acquisition time stored in ISR.
        gc.collect()  # Time-critical code follows
        st = rtc.datetime()[7]
        while rtc.datetime()[7] == st:  # Wait for RTC to change (4ms max)
            pass
        # μs elapsed since the ISR timestamped the PPS edge.
        dt = utime.ticks_diff(utime.ticks_us(), self.acquired)
        trtc = self._get_rtc_usecs() - dt  # Read RTC now and adjust for PPS edge
        # GPS time at the PPS edge: last sentence's local time plus one second.
        tgps = 1000000 * (self.epoch_time + 3600 * self.local_offset + 1)
        return trtc, tgps

    # Non-realtime calculation of calibration factor. times are in μs
    def _calculate(self, gps_start, gps_end, rtc_start, rtc_end):
        # Duration (μs) between PPS edges
        pps_delta = gps_end - gps_start
        # Duration (μs) between PPS edges as measured by RTC and corrected
        rtc_delta = rtc_end - rtc_start
        ppm = (1000000 * (rtc_delta - pps_delta)) / pps_delta  # parts per million
        # Negated and divided by 0.954 to yield an RTC calibration value.
        # NOTE(review): 0.954 presumably matches the STM32 RTC calibration
        # LSB in ppm — confirm against the datasheet.
        return int(-ppm / 0.954)

    # Measure difference between RTC and GPS rate and return calibration factor
    # If 3 successive identical results are within 1 digit the outcome is considered
    # valid and the coro quits.
    async def _getcal(self, minutes=5):
        # Repeatedly measure the RTC-vs-GPS rate error, returning early once
        # the last three measurements agree to within one digit.
        if minutes < 1:
            raise ValueError("minutes must be >= 1")
        results = [0, 0, 0]  # Last 3 cal results
        idx = 0  # Index into above circular buffer
        nresults = 0  # Count of results
        rtc.calibration(0)  # Clear existing RTC calibration
        await self.set_rtc()
        # Wait for PPS, then RTC 1/256 second change. Return the time the RTC
        # would have measured at instant of PPS (notional μs since Y2K). Also
        # GPS time at the same instant.
        rtc_start, gps_start = await self._await_pps()
        for n in range(minutes):
            for _ in range(6):  # Try every 10s
                await asyncio.sleep(10)
                # Get RTC time at instant of PPS
                rtc_end, gps_end = await self._await_pps()
                cal = self._calculate(gps_start, gps_end, rtc_start, rtc_end)
                print("Mins {:d} cal factor {:d}".format(n + 1, cal))
                results[idx] = cal
                idx += 1
                idx %= len(results)
                nresults += 1
                # nresults >= 4 ensures the buffer's initial zeros have been
                # cycled out before convergence is tested.
                if nresults >= 4 and (abs(max(results) - min(results)) <= 1):
                    return round(sum(results) / len(results))
        # No convergence within the time limit: return the last measurement.
        return cal

    # Pause until time/date message received and 1st PPS interrupt has occurred.
    async def ready(self):
        while self.acquired is None:
            await asyncio.sleep(1)

    async def calibrate(self, minutes=5):
        # Acquire a calibration factor and, if it is in range, apply it to
        # the Pyboard RTC. Progress is reported on the console.
        if not on_pyboard:
            raise OSError("Only available on STM targets.")
        print("Waiting for GPS startup.")
        await self.ready()
        print("Waiting up to {} minutes to acquire calibration factor...".format(minutes))
        cal = await self._getcal(minutes)
        # Range check mirrors the RTC calibration register limits.
        if cal <= 512 and cal >= -511:
            rtc.calibration(cal)
            print("Pyboard RTC is calibrated. Factor is {:d}.".format(cal))
        else:
            print("Calibration factor {:d} is out of range.".format(cal))

    # User interface functions: accurate GPS time.
    # Return GPS time in ms since midnight (small int on 32 bit h/w).
    # No allocation.
    def get_ms(self):
        # Snapshot shared state atomically with respect to the PPS ISR.
        state = machine.disable_irq()
        t = self.t_ms
        acquired = self.acquired
        machine.enable_irq(state)
        # Time at last PPS plus μs elapsed since, converted to ms.
        return t + utime.ticks_diff(utime.ticks_us(), acquired) // 1000

    # Return accurate GPS time of day (hrs: int, mins: int, secs: int, μs: int)
    # The ISR can skip an update of .secs if a day rollover would occur. Next
    # RMC handles this, so if updates are at 1s intervals the subsequent ISR
    # will see hms = 0, 0, 1 and a value of .acquired > 1000000.
    # Even at the slowest update rate of 10s this can't overflow into minutes.
    def get_t_split(self):
        # Snapshot shared state atomically with respect to the PPS ISR.
        state = machine.disable_irq()
        t = self.t_ms
        acquired = self.acquired
        machine.enable_irq(state)
        isecs, ims = divmod(t, 1000)  # Get integer secs and ms
        x, secs = divmod(isecs, 60)
        hrs, mins = divmod(x, 60)
        dt = utime.ticks_diff(utime.ticks_us(), acquired)  # μs to time now
        ds, us = divmod(dt, 1000000)
        # If dt > 1e6 can add to secs without risk of rollover: see above.
        # Results go into a pre-allocated buffer: no allocation here.
        self._time[0] = hrs
        self._time[1] = mins
        self._time[2] = secs + ds
        self._time[3] = us + ims * 1000
        return self._time


# Build the user classes by combining the timing mixin with the read-only
# (AS_GPS) and read/write (GPS) drivers respectively.
GPS_Timer = type("GPS_Timer", (GPS_Tbase, AS_GPS), {"__init__": gps_ro_t_init})
GPS_RWTimer = type("GPS_RWTimer", (GPS_Tbase, GPS), {"__init__": gps_rw_t_init})

# ast_pb.py
# Basic test/demo of AS_GPS class (asynchronous GPS device driver)
# Runs on a Pyboard with GPS data on pin X2.
# Copyright (c) Peter Hinch 2018-2020
# Released under the MIT License (MIT) - see LICENSE file
# Test asynchronous GPS device driver as_GPS

import pyb
import asyncio
from primitives.delay_ms import Delay_ms
from .as_GPS import DD, MPH, LONG, AS_GPS

red = pyb.LED(1)
green = pyb.LED(2)
ntimeouts = 0  # Count of fix outages


def callback(gps, _, timer):
    # Runs on every fix: flash red, show green while data is arriving and
    # retrigger the outage timer.
    red.toggle()
    green.on()
    timer.trigger(10000)


def timeout():
    # Outage: timer expired with no fix for 10s.
    global ntimeouts
    green.off()
    ntimeouts += 1


# Print satellite data every 10s
async def sat_test(gps):
    while True:
        d = await gps.get_satellite_data()
        print("***** SATELLITE DATA *****")
        for i in d:
            print(i, d[i])
        print()
        await asyncio.sleep(10)


# Print statistics every 30s
async def stats(gps):
    while True:
        await asyncio.sleep(30)
        print("***** STATISTICS *****")
        print("Outages:", ntimeouts)
        print("Sentences Found:", gps.clean_sentences)
        print("Sentences Parsed:", gps.parsed_sentences)
        print("CRC_Fails:", gps.crc_fails)
        print()


# Print navigation data every 4s
async def navigation(gps):
    while True:
        await asyncio.sleep(4)
        await gps.data_received(position=True)
        print("***** NAVIGATION DATA *****")
        print("Data is Valid:", gps._valid)
        print("Longitude:", gps.longitude(DD))
        print("Latitude", gps.latitude(DD))
        print()


# Print course data every 4s
async def course(gps):
    while True:
        await asyncio.sleep(4)
        await gps.data_received(course=True)
        print("***** COURSE DATA *****")
        print("Data is Valid:", gps._valid)
        print("Speed:", gps.speed_string(MPH))
        print("Course", gps.course)
        print("Compass Direction:", gps.compass_direction())
        print()


# Print date and time every 4s
async def date(gps):
    while True:
        await asyncio.sleep(4)
        await gps.data_received(date=True)
        print("***** DATE AND TIME *****")
        print("Data is Valid:", gps._valid)
        print("UTC time:", gps.utc)
        print("Local time:", gps.local_time)
        print("Date:", gps.date_string(LONG))
        print()


async def gps_test():
    # Instantiate the driver and run all demo coroutines concurrently.
    print("Initialising")
    # Adapt for other MicroPython hardware
    uart = pyb.UART(4, 9600, read_buf_len=200)
    # read_buf_len is precautionary: code runs reliably without it.
    sreader = asyncio.StreamReader(uart)
    timer = Delay_ms(timeout)
    gps = AS_GPS(sreader, local_offset=1, fix_cb=callback, fix_cb_args=(timer,))
    print("awaiting first fix")
    asyncio.create_task(sat_test(gps))
    asyncio.create_task(stats(gps))
    asyncio.create_task(navigation(gps))
    asyncio.create_task(course(gps))
    await date(gps)  # Keeps the script alive: never terminates


asyncio.run(gps_test())

# ast_pbrw.py
# Basic test/demo of AS_GPS class (asynchronous GPS device driver)
# Runs on a Pyboard with GPS data on pin X2.
# Copyright (c) Peter Hinch 2018-2020
# Released under the MIT License (MIT) - see LICENSE file
# Test asynchronous GPS device driver as_rwGPS

# LED's:
# Green indicates data is being received.
# Red toggles on RMC message received.
# Yellow: coroutine has 4s loop delay.
# Yellow toggles on position reading.

import pyb
import asyncio
from primitives.delay_ms import Delay_ms
from .as_GPS import DD, LONG, MPH
from .as_rwGPS import *

# Avoid multiple baudrates. Tests use 9600 or 19200 only.
+BAUDRATE = 19200 +red, green, yellow = pyb.LED(1), pyb.LED(2), pyb.LED(3) +ntimeouts = 0 + + +def callback(gps, _, timer): + red.toggle() + green.on() + timer.trigger(10000) # Outage is declared after 10s + + +def cb_timeout(): + global ntimeouts + green.off() + ntimeouts += 1 + + +def message_cb(gps, segs): + print("Message received:", segs) + + +# Print satellite data every 10s +async def sat_test(gps): + while True: + d = await gps.get_satellite_data() + print("***** SATELLITE DATA *****") + print("Data is Valid:", hex(gps._valid)) + for i in d: + print(i, d[i]) + print() + await asyncio.sleep(10) + + +# Print statistics every 30s +async def stats(gps): + while True: + await gps.data_received(position=True) # Wait for a valid fix + await asyncio.sleep(30) + print("***** STATISTICS *****") + print("Outages:", ntimeouts) + print("Sentences Found:", gps.clean_sentences) + print("Sentences Parsed:", gps.parsed_sentences) + print("CRC_Fails:", gps.crc_fails) + print("Antenna status:", gps.antenna) + print("Firmware version:", gps.version) + print("Enabled sentences:", gps.enabled) + print() + + +# Print navigation data every 4s +async def navigation(gps): + while True: + await asyncio.sleep(4) + await gps.data_received(position=True) + yellow.toggle() + print("***** NAVIGATION DATA *****") + print("Data is Valid:", hex(gps._valid)) + print("Longitude:", gps.longitude(DD)) + print("Latitude", gps.latitude(DD)) + print() + + +async def course(gps): + while True: + await asyncio.sleep(4) + await gps.data_received(course=True) + print("***** COURSE DATA *****") + print("Data is Valid:", hex(gps._valid)) + print("Speed:", gps.speed_string(MPH)) + print("Course", gps.course) + print("Compass Direction:", gps.compass_direction()) + print() + + +async def date(gps): + while True: + await asyncio.sleep(4) + await gps.data_received(date=True) + print("***** DATE AND TIME *****") + print("Data is Valid:", hex(gps._valid)) + print("UTC Time:", gps.utc) + print("Local time:", 
gps.local_time) + print("Date:", gps.date_string(LONG)) + print() + + +async def change_status(gps, uart): + await asyncio.sleep(10) + print("***** Changing status. *****") + await gps.baudrate(BAUDRATE) + uart.init(BAUDRATE) + print("***** baudrate 19200 *****") + await asyncio.sleep(5) # Ensure baudrate is sorted + print("***** Query VERSION *****") + await gps.command(VERSION) + await asyncio.sleep(10) + print("***** Query ENABLE *****") + await gps.command(ENABLE) + await asyncio.sleep(10) # Allow time for 1st report + await gps.update_interval(2000) + print("***** Update interval 2s *****") + await asyncio.sleep(10) + await gps.enable(gsv=False, chan=False) + print("***** Disable satellite in view and channel messages *****") + await asyncio.sleep(10) + print("***** Query ENABLE *****") + await gps.command(ENABLE) + + +# See README.md re antenna commands +# await asyncio.sleep(10) +# await gps.command(ANTENNA) +# print('***** Antenna reports requested *****') +# await asyncio.sleep(60) +# await gps.command(NO_ANTENNA) +# print('***** Antenna reports turned off *****') +# await asyncio.sleep(10) + + +async def gps_test(): + global gps, uart # For shutdown + print("Initialising") + # Adapt UART instantiation for other MicroPython hardware + uart = pyb.UART(4, 9600, read_buf_len=200) + # read_buf_len is precautionary: code runs reliably without it. 
+ sreader = asyncio.StreamReader(uart) + swriter = asyncio.StreamWriter(uart, {}) + timer = Delay_ms(cb_timeout) + sentence_count = 0 + gps = GPS( + sreader, swriter, local_offset=1, fix_cb=callback, fix_cb_args=(timer,), msg_cb=message_cb + ) + await asyncio.sleep(2) + await gps.command(DEFAULT_SENTENCES) + print("Set sentence frequencies to default") + # await gps.command(FULL_COLD_START) + # print('Performed FULL_COLD_START') + print("awaiting first fix") + asyncio.create_task(sat_test(gps)) + asyncio.create_task(stats(gps)) + asyncio.create_task(navigation(gps)) + asyncio.create_task(course(gps)) + asyncio.create_task(date(gps)) + await gps.data_received(True, True, True, True) # all messages + await change_status(gps, uart) + + +async def shutdown(): + # Normally UART is already at BAUDRATE. But if last session didn't restore + # factory baudrate we can restore connectivity in the subsequent stuck + # session with ctrl-c. + uart.init(BAUDRATE) + await asyncio.sleep(1) + await gps.command(FULL_COLD_START) + print("Factory reset") + # print('Restoring default baudrate.') + # await gps.baudrate(9600) + + +try: + asyncio.run(gps_test()) +except KeyboardInterrupt: + print("Interrupted") +finally: + asyncio.run(shutdown()) diff --git a/v3/as_drivers/as_GPS/astests.py b/v3/as_drivers/as_GPS/astests.py new file mode 100755 index 0000000..c71d70a --- /dev/null +++ b/v3/as_drivers/as_GPS/astests.py @@ -0,0 +1,193 @@ +#!/usr/bin/env python3.8 +# -*- coding: utf-8 -*- + +# astests.py +# Tests for AS_GPS module (asynchronous GPS device driver) +# Based on tests for MicropyGPS by Michael Calvin McCoy +# https://github.com/inmcm/micropyGPS + +# Copyright (c) 2018 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file +# Run under CPython 3.5+ or MicroPython + +from .as_GPS import * +import asyncio + + +async def run(): + sentence_count = 0 + + test_RMC = [ + "$GPRMC,081836,A,3751.65,S,14507.36,E,000.0,360.0,130998,011.3,E*62\n", + 
"$GPRMC,123519,A,4807.038,N,01131.000,E,022.4,084.4,230394,003.1,W*6A\n", + "$GPRMC,225446,A,4916.45,N,12311.12,W,000.5,054.7,191194,020.3,E*68\n", + "$GPRMC,180041.896,A,3749.1851,N,08338.7891,W,001.9,154.9,240911,,,A*7A\n", + "$GPRMC,180049.896,A,3749.1808,N,08338.7869,W,001.8,156.3,240911,,,A*70\n", + "$GPRMC,092751.000,A,5321.6802,N,00630.3371,W,0.06,31.66,280511,,,A*45\n", + ] + + test_VTG = ["$GPVTG,232.9,T,,M,002.3,N,004.3,K,A*01\n"] + test_GGA = ["$GPGGA,180050.896,3749.1802,N,08338.7865,W,1,07,1.1,397.4,M,-32.5,M,,0000*6C\n"] + test_GSA = [ + "$GPGSA,A,3,07,11,28,24,26,08,17,,,,,,2.0,1.1,1.7*37\n", + "$GPGSA,A,3,07,02,26,27,09,04,15,,,,,,1.8,1.0,1.5*33\n", + ] + test_GSV = [ + "$GPGSV,3,1,12,28,72,355,39,01,52,063,33,17,51,272,44,08,46,184,38*74\n", + "$GPGSV,3,2,12,24,42,058,33,11,34,053,33,07,20,171,40,20,15,116,*71\n", + "$GPGSV,3,3,12,04,12,204,34,27,11,324,35,32,11,089,,26,10,264,40*7B\n", + "$GPGSV,3,1,11,03,03,111,00,04,15,270,00,06,01,010,00,13,06,292,00*74\n", + "$GPGSV,3,2,11,14,25,170,00,16,57,208,39,18,67,296,40,19,40,246,00*74\n", + "$GPGSV,3,3,11,22,42,067,42,24,14,311,43,27,05,244,00,,,,*4D\n", + "$GPGSV,4,1,14,22,81,349,25,14,64,296,22,18,54,114,21,51,40,212,*7D\n", + "$GPGSV,4,2,14,24,30,047,22,04,22,312,26,31,22,204,,12,19,088,23*72\n", + "$GPGSV,4,3,14,25,17,127,18,21,16,175,,11,09,315,16,19,05,273,*72\n", + "$GPGSV,4,4,14,32,05,303,,15,02,073,*7A\n", + ] + test_GLL = [ + "$GPGLL,3711.0942,N,08671.4472,W,000812.000,A,A*46\n", + "$GPGLL,4916.45,N,12311.12,W,225444,A,*1D\n", + "$GPGLL,4250.5589,S,14718.5084,E,092204.999,A*2D\n", + "$GPGLL,0000.0000,N,00000.0000,E,235947.000,V*2D\n", + ] + + my_gps = AS_GPS(None) + sentence = "" + for sentence in test_RMC: + my_gps._valid = 0 + sentence_count += 1 + sentence = await my_gps._update(sentence) + if sentence is None: + print("RMC sentence is invalid.") + else: + print("Parsed a", sentence, "Sentence") + print("Longitude:", my_gps.longitude()) + print("Latitude", my_gps.latitude()) + print("UTC 
Timestamp:", my_gps.utc) + print("Speed:", my_gps.speed()) + print("Date Stamp:", my_gps.date) + print("Course", my_gps.course) + print("Data is Valid:", bool(my_gps._valid & 1)) + print("Compass Direction:", my_gps.compass_direction()) + print("") + + for sentence in test_GLL: + my_gps._valid = 0 + sentence_count += 1 + sentence = await my_gps._update(sentence) + if sentence is None: + print("GLL sentence is invalid.") + else: + print("Parsed a", sentence, "Sentence") + print("Longitude:", my_gps.longitude()) + print("Latitude", my_gps.latitude()) + print("UTC Timestamp:", my_gps.utc) + print("Data is Valid:", bool(my_gps._valid & 2)) + print("") + + for sentence in test_VTG: + my_gps._valid = 0 + sentence_count += 1 + sentence = await my_gps._update(sentence) + if sentence is None: + print("VTG sentence is invalid.") + else: + print("Parsed a", sentence, "Sentence") + print("Speed:", my_gps.speed()) + print("Course", my_gps.course) + print("Compass Direction:", my_gps.compass_direction()) + print("Data is Valid:", bool(my_gps._valid & 4)) + print("") + + for sentence in test_GGA: + my_gps._valid = 0 + sentence_count += 1 + sentence = await my_gps._update(sentence) + if sentence is None: + print("GGA sentence is invalid.") + else: + print("Parsed a", sentence, "Sentence") + print("Longitude", my_gps.longitude()) + print("Latitude", my_gps.latitude()) + print("UTC Timestamp:", my_gps.utc) + print("Altitude:", my_gps.altitude) + print("Height Above Geoid:", my_gps.geoid_height) + print("Horizontal Dilution of Precision:", my_gps.hdop) + print("Satellites in Use by Receiver:", my_gps.satellites_in_use) + print("Data is Valid:", bool(my_gps._valid & 8)) + print("") + + for sentence in test_GSA: + my_gps._valid = 0 + sentence_count += 1 + sentence = await my_gps._update(sentence) + if sentence is None: + print("GSA sentence is invalid.") + else: + print("Parsed a", sentence, "Sentence") + print("Satellites Used", my_gps.satellites_used) + print("Horizontal Dilution of 
Precision:", my_gps.hdop) + print("Vertical Dilution of Precision:", my_gps.vdop) + print("Position Dilution of Precision:", my_gps.pdop) + print("Data is Valid:", bool(my_gps._valid & 16)) + print("") + + for sentence in test_GSV: + my_gps._valid = 0 + sentence_count += 1 + sentence = await my_gps._update(sentence) + if sentence is None: + print("GSV sentence is invalid.") + else: + print("Parsed a", sentence, "Sentence") + print("SV Sentences Parsed", my_gps._last_sv_sentence) + print("SV Sentences in Total", my_gps._total_sv_sentences) + print("# of Satellites in View:", my_gps.satellites_in_view) + print("Data is Valid:", bool(my_gps._valid & 32)) + data_valid = ( + my_gps._total_sv_sentences > 0 + and my_gps._total_sv_sentences == my_gps._last_sv_sentence + ) + print("Is Satellite Data Valid?:", data_valid) + if data_valid: + print("Satellite Data:", my_gps._satellite_data) + print("Satellites Visible:", list(my_gps._satellite_data.keys())) + print("") + + print("Pretty Print Examples:") + print("Latitude (degs):", my_gps.latitude_string(DD)) + print("Longitude (degs):", my_gps.longitude_string(DD)) + print("Latitude (dms):", my_gps.latitude_string(DMS)) + print("Longitude (dms):", my_gps.longitude_string(DMS)) + print("Latitude (kml):", my_gps.latitude_string(KML)) + print("Longitude (kml):", my_gps.longitude_string(KML)) + print("Latitude (degs, mins):", my_gps.latitude_string()) + print("Longitude (degs, mins):", my_gps.longitude_string()) + print( + "Speed:", + my_gps.speed_string(KPH), + "or", + my_gps.speed_string(MPH), + "or", + my_gps.speed_string(KNOT), + ) + print("Date (Long Format):", my_gps.date_string(LONG)) + print("Date (Short D/M/Y Format):", my_gps.date_string(DMY)) + print("Date (Short M/D/Y Format):", my_gps.date_string(MDY)) + print("Time:", my_gps.time_string()) + print() + + print("### Final Results ###") + print("Sentences Attempted:", sentence_count) + print("Sentences Found:", my_gps.clean_sentences) + print("Sentences Parsed:", 
my_gps.parsed_sentences) + print("Unsupported sentences:", my_gps.unsupported_sentences) + print("CRC_Fails:", my_gps.crc_fails) + + +def run_tests(): + asyncio.run(run()) + + +if __name__ == "__main__": + run_tests() diff --git a/v3/as_drivers/as_GPS/astests_pyb.py b/v3/as_drivers/as_GPS/astests_pyb.py new file mode 100755 index 0000000..171714f --- /dev/null +++ b/v3/as_drivers/as_GPS/astests_pyb.py @@ -0,0 +1,170 @@ +# astests_pyb.py + +# Tests for AS_GPS module. Emulates a GPS unit using a UART loopback. +# Run on a Pyboard with X1 and X2 linked +# Tests for AS_GPS module (asynchronous GPS device driver) +# Based on tests for MicropyGPS by Michael Calvin McCoy +# https://github.com/inmcm/micropyGPS + +# Copyright (c) 2018-2020 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +# Ported to uasyncio V3 OK. + +from .as_GPS import * +from machine import UART +import asyncio + + +def callback(gps, _, arg): + print("Fix callback. Time:", gps.utc, arg) + + +async def run_tests(): + uart = UART(4, 9600, read_buf_len=200) + swriter = asyncio.StreamWriter(uart, {}) + sreader = asyncio.StreamReader(uart) + sentence_count = 0 + + test_RMC = [ + "$GPRMC,180041.896,A,3749.1851,N,08338.7891,W,001.9,154.9,240911,,,A*7A\n", + "$GPRMC,180049.896,A,3749.1808,N,08338.7869,W,001.8,156.3,240911,,,A*70\n", + "$GPRMC,092751.000,A,5321.6802,N,00630.3371,W,0.06,31.66,280511,,,A*45\n", + ] + + test_VTG = ["$GPVTG,232.9,T,,M,002.3,N,004.3,K,A*01\n"] + test_GGA = ["$GPGGA,180050.896,3749.1802,N,08338.7865,W,1,07,1.1,397.4,M,-32.5,M,,0000*6C\n"] + test_GSA = [ + "$GPGSA,A,3,07,11,28,24,26,08,17,,,,,,2.0,1.1,1.7*37\n", + "$GPGSA,A,3,07,02,26,27,09,04,15,,,,,,1.8,1.0,1.5*33\n", + ] + test_GSV = [ + "$GPGSV,3,1,12,28,72,355,39,01,52,063,33,17,51,272,44,08,46,184,38*74\n", + "$GPGSV,3,2,12,24,42,058,33,11,34,053,33,07,20,171,40,20,15,116,*71\n", + "$GPGSV,3,3,12,04,12,204,34,27,11,324,35,32,11,089,,26,10,264,40*7B\n", + 
"$GPGSV,3,1,11,03,03,111,00,04,15,270,00,06,01,010,00,13,06,292,00*74\n", + "$GPGSV,3,2,11,14,25,170,00,16,57,208,39,18,67,296,40,19,40,246,00*74\n", + "$GPGSV,3,3,11,22,42,067,42,24,14,311,43,27,05,244,00,,,,*4D\n", + "$GPGSV,4,1,14,22,81,349,25,14,64,296,22,18,54,114,21,51,40,212,*7D\n", + "$GPGSV,4,2,14,24,30,047,22,04,22,312,26,31,22,204,,12,19,088,23*72\n", + "$GPGSV,4,3,14,25,17,127,18,21,16,175,,11,09,315,16,19,05,273,*72\n", + "$GPGSV,4,4,14,32,05,303,,15,02,073,*7A\n", + ] + test_GLL = [ + "$GPGLL,3711.0942,N,08671.4472,W,000812.000,A,A*46\n", + "$GPGLL,4916.45,N,12311.12,W,225444,A,*1D\n", + "$GPGLL,4250.5589,S,14718.5084,E,092204.999,A*2D\n", + "$GPGLL,4250.5589,S,14718.5084,E,092204.999,A*2D\n", + ] + + # '$GPGLL,0000.0000,N,00000.0000,E,235947.000,V*2D\n', # Will ignore this one + + my_gps = AS_GPS(sreader, fix_cb=callback, fix_cb_args=(42,)) + sentence = "" + for sentence in test_RMC: + sentence_count += 1 + await swriter.awrite(sentence) + await my_gps.data_received(date=True) + print("Longitude:", my_gps.longitude()) + print("Latitude", my_gps.latitude()) + print("UTC Time:", my_gps.utc) + print("Speed:", my_gps.speed()) + print("Date Stamp:", my_gps.date) + print("Course", my_gps.course) + print("Data is Valid:", my_gps._valid) + print("Compass Direction:", my_gps.compass_direction()) + print("") + + for sentence in test_GLL: + sentence_count += 1 + await swriter.awrite(sentence) + await my_gps.data_received(position=True) + print("Longitude:", my_gps.longitude()) + print("Latitude", my_gps.latitude()) + print("UTC Time:", my_gps.utc) + print("Data is Valid:", my_gps._valid) + print("") + + for sentence in test_VTG: + print("Test VTG", sentence) + sentence_count += 1 + await swriter.awrite(sentence) + await asyncio.sleep_ms(200) # Can't wait for course because of position check + print("Speed:", my_gps.speed()) + print("Course", my_gps.course) + print("Compass Direction:", my_gps.compass_direction()) + print("") + + for sentence in test_GGA: + 
sentence_count += 1 + await swriter.awrite(sentence) + await my_gps.data_received(position=True) + print("Longitude", my_gps.longitude()) + print("Latitude", my_gps.latitude()) + print("UTC Time:", my_gps.utc) + # print('Fix Status:', my_gps.fix_stat) + print("Altitude:", my_gps.altitude) + print("Height Above Geoid:", my_gps.geoid_height) + print("Horizontal Dilution of Precision:", my_gps.hdop) + print("Satellites in Use by Receiver:", my_gps.satellites_in_use) + print("") + + for sentence in test_GSA: + sentence_count += 1 + await swriter.awrite(sentence) + await asyncio.sleep_ms(200) + print("Satellites Used", my_gps.satellites_used) + print("Horizontal Dilution of Precision:", my_gps.hdop) + print("Vertical Dilution of Precision:", my_gps.vdop) + print("Position Dilution of Precision:", my_gps.pdop) + print("") + + for sentence in test_GSV: + sentence_count += 1 + await swriter.awrite(sentence) + await asyncio.sleep_ms(200) + print("SV Sentences Parsed", my_gps._last_sv_sentence) + print("SV Sentences in Total", my_gps._total_sv_sentences) + print("# of Satellites in View:", my_gps.satellites_in_view) + data_valid = ( + my_gps._total_sv_sentences > 0 + and my_gps._total_sv_sentences == my_gps._last_sv_sentence + ) + print("Is Satellite Data Valid?:", data_valid) + if data_valid: + print("Satellite Data:", my_gps._satellite_data) + print("Satellites Visible:", list(my_gps._satellite_data.keys())) + print("") + + print("Pretty Print Examples:") + print("Latitude (degs):", my_gps.latitude_string(DD)) + print("Longitude (degs):", my_gps.longitude_string(DD)) + print("Latitude (dms):", my_gps.latitude_string(DMS)) + print("Longitude (dms):", my_gps.longitude_string(DMS)) + print("Latitude (kml):", my_gps.latitude_string(KML)) + print("Longitude (kml):", my_gps.longitude_string(KML)) + print("Latitude (degs, mins):", my_gps.latitude_string()) + print("Longitude (degs, mins):", my_gps.longitude_string()) + print( + "Speed:", + my_gps.speed_string(KPH), + "or", + 
my_gps.speed_string(MPH), + "or", + my_gps.speed_string(KNOT), + ) + print("Date (Long Format):", my_gps.date_string(LONG)) + print("Date (Short D/M/Y Format):", my_gps.date_string(DMY)) + print("Date (Short M/D/Y Format):", my_gps.date_string(MDY)) + print("Time:", my_gps.time_string()) + print() + + print("### Final Results ###") + print("Sentences Attempted:", sentence_count) + print("Sentences Found:", my_gps.clean_sentences) + print("Sentences Parsed:", my_gps.parsed_sentences) + print("Unsupported sentences:", my_gps.unsupported_sentences) + print("CRC_Fails:", my_gps.crc_fails) + + +asyncio.run(run_tests()) diff --git a/v3/as_drivers/as_GPS/baud.py b/v3/as_drivers/as_GPS/baud.py new file mode 100644 index 0000000..29852c3 --- /dev/null +++ b/v3/as_drivers/as_GPS/baud.py @@ -0,0 +1,60 @@ +# baud.py Test uasyncio at high baudrate +import pyb +import asyncio +import utime +import as_drivers.as_rwGPS as as_rwGPS + +# Outcome +# Sleep Buffer +# 0 None OK, length limit 74 +# 10 None Bad: length 111 also short weird RMC sentences +# 10 1000 OK, length 74, 37 +# 10 200 Bad: 100, 37 overruns +# 10 400 OK, 74,24 Short GSV sentence looked OK +# 4 200 OK, 74,35 Emulate parse time + +# as_GPS.py +# As written update blocks for 23.5ms parse for 3.8ms max +# with CRC check removed update blocks 17.3ms max +# CRC, bad char and line length removed update blocks 8.1ms max + +# At 10Hz update rate I doubt there's enough time to process the data +BAUDRATE = 115200 +red, green, yellow, blue = pyb.LED(1), pyb.LED(2), pyb.LED(3), pyb.LED(4) + + +async def setup(): + print("Initialising") + uart = pyb.UART(4, 9600) + sreader = asyncio.StreamReader(uart) + swriter = asyncio.StreamWriter(uart, {}) + gps = as_rwGPS.GPS(sreader, swriter, local_offset=1) + await asyncio.sleep(2) + await gps.baudrate(BAUDRATE) + uart.init(BAUDRATE) + + +def setbaud(): + asyncio.run(setup()) + print("Baudrate set to 115200.") + + +async def gps_test(): + print("Initialising") + uart = pyb.UART(4, 
BAUDRATE, read_buf_len=400) + sreader = asyncio.StreamReader(uart) + swriter = asyncio.StreamWriter(uart, {}) + maxlen = 0 + minlen = 100 + while True: + res = await sreader.readline() + l = len(res) + maxlen = max(maxlen, l) + minlen = min(minlen, l) + print(l, maxlen, minlen, res) + red.toggle() + utime.sleep_ms(10) + + +def test(): + asyncio.run(gps_test()) diff --git a/v3/as_drivers/as_GPS/log.kml b/v3/as_drivers/as_GPS/log.kml new file mode 100644 index 0000000..31d1076 --- /dev/null +++ b/v3/as_drivers/as_GPS/log.kml @@ -0,0 +1,128 @@ + + + + +#yellowPoly + +1 +1 +absolute + +-2.102780,53.297553,162.2 +-2.102777,53.297548,164.6 +-2.102772,53.297539,165.4 +-2.102763,53.297534,165.8 +-2.102763,53.297534,165.8 +-2.102763,53.297534,165.8 +-2.102763,53.297534,165.7 +-2.102758,53.297534,165.7 +-2.102750,53.297534,165.7 +-2.102738,53.297524,165.7 +-2.102735,53.297515,165.7 +-2.102733,53.297515,165.7 +-2.102667,53.297505,165.7 +-2.102215,53.297677,165.7 +-2.101582,53.297644,165.7 +-2.101537,53.297944,165.7 +-2.102668,53.298240,165.7 +-2.103305,53.298321,165.7 +-2.104530,53.297915,165.7 +-2.106058,53.297248,165.7 +-2.107628,53.296633,165.7 +-2.108622,53.295879,165.7 +-2.109327,53.295202,165.7 +-2.110145,53.294253,165.7 +-2.110045,53.293753,165.7 +-2.110323,53.293729,165.7 +-2.110578,53.293681,165.7 +-2.110587,53.293648,165.7 +-2.110592,53.293653,165.7 +-2.110593,53.293653,165.7 +-2.110593,53.293653,165.7 +-2.110593,53.293653,165.7 +-2.110593,53.293653,165.7 +-2.110595,53.293657,165.7 +-2.110595,53.293657,165.7 +-2.110595,53.293657,165.7 +-2.110593,53.293657,165.7 +-2.110593,53.293657,165.7 +-2.110593,53.293657,165.7 +-2.110593,53.293657,165.7 +-2.110593,53.293657,165.7 +-2.110593,53.293657,165.7 +-2.110595,53.293657,165.7 +-2.110595,53.293657,165.7 +-2.110595,53.293657,165.7 +-2.110595,53.293657,165.7 +-2.110593,53.293667,165.7 +-2.110597,53.293676,165.7 +-2.110597,53.293676,165.7 +-2.110597,53.293676,165.7 +-2.110597,53.293676,165.7 +-2.110597,53.293676,165.7 
+-2.110597,53.293676,165.7 +-2.110597,53.293676,165.7 +-2.110597,53.293681,165.7 +-2.110545,53.293624,165.7 +-2.110288,53.293591,165.7 +-2.110288,53.293595,165.7 +-2.110147,53.294272,165.7 +-2.109365,53.295212,165.7 +-2.108420,53.296084,165.7 +-2.107292,53.296876,165.7 +-2.105490,53.297467,165.7 +-2.104190,53.298225,165.7 +-2.102533,53.298411,165.7 +-2.100548,53.298159,165.7 +-2.098730,53.298378,165.7 +-2.097297,53.298297,165.7 +-2.096425,53.298078,165.7 +-2.095933,53.298249,165.7 +-2.095803,53.298254,165.7 +-2.095803,53.298254,165.7 +-2.095803,53.298254,165.7 +-2.095803,53.298254,165.7 +-2.095803,53.298254,165.7 +-2.095803,53.298254,165.7 +-2.095803,53.298254,165.7 +-2.095805,53.298254,165.7 +-2.095805,53.298254,165.7 +-2.095805,53.298254,165.7 +-2.095805,53.298254,165.7 +-2.095805,53.298254,165.7 +-2.095807,53.298259,165.7 +-2.095873,53.298278,165.7 +-2.095777,53.298335,165.7 +-2.095338,53.298645,165.7 +-2.095562,53.298788,165.7 +-2.096558,53.298659,165.7 +-2.097402,53.298526,165.7 +-2.097873,53.298349,165.7 +-2.099518,53.298202,165.7 +-2.101260,53.298235,165.7 +-2.102687,53.298383,165.7 +-2.102098,53.298144,165.7 +-2.101278,53.297801,165.7 +-2.101830,53.297644,165.7 +-2.102540,53.297577,165.7 +-2.102727,53.297496,165.7 +-2.102738,53.297515,165.7 +-2.102743,53.297524,165.7 +-2.102742,53.297524,165.7 +-2.102742,53.297524,165.7 +-2.102742,53.297524,165.7 +-2.102740,53.297524,165.7 +-2.102740,53.297524,165.7 + + + + + diff --git a/v3/as_drivers/as_GPS/log_kml.py b/v3/as_drivers/as_GPS/log_kml.py new file mode 100644 index 0000000..e16b1b8 --- /dev/null +++ b/v3/as_drivers/as_GPS/log_kml.py @@ -0,0 +1,76 @@ +# log_kml.py Log GPS data to a kml file for display on Google Earth + +# Copyright (c) Peter Hinch 2018 +# MIT License (MIT) - see LICENSE file +# Test program for asynchronous GPS device driver as_pyGPS +# KML file format: https://developers.google.com/kml/documentation/kml_tut +# 
http://www.toptechboy.com/arduino/lesson-25-display-your-gps-data-as-track-on-google-earth/ + +# Logging stops and the file is closed when the user switch is pressed. + +from .as_GPS import KML, AS_GPS +import asyncio +import pyb + +str_start = """ + + + +#yellowPoly + +1 +1 +absolute + +""" + +str_end = """ + + + + +""" + +red, green, yellow = pyb.LED(1), pyb.LED(2), pyb.LED(3) +sw = pyb.Switch() + +# Toggle the red LED +def toggle_led(*_): + red.toggle() + + +async def log_kml(fn="/sd/log.kml", interval=10): + yellow.on() # Waiting for data + uart = pyb.UART(4, 9600, read_buf_len=200) # Data on X2 + sreader = asyncio.StreamReader(uart) + gps = AS_GPS(sreader, fix_cb=toggle_led) + await gps.data_received(True, True, True, True) + yellow.off() + with open(fn, "w") as f: + f.write(str_start) + while not sw.value(): + f.write(gps.longitude_string(KML)) + f.write(",") + f.write(gps.latitude_string(KML)) + f.write(",") + f.write(str(gps.altitude)) + f.write("\r\n") + for _ in range(interval * 10): + await asyncio.sleep_ms(100) + if sw.value(): + break + + f.write(str_end) + red.off() + green.on() + + +asyncio.run(log_kml()) diff --git a/v3/as_drivers/client_server/heartbeat.py b/v3/as_drivers/client_server/heartbeat.py new file mode 100644 index 0000000..8045921 --- /dev/null +++ b/v3/as_drivers/client_server/heartbeat.py @@ -0,0 +1,33 @@ +# flash.py Heartbeat code for simple uasyncio-based echo server + +# Released under the MIT licence +# Copyright (c) Peter Hinch 2019 + +import asyncio +from sys import platform + + +async def heartbeat(tms): + if platform == "pyboard": # V1.x or D series + from pyb import LED + + led = LED(1) + elif platform == "esp8266": + from machine import Pin + + led = Pin(2, Pin.OUT, value=1) + elif platform == "esp32": + # Some boards have an LED + # from machine import Pin + # led = Pin(2, Pin.OUT, value=1) + return # Reference board has no LED + elif platform == "linux": + return # No LED + else: + raise OSError("Unsupported platform.") + 
while True: + if platform == "pyboard": + led.toggle() + elif platform == "esp8266": + led(not led()) + await asyncio.sleep_ms(tms) diff --git a/v3/as_drivers/client_server/uclient.py b/v3/as_drivers/client_server/uclient.py new file mode 100644 index 0000000..f052d18 --- /dev/null +++ b/v3/as_drivers/client_server/uclient.py @@ -0,0 +1,57 @@ +# uclient.py Demo of simple uasyncio-based client for echo server + +# Released under the MIT licence +# Copyright (c) Peter Hinch 2019-2020 + +import usocket as socket +import asyncio +import ujson +from heartbeat import heartbeat # Optional LED flash + +server = "192.168.0.41" +port = 8123 + + +async def run(): + # Optional fast heartbeat to confirm nonblocking operation + asyncio.create_task(heartbeat(100)) + sock = socket.socket() + + def close(): + sock.close() + print("Server disconnect.") + + try: + serv = socket.getaddrinfo(server, port)[0][-1] + sock.connect(serv) + except OSError as e: + print("Cannot connect to {} on port {}".format(server, port)) + sock.close() + return + while True: + sreader = asyncio.StreamReader(sock) + swriter = asyncio.StreamWriter(sock, {}) + data = ["value", 1] + while True: + try: + swriter.write("{}\n".format(ujson.dumps(data))) + await swriter.drain() + res = await sreader.readline() + except OSError: + close() + return + try: + print("Received", ujson.loads(res)) + except ValueError: + close() + return + await asyncio.sleep(2) + data[1] += 1 + + +try: + asyncio.run(run()) +except KeyboardInterrupt: + print("Interrupted") # This mechanism doesn't work on Unix build. 
+finally: + _ = asyncio.new_event_loop() diff --git a/v3/as_drivers/client_server/userver.py b/v3/as_drivers/client_server/userver.py new file mode 100644 index 0000000..509bcd9 --- /dev/null +++ b/v3/as_drivers/client_server/userver.py @@ -0,0 +1,64 @@ +# userver.py Demo of simple uasyncio-based echo server + +# Released under the MIT licence +# Copyright (c) Peter Hinch 2019-2020 + +import usocket as socket +import asyncio +import uselect as select +import ujson +from heartbeat import heartbeat # Optional LED flash + + +class Server: + def __init__(self, host="0.0.0.0", port=8123, backlog=5, timeout=20): + self.host = host + self.port = port + self.backlog = backlog + self.timeout = timeout + + async def run(self): + print("Awaiting client connection.") + self.cid = 0 + asyncio.create_task(heartbeat(100)) + self.server = await asyncio.start_server( + self.run_client, self.host, self.port, self.backlog + ) + while True: + await asyncio.sleep(100) + + async def run_client(self, sreader, swriter): + self.cid += 1 + print("Got connection from client", self.cid) + try: + while True: + try: + res = await asyncio.wait_for(sreader.readline(), self.timeout) + except asyncio.TimeoutError: + res = b"" + if res == b"": + raise OSError + print("Received {} from client {}".format(ujson.loads(res.rstrip()), self.cid)) + swriter.write(res) + await swriter.drain() # Echo back + except OSError: + pass + print("Client {} disconnect.".format(self.cid)) + await sreader.wait_closed() + print("Client {} socket closed.".format(self.cid)) + + async def close(self): + print("Closing server") + self.server.close() + await self.server.wait_closed() + print("Server closed.") + + +server = Server() +try: + asyncio.run(server.run()) +except KeyboardInterrupt: + print("Interrupted") # This mechanism doesn't work on Unix build. 
+finally: + asyncio.run(server.close()) + _ = asyncio.new_event_loop() diff --git a/v3/as_drivers/hd44780/__init__.py b/v3/as_drivers/hd44780/__init__.py new file mode 100644 index 0000000..3ffc82c --- /dev/null +++ b/v3/as_drivers/hd44780/__init__.py @@ -0,0 +1 @@ +from .alcd import * diff --git a/HD44780/alcd.py b/v3/as_drivers/hd44780/alcd.py similarity index 55% rename from HD44780/alcd.py rename to v3/as_drivers/hd44780/alcd.py index 74b4368..908a322 100644 --- a/HD44780/alcd.py +++ b/v3/as_drivers/hd44780/alcd.py @@ -1,6 +1,7 @@ # LCD class for Micropython and uasyncio. # Author: Peter Hinch # Copyright Peter Hinch 2017 Released under the MIT license +# V1.1 24 Apr 2020 Updated for uasyncio V3 # V1.0 13 May 2017 # Assumes an LCD with standard Hitachi HD44780 controller chip wired using four data lines @@ -13,31 +14,16 @@ # # Author : Matt Hawkins # Site : http://www.raspberrypi-spy.co.uk -# -# Date : 26/07/2012 from machine import Pin import utime as time -import uasyncio as asyncio +import asyncio -# **************************************************** LCD DRIVER *************************************************** +# ********************************** GLOBAL CONSTANTS: TARGET BOARD PIN NUMBERS ************************************* -""" -Pin correspondence of default pinlist. This is supplied as an example -Name LCD connector Board -Rs 4 1 red Y1 -E 6 2 Y2 -D7 14 3 Y3 -D6 13 4 Y4 -D5 12 5 Y5 -D4 11 6 Y6 -""" +# Supply board pin numbers as a tuple in order Rs, E, D4, D5, D6, D7 -# *********************************** GLOBAL CONSTANTS: MICROPYTHON PIN NUMBERS ************************************* - -# Supply as board pin numbers as a tuple Rs, E, D4, D5, D6, D7 - -PINLIST = ('Y1','Y2','Y6','Y5','Y4','Y3') +PINLIST = ("Y1", "Y2", "Y6", "Y5", "Y4", "Y3") # As used in testing. 
# **************************************************** LCD CLASS **************************************************** # Initstring: @@ -59,16 +45,18 @@ # lcd_byte and lcd_nybble method use explicit delays. This is because execution # time is short relative to general latency (on the order of 300μs). -class LCD(object): # LCD objects appear as read/write lists - INITSTRING = b'\x33\x32\x28\x0C\x06\x01' - LCD_LINES = b'\x80\xC0' # LCD RAM address for the 1st and 2nd line (0 and 40H) + +class LCD: # LCD objects appear as read/write lists + INITSTRING = b"\x33\x32\x28\x0C\x06\x01" + LCD_LINES = b"\x80\xC0" # LCD RAM address for the 1st and 2nd line (0 and 40H) CHR = True CMD = False - E_PULSE = 50 # Timing constants in uS + E_PULSE = 50 # Timing constants in uS E_DELAY = 50 - def __init__(self, pinlist, cols, rows = 2): # Init with pin nos for enable, rs, D4, D5, D6, D7 + + def __init__(self, pinlist, cols, rows=2): # Init with pin nos for enable, rs, D4, D5, D6, D7 self.initialising = True - self.LCD_E = Pin(pinlist[1], Pin.OUT) # Create and initialise the hardware pins + self.LCD_E = Pin(pinlist[1], Pin.OUT) # Create and initialise the hardware pins self.LCD_RS = Pin(pinlist[0], Pin.OUT) self.datapins = [Pin(pin_name, Pin.OUT) for pin_name in pinlist[2:]] self.cols = cols @@ -77,16 +65,15 @@ def __init__(self, pinlist, cols, rows = 2): # Init with pin nos for enable, rs, self.dirty = [False] * self.rows for thisbyte in LCD.INITSTRING: self.lcd_byte(thisbyte, LCD.CMD) - self.initialising = False # Long delay after first byte only - loop = asyncio.get_event_loop() - loop.create_task(self.runlcd()) + self.initialising = False # Long delay after first byte only + asyncio.create_task(self.runlcd()) - def lcd_nybble(self, bits): # send the LS 4 bits + def lcd_nybble(self, bits): # send the LS 4 bits for pin in self.datapins: pin.value(bits & 0x01) bits >>= 1 time.sleep_us(LCD.E_DELAY) # 50μs - self.LCD_E.value(True) # Toggle the enable pin + self.LCD_E.value(True) # Toggle 
the enable pin time.sleep_us(LCD.E_PULSE) self.LCD_E.value(False) if self.initialising: @@ -94,32 +81,28 @@ def lcd_nybble(self, bits): # send the LS 4 bits else: time.sleep_us(LCD.E_DELAY) # 50μs - def lcd_byte(self, bits, mode): # Send byte to data pins: bits = data - self.LCD_RS.value(mode) # mode = True for character, False for command - self.lcd_nybble(bits >>4) # send high bits - self.lcd_nybble(bits) # then low ones + def lcd_byte(self, bits, mode): # Send byte to data pins: bits = data + self.LCD_RS.value(mode) # mode = True for character, False for command + self.lcd_nybble(bits >> 4) # send high bits + self.lcd_nybble(bits) # then low ones - def __setitem__(self, line, message): # Send string to display line 0 or 1 - # Strip or pad to width of display. - # Whould use "{0:{1}.{1}}".format("rats", 20) but - message = "%-*.*s" % (self.cols,self.cols,message) # computed format field sizes are unsupported - if message != self.lines[line]: # Only update LCD if data has changed - self.lines[line] = message # Update stored line - self.dirty[line] = True # Flag its non-correspondence with the LCD device + def __setitem__(self, line, message): # Send string to display line 0 or 1 + message = "{0:{1}.{1}}".format(message, self.cols) + if message != self.lines[line]: # Only update LCD if data has changed + self.lines[line] = message # Update stored line + self.dirty[line] = True # Flag its non-correspondence with the LCD device def __getitem__(self, line): return self.lines[line] - async def runlcd(self): # Periodically check for changed text and update LCD if so - while(True): + async def runlcd(self): # Periodically check for changed text and update LCD if so + while True: for row in range(self.rows): if self.dirty[row]: msg = self[row] + self.dirty[row] = False self.lcd_byte(LCD.LCD_LINES[row], LCD.CMD) for thisbyte in msg: self.lcd_byte(ord(thisbyte), LCD.CHR) await asyncio.sleep_ms(0) # Reshedule ASAP - self.dirty[row] = False - await asyncio.sleep_ms(20) # Give 
other coros a look-in - - + await asyncio.sleep_ms(20) # Give other coros a look-in diff --git a/v3/as_drivers/hd44780/alcdtest.py b/v3/as_drivers/hd44780/alcdtest.py new file mode 100644 index 0000000..f057c23 --- /dev/null +++ b/v3/as_drivers/hd44780/alcdtest.py @@ -0,0 +1,20 @@ +# alcdtest.py Test program for LCD class +# Author: Peter Hinch +# Copyright Peter Hinch 2017-2020 Released under the MIT license +# Updated for uasyncio V3 +# runs for 20s +import asyncio +import utime as time +from .alcd import LCD, PINLIST + +lcd = LCD(PINLIST, cols=16) + + +async def lcd_task(): + for secs in range(20, -1, -1): + lcd[0] = "MicroPython {}".format(secs) + lcd[1] = "{:11d}uS".format(time.ticks_us()) + await asyncio.sleep(1) + + +asyncio.run(lcd_task()) diff --git a/v3/as_drivers/htu21d/__init__.py b/v3/as_drivers/htu21d/__init__.py new file mode 100644 index 0000000..ca5a992 --- /dev/null +++ b/v3/as_drivers/htu21d/__init__.py @@ -0,0 +1 @@ +from .htu21d_mc import * diff --git a/htu21d/htu21d_mc.py b/v3/as_drivers/htu21d/htu21d_mc.py similarity index 69% rename from htu21d/htu21d_mc.py rename to v3/as_drivers/htu21d/htu21d_mc.py index d071075..385ec48 100644 --- a/htu21d/htu21d_mc.py +++ b/v3/as_drivers/htu21d/htu21d_mc.py @@ -4,14 +4,13 @@ # Based on https://github.com/manitou48/pyboard/blob/master/htu21d.py # Author: Peter Hinch -# Copyright Peter Hinch 2018 Released under the MIT license +# Copyright Peter Hinch 2018-2020 Released under the MIT license import machine import ustruct -import uasyncio as asyncio +import asyncio from micropython import const -_ADDRESS = const(0x40) # HTU21D Address _PAUSE_MS = const(60) # HTU21D acquisition delay _READ_USER_REG = const(0xE7) @@ -20,18 +19,19 @@ # value[0], value[1] = Raw temp/hum data, value[2] = CRC # Polynomial = 0x0131 = x^8 + x^5 + x^4 + 1 + class HTU21D: - START_TEMP_MEASURE = b'\xF3' # Commands - START_HUMD_MEASURE = b'\xF5' + START_TEMP_MEASURE = b"\xF3" # Commands + START_HUMD_MEASURE = b"\xF5" - def 
__init__(self, i2c, read_delay=10): + def __init__(self, i2c, read_delay=10, address=0x40): self.i2c = i2c - if _ADDRESS not in self.i2c.scan(): - raise OSError('No HTU21D device found.') + if address not in self.i2c.scan(): + raise OSError("No HTU21D device found.") + self.address = address self.temperature = None self.humidity = None - loop = asyncio.get_event_loop() - loop.create_task(self._run(read_delay)) + asyncio.create_task(self._run(read_delay)) async def _run(self, read_delay): while True: @@ -43,22 +43,22 @@ async def _run(self, read_delay): def __iter__(self): # Await 1st reading while self.humidity is None: - yield + yield from asyncio.sleep(0) async def _get_data(self, cmd, divisor=0x131 << 15, bit=1 << 23): - self.i2c.writeto(_ADDRESS, cmd) # Start reading + self.i2c.writeto(self.address, cmd) # Start reading await asyncio.sleep_ms(_PAUSE_MS) # Wait for device - value = self.i2c.readfrom(_ADDRESS, 3) # Read result, check CRC8 - data, crc = ustruct.unpack('>HB', value) + value = self.i2c.readfrom(self.address, 3) # Read result, check CRC8 + data, crc = ustruct.unpack(">HB", value) remainder = (data << 8) | crc while bit > 128: - if(remainder & bit): + if remainder & bit: remainder ^= divisor divisor >>= 1 bit >>= 1 if remainder: - raise OSError('HTU21D CRC Fail') + raise OSError("HTU21D CRC Fail") return data & 0xFFFC # Clear the status bits def user_register(self): # Read the user register byte (should be 2) - return self.i2c.readfrom_mem(_ADDRESS, _READ_USER_REG, 1)[0] + return self.i2c.readfrom_mem(self.address, _READ_USER_REG, 1)[0] diff --git a/htu21d/htu_test.py b/v3/as_drivers/htu21d/htu_test.py similarity index 71% rename from htu21d/htu_test.py rename to v3/as_drivers/htu21d/htu_test.py index 401aac1..fd983f9 100644 --- a/htu21d/htu_test.py +++ b/v3/as_drivers/htu21d/htu_test.py @@ -3,12 +3,12 @@ # Author: Peter Hinch # Copyright Peter Hinch 2018 Released under the MIT license -import uasyncio as asyncio +import asyncio import sys from 
machine import Pin, I2C -import htu21d_mc +from .htu21d_mc import HTU21D -if sys.platform == 'pyboard': +if sys.platform == "pyboard": i2c = I2C(1) # scl=X9 sda=X10 else: # Specify pullup: on my ESP32 board pullup resistors are not fitted :-( @@ -18,17 +18,17 @@ i2c = I2C(-1, scl=scl_pin, sda=sda_pin) # Loboris port (soon this special treatment won't be needed). # https://forum.micropython.org/viewtopic.php?f=18&t=3553&start=390 - #i2c = I2C(scl=scl_pin, sda=sda_pin) + # i2c = I2C(scl=scl_pin, sda=sda_pin) + +htu = HTU21D(i2c, read_delay=2) # read_delay=2 for test purposes -htu = htu21d_mc.HTU21D(i2c, read_delay=2) # read_delay=2 for test purposes async def main(): await htu while True: - fstr = 'Temp {:5.1f} Humidity {:5.1f}' + fstr = "Temp {:5.1f} Humidity {:5.1f}" print(fstr.format(htu.temperature, htu.humidity)) await asyncio.sleep(5) -loop = asyncio.get_event_loop() -loop.create_task(main()) -loop.run_forever() + +asyncio.run(main()) diff --git a/v3/as_drivers/i2c/__init__.py b/v3/as_drivers/i2c/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/v3/as_drivers/i2c/asi2c.py b/v3/as_drivers/i2c/asi2c.py new file mode 100644 index 0000000..4a72eaf --- /dev/null +++ b/v3/as_drivers/i2c/asi2c.py @@ -0,0 +1,204 @@ +# asi2c.py A communications link using I2C slave mode on Pyboard. +# Channel and Responder classes. Adapted for uasyncio V3, WBUS DIP28. 
+ +# The MIT License (MIT) +# +# Copyright (c) 2018-2020 Peter Hinch +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + +import asyncio +import machine +import utime +from micropython import const +import io + +_MP_STREAM_POLL_RD = const(1) +_MP_STREAM_POLL_WR = const(4) +_MP_STREAM_POLL = const(3) +_MP_STREAM_ERROR = const(-1) +# Delay compensates for short Responder interrupt latency. Must be >= max delay +# between Initiator setting a pin and initiating an I2C transfer: ensure +# Initiator sets up first. 
+_DELAY = const(20) # μs + + +# Base class provides user interface and send/receive object buffers +class Channel(io.IOBase): + def __init__(self, i2c, own, rem, verbose, rxbufsize): + self.rxbufsize = rxbufsize + self.verbose = verbose + self.synchronised = False + # Hardware + self.i2c = i2c + self.own = own + self.rem = rem + own.init(mode=machine.Pin.OUT, value=1) + rem.init(mode=machine.Pin.IN, pull=machine.Pin.PULL_UP) + # I/O + self.txbyt = b"" # Data to send + self.txsiz = bytearray(2) # Size of .txbyt encoded as 2 bytes + self.rxbyt = b"" + self.rxbuf = bytearray(rxbufsize) + self.rx_mv = memoryview(self.rxbuf) + self.cantx = True # Remote can accept data + + async def _sync(self): + self.verbose and print("Synchronising") + self.own(0) + while self.rem(): + await asyncio.sleep_ms(100) + # Both pins are now low + await asyncio.sleep(0) + self.verbose and print("Synchronised") + self.synchronised = True + + def waitfor(self, val): # Initiator overrides + while not self.rem() == val: + pass + + # Get incoming bytes instance from memoryview. + def _handle_rxd(self, msg): + self.rxbyt = bytes(msg) + + def _txdone(self): + self.txbyt = b"" + self.txsiz[0] = 0 + self.txsiz[1] = 0 + + # Stream interface + + def ioctl(self, req, arg): + ret = _MP_STREAM_ERROR + if req == _MP_STREAM_POLL: + ret = 0 + if self.synchronised: + if arg & _MP_STREAM_POLL_RD: + if self.rxbyt: + ret |= _MP_STREAM_POLL_RD + if arg & _MP_STREAM_POLL_WR: + if (not self.txbyt) and self.cantx: + ret |= _MP_STREAM_POLL_WR + return ret + + def readline(self): + n = self.rxbyt.find(b"\n") + if n == -1: + t = self.rxbyt[:] + self.rxbyt = b"" + else: + t = self.rxbyt[: n + 1] + self.rxbyt = self.rxbyt[n + 1 :] + return t.decode() + + def read(self, n): + t = self.rxbyt[:n] + self.rxbyt = self.rxbyt[n:] + return t.decode() + + # Set .txbyt to the required data. Return its size. So awrite returns + # with transmission occurring in tha background. 
+ # uasyncio V3: Stream.drain() calls write with buf being a memoryview + # and no off or sz args. + def write(self, buf): + if self.synchronised: + if self.txbyt: # Initial call from awrite + return 0 # Waiting for existing data to go out + l = len(buf) + self.txbyt = buf + self.txsiz[0] = l & 0xFF + self.txsiz[1] = l >> 8 + return l + return 0 + + # User interface + + # Wait for sync + async def ready(self): + while not self.synchronised: + await asyncio.sleep_ms(100) + + # Leave pin high in case we run again + def close(self): + self.own(1) + + +# Responder is I2C master. It is cross-platform and uses machine. +# It does not handle errors: if I2C fails it dies and awaits reset by initiator. +# send_recv is triggered by Interrupt from Initiator. + + +class Responder(Channel): + addr = 0x12 + rxbufsize = 200 + + def __init__(self, i2c, pin, pinack, verbose=True): + super().__init__(i2c, pinack, pin, verbose, self.rxbufsize) + loop = asyncio.get_event_loop() + loop.create_task(self._run()) + + async def _run(self): + await self._sync() # own pin ->0, wait for remote pin == 0 + self.rem.irq(handler=self._handler, trigger=machine.Pin.IRQ_RISING) + + # Request was received: immediately read payload size, then payload + # On Pyboard blocks for 380μs to 1.2ms for small amounts of data + def _handler(self, _, sn=bytearray(2), txnull=bytearray(2)): + addr = Responder.addr + self.rem.irq(handler=None) + utime.sleep_us(_DELAY) # Ensure Initiator has set up to write. + self.i2c.readfrom_into(addr, sn) + self.own(1) + self.waitfor(0) + self.own(0) + n = sn[0] + ((sn[1] & 0x7F) << 8) # no of bytes to receive + if n > self.rxbufsize: + raise ValueError("Receive data too large for buffer.") + self.cantx = not bool(sn[1] & 0x80) # Can Initiator accept a payload? 
+ if n: + self.waitfor(1) + utime.sleep_us(_DELAY) + mv = memoryview(self.rx_mv[0:n]) # allocates + self.i2c.readfrom_into(addr, mv) + self.own(1) + self.waitfor(0) + self.own(0) + self._handle_rxd(mv) + + self.own(1) # Request to send + self.waitfor(1) + utime.sleep_us(_DELAY) + dtx = self.txbyt != b"" and self.cantx # Data to send + siz = self.txsiz if dtx else txnull + if self.rxbyt: + siz[1] |= 0x80 # Hold off Initiator TX + else: + siz[1] &= 0x7F + self.i2c.writeto(addr, siz) # Was getting ENODEV occasionally on Pyboard + self.own(0) + self.waitfor(0) + if dtx: + self.own(1) + self.waitfor(1) + utime.sleep_us(_DELAY) + self.i2c.writeto(addr, self.txbyt) + self.own(0) + self.waitfor(0) + self._txdone() # Invalidate source + self.rem.irq(handler=self._handler, trigger=machine.Pin.IRQ_RISING) diff --git a/v3/as_drivers/i2c/asi2c_i.py b/v3/as_drivers/i2c/asi2c_i.py new file mode 100644 index 0000000..d9f47e5 --- /dev/null +++ b/v3/as_drivers/i2c/asi2c_i.py @@ -0,0 +1,149 @@ +# asi2c_i.py A communications link using I2C slave mode on Pyboard. +# Initiator class. Adapted for uasyncio V3, WBUS DIP28. + +# The MIT License (MIT) +# +# Copyright (c) 2018-2020 Peter Hinch +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + +import asyncio +import machine +import utime +import gc +from .asi2c import Channel + + +# The initiator is an I2C slave. It runs on a Pyboard. I2C uses pyb for slave +# mode, but pins are instantiated using machine. +# reset (if provided) is a means of resetting Responder in case of error: it +# is (pin, active_level, ms) +class Initiator(Channel): + t_poll = 100 # ms between Initiator polling Responder + rxbufsize = 200 + + def __init__( + self, + i2c, + pin, + pinack, + reset=None, + verbose=True, + cr_go=False, + go_args=(), + cr_fail=False, + f_args=(), + ): + super().__init__(i2c, pin, pinack, verbose, self.rxbufsize) + self.reset = reset + self.cr_go = cr_go + self.go_args = go_args + self.cr_fail = cr_fail + self.f_args = f_args + if reset is not None: + reset[0].init(mode=machine.Pin.OUT, value=not (reset[1])) + # Self measurement + self.nboots = 0 # No. 
of reboots of Responder + self.block_max = 0 # Blocking times: max + self.block_sum = 0 # Total + self.block_cnt = 0 # Count + asyncio.create_task(self._run()) + + def waitfor(self, val): # Wait for response for 1 sec + tim = utime.ticks_ms() + while not self.rem() == val: + if utime.ticks_diff(utime.ticks_ms(), tim) > 1000: + raise OSError + + async def reboot(self): + self.close() # Leave own pin high + if self.reset is not None: + rspin, rsval, rstim = self.reset + self.verbose and print("Resetting target.") + rspin(rsval) # Pulse reset line + await asyncio.sleep_ms(rstim) + rspin(not rsval) + + async def _run(self): + while True: + # If hardware link exists reboot Responder + await self.reboot() + self.txbyt = b"" + self.rxbyt = b"" + await self._sync() + await asyncio.sleep(1) # Ensure Responder is ready + if self.cr_go: + self.loop.create_task(self.cr_go(*self.go_args)) + while True: + gc.collect() + try: + tstart = utime.ticks_us() + self._sendrx() + t = utime.ticks_diff(utime.ticks_us(), tstart) + except OSError: # Reboot remote. + break + await asyncio.sleep_ms(Initiator.t_poll) + self.block_max = max(self.block_max, t) # self measurement + self.block_cnt += 1 + self.block_sum += t + self.nboots += 1 + if self.cr_fail: + await self.cr_fail(*self.f_args) + if self.reset is None: # No means of recovery + raise OSError("Responder fail.") + + def _send(self, d): + # CRITICAL TIMING. Trigger interrupt on responder immediately before + # send. Send must start before RX begins. Fast responders may need to + # do a short blocking wait to guarantee this. + self.own(1) # Trigger interrupt. + self.i2c.send(d) # Blocks until RX complete. 
+ self.waitfor(1) + self.own(0) + self.waitfor(0) + + # Send payload length (may be 0) then payload (if any) + def _sendrx(self, sn=bytearray(2), txnull=bytearray(2)): + siz = self.txsiz if self.cantx else txnull + if self.rxbyt: + siz[1] |= 0x80 # Hold off further received data + else: + siz[1] &= 0x7F + self._send(siz) + if self.txbyt and self.cantx: + self._send(self.txbyt) + self._txdone() # Invalidate source + # Send complete + self.waitfor(1) # Wait for responder to request send + self.own(1) # Acknowledge + self.i2c.recv(sn) + self.waitfor(0) + self.own(0) + n = sn[0] + ((sn[1] & 0x7F) << 8) # no of bytes to receive + if n > self.rxbufsize: + raise ValueError("Receive data too large for buffer.") + self.cantx = not bool(sn[1] & 0x80) + if n: + self.waitfor(1) # Wait for responder to request send + self.own(1) # Acknowledge + mv = self.rx_mv[0:n] # mv is a memoryview instance + self.i2c.recv(mv) + self.waitfor(0) + self.own(0) + self._handle_rxd(mv) diff --git a/v3/as_drivers/i2c/i2c_esp.py b/v3/as_drivers/i2c/i2c_esp.py new file mode 100644 index 0000000..d61d50f --- /dev/null +++ b/v3/as_drivers/i2c/i2c_esp.py @@ -0,0 +1,71 @@ +# i2c_esp.py Test program for asi2c.py. Adapted for uasyncio V3, WBUS DIP28. +# Tests Responder on ESP8266. + +# The MIT License (MIT) +# +# Copyright (c) 2018-2020 Peter Hinch +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + +import asyncio +from machine import Pin, I2C +from .asi2c import Responder +import ujson +import gc + +gc.collect() + +i2c = I2C(scl=Pin(0), sda=Pin(2)) # software I2C +syn = Pin(5) +ack = Pin(4) +chan = Responder(i2c, syn, ack) + + +async def receiver(): + sreader = asyncio.StreamReader(chan) + await chan.ready() + print("started") + for _ in range(5): # Test flow control + res = await sreader.readline() + print("Received", ujson.loads(res)) + await asyncio.sleep(4) + while True: + res = await sreader.readline() + print("Received", ujson.loads(res)) + + +async def sender(): + swriter = asyncio.StreamWriter(chan, {}) + txdata = [0, 0] + while True: + txdata[0] = gc.mem_free() + await swriter.awrite("".join((ujson.dumps(txdata), "\n"))) + txdata[1] += 1 + await asyncio.sleep_ms(1500) + gc.collect() + + +asyncio.create_task(receiver()) +try: + asyncio.run(sender()) +except KeyboardInterrupt: + print("Interrupted") +finally: + asyncio.new_event_loop() + chan.close() # for subsequent runs diff --git a/v3/as_drivers/i2c/i2c_init.py b/v3/as_drivers/i2c/i2c_init.py new file mode 100644 index 0000000..bdadc6b --- /dev/null +++ b/v3/as_drivers/i2c/i2c_init.py @@ -0,0 +1,87 @@ +# i2c_init.py Test program for asi2c.py. Adapted for uasyncio V3, WBUS DIP28. 
+# Tests Initiator on a Pyboard + +# The MIT License (MIT) +# +# Copyright (c) 2018-2020 Peter Hinch +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + +import asyncio +from pyb import I2C # Only pyb supports slave mode +from machine import Pin +from .asi2c_i import Initiator +import ujson +import os + +i2c = I2C(2, mode=I2C.SLAVE) +syn = Pin("Y11") +ack = Pin("X6") +# Reset on Pyboard and ESP8266 is active low. Use 200ms pulse. 
+rst = (Pin("Y12"), 0, 200) +chan = Initiator(i2c, syn, ack, rst) +if os.uname().machine.split(" ")[0][:4] == "PYBD": + Pin.board.EN_3V3.value(1) + + +async def receiver(): + sreader = asyncio.StreamReader(chan) + for _ in range(5): # Test flow control + res = await sreader.readline() + print("Received", ujson.loads(res)) + await asyncio.sleep(4) + while True: + res = await sreader.readline() + print("Received", ujson.loads(res)) + + +async def sender(): + swriter = asyncio.StreamWriter(chan, {}) + txdata = [0, 0] + await swriter.awrite("".join((ujson.dumps("this is a test 1"), "\n"))) + await swriter.awrite("".join((ujson.dumps("this is a test 2"), "\n"))) + await swriter.awrite("".join((ujson.dumps("this is a test 3"), "\n"))) + while True: + await swriter.awrite("".join((ujson.dumps(txdata), "\n"))) + txdata[0] += 1 + await asyncio.sleep_ms(800) + + +async def test(): + asyncio.create_task(receiver()) + asyncio.create_task(sender()) + while True: + await chan.ready() + await asyncio.sleep(10) + if chan.block_cnt: + print( + "Blocking time {:d}μs max. {:d}μs mean.".format( + chan.block_max, int(chan.block_sum / chan.block_cnt) + ) + ) + print("Reboots: ", chan.nboots) + + +try: + asyncio.run(test()) +except KeyboardInterrupt: + print("Interrupted") +finally: + asyncio.new_event_loop() + chan.close() # for subsequent runs diff --git a/v3/as_drivers/i2c/i2c_resp.py b/v3/as_drivers/i2c/i2c_resp.py new file mode 100644 index 0000000..b3bf799 --- /dev/null +++ b/v3/as_drivers/i2c/i2c_resp.py @@ -0,0 +1,70 @@ +# i2c_resp.py Test program for asi2c.py. Adapted for uasyncio V3, WBUS DIP28. +# Tests Responder on a Pyboard. 
+ +# The MIT License (MIT) +# +# Copyright (c) 2018-2020 Peter Hinch +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+ +import asyncio +from machine import Pin, I2C +from .asi2c import Responder +import ujson +import os + +i2c = I2C(2) +# i2c = I2C(scl=Pin('Y9'),sda=Pin('Y10')) # software I2C +syn = Pin("Y11") +ack = Pin("X6") +chan = Responder(i2c, syn, ack) +if os.uname().machine.split(" ")[0][:4] == "PYBD": + Pin.board.EN_3V3.value(1) + + +async def receiver(): + sreader = asyncio.StreamReader(chan) + await chan.ready() + print("started") + for _ in range(5): # Test flow control + res = await sreader.readline() + print("Received", ujson.loads(res)) + await asyncio.sleep(4) + while True: + res = await sreader.readline() + print("Received", ujson.loads(res)) + + +async def sender(): + swriter = asyncio.StreamWriter(chan, {}) + txdata = [0, 0] + while True: + await swriter.awrite("".join((ujson.dumps(txdata), "\n"))) + txdata[1] += 1 + await asyncio.sleep_ms(1500) + + +asyncio.create_task(receiver()) +try: + asyncio.run(sender()) +except KeyboardInterrupt: + print("Interrupted") +finally: + asyncio.new_event_loop() + chan.close() # for subsequent runs diff --git a/v3/as_drivers/metrics/__init__.py b/v3/as_drivers/metrics/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/v3/as_drivers/metrics/metrics.py b/v3/as_drivers/metrics/metrics.py new file mode 100644 index 0000000..3c62db0 --- /dev/null +++ b/v3/as_drivers/metrics/metrics.py @@ -0,0 +1,49 @@ +# metrics.py Check on scheduling performance of an application +# Released under the MIT licence +# Copyright (c) Peter Hinch 2020 + +import asyncio +import gc +from utime import ticks_us, ticks_diff + + +def metrics(): + ncalls = 0 + max_d = 0 + min_d = 100_000_000 + tot_d = 0 + st = "Max {}μs Min {}μs Avg {}μs No. 
of calls {} Freq {}" + + async def func(): + nonlocal ncalls, max_d, min_d, tot_d + while True: + tstart = ticks_us() + t_last = None + while ticks_diff(t := ticks_us(), tstart) < 10_000_000: + await asyncio.sleep(0) + if ncalls: + dt = ticks_diff(t, t_last) + max_d = max(max_d, dt) + min_d = min(min_d, dt) + tot_d += dt + ncalls += 1 + t_last = t + print(st.format(max_d, min_d, tot_d // ncalls, ncalls, ncalls // 10)) + gc.collect() + print("mem free", gc.mem_free()) + ncalls = 0 + max_d = 0 + min_d = 100_000_000 + tot_d = 0 + + return func + + +# Example of call +async def main(): + asyncio.create_task(metrics()()) # Note the syntax + while True: + await asyncio.sleep(0) + + +# asyncio.run(main()) diff --git a/v3/as_drivers/nec_ir/__init__.py b/v3/as_drivers/nec_ir/__init__.py new file mode 100644 index 0000000..54209f0 --- /dev/null +++ b/v3/as_drivers/nec_ir/__init__.py @@ -0,0 +1 @@ +from .aremote import * diff --git a/nec_ir/aremote.py b/v3/as_drivers/nec_ir/aremote.py similarity index 75% rename from nec_ir/aremote.py rename to v3/as_drivers/nec_ir/aremote.py index 09cfa58..ed152a5 100644 --- a/nec_ir/aremote.py +++ b/v3/as_drivers/nec_ir/aremote.py @@ -5,16 +5,19 @@ # Copyright Peter Hinch 2017 Released under the MIT license from sys import platform -import uasyncio as asyncio -from asyn import Event +import asyncio +from primitives.message import Message from micropython import const from array import array -from utime import ticks_us, ticks_diff -if platform == 'pyboard': +from utime import ticks_ms, ticks_us, ticks_diff + +if platform == "pyboard": from pyb import Pin, ExtInt else: from machine import Pin +ESP32 = platform == "esp32" or platform == "esp32_LoBo" + # Save RAM # from micropython import alloc_emergency_exception_buf # alloc_emergency_exception_buf(100) @@ -37,30 +40,28 @@ # exceed the worst case block transmission time, but (with asyncio latency) be # less than the interval between a block start and a repeat code start (108ms) # Value of 73 
allows for up to 35ms latency. -class NEC_IR(): +class NEC_IR: def __init__(self, pin, callback, extended, *args): # Optional args for callback - self._ev_start = Event() + self._ev_start = Message() self._callback = callback self._extended = extended self._addr = 0 self.block_time = 80 if extended else 73 # Allow for some tx tolerance (?) self._args = args - self._times = array('i', (0 for _ in range(_EDGECOUNT + 1))) # +1 for overrun - if platform == 'pyboard': + self._times = array("i", (0 for _ in range(_EDGECOUNT + 1))) # +1 for overrun + if platform == "pyboard": ExtInt(pin, ExtInt.IRQ_RISING_FALLING, Pin.PULL_NONE, self._cb_pin) - else: - pin.irq(handler = self._cb_pin, trigger = (Pin.IRQ_FALLING | Pin.IRQ_RISING), hard = True) + else: # PR5962 ESP8266 hard IRQ's not supported + pin.irq(handler=self._cb_pin, trigger=(Pin.IRQ_FALLING | Pin.IRQ_RISING)) self._edge = 0 self._ev_start.clear() - loop = asyncio.get_event_loop() - loop.create_task(self._run()) + asyncio.create_task(self._run()) async def _run(self): - loop = asyncio.get_event_loop() while True: await self._ev_start # Wait until data collection has started # Compensate for asyncio latency - latency = ticks_diff(loop.time(), self._ev_start.value()) + latency = ticks_diff(ticks_ms(), self._ev_start.value()) await asyncio.sleep_ms(self.block_time - latency) # Data block should have ended self._decode() # decode, clear event, prepare for new rx, call cb @@ -70,8 +71,7 @@ def _cb_pin(self, line): # On overrun ignore pulses until software timer times out if self._edge <= _EDGECOUNT: # Allow 1 extra pulse to record overrun if not self._ev_start.is_set(): # First edge received - loop = asyncio.get_event_loop() - self._ev_start.set(loop.time()) # asyncio latency compensation + self._ev_start.set(ticks_ms()) # asyncio latency compensation self._times[self._edge] = t self._edge += 1 @@ -82,7 +82,7 @@ def _decode(self): width = ticks_diff(self._times[1], self._times[0]) if width > 4000: # 9ms leading mark for 
all valid data width = ticks_diff(self._times[2], self._times[1]) - if width > 3000: # 4.5ms space for normal data + if width > 3000: # 4.5ms space for normal data if self._edge < _EDGECOUNT: # Haven't received the correct number of edges val = BADBLOCK @@ -95,19 +95,19 @@ def _decode(self): val >>= 1 if ticks_diff(self._times[edge + 1], self._times[edge]) > 1120: val |= 0x80000000 - elif width > 1700: # 2.5ms space for a repeat code. Should have exactly 4 edges. + elif width > 1700: # 2.5ms space for a repeat code. Should have exactly 4 edges. val = REPEAT if self._edge == 4 else BADREP addr = 0 if val >= 0: # validate. Byte layout of val ~cmd cmd ~addr addr - addr = val & 0xff - cmd = (val >> 16) & 0xff - if addr == ((val >> 8) ^ 0xff) & 0xff: # 8 bit address OK - val = cmd if cmd == (val >> 24) ^ 0xff else BADDATA + addr = val & 0xFF + cmd = (val >> 16) & 0xFF + if addr == ((val >> 8) ^ 0xFF) & 0xFF: # 8 bit address OK + val = cmd if cmd == (val >> 24) ^ 0xFF else BADDATA self._addr = addr else: - addr |= val & 0xff00 # pass assumed 16 bit address to callback + addr |= val & 0xFF00 # pass assumed 16 bit address to callback if self._extended: - val = cmd if cmd == (val >> 24) ^ 0xff else BADDATA + val = cmd if cmd == (val >> 24) ^ 0xFF else BADDATA self._addr = addr else: val = BADADDR diff --git a/v3/as_drivers/nec_ir/art.py b/v3/as_drivers/nec_ir/art.py new file mode 100644 index 0000000..e8e5481 --- /dev/null +++ b/v3/as_drivers/nec_ir/art.py @@ -0,0 +1,63 @@ +# art.py Test program for IR remote control decoder aremote.py +# Supports Pyboard and ESP8266 + +# Author: Peter Hinch +# Copyright Peter Hinch 2017 Released under the MIT license + +# Run this to characterise a remote. 
+# import as_drivers.nec_ir.art + +from sys import platform +import asyncio + +ESP32 = platform == "esp32" or platform == "esp32_LoBo" + +if platform == "pyboard": + from pyb import Pin +elif platform == "esp8266" or ESP32: + from machine import Pin, freq +else: + print("Unsupported platform", platform) + +from .aremote import * + +errors = { + BADSTART: "Invalid start pulse", + BADBLOCK: "Error: bad block", + BADREP: "Error: repeat", + OVERRUN: "Error: overrun", + BADDATA: "Error: invalid data", + BADADDR: "Error: invalid address", +} + + +def cb(data, addr): + if data == REPEAT: + print("Repeat") + elif data >= 0: + print(hex(data), hex(addr)) + else: + print("{} Address: {}".format(errors[data], hex(addr))) + + +def test(): + print("Test for IR receiver. Assumes NEC protocol.") + print("ctrl-c to stop.") + if platform == "pyboard": + p = Pin("X3", Pin.IN) + elif platform == "esp8266": + freq(160000000) + p = Pin(13, Pin.IN) + elif ESP32: + p = Pin(23, Pin.IN) + ir = NEC_IR(p, cb, True) # Assume r/c uses extended addressing + loop = asyncio.get_event_loop() + try: + loop.run_forever() + except KeyboardInterrupt: + print("Interrupted") + finally: + asyncio.new_event_loop() # Still need ctrl-d because of interrupt vector + + +test() diff --git a/nec_ir/art1.py b/v3/as_drivers/nec_ir/art1.py similarity index 54% rename from nec_ir/art1.py rename to v3/as_drivers/nec_ir/art1.py index a11beb3..853e97d 100644 --- a/nec_ir/art1.py +++ b/v3/as_drivers/nec_ir/art1.py @@ -9,47 +9,61 @@ # your needs. 
from sys import platform -import uasyncio as asyncio -if platform == 'pyboard': +import asyncio + +ESP32 = platform == "esp32" or platform == "esp32_LoBo" +if platform == "pyboard": from pyb import Pin, LED -elif platform == 'esp8266': +elif platform == "esp8266" or ESP32: from machine import Pin, freq else: - print('Unsupported platform', platform) + print("Unsupported platform", platform) from aremote import NEC_IR, REPEAT + def cb(data, addr, led): if addr == 0x40: # Adapt for your remote if data == 1: # Button 1. Adapt for your remote/buttons - print('LED on') - if platform == 'pyboard': + print("LED on") + if platform == "pyboard": led.on() else: led(0) elif data == 2: - print('LED off') - if platform == 'pyboard': + print("LED off") + if platform == "pyboard": led.off() else: led(1) elif data < REPEAT: - print('Bad IR data') + print("Bad IR data") else: - print('Incorrect remote') + print("Incorrect remote") + def test(): - print('Test for IR receiver. Assumes NEC protocol. Turn LED on or off.') - if platform == 'pyboard': - p = Pin('X3', Pin.IN) + print("Test for IR receiver. Assumes NEC protocol. 
Turn LED on or off.") + if platform == "pyboard": + p = Pin("X3", Pin.IN) led = LED(2) - elif platform == 'esp8266': + elif platform == "esp8266": freq(160000000) p = Pin(13, Pin.IN) led = Pin(2, Pin.OUT) led(1) + elif ESP32: + p = Pin(23, Pin.IN) + led = Pin(21, Pin.OUT) # LED with 220Ω series resistor between 3.3V and pin 21 + led(1) ir = NEC_IR(p, cb, True, led) # Assume extended address mode r/c loop = asyncio.get_event_loop() - loop.run_forever() + try: + loop.run_forever() + except KeyboardInterrupt: + print("Interrupted") + finally: + asyncio.new_event_loop() # Still need ctrl-d because of interrupt vector + test() diff --git a/v3/as_drivers/sched/__init__.py b/v3/as_drivers/sched/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/v3/as_drivers/sched/asynctest.py b/v3/as_drivers/sched/asynctest.py new file mode 100644 index 0000000..0b1a282 --- /dev/null +++ b/v3/as_drivers/sched/asynctest.py @@ -0,0 +1,40 @@ +# asynctest.py Demo of asynchronous code scheduling tasks with cron + +# Copyright (c) 2020 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +import asyncio +from sched.sched import schedule +from time import localtime + + +def foo(txt): # Demonstrate callback + yr, mo, md, h, m, s, wd = localtime()[:7] + fst = "Callback {} {:02d}:{:02d}:{:02d} on {:02d}/{:02d}/{:02d}" + print(fst.format(txt, h, m, s, md, mo, yr)) + + +async def bar(txt): # Demonstrate coro launch + yr, mo, md, h, m, s, wd = localtime()[:7] + fst = "Coroutine {} {:02d}:{:02d}:{:02d} on {:02d}/{:02d}/{:02d}" + print(fst.format(txt, h, m, s, md, mo, yr)) + await asyncio.sleep(0) + + +async def main(): + print("Asynchronous test running...") + asyncio.create_task(schedule(foo, "every 4 mins", hrs=None, mins=range(0, 60, 4))) + + asyncio.create_task(schedule(foo, "every 5 mins", hrs=None, mins=range(0, 60, 5))) + + # Launch a coroutine + asyncio.create_task(schedule(bar, "every 3 mins", hrs=None, mins=range(0, 60, 3))) + + 
asyncio.create_task(schedule(foo, "one shot", hrs=None, mins=range(0, 60, 2), times=1)) + await asyncio.sleep(900) # Quit after 15 minutes + + +try: + asyncio.run(main()) +finally: + _ = asyncio.new_event_loop() diff --git a/v3/as_drivers/sched/cron.py b/v3/as_drivers/sched/cron.py new file mode 100644 index 0000000..0d853b1 --- /dev/null +++ b/v3/as_drivers/sched/cron.py @@ -0,0 +1,118 @@ +# cron.py + +# Copyright (c) 2020-2023 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +# A cron is instantiated with sequence specifier args. An instance accepts an integer time +# value (in secs since epoch) and returns the number of seconds to wait for a matching time. +# It holds no state. +# See docs for restrictions and limitations. + +from time import mktime, localtime +# Validation +_valid = ((0, 59, 'secs'), (0, 59, 'mins'), (0, 23, 'hrs'), + (1, 31, 'mday'), (1, 12, 'month'), (0, 6, 'wday')) +_mdays = {2:28, 4:30, 6:30, 9:30, 11:30} +# A call to the inner function takes 270-520μs on Pyboard depending on args +def cron(*, secs=0, mins=0, hrs=3, mday=None, month=None, wday=None): + # Given an arg and current value, return offset between arg and cv + # If arg is iterable return offset of next arg +ve for future -ve for past (add modulo) + def do_arg(a, cv): # Arg, current value + if a is None: + return 0 + elif isinstance(a, int): + return a - cv + try: + return min(x for x in a if x >= cv) - cv + except ValueError: # wrap-round + return min(a) - cv # -ve + except TypeError: + raise ValueError('Invalid argument type', type(a)) + + if secs is None: # Special validation for seconds + raise ValueError('Invalid None value for secs') + if not isinstance(secs, int) and len(secs) > 1: # It's an iterable + ss = sorted(secs) + if min((a[1] - a[0] for a in zip(ss, ss[1:]))) < 10: + raise ValueError("Seconds values must be >= 10s apart.") + args = (secs, mins, hrs, mday, month, wday) # Validation for all args + valid = iter(_valid) + vestr = 'Argument {} out 
of range' + vmstr = 'Invalid no. of days for month' + for arg in args: # Check for illegal arg values + lower, upper, errtxt = next(valid) + if isinstance(arg, int): + if not lower <= arg <= upper: + raise ValueError(vestr.format(errtxt)) + elif arg is not None: # Must be an iterable + if any(v for v in arg if not lower <= v <= upper): + raise ValueError(vestr.format(errtxt)) + if mday is not None and month is not None: # Check mday against month + max_md = mday if isinstance(mday, int) else max(mday) + if isinstance(month, int): + if max_md > _mdays.get(month, 31): + raise ValueError(vmstr) + elif sum((m for m in month if max_md > _mdays.get(m, 31))): + raise ValueError(vmstr) + if mday is not None and wday is not None and do_arg(mday, 23) > 0: + raise ValueError('mday must be <= 22 if wday also specified.') + + def inner(tnow): + tev = tnow # Time of next event: work forward from time now + yr, mo, md, h, m, s, wd = localtime(tev)[:7] + init_mo = mo # Month now + toff = do_arg(secs, s) + tev += toff if toff >= 0 else 60 + toff + + yr, mo, md, h, m, s, wd = localtime(tev)[:7] + toff = do_arg(mins, m) + tev += 60 * (toff if toff >= 0 else 60 + toff) + + yr, mo, md, h, m, s, wd = localtime(tev)[:7] + toff = do_arg(hrs, h) + tev += 3600 * (toff if toff >= 0 else 24 + toff) + + yr, mo, md, h, m, s, wd = localtime(tev)[:7] + toff = do_arg(month, mo) + mo += toff + md = md if mo == init_mo else 1 + if toff < 0: + yr += 1 + tev = mktime((yr, mo, md, h, m, s, wd, 0)) + yr, mo, md, h, m, s, wd = localtime(tev)[:7] + if mday is not None: + if mo == init_mo: # Month has not rolled over or been changed + toff = do_arg(mday, md) # see if mday causes rollover + md += toff + if toff < 0: + toff = do_arg(month, mo + 1) # Get next valid month + mo += toff + 1 # Offset is relative to next month + if toff < 0: + yr += 1 + else: # Month has rolled over: day is absolute + md = do_arg(mday, 0) + + if wday is not None: + if mo == init_mo: + toff = do_arg(wday, wd) + md += toff % 7 # 
mktime handles md > 31 but month may increment + tev = mktime((yr, mo, md, h, m, s, wd, 0)) + cur_mo = mo + mo = localtime(tev)[1] # get month + if mo != cur_mo: + toff = do_arg(month, mo) # Get next valid month + mo += toff # Offset is relative to new, incremented month + if toff < 0: + yr += 1 + tev = mktime((yr, mo, 1, h, m, s, wd, 0)) # 1st of new month + yr, mo, md, h, m, s, wd = localtime(tev)[:7] # get day of week + toff = do_arg(wday, wd) + md += toff % 7 + else: + md = 1 if mday is None else md + tev = mktime((yr, mo, md, h, m, s, wd, 0)) # 1st of new month + yr, mo, md, h, m, s, wd = localtime(tev)[:7] # get day of week + md += (do_arg(wday, 0) - wd) % 7 + + return mktime((yr, mo, md, h, m, s, wd, 0)) - tnow + return inner diff --git a/v3/as_drivers/sched/crontest.py b/v3/as_drivers/sched/crontest.py new file mode 100644 index 0000000..ea91b2c --- /dev/null +++ b/v3/as_drivers/sched/crontest.py @@ -0,0 +1,106 @@ +# crontest.py Now works under Unix build + +# Copyright (c) 2020-2023 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +from time import time, ticks_diff, ticks_us, localtime, mktime +from sched.cron import cron +import sys + +maxruntime = 0 +fail = 0 + +# Args: +# ts Time of run in secs since epoch +# exp Expected absolute end time (yr, mo, md, h, m, s) +# msg Message describing test +# kwargs are args for cron +def test(ts, exp, msg, *, secs=0, mins=0, hrs=3, mday=None, month=None, wday=None): + global maxruntime, fail + texp = mktime(exp + (0, 0)) # Expected absolute end time + yr, mo, md, h, m, s, wd = localtime(texp)[:7] + print(f"Test: {msg}") + print(f"Expected endtime: {h:02d}:{m:02d}:{s:02d} on {md:02d}/{mo:02d}/{yr:02d}") + + cg = cron(secs=secs, mins=mins, hrs=hrs, mday=mday, month=month, wday=wday) + start = ticks_us() + t = cg(ts) # Wait duration returned by cron (secs) + delta = ticks_diff(ticks_us(), start) + maxruntime = max(maxruntime, delta) + yr, mo, md, h, m, s, wd = localtime(t + ts)[:7] # Get absolute 
time from cron + print(f"Endtime from cron: {h:02d}:{m:02d}:{s:02d} on {md:02d}/{mo:02d}/{yr:02d}") + if t == texp - ts: + print(f"PASS") + else: + print(f"FAIL [{t}]") + fail += 1 + print(f"Runtime = {delta}us\n") + + +now = mktime((2020, 7, 30, 3, 0, 0, 0, 0)) # 3am Thursday (day 3) 30 July 2020 + +exp = (2020, 7, 31, 1, 5, 0) # Expect 01:05:00 on 31/07/2020 +msg = "wday and time both cause 1 day increment." +test(now, exp, msg, wday=4, hrs=(1, 2), mins=5) + +exp = (2020, 7, 31, 1, 5, 0) # 01:05:00 on 31/07/2020 +msg = "time causes 1 day increment." +test(now, exp, msg, hrs=(1, 2), mins=5) + +exp = (2020, 7, 31, 3, 5, 0) # 03:05:00 on 31/07/2020 +msg = "wday causes 1 day increment." +test(now, exp, msg, wday=4, mins=5) + +exp = (2020, 7, 30, 5, 58, 0) # 05:58:00 on 30/07/2020 +msg = "time increment no day change." +test(now, exp, msg, hrs=(5, 23), mins=58) + +exp = (2021, 1, 3, 3, 0, 0) # 03:00:00 on 03/01/2021 +msg = "month and year rollover, 1st Sunday" +test(now, exp, msg, month=1, wday=6) + +exp = (2021, 2, 20, 3, 0, 0) # 03:00:00 on 20/02/2021 +msg = "month and year rollover, mday->20 Feb" +test(now, exp, msg, month=2, mday=20) + +exp = (2020, 12, 25, 1, 30, 0) # 01:30:00 on 25/12/2020 +msg = "Forward to Xmas day, hrs backwards" +test(now, exp, msg, month=12, mday=25, hrs=1, mins=30) + +exp = (2020, 12, 6, 23, 5, 15) # 23:05:15 on 06/12/2020 +msg = "1st Sunday in Dec 2020" +test(now, exp, msg, month=12, wday=6, hrs=23, mins=5, secs=15) + +exp = (2020, 10, 1, 3, 0, 0) # 03:00:00 on 01/10/2020 +msg = "Current time on 1st Oct 2020" +test(now, exp, msg, month=10) + +exp = (2020, 7, 30, 9, 0, 0) # 09:00:00 on 30/07/2020 +msg = "Explicitly specify current month" +test(now, exp, msg, month=7, hrs=9) + +exp = (2021, 2, 14, 3, 0, 0) # 03:00:00 on 14/02/2021 +msg = "Second Sunday in February 2021" +test(now, exp, msg, month=2, mday=8, wday=6) + +exp = (2021, 2, 28, 3, 0, 0) # 03:00:00 on 28/02/2021 +msg = "Fourth Sunday in February 2021" +test(now, exp, msg, month=2, 
mday=22, wday=6) # month end + +exp = (2020, 10, 1, 1, 59, 0) # 01:59:00 on 01/10/2020 +msg = "Time causes month rollover to next legal month" +test(now + 24 * 3600, exp, msg, month=(7, 10), hrs=1, mins=59) + +exp = (2021, 1, 1, 3, 0, 0) # 03:00:00 on 01/01/2021 +msg = "mday causes month rollover to next year" +test(now, exp, msg, month=(7, 1), mday=1) + +exp = (2021, 3, 3, 3, 0, 0) # 03:00:00 on 03/03/2021 +msg = "wday causes month rollover to next year" +test(now, exp, msg, month=(7, 3), wday=(2, 6)) + +print(f"Max runtime {maxruntime}us") +if fail: + print(fail, "FAILURES OCCURRED") +else: + print("ALL TESTS PASSED") diff --git a/v3/as_drivers/sched/package.json b/v3/as_drivers/sched/package.json new file mode 100644 index 0000000..c862aae --- /dev/null +++ b/v3/as_drivers/sched/package.json @@ -0,0 +1,13 @@ +{ + "urls": [ + ["sched/primitives/__init__.py", "github:peterhinch/micropython-async/v3/as_drivers/sched/primitives/__init__.py"], + ["sched/__init__.py", "github:peterhinch/micropython-async/v3/as_drivers/sched/__init__.py"], + ["sched/asynctest.py", "github:peterhinch/micropython-async/v3/as_drivers/sched/asynctest.py"], + ["sched/cron.py", "github:peterhinch/micropython-async/v3/as_drivers/sched/cron.py"], + ["sched/crontest.py", "github:peterhinch/micropython-async/v3/as_drivers/sched/crontest.py"], + ["sched/sched.py", "github:peterhinch/micropython-async/v3/as_drivers/sched/sched.py"], + ["sched/simulate.py", "github:peterhinch/micropython-async/v3/as_drivers/sched/simulate.py"], + ["sched/synctest.py", "github:peterhinch/micropython-async/v3/as_drivers/sched/synctest.py"] + ], + "version": "0.1" +} diff --git a/v3/as_drivers/sched/primitives/__init__.py b/v3/as_drivers/sched/primitives/__init__.py new file mode 100644 index 0000000..0a824d5 --- /dev/null +++ b/v3/as_drivers/sched/primitives/__init__.py @@ -0,0 +1,33 @@ +# __init__.py Common functions for uasyncio primitives + +# Copyright (c) 2018-2020 Peter Hinch +# Released under the MIT License 
(MIT) - see LICENSE file + +import asyncio + + +async def _g(): + pass + + +type_coro = type(_g()) + +# If a callback is passed, run it and return. +# If a coro is passed initiate it and return. +# coros are passed by name i.e. not using function call syntax. +def launch(func, tup_args): + res = func(*tup_args) + if isinstance(res, type_coro): + res = asyncio.create_task(res) + return res + + +def set_global_exception(): + def _handle_exception(loop, context): + import sys + + sys.print_exception(context["exception"]) + sys.exit() + + loop = asyncio.get_event_loop() + loop.set_exception_handler(_handle_exception) diff --git a/v3/as_drivers/sched/sched.py b/v3/as_drivers/sched/sched.py new file mode 100644 index 0000000..99a6651 --- /dev/null +++ b/v3/as_drivers/sched/sched.py @@ -0,0 +1,65 @@ +# sched.py + +# Copyright (c) 2020-2023 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +import asyncio +from sched.primitives import launch +from time import time, mktime, localtime +from sched.cron import cron + + +# uasyncio can't handle long delays so split into 1000s (1e6 ms) segments +_MAXT = const(1000) +# Wait prior to a sequence start: see +# https://github.com/peterhinch/micropython-async/blob/master/v3/docs/SCHEDULE.md#71-initialisation +_PAUSE = const(2) + + +class Sequence: # Enable asynchronous iterator interface + def __init__(self): + self._evt = asyncio.Event() + self._args = None + + def __aiter__(self): + return self + + async def __anext__(self): + await self._evt.wait() + self._evt.clear() + return self._args + + def trigger(self, args): + self._args = args + self._evt.set() + + +async def schedule(func, *args, times=None, **kwargs): + async def long_sleep(t): # Sleep with no bounds. Immediate return if t < 0. 
+ while t > 0: + await asyncio.sleep(min(t, _MAXT)) + t -= _MAXT + + tim = mktime(localtime()[:3] + (0, 0, 0, 0, 0)) # Midnight last night + now = round(time()) # round() is for Unix + fcron = cron(**kwargs) # Cron instance for search. + while tim < now: # Find first future trigger in sequence + # Defensive. fcron should never return 0, but if it did the loop would never quit + tim += max(fcron(tim), 1) + # Wait until just before the first future trigger + await long_sleep(tim - now - _PAUSE) # Time to wait (can be < 0) + + while times is None or times > 0: # Until all repeats are done (or forever). + tw = fcron(round(time())) # Time to wait (s) (fcron is stateless). + await long_sleep(tw) + res = None + if isinstance(func, asyncio.Event): + func.set() + elif isinstance(func, Sequence): + func.trigger(args) + else: + res = launch(func, args) + if times is not None: + times -= 1 + await asyncio.sleep_ms(1200) # ensure we're into next second + return res diff --git a/v3/as_drivers/sched/simulate.py b/v3/as_drivers/sched/simulate.py new file mode 100644 index 0000000..5bf4fb5 --- /dev/null +++ b/v3/as_drivers/sched/simulate.py @@ -0,0 +1,40 @@ +# simulate.py Adapt this to simulate scheduled sequences + +from time import localtime, mktime +from sched.cron import cron + +days = ("Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday") +tim = 0 # Global time in secs + +def print_time(msg=""): + yr, mo, md, h, m, s, wd = localtime(tim)[:7] + print(f"{msg} {h:02d}:{m:02d}:{s:02d} on {days[wd]} {md:02d}/{mo:02d}/{yr:02d}") + +def wait(cr): # Simulate waiting on a cron instance + global tim + tim += 2 # Must always wait >=2s before calling cron again + dt = cr(tim) + hrs, m_s = divmod(dt + 2, 3600) # For neat display add back the 2 secs + mins, secs = divmod(m_s, 60) + print(f"Wait {hrs}hrs {mins}mins {secs}s") + tim += dt + print_time("Time now:") + +def set_time(y, month, mday, hrs, mins, secs): + global tim + tim = mktime((y, month, mday, hrs, mins, 
secs, 0, 0)) + print_time("Start at:") + +# Adapt the following to emulate the proposed application. Cron args +# secs=0, mins=0, hrs=3, mday=None, month=None, wday=None + +def sim(*args): + set_time(*args) + cs = cron(hrs = 0, mins = 59) + wait(cs) + cn = cron(wday=(0, 5), hrs=(1, 10), mins = range(0, 60, 15)) + for _ in range(10): + wait(cn) + print("Run payload.\n") + +sim(2023, 3, 29, 15, 20, 0) # Start time: year, month, mday, hrs, mins, secs diff --git a/v3/as_drivers/sched/synctest.py b/v3/as_drivers/sched/synctest.py new file mode 100644 index 0000000..c4499b6 --- /dev/null +++ b/v3/as_drivers/sched/synctest.py @@ -0,0 +1,41 @@ +# synctest.py Demo of synchronous code scheduling tasks with cron + +# Copyright (c) 2020 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +from .cron import cron +from time import localtime, sleep, time + +def foo(txt): + yr, mo, md, h, m, s, wd = localtime()[:7] + fst = "{} {:02d}:{:02d}:{:02d} on {:02d}/{:02d}/{:02d}" + print(fst.format(txt, h, m, s, md, mo, yr)) + +def main(): + print('Synchronous test running...') + tasks = [] # Entries: cron, callback, args, one_shot + cron4 = cron(hrs=None, mins=range(0, 60, 4)) + tasks.append([cron4, foo, ('every 4 mins',), False, False]) + cron5 = cron(hrs=None, mins=range(0, 60, 5)) + tasks.append([cron5, foo, ('every 5 mins',), False, False]) + cron3 = cron(hrs=None, mins=range(0, 60, 3)) + tasks.append([cron3, foo, ('every 3 mins',), False, False]) + cron2 = cron(hrs=None, mins=range(0, 60, 2)) + tasks.append([cron2, foo, ('one shot',), True, False]) + to_run = [] + while True: + now = int(time()) # Ensure constant: get once per iteration. 
+ tasks.sort(key=lambda x:x[0](now)) + to_run.clear() # Pending tasks + deltat = tasks[0][0](now) # Time to pending task(s) + for task in (t for t in tasks if t[0](now) == deltat): # Tasks with same delta t + to_run.append(task) + task[4] = True # Has been scheduled + # Remove on-shot tasks which have been scheduled + tasks = [t for t in tasks if not (t[3] and t[4])] + sleep(deltat) + for tsk in to_run: + tsk[1](*tsk[2]) + sleep(1.2) # Ensure seconds have rolled over + +main() diff --git a/syncom_as/main.py b/v3/as_drivers/syncom/main.py similarity index 100% rename from syncom_as/main.py rename to v3/as_drivers/syncom/main.py diff --git a/syncom_as/sr_init.py b/v3/as_drivers/syncom/sr_init.py similarity index 75% rename from syncom_as/sr_init.py rename to v3/as_drivers/syncom/sr_init.py index 8953751..6abb601 100644 --- a/syncom_as/sr_init.py +++ b/v3/as_drivers/syncom/sr_init.py @@ -25,37 +25,38 @@ # Run on Pyboard from machine import Pin, Signal from pyb import LED -import uasyncio as asyncio +import asyncio from utime import ticks_ms, ticks_diff from syncom import SynCom, SynComError async def initiator_task(channel): while True: - so = ['test', 0, 0] - for x in range(4): # Test full duplex by sending 4 in succession + so = ["test", 0, 0] + for x in range(4): # Test full duplex by sending 4 in succession so[1] = x channel.send(so) await asyncio.sleep_ms(0) - while True: # Receive the four responses + while True: # Receive the four responses si = await channel.await_obj() # Deal with queue if si is None: - print('Timeout: restarting.') + print("Timeout: restarting.") return - print('initiator received', si) - if si[1] == 3: # received last one + print("initiator received", si) + if si[1] == 3: # received last one break - while True: # At 2 sec intervals send an object and get response + while True: # At 2 sec intervals send an object and get response await asyncio.sleep(2) - print('sending', so) + print("sending", so) channel.send(so) tim = ticks_ms() so = await 
channel.await_obj() # wait for response duration = ticks_diff(ticks_ms(), tim) if so is None: - print('Timeout: restarting.') + print("Timeout: restarting.") return - print('initiator received', so, 'timing', duration) + print("initiator received", so, "timing", duration) + async def heartbeat(): led = LED(1) @@ -63,13 +64,14 @@ async def heartbeat(): await asyncio.sleep_ms(500) led.toggle() + def test(): - dout = Pin(Pin.board.Y5, Pin.OUT_PP, value = 0) # Define pins - ckout = Pin(Pin.board.Y6, Pin.OUT_PP, value = 0) # Don't assert clock until data is set + dout = Pin(Pin.board.Y5, Pin.OUT_PP, value=0) # Define pins + ckout = Pin(Pin.board.Y6, Pin.OUT_PP, value=0) # Don't assert clock until data is set din = Pin(Pin.board.Y7, Pin.IN) ckin = Pin(Pin.board.Y8, Pin.IN) reset = Pin(Pin.board.Y4, Pin.OPEN_DRAIN) - sig_reset = Signal(reset, invert = True) + sig_reset = Signal(reset, invert=True) channel = SynCom(False, ckin, ckout, din, dout, sig_reset, 10000) @@ -83,4 +85,5 @@ def test(): finally: ckout.value(0) + test() diff --git a/syncom_as/sr_passive.py b/v3/as_drivers/syncom/sr_passive.py similarity index 84% rename from syncom_as/sr_passive.py rename to v3/as_drivers/syncom/sr_passive.py index 652d8b5..334cba8 100644 --- a/syncom_as/sr_passive.py +++ b/v3/as_drivers/syncom/sr_passive.py @@ -23,19 +23,21 @@ # THE SOFTWARE. 
# Run on ESP8266 -import uasyncio as asyncio +import asyncio from syncom import SynCom from machine import Pin, freq import gc + async def passive_task(chan): while True: obj = await chan.await_obj() - if obj is not None: # Ignore timeouts -# print('passive received: ', obj) - obj[2] += 1 # modify object and send it back + if obj is not None: # Ignore timeouts + # print('passive received: ', obj) + obj[2] += 1 # modify object and send it back chan.send(obj) + async def heartbeat(): led = Pin(2, Pin.OUT) while True: @@ -43,10 +45,11 @@ async def heartbeat(): led(not led()) gc.collect() + def test(): freq(160000000) - dout = Pin(14, Pin.OUT, value = 0) # Define pins - ckout = Pin(15, Pin.OUT, value = 0) # clocks must be initialised to zero. + dout = Pin(14, Pin.OUT, value=0) # Define pins + ckout = Pin(15, Pin.OUT, value=0) # clocks must be initialised to zero. din = Pin(13, Pin.IN) ckin = Pin(12, Pin.IN) @@ -61,4 +64,5 @@ def test(): finally: ckout(0) + test() diff --git a/syncom_as/syncom.py b/v3/as_drivers/syncom/syncom.py similarity index 66% rename from syncom_as/syncom.py rename to v3/as_drivers/syncom/syncom.py index 4ecb489..209095c 100644 --- a/syncom_as/syncom.py +++ b/v3/as_drivers/syncom/syncom.py @@ -4,7 +4,7 @@ # The MIT License (MIT) # -# Copyright (c) 2017 Peter Hinch +# Copyright (c) 2017-2021 Peter Hinch # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -31,53 +31,62 @@ # Mean throughput running test programs 8.8ms per char (800bps). 
from utime import ticks_diff, ticks_ms -import uasyncio as asyncio +import asyncio from micropython import const +import ujson _BITS_PER_CH = const(7) _BITS_SYN = const(8) -_SYN = const(0x9d) +_SYN = const(0x9D) _RX_BUFLEN = const(100) + class SynComError(Exception): pass -class SynCom(object): - def __init__(self, passive, ckin, ckout, din, dout, sig_reset=None, - timeout=0, string_mode=False, verbose=True): + +class SynCom: + def __init__( + self, + passive, + ckin, + ckout, + din, + dout, + pin_reset=None, + timeout=0, + string_mode=False, + verbose=True, + ): # Signal unsupported on rp2 self.passive = passive self.string_mode = string_mode - if not string_mode: - global pickle - import pickle - self._running = False # _run coro is down + self._running = False # _run coro is down self._synchronised = False self.verbose = verbose - self.idstr = 'passive' if self.passive else 'initiator' + self.idstr = "passive" if self.passive else "initiator" - self.ckin = ckin # Interface pins + self.ckin = ckin # Interface pins self.ckout = ckout self.din = din self.dout = dout - self.sig_reset = sig_reset + self.pin_reset = pin_reset - self._timeout = timeout # In ms. 0 == No timeout. - self.lsttx = [] # Queue of strings to send - self.lstrx = [] # Queue of received strings + self._timeout = timeout # In ms. 0 == No timeout. + self.lsttx = [] # Queue of strings to send + self.lstrx = [] # Queue of received strings -# Start interface and initiate an optional user task. If a timeout and reset -# signal are specified and the target times out, the target is reset and the -# interface restarted. If a user task is provided, this must return if a -# timeout occurs (i.e. not running() or await_obj returns None). -# If it returns for other (error) reasons, a timeout event is forced. + # Start interface and initiate an optional user task. If a timeout and reset + # signal are specified and the target times out, the target is reset and the + # interface restarted. 
If a user task is provided, this must return if a + # timeout occurs (i.e. not running() or await_obj returns None). + # If it returns for other (error) reasons, a timeout event is forced. async def start(self, user_task=None, awaitable=None): - loop = asyncio.get_event_loop() while True: - if not self._running: # Restarting - self.lstrx = [] # Clear down queues + if not self._running: # Restarting + self.lstrx = [] # Clear down queues self.lsttx = [] self._synchronised = False - loop.create_task(self._run()) # Reset target (if possible) + asyncio.create_task(self._run()) # Reset target (if possible) while not self._synchronised: # Wait for sync await asyncio.sleep_ms(100) if user_task is None: @@ -88,76 +97,72 @@ async def start(self, user_task=None, awaitable=None): # If it quit for other reasons force a t/o exception self.stop() await asyncio.sleep_ms(0) - if awaitable is not None: # User code may use an ExitGate - await awaitable # to ensure all coros have quit + if awaitable is not None: + await awaitable() # Optional user coro -# Can be used to force a failure + # Can be used to force a failure def stop(self): self._running = False self.dout(0) self.ckout(0) -# Queue an object for tx. Convert to string NOW: snapshot of current -# object state + # Queue an object for tx. Convert to string NOW: snapshot of current + # object state def send(self, obj): if self.string_mode: self.lsttx.append(obj) # strings are immutable else: - self.lsttx.append(pickle.dumps(obj)) + self.lsttx.append(ujson.dumps(obj)) -# Number of queued objects (None on timeout) + # Number of queued objects (None on timeout) def any(self): if self._running: return len(self.lstrx) -# Wait for an object. Return None on timeout. -# If in string mode returns a string (or None on t/o) + # Wait for an object. Return None on timeout. 
+ # If in string mode returns a string (or None on t/o) async def await_obj(self, t_ms=10): while self._running: await asyncio.sleep_ms(t_ms) if len(self.lstrx): return self.lstrx.pop(0) -# running() is False if the target has timed out. + # running() is False if the target has timed out. def running(self): return self._running -# Private methods - def _vbprint(self, *args): - if self.verbose: - print(*args) - + # Private methods async def _run(self): - self.indata = 0 # Current data bits + self.indata = 0 # Current data bits self.inbits = 0 self.odata = _SYN - self.phase = 0 # Interface initial conditions + self.phase = 0 # Interface initial conditions if self.passive: self.dout(0) self.ckout(0) else: self.dout(self.odata & 1) self.ckout(1) - self.odata >>= 1 # we've sent that bit + self.odata >>= 1 # we've sent that bit self.phase = 1 - if self.sig_reset is not None: - self._vbprint(self.idstr, ' resetting target...') - self.sig_reset.on() + if self.pin_reset is not None: + self.verbose and print(self.idstr, " resetting target...") + self.pin_reset(0) await asyncio.sleep_ms(100) - self.sig_reset.off() + self.pin_reset(1) await asyncio.sleep(1) # let target settle down - self._vbprint(self.idstr, ' awaiting sync...') + self.verbose and print(self.idstr, " awaiting sync...") try: - self._running = True # False on failure: can be cleared by other tasks + self._running = True # False on failure: can be cleared by other tasks while self.indata != _SYN: # Don't hog CPU while waiting for start await self._synchronise() self._synchronised = True - self._vbprint(self.idstr, ' synchronised.') + self.verbose and print(self.idstr, " synchronised.") - sendstr = '' # string for transmission - send_idx = None # character index. None: no current string - getstr = '' # receive string + sendstr = "" # string for transmission + send_idx = None # character index. 
None: no current string + getstr = "" # receive string rxbuf = bytearray(_RX_BUFLEN) rxidx = 0 while True: @@ -179,28 +184,28 @@ async def _run(self): await self._get_byte_active() if self.indata: # Optimisation: buffer reduces allocations. if rxidx >= _RX_BUFLEN: # Buffer full: append to string. - getstr = ''.join((getstr, bytes(rxbuf).decode())) + getstr = "".join((getstr, bytes(rxbuf).decode())) rxidx = 0 rxbuf[rxidx] = self.indata rxidx += 1 elif rxidx or len(getstr): # Got 0 but have data so string is complete. - # Append buffer. - getstr = ''.join((getstr, bytes(rxbuf[:rxidx]).decode())) + # Append buffer. + getstr = "".join((getstr, bytes(rxbuf[:rxidx]).decode())) if self.string_mode: self.lstrx.append(getstr) else: try: - self.lstrx.append(pickle.loads(getstr)) - except: # Pickle fail means target has crashed + self.lstrx.append(ujson.loads(getstr)) + except: # ujson fail means target has crashed raise SynComError - getstr = '' # Reset for next string + getstr = "" # Reset for next string rxidx = 0 except SynComError: if self._running: - self._vbprint('SynCom Timeout.') + self.verbose and print("SynCom Timeout.") else: - self._vbprint('SynCom was stopped.') + self.verbose and print("SynCom was stopped.") finally: self.stop() @@ -217,7 +222,7 @@ async def _get_byte_passive(self): inbits = await self._get_bit(inbits) self.inbits = inbits - async def _synchronise(self): # wait for clock + async def _synchronise(self): # wait for clock t = ticks_ms() while self.ckin() == self.phase ^ self.passive ^ 1: # Other tasks can clear self._running by calling stop() @@ -229,14 +234,14 @@ async def _synchronise(self): # wait for clock self.dout(odata & 1) self.odata = odata >> 1 self.phase ^= 1 - self.ckout(self.phase) # set clock + self.ckout(self.phase) # set clock async def _get_bit(self, dest): t = ticks_ms() while self.ckin() == self.phase ^ self.passive ^ 1: if (self._timeout and ticks_diff(ticks_ms(), t) > self._timeout) or not self._running: raise SynComError - 
yield # Faster than await asyncio.sleep_ms() + await asyncio.sleep_ms(0) dest = (dest | (self.din() << _BITS_PER_CH)) >> 1 obyte = self.odata self.dout(obyte & 1) diff --git a/v3/docs/DRIVERS.md b/v3/docs/DRIVERS.md new file mode 100644 index 0000000..dcaf218 --- /dev/null +++ b/v3/docs/DRIVERS.md @@ -0,0 +1,1444 @@ +This document describes classes designed to enhance the capability of +MicroPython's `asyncio` when used in a microcontroller context. + +# 0. Contents + + 1. [Introduction](./DRIVERS.md#1-introduction) + 1.1 [API Design](./DRIVERS.md#11-api-design) Callbacks vs. asynchronous interfaces. + 1.2 [Switches](./DRIVERS.md#12-switches) Electrical considerations. + 2. [Installation and usage](./DRIVERS.md#2-installation-and-usage) + 3. [Interfacing switches](./DRIVERS.md#3-interfacing-switches) + 3.1 [ESwitch class](./DRIVERS.md#31-eswitch-class) Switch debouncer with event interface. + 3.2 [Switch class](./DRIVERS.md#32-switch-class) Switch debouncer with callbacks. + 4. [Interfacing pushbuttons](./DRIVERS.md#4-interfacing-pushbuttons) Access short, long and double-click events. + 4.1 [EButton class](./DRIVERS.md#41-ebutton-class) Debounced pushbutton with Event-based interface. + 4.2 [Pushbutton class](./DRIVERS.md#42-pushbutton-class) Debounced pushbutton with callback interface. +      4.2.1 [The suppress constructor argument](./DRIVERS.md#421-the-suppress-constructor-argument) +      4.2.2 [The sense constructor argument](./DRIVERS.md#422-the-sense-constructor-argument) + 4.3 [ESP32Touch class](./DRIVERS.md#43-esp32touch-class) + 4.4 [Keyboard class](./DRIVERS.md#44-keyboard-class) Retrieve characters from a keypad. + 4.5 [SwArray class](./DRIVERS.md#45-swarray-class) Interface a crosspoint array of switches or buttons. + 4.6 [Suppress mode](./DRIVERS.md#46-suppress-mode) Reduce the number of events/callbacks. + 5. 
[ADC monitoring](./DRIVERS.md#5-adc-monitoring) Pause until an ADC goes out of bounds + 5.1 [AADC class](./DRIVERS.md#51-aadc-class) + 5.2 [Design note](./DRIVERS.md#52-design-note) + 6. [Quadrature encoders](./DRIVERS.md#6-quadrature-encoders) Asynchronous interface for rotary encoders. + 6.1 [Encoder class](./DRIVERS.md#61-encoder-class) + 7. [Ringbuf Queue](./DRIVERS.md#7-ringbuf-queue) A MicroPython optimised queue primitive. + 8. [Delay_ms class](./DRIVERS.md#8-delay_ms-class) A flexible retriggerable delay with callback or Event interface. + 9. [Message Broker](./DRIVERS.md#9-message-broker) A flexible means of messaging between tasks. + 9.1 [Further examples](./DRIVERS.md#91-further-examples) + 9.2 [User agents](./DRIVERS.md#92-user-agents) User defined Agent classes. + 9.3 [Wildcard subscriptions](./DRIVERS.md#93-wildcard-subscriptions) + 9.4 [Notes](./DRIVERS.md#9-notes) + 10. [Additional functions](./DRIVERS.md#10-additional-functions) + 10.1 [launch](./DRIVERS.md#101-launch) Run a coro or callback interchangeably. + 10.2 [set_global_exception](./DRIVERS.md#102-set_global_exception) Simplify debugging with a global exception handler. + +###### [asyncio Tutorial](./TUTORIAL.md#contents) + +# 1. Introduction + +The classes presented here include asynchronous interfaces to switches, +pushbuttons, incremental encoders and ADC's. Specifically they are interfaces to +devices defined in the `machine` module rather than device drivers for external +hardware: as such they are grouped with synchronisation primitives. There are +also synchronisation primitives providing a microcontroller-optimised alternative +to the existing CPython-compatible primitives. + +## 1.1 API design + +The traditional interface to asynchronous external events is via a callback. +When the event occurs, the device driver runs a user-specified callback. Some +classes described here offer a callback interface. 
Where callbacks are used the +term `callable` implies a Python `callable`: namely a function, bound method, +coroutine or bound coroutine. Any of these may be supplied as a callback +function. + + +Newer class designs abandon callbacks in favour of asynchronous interfaces. This +is done by exposing `Event` or asynchronous iterator interfaces. It is arguable +that callbacks are outdated. Handling of arguments and return values is +inelegant and there are usually better ways using asynchronous coding. In +particular MicroPython's `asyncio` implements asynchronous interfaces in an +efficient manner. A task waiting on an `Event` consumes minimal resources. If a +user wishes to use a callback it may readily be achieved using patterns like the +following. In this case the device is an asynchronous iterator: +```python + async def run_callback(device, callback, *args): + async for result in device: + callback(result, *args) +``` +or, where the device presents an `Event` interface: +```python +async def run_callback(device, callback, *args): + while True: + await device.wait() # Wait on the Event + device.clear() # Clear it down + callback(*args) +``` + +## 1.2 Switches + +From an electrical standpoint switches and pushbuttons are identical, however +from a programming perspective a switch is either open or closed, while a +pushbutton may be subject to single or double clicks, or to long presses. +Consequently switch drivers expose a simpler interface with a consequent saving +in code size. + +All switch drivers rely on millisecond-level timing: callback functions must +be designed to terminate rapidly. This applies to all functions in the +application; coroutines should yield regularly. If these constraints are not +met, switch events can be missed. + +All switches are prone to contact bounce, with a consequent risk of spurious +events: the drivers presented here embody debouncing. 
The phenomenon of contact +bounce is discussed in [this document](http://www.ganssle.com/debouncing.htm). + +Two ways of wiring switches are supported. For small numbers of switches, the +switch may link a pin to `gnd` with the pin being configured as an input with a +pull up resistor. Interfacing such a switch is simple: +```Python +import asyncio +from machine import Pin +from primitives import ESwitch +es = ESwitch(Pin(16, Pin.IN, Pin.PULL_UP)) + +async def closure(): + while True: + es.close.clear() # Clear the Event + await es.close.wait() # Wait for contact closure + print("Closed") # Run code + +asyncio.run(closure()) +``` + +As the number of switches increases, consumption of GPIO pins can be +problematic. A solution is to wire the switches as a crosspoint array with the +driver polling each row in turn and reading the columns. This is the usual configuration of keypads. + +![Image](./images/keypad.png) + +Crosspoint connection requires precautions to +cater for the case where multiple contacts are closed simultaneously, as this +can have the effect of linking two output pins. Risk of damage is averted by +defining the outputs as open drain. This allows for one key rollover: if a +second key is pressed before the first is released, the keys will be read +correctly. Invalid contact closures may be registered if more than two contacts +are closed. This also applies where the matrix comprises switches rather than +buttons. In this case diode isolation is required: + +![Image](./images/isolate.png) + +Whether or not diodes are used the column input pins must be pulled up. Scanning +of the array occurs rapidly, and built-in pull-up resistors have a high value. +If the capacitance between wires is high, spurious closures may be registered. +To prevent this it is wise to add physical resistors between the input pins and +3.3V. A value in the region of 1KΩ to 5KΩ is recommended. + +# 2. 
Installation and usage + +The latest release build of firmware or a newer preview build is recommended. +To install the library, connect the target hardware to WiFi and issue: +```python +import mip +mip.install("github:peterhinch/micropython-async/v3/primitives") +``` +For any target including non-networked ones use +[mpremote](https://docs.micropython.org/en/latest/reference/mpremote.html): +```bash +$ mpremote mip install "github:peterhinch/micropython-async/v3/primitives" +``` + +Drivers are imported with: +```python +from primitives import Switch, Pushbutton, AADC +``` +There is a test/demo program for the Switch and Pushbutton classes. On import +this lists available tests. It assumes a Pyboard with a switch or pushbutton +between X1 and Gnd. It is run as follows: +```python +from primitives.tests.switches import * +test_sw() # For example +``` +The test for the `AADC` class requires a Pyboard with pins X1 and X5 linked. It +is run as follows: +```python +from primitives.tests.adctest import test +test() +``` + +###### [Contents](./DRIVERS.md#0-contents) + +# 3. Interfacing switches + +The `primitives` module provides `ESwitch` and `Switch` classes. The former is a +minimal driver providing an `Event` interface. The latter supports callbacks and +`Event`s. + +## 3.1 ESwitch class + +```python +from primitives import ESwitch # events.py +``` +This provides a debounced interface to a switch connected to gnd or to 3V3. A +pullup or pull down resistor should be supplied to ensure a valid logic level +when the switch is open. The default constructor arg `lopen=1` is for a switch +connected between the pin and gnd, with a pullup to 3V3. Typically the pullup +is internal, the pin being as follows: +```python +from machine import Pin +pin_id = 0 # Depends on hardware +pin = Pin(pin_id, Pin.IN, Pin.PULL_UP) +``` +Constructor arguments: + + 1. `pin` The `Pin` instance: should be initialised as an input with a pullup or + down as appropriate. + 2. 
`lopen=1` Electrical level when switch is open circuit i.e. 1 is 3.3V, 0 is + gnd. + +Methods: + + 1. `__call__()` Call syntax e.g. `myswitch()` returns the logical debounced + state of the switch i.e. 0 if open, 1 if closed. + 2. `deinit()` No args. Cancels the polling task and clears bound `Event`s. + +Class variable: + 1. `debounce_ms=50` Debounce time in ms. + +Bound objects: + 1. `close` An `Event` instance. Set on contact closure. + 2. `open` An `Event` instance. Set on contact open. + +Application code is responsible for clearing the `Event` instances. +Usage example: +```python +import asyncio +from machine import Pin +from primitives import ESwitch +es = ESwitch(Pin("Y1", Pin.IN, Pin.PULL_UP)) + +async def closure(): + while True: + es.close.clear() + await es.close.wait() + print("Closed") + +async def open(): + while True: + es.open.clear() + await es.open.wait() + print("Open") + +async def main(): + asyncio.create_task(open()) + await closure() # Run forever + +asyncio.run(main()) +``` + +## 3.2 Switch class + +```python +from primitives import Switch # switch.py +``` +This can run callbacks or schedule coros on contact closure and/or opening. As +an alternative to a callback based interface, bound `Event` objects may be +triggered on switch state changes. + +This assumes a normally open switch connected between a pin and ground. The pin +should be initialised as an input with a pullup. A `callable` may be specified +to run on contact closure or opening; where the `callable` is a coroutine it +will be converted to a `Task` and will run asynchronously. Debouncing is +implicit: contact bounce will not cause spurious execution of the `callable`. + +Constructor argument (mandatory): + + 1. `pin` The initialised Pin instance. + +Methods: + + 1. `close_func(func, args=())` Args: `func` a `callable` to run on contact + closure, `args` a tuple of arguments for the `callable`. + 2. 
`open_func(func, args=())` Args: `func` a `callable` to run on contact open, + `args` a tuple of arguments for the `callable`. + 3. `__call__()` Call syntax e.g. `myswitch()` returns the physical debounced + state of the switch i.e. 0 if grounded, 1 if connected to `3V3`. + 4. `deinit()` No args. Cancels the running task. + +Class attribute: + 1. `debounce_ms=50` Debounce time in ms. + +```python +from pyb import LED +from machine import Pin +import asyncio +from primitives import Switch + +async def pulse(led, ms): + led.on() + await asyncio.sleep_ms(ms) + led.off() + +async def my_app(): + pin = Pin('X1', Pin.IN, Pin.PULL_UP) # Hardware: switch to gnd + red = LED(1) + sw = Switch(pin) + sw.close_func(pulse, (red, 1000)) # Note how coro and args are passed + await asyncio.sleep(60) # Dummy application code + +asyncio.run(my_app()) # Run main application code +``` + +#### Event interface + +This enables a task to wait on a switch state as represented by a bound `Event` +instance. A bound contact closure `Event` is created by passing `None` to +`.close_func`, in which case the `Event` is named `.close`. Likewise a `.open` +`Event` is created by passing `None` to `open_func`. + +###### [Contents](./DRIVERS.md#0-contents) + +# 4. Interfacing pushbuttons + +The `primitives` module provides the following classes for interfacing +pushbuttons. The following support normally open or normally closed buttons +connected to gnd or to 3V3: +* `EButton` Provides an `Event` based interface. +* `Pushbutton` Offers `Event`s and/or callbacks. +The following support normally open pushbuttons connected in a crosspoint array. +* `Keyboard` An asynchronous iterator responding to button presses. +* `SwArray` As above, but also supporting open, double and long events. +The latter can also support switches in a diode-isolated array. 
+ +## 4.1 EButton class + +```python +from primitives import EButton # events.py +``` + +This extends the functionality of `ESwitch` to provide additional events for +long and double presses. + +This can support normally open or normally closed switches, connected to `gnd` +(with a pullup) or to `3V3` (with a pull-down). The `Pin` object should be +initialised appropriately. The default state of the switch can be passed in the +optional "sense" parameter on the constructor, otherwise the assumption is that +on instantiation the button is not pressed. + +The EButton class uses logical rather than physical state: a button's state +is considered `True` if pressed, otherwise `False` regardless of its physical +implementation. + +Constructor arguments: + + 1. `pin` Mandatory. The initialised Pin instance. + 2. `suppress=False`. See [Suppress mode](./DRIVERS.md#46-suppress-mode). + 3. `sense=None`. Optionally define the electrical connection: see + [section 4.2.1](./DRIVERS.md#411-the-sense-constructor-argument). + +Methods: + + 1. `__call__()` Call syntax e.g. `mybutton()` Returns the logical debounced + state of the button (`True` corresponds to pressed). + 2. `rawstate()` Returns the logical instantaneous state of the button. There + is probably no reason to use this. + 3. `deinit()` No args. Cancels the running task and clears all events. + +Bound `Event`s: + + 1. `press` Set on button press. + 2. `release` Set on button release. + 3. `long` Set if button press is longer than `EButton.long_press_ms`. + 4. `double` Set if two button preses occur within `EButton.double_click_ms`. + +Application code is responsible for clearing any `Event`s that are used. + +Class attributes: + 1. `debounce_ms=50` Debounce time in ms. Default 50. + 2. `long_press_ms=1000` Threshold time in ms for a long press. + 3. `double_click_ms=400` Threshold time in ms for a double-click. 
+ +### 4.1.1 The sense constructor argument + +In most applications it can be assumed that, at power-up, pushbuttons are not +pressed. The default `None` value uses this assumption to read the pin state +and to assign the result to the `False` (not pressed) state at power up. This +works with normally open or normally closed buttons wired to either supply +rail; this without programmer intervention. + +In certain use cases this assumption does not hold, and `sense` must explicitly +be specified. This defines the logical state of the un-pressed button. Hence +`sense=0` defines a button connected in such a way that when it is not pressed, +the voltage on the pin is gnd. + +Whenever the pin value changes, the new value is compared with `sense` to +determine whether the button is closed or open. + +###### [Contents](./DRIVERS.md#0-contents) + +## 4.2 Pushbutton class + +```py +from primitives import Pushbutton # pushbutton.py +``` + +This can support normally open or normally closed switches, connected to `gnd` +(with a pullup) or to `3V3` (with a pull-down). The `Pin` object should be +initialised appropriately. The default state of the switch can be passed in the +optional "sense" parameter on the constructor, otherwise the assumption is that +on instantiation the button is not pressed. + +The Pushbutton class uses logical rather than physical state: a button's state +is considered `True` if pressed, otherwise `False` regardless of its physical +implementation. + +`callable` instances may be specified to run on button press, release, double +click or long press events; where the `callable` is a coroutine it will be +converted to a `Task` and will run asynchronously. + +Please see the note on timing in [section 3](./DRIVERS.md#3-interfacing-switches). + +Constructor arguments: + + 1. `pin` Mandatory. The initialised Pin instance. + 2. `suppress` Default `False`. See + [section 4.2.2](./DRIVERS.md#422-the-suppress-constructor-argument). + 3. `sense` Default `None`. 
Option to define electrical connection. See + [section 4.2.1](./DRIVERS.md#421-the-sense-constructor-argument). + +Methods: + + 1. `press_func(func=False, args=())` Args: `func` a `callable` to run on button + push, `args` a tuple of arguments for the `callable`. + 2. `release_func(func=False, args=())` Args: `func` a `callable` to run on + button release, `args` a tuple of arguments for the `callable`. + 3. `long_func(func=False, args=())` Args: `func` a `callable` to run on long + button push, `args` a tuple of arguments for the `callable`. + 4. `double_func(func=False, args=())` Args: `func` a `callable` to run on + double button push, `args` a tuple of arguments for the `callable`. + 5. `__call__()` Call syntax e.g. `mybutton()` Returns the logical debounced + state of the button (`True` corresponds to pressed). + 6. `rawstate()` Returns the logical instantaneous state of the button. There + is probably no reason to use this. + 7. `deinit()` No args. Cancels the running debounce task. + +Methods 1 - 4 may be called at any time. If `False` is passed for a callable, +any existing callback will be disabled. If `None` is passed, a bound `Event` is +created. See below for `Event` names. + +Class variables: + 1. `debounce_ms` Debounce time in ms. Default 50. + 2. `long_press_ms` Threshold time in ms for a long press. Default 1000. + 3. `double_click_ms` Threshold time in ms for a double-click. Default 400. + + If these variables are changed, it should be done prior to instantiating the + class. The double click time must be less than the long press time. 
+ +A simple Pyboard demo: +```python +from pyb import LED +from machine import Pin +import asyncio +from primitives import Pushbutton + +def toggle(led): + led.toggle() + +async def my_app(): + pin = Pin('X1', Pin.IN, Pin.PULL_UP) # Pushbutton to gnd + red = LED(1) + pb = Pushbutton(pin) + pb.press_func(toggle, (red,)) # Note how function and args are passed + await asyncio.sleep(60) # Dummy + +asyncio.run(my_app()) # Run main application code +``` + +### 4.2.1 The suppress constructor argument + +See [Suppress mode](./DRIVERS.md#46-suppress-mode) for the purpose of this arg. +Note: `suppress` affects the behaviour of the `release_func` only. Other +callbacks including `press_func` behave normally. If the `suppress = True` +constructor argument is set, the `release_func` will be launched as follows: + + * If `double_func` does not exist on rapid button release. + * If `double_func` exists, after the expiration of the double-click timer. + * If `long_func` exists and the press duration causes `long_func` to be + launched, `release_func` will not be launched. + * If `double_func` exists and a double-click occurs, `release_func` will not + be launched. + +In the typical case where `long_func` and `double_func` are both defined, this +ensures that only one of `long_func`, `double_func` and `release_func` run. In +the case of a single short press, the `release_func` will be delayed until the +expiry of the double-click timer (because until that time a second click might +occur). + +### 4.2.2 The sense constructor argument + +In most applications it can be assumed that, at power-up, pushbuttons are not +pressed. The default `None` value uses this assumption to assign the `False` +(not pressed) state at power up. It therefore works with normally open or +normally closed buttons wired to either supply rail. This without programmer +intervention. + +In certain use cases this assumption does not hold, and `sense` must explicitly +be specified. 
This defines the logical state at power-up regardless of whether, +at that time, the button is pressed. Hence `sense=0` defines a button connected +in such a way that when it is not pressed, the voltage on the pin is 0. + +When the pin value changes, the new value is compared with `sense` to determine +if the button is closed or open. This is to allow the designer to specify if +the `closed` state of the button is active `high` or active `low`. + +#### Event interface + +Event names, where `None` is passed to a method listed below, are as follows: +| method | Event | +|:-------------|:--------| +| press_func | press | +| release_func | release | +| long_func | long | +| double_func | double | + +###### [Contents](./DRIVERS.md#0-contents) + +## 4.3 ESP32Touch class + +```py +from primitives import ESP32Touch # pushbutton.py +``` + +This subclass of `Pushbutton` supports ESP32 touchpads providing a callback +based interface. See the +[official docs](http://docs.micropython.org/en/latest/esp32/quickref.html#capacitive-touch). + +API and usage are as per `Pushbutton` with the following provisos: + 1. The `sense` constructor arg is not supported. + 2. The `Pin` instance passed to the constructor must support the touch + interface. It is instantiated without args, as per the example below. + 3. There is an additional classmethod `threshold` which takes an integer arg. + The arg represents the detection threshold as a percentage. + +The driver determines the untouched state by periodically polling +`machine.TouchPad.read()` and storing its maximum value. If it reads a value +below `maximum * threshold / 100` a touch is deemed to have occurred. Default +threshold is currently 80% but this is subject to change. 
+ +Example usage: +```python +from machine import Pin +import asyncio +from primitives import ESP32Touch + +ESP32Touch.threshold(70) # optional + +async def main(): + tb = ESP32Touch(Pin(15), suppress=True) + tb.press_func(lambda : print("press")) + tb.double_func(lambda : print("double")) + tb.long_func(lambda : print("long")) + tb.release_func(lambda : print("release")) + while True: + await asyncio.sleep(1) + +asyncio.run(main()) +``` +If a touchpad is touched on initialisation no callbacks will occur even when +the pad is released. Initial button state is always `False`. Normal behaviour +will commence with subsequent touches. + +The best threshold value depends on physical design. Directly touching a large +pad will result in a low value from `machine.TouchPad.read()`. A small pad +covered with an insulating film will yield a smaller change. + +###### [Contents](./DRIVERS.md#0-contents) + +## 4.4 Keyboard class + +```python +from primitives import Keyboard # sw_array.py +``` +A `Keyboard` provides an interface to a set of pushbuttons arranged as a +crosspoint array. If a key is pressed its array index (scan code) is placed on a +queue. Keypresses are retrieved with `async for`. The driver operates by +polling each row, reading the response of each column. 1-key rollover is +supported - this is the case where a key is pressed before the prior key has +been released. + +Constructor mandatory args: + * `rowpins` A list or tuple of initialised open drain output pins. + * `colpins` A list or tuple of initialised input pins (pulled up). + +Constructor optional keyword only args: + * `bufsize=10)` Size of keyboard buffer. + * `db_delay=50` Debounce delay in ms. + + Methods: + * `deinit(self)` Cancels the running task. + * `__getitem__(self, scan_code)` Returns a `bool` being the instantaneous + debounced state of a given pin. Enables code that causes actions after a button + press, for example on release or auto-repeat while pressed. 
+ +The `Keyboard` class is subclassed from [Ringbuf Queue](./DRIVERS.md#7-ringbuf-queue) +enabling scan codes to be retrieved with an asynchronous iterator. +Example usage: +```python +import asyncio +from primitives import Keyboard +from machine import Pin +rowpins = [Pin(p, Pin.OPEN_DRAIN) for p in range(10, 14)] +colpins = [Pin(p, Pin.IN, Pin.PULL_UP) for p in range(16, 20)] + +async def main(): + kp = Keyboard(rowpins, colpins) + async for scan_code in kp: + print(scan_code) + if not scan_code: + break # Quit on key with code 0 + +asyncio.run(main()) +``` +In typical use the scan code would be used as the index into a string of +keyboard characters ordered to match the physical layout of the keys. If data +is not removed from the buffer, on overflow the oldest scan code is discarded. +There is no limit on the number of rows or columns however if more than 256 keys +are used, the `bufsize` arg would need to be adapted to handle scan codes > 255. +In this case an `array` or `list` object would be passed. + +Usage example. Keypresses on a numeric keypad are sent to a UART with auto +repeat. Optionally link GPIO0 and GPIO1 to view the result. 
+```python +import asyncio +from primitives import Keyboard +from machine import Pin, UART +cmap = b"123456789*0#" # Numeric keypad character map + +async def repeat(kpad, scan_code, uart): # Send at least one char + ch = cmap[scan_code : scan_code + 1] # Get character + uart.write(ch) + await asyncio.sleep_ms(400) # Longer initial delay + while kpad[scan_code]: # While key is pressed + uart.write(ch) + await asyncio.sleep_ms(150) # Faster repeat + +async def receiver(uart): + sreader = asyncio.StreamReader(uart) + while True: + res = await sreader.readexactly(1) + print('Received', res) + +async def main(): # Run forever + rowpins = [Pin(p, Pin.OPEN_DRAIN) for p in range(10, 13)] + colpins = [Pin(p, Pin.IN, Pin.PULL_UP) for p in range(16, 20)] + uart = UART(0, 9600, tx=0, rx=1) + asyncio.create_task(receiver(uart)) + kpad = Keyboard(rowpins, colpins) + async for scan_code in kpad: + rpt = asyncio.create_task(repeat(kpad, scan_code, uart)) + +asyncio.run(main()) +``` + +###### [Contents](./DRIVERS.md#0-contents) + +## 4.5 SwArray class + +```python +from primitives.sw_array import SwArray, CLOSE, OPEN, LONG, DOUBLE, SUPPRESS +``` +An `SwArray` is similar to a `Keyboard` except that single, double and long +presses are supported. Items in the array may be switches or pushbuttons, +however if switches are used they must be diode-isolated. For the reason see +[Switches](./DRIVERS.md#12-switches). It is an asynchronous iterator with events +being retrieved with `async for`: this returns a pair of integers being the scan +code and a bit representing the event which occurred. + +Constructor mandatory args: + * `rowpins` A list or tuple of initialised open drain output pins. + * `colpins` A list or tuple of initialised input pins (pulled up). + * `cfg` An integer defining conditions requiring a response. See Module + Constants below. + +Constructor optional keyword only args: + * `bufsize=10` Size of buffer. + + Methods: + * `deinit(self)` Cancels the running task. 
+ * `__getitem__(self, scan_code)` Returns a `bool` being the instantaneous + debounced state of a given pin. Enables code that causes actions after a button + press. For example after a press a pin might periodically be polled to achieve + auto-repeat until released. + + Synchronous bound method: + * `keymap()` Return an integer representing a bitmap of the debounced state of + all switches in the array. 1 == closed. + + Class variables: + * `debounce_ms = 50` Assumed maximum duration of contact bounce. + * `long_press_ms = 1000` Threshold for long press detection. + * `double_click_ms = 400` Threshold for double-click detection. + +Module constants. +The following constants are provided to simplify defining the `cfg` constructor +arg. This may be defined as a bitwise `or` of selected constants. For example if +the `CLOSE` bit is specified, switch closures will be reported. An omitted event +will be ignored. Where the array comprises switches it is usual to specify only +`CLOSE` and/or `OPEN`. This invokes a more efficient mode of operation because +timing is not required. + * `CLOSE` Report contact closure. + * `OPEN` Contact opening. + * `LONG` Contact closure longer than `long_press_ms`. + * `DOUBLE` Two closures in less than `double_click_ms`. + * `SUPPRESS` Disambiguate. For explanation see + [Suppress mode](./DRIVERS.md#46-suppress-mode). If all the above bits are set, + a double click will result in `DOUBLE` and `OPEN` responses. If the `OPEN` bit + were clear, only `DOUBLE` would occur. + +The `SwArray` class is subclassed from [Ringbuf Queue](./DRIVERS.md#7-ringbuf-queue). +This is an asynchronous iterator, enabling scan codes and event types to be +retrieved as state changes occur. The event type is a single bit corresponding +to the above constants. 
+ +Usage example: +```python +import asyncio +from primitives.sw_array import SwArray, CLOSE, OPEN, LONG, DOUBLE, SUPPRESS +from machine import Pin +rowpins = [Pin(p, Pin.OPEN_DRAIN) for p in range(10, 14)] +colpins = [Pin(p, Pin.IN, Pin.PULL_UP) for p in range(16, 20)] +cfg = CLOSE | OPEN #LONG | DOUBLE | SUPPRESS + +async def main(): + swa = SwArray(rowpins, colpins, cfg) + async for scan_code, evt in swa: + print(scan_code, evt) + if not scan_code: + break # Quit on key with code 0 + +asyncio.run(main()) +``` +###### [Contents](./DRIVERS.md#0-contents) + +## 4.6 Suppress mode + +The pushbutton drivers support a mode known as `suppress`. This option reduces +the number of events (or callbacks) that occur in the case of a double click. +Consider a button double-click. By default with `suppress=False` the following +events will occur in order: + + * `press` + * `release` + * `press` + * `release` + * `double` + +Similarly a long press will trigger `press`, `long` and `release` in that +order. Some applications may require only one event to be triggered. Setting +`suppress=True` ensures this. Outcomes are as follows: + +| Occurrence | Events set | Time of primary event | +|:-------------|:----------------|:-----------------------------| +| Short press | press, release | After `.double_click_ms` | +| Double press | double, release | When the second press occurs | +| Long press | long, release | After `long_press_ms` | + +The tradeoff is that the `press` and `release` events are delayed: the soonest +it is possible to detect the lack of a double click is `.double_click_ms`ms +after a short button press. Hence in the case of a short press when `suppress` +is `True`, `press` and `release` events are set on expiration of the double +click timer. + +The following script may be used to demonstrate the effect of `suppress`. As +written, it assumes a Pi Pico with a push button attached between GPIO 18 and +Gnd, with the primitives installed. 
+```python +from machine import Pin +import asyncio +from primitives import Pushbutton + +btn = Pin(18, Pin.IN, Pin.PULL_UP) # Adapt for your hardware + +async def main(): + pb = Pushbutton(btn, suppress=True) + pb.release_func(print, ("SHORT",)) + pb.double_func(print, ("DOUBLE",)) + pb.long_func(print, ("LONG",)) + await asyncio.sleep(60) # Run for one minute + +asyncio.run(main()) +``` +###### [Contents](./DRIVERS.md#0-contents) + +# 5. ADC monitoring + +The `primitives.aadc` module provides the `AADC` (asynchronous ADC) class. This +provides for coroutines which pause until the value returned by an ADC goes +outside predefined bounds. Bounds may be absolute or relative to the current +value. Data from ADC's is usually noisy. Relative bounds provide a simple (if +crude) means of eliminating this. Absolute bounds can be used to raise an alarm +or log data, if the value goes out of range. Typical usage: +```python +import asyncio +from machine import ADC +import pyb +from primitives import AADC + +aadc = AADC(ADC(pyb.Pin.board.X1)) +async def foo(): + while True: + value = await aadc(2000) # Trigger if value changes by 2000 + print(value) + +asyncio.run(foo()) +``` + +## 5.1 AADC class + +```py +from primitives import AADC # aadc.py +``` + +`AADC` instances are awaitable. This is the principal mode of use. + +Constructor argument: + * `adc` An instance of `machine.ADC`. + +Awaiting an instance: +Function call syntax is used with zero, one or two unsigned integer args. These +determine the bounds for the ADC value. + * No args: bounds are those set when the instance was last awaited. + * One integer arg: relative bounds are used. The current ADC value +- the arg. + * Two args `lower` and `upper`: absolute bounds. + +Synchronous methods: + * `read_u16` arg `last=False` Get the current data from the ADC. If `last` is + `True` returns the last data read from the ADC. Returns a 16-bit unsigned int + as per `machine.ADC.read_u16`. 
+ * `sense(normal)` By default a task awaiting an `AADC` instance will pause + until the value returned by the ADC exceeds the specified bounds. Issuing + `sense(False)` inverts this logic: a task will pause until the ADC value is + within the specified bounds. Issuing `sense(True)` restores normal operation. + +In the sample below the coroutine pauses until the ADC is in range, then pauses +until it goes out of range. + +```python +import asyncio +from machine import ADC +from primitives import AADC + +aadc = AADC(ADC('X1')) +async def foo(): + while True: + aadc.sense(normal=False) + value = await aadc(25_000, 39_000) # Wait until in range + print('In range:', value) + aadc.sense(normal=True) + value = await aadc() # Wait until out of range + print('Out of range:', value) + +asyncio.run(foo()) +``` +## 5.2 Design note + +The `AADC` class uses the `asyncio` stream I/O mechanism. This is not the most +obvious design. It was chosen because the plan for `asyncio` is that it will +include an option for prioritising I/O. I wanted this class to be able to use +this for applications requiring rapid response. + +###### [Contents](./DRIVERS.md#0-contents) + +# 6. Quadrature encoders + +The [Encoder](https://github.com/peterhinch/micropython-async/blob/master/v3/primitives/encoder.py) +class is an asynchronous driver for control knobs based on quadrature encoder +switches such as [this Adafruit product](https://www.adafruit.com/product/377). +The driver is not intended for applications such as CNC machines. Drivers for NC +machines must never miss an edge. Contact bounce or vibration induced jitter can +cause transitions to occur at a high rate; these must be tracked which +challenges software based solutions. + +Another issue affecting some solutions is that callbacks occur in an interrupt +context. This can lead to concurrency issues. 
These issues, along with general +discussion of MicroPython encoder drivers, are covered +[in this doc](https://github.com/peterhinch/micropython-samples/blob/master/encoders/ENCODERS.md). + +This driver runs the user supplied callback in an `asyncio` context, so that +the callback runs only when other tasks have yielded to the scheduler. This +ensures that the callback runs with the same rules as apply to any `asyncio` +task. This offers safety, even if the task triggers complex application +behaviour. + +The `Encoder` can be instantiated in such a way that its effective resolution +can be reduced. A virtual encoder with lower resolution can be useful in some +applications. In particular it can track the "clicks" of a mechanical detent. + +The driver allows limits to be assigned to the virtual encoder's value so that +a dial running from (say) 0 to 100 may be implemented. If limits are used, +encoder values no longer approximate absolute angles: the user might continue +to rotate the dial when its value is "stuck" at an endstop. + +The callback only runs if a change in position of the virtual encoder has +occurred. In consequence of the callback running in an `asyncio` context, by +the time it is scheduled, the encoder's position may have changed by more than +one increment. The callback receives two args, the absolute value of the +virtual encoder at the time it was triggered and the signed change in this +value since the previous time the callback ran. + +## 6.1 Encoder class + +```python +from primitives import Encoder # encoder.py +``` + +Existing users: the `delay` parameter is now a constructor arg rather than a +class variable. + +Constructor arguments: + 1. `pin_x` Initialised `machine.Pin` instances for the switch. Should be set + as `Pin.IN` and have pullups. + 2. `pin_y` Ditto. + 3. `v=0` Initial value. + 4. `div=1` A value > 1 causes the motion rate of the encoder to be divided + down, to produce a virtual encoder with lower resolution. 
This can enable + tracking of mechanical detents - typical values are then 4 or 2 pulses per + click. + 5. `vmin=None` By default the `value` of the encoder can vary without limit. + Optionally maximum and/or minimum limits can be set. + 6. `vmax=None` As above. If `vmin` and/or `vmax` are specified, a `ValueError` + will be thrown if the initial value `v` does not conform with the limits. + 7. `mod=None` An integer `N > 0` causes the divided value to be reduced modulo + `N` - useful for controlling rotary devices. + 8. `callback=lambda a, b : None` Optional callback function. The callback + receives two integer args, `v` being the virtual encoder's current value and + `delta` being the signed difference between the current value and the previous + one. Further args may be appended by the following. + 9. `args=()` An optional tuple of positional args for the callback. + 10. `delay=100` After motion is detected the driver waits for `delay` ms before + reading the current position. A delay limits the rate at which the callback is + invoked and improves debouncing. This is a minimal approach. See + [this script](https://github.com/peterhinch/micropython-async/blob/master/v3/primitives/tests/encoder_stop.py) + for a way to create a callback which runs only when the encoder stops moving. + + Synchronous method: + * `value` No args. Returns an integer being the virtual encoder's current + value. + +Not all combinations of arguments make mathematical sense. The order in which +operations are applied is: + 1. Apply division if specified. + 2. Restrict the divided value by any maximum or minimum. + 3. Reduce modulo N if specified. + +An `Encoder` instance is an asynchronous iterator. 
This enables it to be used +as follows, with successive values being retrieved with `async for`: +```python +from machine import Pin +import asyncio +from primitives import Encoder + +async def main(): + px = Pin(16, Pin.IN, Pin.PULL_UP) # Change to match hardware + py = Pin(17, Pin.IN, Pin.PULL_UP) + enc = Encoder(px, py, div=4) # div matches mechanical detents + async for value in enc: + print(f"Value = {value}") + +try: + asyncio.run(main()) +finally: + asyncio.new_event_loop() +``` +See [this doc](https://github.com/peterhinch/micropython-samples/blob/master/encoders/ENCODERS.md) +for further information on encoders and their limitations. + +###### [Contents](./DRIVERS.md#0-contents) + +# 7. Ringbuf Queue + +```python +from primitives import RingbufQueue # ringbuf_queue.py +``` + +The API of the `Queue` aims for CPython compatibility. This is at some cost to +efficiency. As the name suggests, the `RingbufQueue` class uses a pre-allocated +circular buffer which may be of any mutable type supporting the buffer protocol +e.g. `list`, `array` or `bytearray`. + +It should be noted that `Queue`, `RingbufQueue` (and CPython's `Queue`) are not +thread safe. See [Threading](./THREADING.md). + +Attributes of `RingbufQueue`: + 1. It is of fixed size, `Queue` can grow to arbitrary size. + 2. It uses pre-allocated buffers of various types (`Queue` uses a `list`). + 3. It is an asynchronous iterator allowing retrieval with `async for`. + 4. It has an "overwrite oldest data" synchronous write mode. + +Constructor mandatory arg: + * `buf` Buffer for the queue, e.g. list, bytearray or array. If an integer is + passed, a list of this size is created. A buffer of size `N` can hold a + maximum of `N-1` items. Note that, where items on the queue are suitably + limited, bytearrays or arrays are more efficient than lists. + +Synchronous methods (immediate return): + * `qsize` No arg. Returns the number of items in the queue. + * `empty` No arg. Returns `True` if the queue is empty. 
+ * `full` No arg. Returns `True` if the queue is full. + * `get_nowait` No arg. Returns an object from the queue. Raises `IndexError` + if the queue is empty. + * `put_nowait` Arg: the object to put on the queue. Raises `IndexError` if the + queue is full. If the calling code ignores the exception the oldest item in + the queue will be overwritten. In some applications this can be of use. + * `peek` No arg. Returns oldest entry without removing it from the queue. This + is a superset of the CPython compatible methods. + +Asynchronous methods: + * `put` Arg: the object to put on the queue. If the queue is full, it will + block until space is available. + * `get` Return an object from the queue. If empty, block until an item is + available. + +Retrieving items from the queue: + +The `RingbufQueue` is an asynchronous iterator. Results are retrieved using +`async for`: +```python +async def handle_queued_data(q): + async for obj in q: + await asyncio.sleep(0) # See below + # Process obj +``` +The `sleep` is necessary if you have multiple tasks waiting on the queue, +otherwise one task hogs all the data. + +The following illustrates putting items onto a `RingbufQueue` where the queue is +not allowed to stall: where it becomes full, new items overwrite the oldest ones +in the queue: +```python +def add_item(q, data): + try: + q.put_nowait(data) + except IndexError: + pass +``` +###### [Contents](./DRIVERS.md#0-contents) + +# 8. Delay_ms class + +```python +from primitives import Delay_ms # delay_ms.py +``` +This implements the software equivalent of a retriggerable monostable or a +watchdog timer. It has an internal boolean `running` state. When instantiated +the `Delay_ms` instance does nothing, with `running` `False` until triggered. +Then `running` becomes `True` and a timer is initiated. This can be prevented +from timing out by triggering it again (with a new timeout duration). 
So long +as it is triggered before the time specified in the preceding trigger it will +never time out. + +If it does time out the `running` state will revert to `False`. This can be +interrogated by the object's `running()` method. In addition a `callable` can +be specified to the constructor. A `callable` can be a callback or a coroutine. +A callback will execute when a timeout occurs; where the `callable` is a +coroutine it will be converted to a `Task` and run asynchronously. + +Constructor arguments (defaults in brackets): + + 1. `func` The `callable` to call on timeout (default `None`). + 2. `args` A tuple of arguments for the `callable` (default `()`). + 3. `can_alloc` Unused arg, retained to avoid breaking code. + 4. `duration` Integer, default 1000 ms. The default timer period where no value + is passed to the `trigger` method. + +Synchronous methods: + + 1. `trigger` optional argument `duration=0`. A timeout will occur after + `duration` ms unless retriggered. If no arg is passed the period will be that + of the `duration` passed to the constructor. The method can be called from a + hard or soft ISR. It is now valid for `duration` to be less than the current + time outstanding. + 2. `stop` No argument. Cancels the timeout, setting the `running` status + `False`. The timer can be restarted by issuing `trigger` again. Also clears + the `Event` described in `wait` below. + 3. `running` No argument. Returns the running status of the object. + 4. `__call__` Alias for running. + 5. `rvalue` No argument. If a timeout has occurred and a callback has run, + returns the return value of the callback. If a coroutine was passed, returns + the `Task` instance. This allows the `Task` to be cancelled or awaited. + 6. `callback` args `func=None`, `args=()`. Allows the callable and its args to + be assigned, reassigned or disabled at run time. + 7. `deinit` No args. Cancels the running task. 
To avoid a memory leak this + should be called before allowing a `Delay_ms` object to go out of scope. See + [Object scope](./TUTORIAL.md#44-object-scope). + 8. `clear` No args. Clears the `Event` described in `wait` below. + 9. `set` No args. Sets the `Event` described in `wait` below. + +Asynchronous method: + 1. `wait` One or more tasks may wait on a `Delay_ms` instance. Pause until the + delay instance has timed out. + +In this example a `Delay_ms` instance is created with the default duration of +1 sec. It is repeatedly triggered for 5 secs, preventing the callback from +running. One second after the triggering ceases, the callback runs. + +```python +import asyncio +from primitives import Delay_ms + +async def my_app(): + d = Delay_ms(callback, ('Callback running',)) + print('Holding off callback') + for _ in range(10): # Hold off for 5 secs + await asyncio.sleep_ms(500) + d.trigger() + print('Callback will run in 1s') + await asyncio.sleep(2) + print('Done') + +def callback(v): + print(v) + +try: + asyncio.run(my_app()) +finally: + asyncio.new_event_loop() # Clear retained state +``` +This example illustrates multiple tasks waiting on a `Delay_ms`. No callback is +used. +```python +import asyncio +from primitives import Delay_ms + +async def foo(n, d): + await d.wait() + d.clear() # Task waiting on the Event must clear it + print('Done in foo no.', n) + +async def my_app(): + d = Delay_ms() + tasks = [None] * 4 # For CPython compatibility must store a reference see Note + for n in range(4): + tasks[n] = asyncio.create_task(foo(n, d)) + d.trigger(3000) + print('Waiting on d') + await d.wait() + print('Done in my_app.') + await asyncio.sleep(1) + print('Test complete.') + +try: + asyncio.run(my_app()) +finally: + _ = asyncio.new_event_loop() # Clear retained state +``` +###### [Contents](./DRIVERS.md#0-contents) + +# 9. 
Message Broker + +```python +from primitives import Broker, broker # broker.py +``` +The `Broker` class provides a flexible means of messaging between running tasks. +It uses a publish-subscribe model (akin to MQTT) whereby the transmitting task +publishes to a topic. Objects subscribed to that topic will receive the message. +This enables one to one, one to many, many to one or many to many messaging. + +A task subscribes to a topic via an `agent`: this term describes a set of Python +types which may be used in this role. An `agent` is stored by the broker. When +the broker publishes a message, every `agent` subscribed to the message topic +will be triggered. In the simplest case the `agent` is a `Queue` instance: the +broker puts the topic and message onto the subscriber's queue for retrieval. + +More advanced agents can perform actions in response to a message, such as +calling a function, launching a `task` or lighting an LED. + +Agents may be subscribed and unsubscribed dynamically. The publishing task has +no "knowledge" of the number or type of agents subscribed to a topic. The module +is not threadsafe: `Broker` methods should not be called from a hard ISR or from +another thread. + +A `Broker` instance `broker` is provided. Where multiple modules issue +```python +from primitives import broker +``` +all will see the same instance, facilitating message passing between modules. + +#### Broker methods + +All are synchronous. +* Constructor This has no args. +* `subscribe(topic, agent, *args)` Passed `agent` will be triggered by messages +with a matching `topic`. Any additional args will be passed to the `agent` when +it is triggered. +* `unsubscribe(topic, agent, *args)` The `agent` will stop being triggered. If +args were passed on subscription, the same args must be passed. +* `publish(topic, message=None)` All `agent` instances subscribed to `topic` +will be triggered, receiving `topic` and `message` plus any further args that +were passed to `subscribe`. 
+ +The `topic` arg is typically a string but may be any hashable object. A +`message` is an arbitrary Python object. Where string topics are used, wildcard +subscriptions are possible. + +#### Broker class variable + +* `Verbose=True` Enables printing of debug messages. + +#### Agent types + +An `agent` may be an instance of any of the following types. Args refers to any +arguments passed to the `agent` on subscription. + +* `RingbufQueue` Received messages are queued as a 2-tuple `(topic, message)` +assuming no subscription args - otherwise `(topic, message, (args...))`. +* `Queue` Received messages are queued as described above. +* `function` Called when a message is received. Args: `topic`, `message` plus any +further subscription args. +* `bound method` Called when a message is received. Args: `topic`, `message` +plus any further args. +* `coroutine` Converted to a `task` when a message is received. Args: `topic`, +`message` plus any further subscription args. +* `bound coroutine` Converted to a `task` when a message is received. Args: `topic`, +`message` plus any further subscription args. +* `Event` Set when a message is received. +* `user_agent` Instance of a user class. See user agents below. + +Note that synchronous `agent` instances must run to completion quickly otherwise +the `publish` method will be slowed. See [Notes](./DRIVERS.md#93-notes) for +further details on queue behaviour. 
+ +#### example +```py +import asyncio +from primitives import broker, RingbufQueue + +async def sender(t): + for x in range(t): + await asyncio.sleep(1) + broker.publish("foo_topic", f"test {x}") + +async def receiver(): + queue = RingbufQueue(20) + broker.subscribe("foo_topic", queue) + async for topic, message in queue: + print(topic, message) + +async def main(): + rx = asyncio.create_task(receiver()) + await sender(10) + await asyncio.sleep(2) + rx.cancel() + +asyncio.run(main()) +``` +## 9.1 Further examples + +An interesting application is to extend MQTT into the Python code +(see [mqtt_as](https://github.com/peterhinch/micropython-mqtt/tree/master)). +This is as simple as: +```py +async def messages(client): + async for topic, msg, retained in client.queue: + broker.publish(topic.decode(), msg.decode()) +``` +Assuming the MQTT client is subscribed to multiple topics, message strings are +directed to agents, each dedicated to handling a topic. An `agent` might operate +an interface or queue the message for a running task. + +The following illustrates a use case for passing args to an `agent` (pin nos. +are for Pyboard 1.1). +```py +import asyncio +from primitives import broker +from machine import Pin +red = Pin("A13", Pin.OUT, value=0) # Pin nos. 
for Pyboard V1.1 +green = Pin("A14", Pin.OUT, value=0) + +async def flash(): + broker.publish("led", 1) + await asyncio.sleep(1) + broker.publish("led", 0) + +def recv(topic, message, led): + led(message) # Light or extinguish an LED + +async def main(): + broker.subscribe("led", recv, red) + broker.subscribe("led", recv, green) + for _ in range(10): + await flash() + await asyncio.sleep(1) + broker.unsubscribe("led", recv, green) # Arg(s) must be passed + for _ in range(3): + await flash() + await asyncio.sleep(1) + +asyncio.run(main()) +``` +A task can wait on multiple topics using a `RingbufQueue`: +```python +import asyncio +from primitives import broker, RingbufQueue + + +async def receiver(): + q = RingbufQueue(10) + broker.subscribe("foo_topic", q) + broker.subscribe("bar_topic", q) + async for topic, message in q: + print(f"Received Topic: {topic} Message: {message}") + + +async def sender(t): + for x in range(t): + await asyncio.sleep(1) + broker.publish("foo_topic", f"test {x}") + broker.publish("bar_topic", f"test {x}") + broker.publish("ignore me", f"test {x}") + + +async def main(): + rx = asyncio.create_task(receiver()) + await sender(10) + await asyncio.sleep(2) + rx.cancel() + + +asyncio.run(main()) +``` +Here the `receiver` task waits on two topics. The asynchronous iterator returns +messages as they are published. + +## 9.2 User agents + +An `agent` can be an instance of a user class. The class must be a subclass of +`Agent`, and it must support a synchronous `.put` method. Arguments are `topic` +and `message`, followed by any further args passed on subscription. The method +should run to completion quickly. + +```py +import asyncio +from primitives import broker, Agent + +class MyAgent(Agent): + def put(self, topic, message, arg): + print(f"User agent. 
Topic: {topic} Message: {message} Arg: {arg}") + +async def sender(t): + for x in range(t): + await asyncio.sleep(1) + broker.publish("foo_topic", f"test {x}") + +async def main(): + broker.subscribe("foo_topic", MyAgent(), 42) + await sender(10) + +asyncio.run(main()) +``` +## 9.3 Wildcard subscriptions + +In the case of publications whose topics are strings, a single call to +`.subscribe` can subscribe an `agent` to multiple topics. This is by wildcard +matching. By default exact matching is used, however this can be changed to use +regular expressions as in this code fragment: +```py +from primitives import Broker, RegExp +broker.subscribe(RegExp(".*_topic"), some_agent) +``` +In this case `some_agent` would be triggered by publications to `foo_topic` or +`bar_topic` because the string `".*_topic"` matches these by the rules of +regular expressions. + +## 9.4 Notes + +#### The publish/subscribe model + +As in the real world, publication carries no guarantee of readership. If at the +time of publication there are no tasks with subscribed `agent` instances, the +message will silently be lost. + +#### agent arguments + +Arguments must be hashable objects. Mutable objects such as lists and +dictionaries are not permitted. If an object can be added to a `set` it is +valid. In general, interfaces such as `Pin` instances are OK. + +#### agent uniqueness + +An `agent` can be subscribed to multiple `topic`s. An `agent` may be subscribed +to a `topic` multiple times only if each instance has different arguments. + +#### queues + +If a message causes a queue to fill, a message will silently be lost. It is the +responsibility of the subscriber to avoid this. In the case of a `Queue` +instance the lost message is the one causing the overflow. In the case of +`RingbufQueue` the oldest message in the queue is discarded. In some +applications this behaviour is preferable. 
In general `RingbufQueue` is +preferred as it is optimised for microcontroller use and supports retrieval by +an asynchronous iterator. + +If either queue type is subscribed with args, a publication will create a queue +entry that is a 3-tuple `(topic, message, (args...))`. There is no obvious use +case for this. + +#### exceptions + +An `agent` instance is owned by a subscribing task but is executed by a +publishing task. If a function used as an `agent` throws an exception, the +traceback will point to a `Broker.publish` call. + +The `Broker` class throws a `ValueError` if `.subscribe` is called with an +invalid `agent` type. There are a number of non-fatal conditions which can occur +such as a queue overflow or an attempt to unsubscribe an `agent` twice. The +`Broker` will report these if `Broker.Verbose=True`. + +###### [Contents](./DRIVERS.md#0-contents) + +# 10. Additional functions + +## 10.1 Launch + +Import as follows: +```python +from primitives import launch +``` +`launch` enables a function to accept a coro or a callback interchangeably. It +accepts the callable plus a tuple of args. If a callback is passed, `launch` +runs it and returns the callback's return value. If a coro is passed, it is +converted to a `task` and run asynchronously. The return value is the `task` +instance. A usage example is in `primitives/switch.py`. + +## 10.2 set_global_exception + +Import as follows: +```python +from primitives import set_global_exception +``` +`set_global_exception` is a convenience function to enable a global exception +handler to simplify debugging. The function takes no args. It is called as +follows: + +```python +import asyncio +from primitives import set_global_exception + +async def main(): + set_global_exception() + # Main body of application code omitted + +try: + asyncio.run(main()) +finally: + asyncio.new_event_loop() # Clear retained state +``` +This is explained in the tutorial. 
In essence if an exception occurs in a task, +the default behaviour is for the task to stop but for the rest of the code to +continue to run. This means that the failure can be missed and the sequence of +events can be hard to deduce. A global handler ensures that the entire +application stops allowing the traceback and other debug prints to be studied. + +###### [Contents](./DRIVERS.md#0-contents) diff --git a/v3/docs/EVENTS.md b/v3/docs/EVENTS.md new file mode 100644 index 0000000..7776fab --- /dev/null +++ b/v3/docs/EVENTS.md @@ -0,0 +1,484 @@ +# Synopsis + +Using `Event` instances rather than callbacks in `asyncio` device drivers can +simplify their design and standardise their APIs. It can also simplify +application logic. + +This document assumes familiarity with `asyncio`. See [official docs](http://docs.micropython.org/en/latest/library/asyncio.html) and +[unofficial tutorial](https://github.com/peterhinch/micropython-async/blob/master/v3/docs/TUTORIAL.md). + +# 0. Contents + + 1. [An alternative to callbacks in asyncio code](./EVENTS.md#1-an-alternative-to-callbacks-in-asyncio-code) + 2. [Rationale](./EVENTS.md#2-rationale) + 3. [Device driver design](./EVENTS.md#3-device-driver-design) + 4. [Primitives](./EVENTS.md#4-primitives) Facilitating Event-based application logic + 4.1 [WaitAny](./EVENTS.md#41-waitany) Wait on any of a group of event-like objects + 4.2 [WaitAll](./EVENTS.md#42-waitall) Wait on all of a group of event-like objects + 4.3 [Nesting](./EVENTS.md#43-nesting) + 5. [Event based programming](./EVENTS.md#5-event-based-programming) + 5.1 [Use of Delay_ms](./EVENTS.md#51-use-of-delay_ms) A retriggerable delay + 5.2 [Long and very long button press](./EVENTS.md#52-long-and-very-long-button-press) + 5.3 [Application example](./EVENTS.md#53-application-example) + 6. [ELO class](./EVENTS.md#6-elo-class) Convert a coroutine or task to an event-like object. + 7. 
[Drivers](./EVENTS.md#7-drivers) Minimal Event-based drivers + 7.1 [ESwitch](./EVENTS.md#71-eswitch) Debounced switch + 7.2 [EButton](./EVENTS.md#72-ebutton) Debounced pushbutton with double and long press events + +[Appendix 1 Polling](./EVENTS.md#100-appendix-1-polling) + +# 1. An alternative to callbacks in asyncio code + +Callbacks have two merits. They are familiar, and they enable an interface +which allows an asynchronous application to be accessed by synchronous code. +GUI frameworks such as [micro-gui][1m] form a classic example: the callback +interface may be accessed by synchronous or asynchronous code. + +For the programmer of asynchronous applications, callbacks are largely +unnecessary and their use can lead to bugs. + +The idiomatic way to write an asynchronous function that responds to external +events is one where the function pauses while waiting on the event: +```python +async def handle_messages(input_stream): + while True: + msg = await input_stream.readline() + await handle_data(msg) +``` +Callbacks are not a natural fit in this model. Viewing the declaration of a +synchronous function, it is not evident how the function gets called or in what +context the code runs. Is it an ISR? Is it called from another thread or core? +Or is it a callback running in a `asyncio` context? You cannot tell without +trawling the code. By contrast, a routine such as the above example is a self +contained process whose context and intended behaviour are evident. + +The following steps can facilitate the use of asynchronous functions: + 1. Design device drivers to expose one or more bound `Event` objects. + Alternatively design the driver interface to be that of an `Event`. + 2. Design program logic to operate on objects with an `Event` interface. + +The first simplifies the design of drivers and standardises their interface. +Users only need to know the names of the bound `Event` instances. 
By contrast
+there is no standard way to specify callbacks, to define the passing of
+callback arguments or to define how to retrieve their return values.
+
+There are other ways to define an API without callbacks, notably the stream
+mechanism and the use of asynchronous iterators with `async for`. This doc
+discusses the `Event` based approach which is ideal for sporadic occurrences
+such as responding to user input.
+
+###### [Contents](./EVENTS.md#0-contents)
+
+# 2. Rationale
+
+Consider a device driver `Sensor` which has a bound `Event` object `.ready`.
+An application might run a task of form:
+```python
+async def process_sensor():
+    while True:
+        await sensor.ready.wait()
+        sensor.ready.clear()
+        # Read and process sensor data
+```
+Note that the action taken might be to run a callback or to launch a task:
+```python
+async def process_sensor():
+    while True:
+        await sensor.ready.wait()
+        sensor.ready.clear()
+        result = callback(args)
+        asyncio.create_task(sensor_coro(args))
+```
+An `Event` interface allows callback-based code and makes straightforward the
+passing of arguments and retrieval of return values. However it also enables a
+programming style that largely eliminates callbacks. Note that all you need to
+know to access this driver interface is the name of the bound `Event`.
+
+This doc aims to demonstrate that the event based approach can simplify
+application logic by eliminating the need for callbacks.
+
+The design of `asyncio` V3 and its `Event` class enables this approach
+because:
+ 1. A task waiting on an `Event` is put on a queue where it consumes no CPU
+ cycles until the event is triggered.
+ 2. The design of `asyncio` can support large numbers of tasks (hundreds) on
+ a typical microcontroller. Proliferation of tasks is not a problem, especially
+ where they are small and spend most of the time paused waiting on queues.
+ +This contrasts with other schedulers (such as `asyncio` V2) where there was no +built-in `Event` class; typical `Event` implementations used +[polling](./EVENTS.md#100-appendix-1-polling) and were convenience objects +rather than performance solutions. + +The `Event` class `.clear` method provides additional flexibility relative to +callbacks: + 1. An `Event` can be cleared immediately after being set; if multiple tasks + are waiting on `.wait()`, all will resume running. + 2. Alternatively the `Event` may be cleared later. The timing of clearing the + `Event` determines its behaviour if, at the time when the `Event` is set, a + task with an `await event.wait()` statement has not yet reached it. If + execution reaches `.wait()` before the `Event` is cleared, it will not pause. + If the `Event` is cleared, it will pause until it is set again. + +###### [Contents](./EVENTS.md#0-contents) + +# 3. Device driver design + +This document introduces the idea of an event-like object (ELO). This is an +object which may be used in place of an `Event` in program code. An ELO must +expose a `.wait` asynchronous method which will pause until an event occurs. +Additionally it can include `.clear` and/or `.set`. A device driver may become +an ELO by implementing `.wait` or by subclassing `Event` or `ThreadSafeFlag`. +Alternatively a driver may expose one or more bound `Event` or ELO instances. 
+
+ELO examples are:
+
+| Object | wait | clear | set | comments |
+|:---------------------|:----:|:-----:|:---:|:------------------|
+| [Event][4m] | Y | Y | Y | |
+| [ThreadSafeFlag][3m] | Y | N | Y | Self-clearing |
+| [Message][7m] | Y | Y | Y | Subclass of above |
+| [Delay_ms][2m] | Y | Y | Y | Self-setting |
+| [WaitAll](./EVENTS.md#42-waitall) | Y | Y | N | See below |
+| [WaitAny](./EVENTS.md#41-waitany) | Y | Y | N | |
+| [ELO instances](./EVENTS.md#6-elo-class) | Y | N | N | |
+
+The `ELO` class converts coroutines or `Task` instances to event-like objects,
+allowing them to be included in the arguments of event based primitives.
+
+Drivers exposing `Event` instances include:
+
+ * [ESwitch](./EVENTS.md#71-eswitch) Micro debounced interface to a switch.
+ * [EButton](./EVENTS.md#72-ebutton) Micro debounced interface to a pushbutton.
+ * [Switch][5m] Similar but interfaces also expose callbacks.
+ * [Pushbutton][6m]
+
+###### [Contents](./EVENTS.md#0-contents)
+
+# 4. Primitives
+
+Applying `Events` to typical logic problems requires two new primitives:
+`WaitAny` and `WaitAll`. Each is an ELO. These primitives may be cancelled or
+subject to a timeout with `asyncio.wait_for()`, although judicious use of
+`Delay_ms` offers greater flexibility than `wait_for`.
+
+## 4.1 WaitAny
+
+The constructor takes an iterable of ELO's. Its `.wait` method pauses until the
+first of the ELO's is set; the method returns the object that triggered it,
+enabling the application code to determine the reason for its triggering.
+
+The last ELO to trigger a `WaitAny` instance may also be retrieved by issuing
+the instance's `.event()` method.
+```python
+from primitives import WaitAny
+async def foo(elo1, elo2):
+    evt = await WaitAny((elo1, elo2)).wait()
+    if evt is elo1:
+        # Handle elo1
+```
+`WaitAny` has a `clear` method which issues `.clear()` to all passed ELO's with
+a `.clear` method.
+
+## 4.2 WaitAll
+
+The constructor takes an iterable of ELO's.
Its `.wait` method pauses until all
+of the ELO's are set.
+
+`WaitAll` has a `clear` method which issues `.clear()` to all passed ELO's with
+a `.clear` method.
+
+## 4.3 Nesting
+
+The fact that these primitives are ELO's enables nesting:
+```Python
+await WaitAll((event1, event2, WaitAny((event3, event4)))).wait()
+```
+This will pause until `event1` and `event2` and either `event3` or `event4` have
+been set.
+
+###### [Contents](./EVENTS.md#0-contents)
+
+# 5. Event based programming
+
+## 5.1 Use of Delay_ms
+
+The [Delay_ms class](https://github.com/peterhinch/micropython-async/blob/master/v3/docs/TUTORIAL.md#38-delay_ms-class)
+is an ELO and can be used as an alternative to `asyncio.wait_for`: it has the
+advantage that it can be retriggered. It can also be stopped or its duration
+changed dynamically. In the following sample `task_a` waits on an `Event` but
+it also aborts if `task_b` stops running for any reason:
+```python
+from primitives import Delay_ms, WaitAny
+delay = Delay_ms(duration=1000)
+async def task_b():
+    while True:
+        delay.trigger() # Keep task_a alive
+        # do some work
+        await asyncio.sleep_ms(0)
+
+async def task_a(evt): # Called with an event to wait on
+    while True:
+        cause = await WaitAny((evt, delay)).wait()
+        if cause is delay: # task_b has ended
+            delay.clear() # Clear the Event
+            return # Abandon the task
+        # Event has occurred
+        evt.clear()
+        # Do some work
+        await asyncio.sleep_ms(0)
+```
+## 5.2 Long and very long button press
+
+A user had a need to distinguish short, fairly long, and very long presses of a
+pushbutton. There was no requirement to detect double clicks, so the minimal
+`ESwitch` driver was used.
+
+This solution does not attempt to disambiguate the press events: if a very long
+press occurs, the short press code will run, followed by the "fairly long"
+code, and then much later by the "very long" code.
Disambiguating implies first +waiting for button release and then determining which application code to run: +in the application this delay was unacceptable. +```python +async def main(): + btn = ESwitch(Pin('X17', Pin.IN, Pin.PULL_UP), lopen=0) + ntim = Delay_ms(duration = 1000) # Fairly long press + ltim = Delay_ms(duration = 8000) # Very long press + while True: + ltim.stop() # Stop any running timers and clear their event + ntim.stop() + await btn.close.wait() + btn.close.clear() + ntim.trigger() # Button pressed, start timers, await release + ltim.trigger() # Run any press code + ev = await WaitAny((btn.open, ntim)).wait() + if ev is btn.open: + # Run "short press" application code + else: # ev is ntim: Fairly long timer timed out + # Run "fairly long" application code + # then check for very long press + ev = await WaitAny((btn.open, ltim)).wait() + if ev is ltim: # Long timer timed out + # Run "very long" application code + # We have not cleared the .open Event, so if the switch is already open + # there will be no delay below. Otherwise we await realease. + # Must await release otherwise the event is cleared before release + # occurs, setting the release event before the next press event. + await btn.open.wait() + btn.open.clear() +``` +Disambiguated version. 
Wait for button release and decide what to do based on +which timers are still running: +```python +async def main(): + btn = ESwitch(Pin('X17', Pin.IN, Pin.PULL_UP), lopen=0) + ntim = Delay_ms(duration=1000) # Fairly long press + ltim = Delay_ms(duration=8000) # Very long press + while True: + ltim.stop() # Stop any running timers and clear their event + ntim.stop() + await btn.close.wait() + btn.close.clear() + ntim.trigger() # Button pressed, start timers, await release + ltim.trigger() # Run any press code + await btn.open.wait() + btn.open.clear() + # Button released: check for any running timers + if not ltim(): # Very long press timer timed out before button was released + # Run "Very long" code + elif not ntim(): + # Run "Fairly long" code + else: + # Both timers running: run "short press" code +``` + +###### [Contents](./EVENTS.md#0-contents) + +## 5.3 Application example + +A measuring instrument is started by pressing a button. The measurement +normally runs for five seconds. If the sensor does not detect anything, the +test runs until it does, however it is abandoned if nothing has been detected +after a minute. While running, extra button presses are ignored. During a +normal five second run, extra detections from the sensor are ignored. + +This can readily be coded using callbacks and synchronous or asynchronous code, +however the outcome is likely to have a fair amount of _ad hoc_ logic. + +This event based solution is arguably clearer to read: +```python +from primitives import EButton, WaitAll, Delay_ms +btn = EButton(args) # Has Events for press, release, double, long +bp = btn.press +sn = Sensor(args) # Assumed to have an Event interface. +tm = Delay_ms(duration=5_000) # Exposes .wait and .clear only. 
+
+events = (sn, tm)
+async def foo():
+    while True:
+        bp.clear() # Ignore prior button press
+        await bp.wait() # Button pressed
+        WaitAll(events).clear() # Ignore events that were set prior to this moment
+        tm.trigger() # Start 5 second timer
+        try:
+            await asyncio.wait_for(WaitAll(events).wait(), 60)
+        except asyncio.TimeoutError:
+            print("No reading from sensor")
+        else:
+            # Normal outcome, process readings
+```
+###### [Contents](./EVENTS.md#0-contents)
+
+# 6. ELO class
+
+This converts a task to an "event-like object", enabling tasks to be included in
+`WaitAll` and `WaitAny` arguments. An `ELO` instance is a wrapper for a `Task`
+instance and its lifetime is that of its `Task`. The constructor can take a
+coroutine or a task as its first argument; in the former case the coro is
+converted to a `Task`.
+
+#### Constructor args
+
+1. `coro` This may be a coroutine or a `Task` instance.
+2. `*args` Positional args for a coroutine (ignored if a `Task` is passed).
+3. `**kwargs` Keyword args for a coroutine (ignored if a `Task` is passed).
+
+If a coro is passed it is immediately converted to a `Task` and scheduled for
+execution.
+
+#### Asynchronous method
+
+1. `wait` Pauses until the `Task` is complete or is cancelled. In the latter
+case no exception is thrown.
+
+#### Synchronous method
+
+1. `__call__` Returns the instance's `Task`. If the instance's `Task` was
+cancelled the `CancelledError` exception is returned. The function call operator
+allows a running task to be accessed, e.g. for cancellation. It also enables return values to be
+retrieved.
+
+#### Usage example
+
+In most use cases an `ELO` instance is a throw-away object which allows a coro
+to participate in an event-based primitive:
+```python
+evt = asyncio.Event()
+async def my_coro(t):
+    await asyncio.sleep(t)
+
+async def foo(): # Pause until the event has been triggered and coro has completed
+    await WaitAll((evt, ELO(my_coro, 5))).wait() # Note argument passing
+```
+#### Retrieving results
+
+A task may return a result on completion. This may be accessed by awaiting the
+`ELO` instance's `Task`. A reference to the `Task` may be acquired with function
+call syntax. The following code fragment illustrates usage. It assumes that
+`task` has already been created, and that `my_coro` is a coroutine taking an
+integer arg. There is an `EButton` instance `ebutton` and execution pauses until
+tasks have run to completion and the button has been pressed.
+```python
+async def foo():
+    elos = (ELO(my_coro, 5), ELO(task))
+    events = (ebutton.press,)
+    await WaitAll(elos + events).wait()
+    for e in elos: # Retrieve results from each task
+        r = await e() # Works even though task has already completed
+        print(r)
+```
+This works because it is valid to `await` a task which has already completed.
+The `await` returns immediately with the result. If `WaitAny` were used an `ELO`
+instance might contain a running task. In this case the line
+```python
+r = await e()
+```
+would pause before returning the result.
+
+#### Cancellation
+
+The `Task` in `ELO` instance `elo` may be retrieved by issuing `elo()`. For
+example the following will subject an `ELO` instance to a timeout:
+```python
+async def elo_timeout(elo, t):
+    await asyncio.sleep(t)
+    elo().cancel() # Retrieve the Task and cancel it
+
+async def foo():
+    elo = ELO(my_coro, 5)
+    asyncio.create_task(elo_timeout(elo, 2))
+    await WaitAll((elo, ebutton.press)).wait() # Until button press and ELO either finished or timed out
+```
+If the `ELO` task is cancelled, `.wait` terminates; the exception is retained.
+Thus `WaitAll` or `WaitAny` behaves as if the task had terminated normally. A +subsequent call to `elo()` will return the exception. In an application +where the task might return a result or be cancelled, the following may be used: +```python +async def foo(): + elos = (ELO(my_coro, 5), ELO(task)) + events = (ebutton.press,) + await WaitAll(elos + events).wait() + for e in elos: # Check each task + t = e() + if isinstance(t, asyncio.CancelledError): + # Handle exception + else: # Retrieve results + r = await t # Works even though task has already completed + print(r) +``` + +###### [Contents](./EVENTS.md#0-contents) + +# 7. Drivers + +The following device drivers provide an `Event` based interface for switches and +pushbuttons. + +## 7.1 ESwitch + +This is now documented [here](./DRIVERS.md#31-eswitch-class). + +## 7.2 EButton + +This is now documented [here](./DRIVERS.md#41-ebutton-class). + +Documentation for `Keyboard`, `SwArray` and `RingbufQueue` has also moved to +[primtives](./DRIVERS.md). + +###### [Contents](./EVENTS.md#0-contents) + +# 100 Appendix 1 Polling + +The primitives or drivers referenced here do not use polling with the following +exceptions: + 1. Switch and pushbutton drivers. These poll the `Pin` instance for electrical + reasons described below. + 2. `ThreadSafeFlag` and subclass `Message`: these use the stream mechanism. + +Other drivers and primitives are designed such that paused tasks are waiting on +queues and are therefore using no CPU cycles. + +[This reference][1e] states that bouncing contacts can assume invalid logic +levels for a period. It is a reasonable assumption that `Pin.value()` always +returns 0 or 1: the drivers are designed to cope with any sequence of such +readings. By contrast, the behaviour of IRQ's under such conditions may be +abnormal. It would be hard to prove that IRQ's could never be missed, across +all platforms and input conditions. 
+ +Pin polling aims to use minimal resources, the main overhead being `asyncio`'s +task switching overhead: typically about 250 μs. The default polling interval +is 50 ms giving an overhead of ~0.5%. + + +[1m]: https://github.com/peterhinch/micropython-micro-gui +[2m]: https://github.com/peterhinch/micropython-async/blob/master/v3/docs/TUTORIAL.md#38-delay_ms-class + +[3m]: https://github.com/peterhinch/micropython-async/blob/master/v3/docs/TUTORIAL.md#36-threadsafeflag +[4m]: https://github.com/peterhinch/micropython-async/blob/master/v3/docs/TUTORIAL.md#32-event +[5m]: https://github.com/peterhinch/micropython-async/blob/master/v3/docs/DRIVERS.md#31-switch-class +[6m]: https://github.com/peterhinch/micropython-async/blob/master/v3/docs/DRIVERS.md#41-pushbutton-class +[7m]: https://github.com/peterhinch/micropython-async/blob/master/v3/docs/TUTORIAL.md#39-message + +[1r]: http://docs.micropython.org/en/latest/library/machine.UART.html#machine.UART.read +[2r]: https://github.com/micropython/micropython-lib/blob/ad9309b669cd4474bcd4bc0a67a630173222dbec/micropython/umqtt.simple/umqtt/simple.py + +[1e]: http://www.ganssle.com/debouncing.htm diff --git a/v3/docs/GPS.md b/v3/docs/GPS.md new file mode 100644 index 0000000..0ac84c6 --- /dev/null +++ b/v3/docs/GPS.md @@ -0,0 +1,1109 @@ +# An asynchronous GPS receiver + +This repository offers a suite of asynchronous device drivers for GPS devices +which communicate with the host via a UART. GPS [NMEA-0183] sentence parsing is +based on this excellent library [micropyGPS]. + +The code requires asyncio V3. Some modules can run under CPython: doing so +requires Python V3.8 or later. + +The main modules have been tested on Pyboards and RP2 (Pico and Pico W). Since +the interface is a standard UART it is expected that the modules will work on +other hosts. Some modules use GPS for precision timing: the accuracy of these +may be reduced on some platforms. 
+
+###### [Tutorial](./TUTORIAL.md#contents)
+###### [Main V3 README](../README.md)
+
+# 1. Contents
+
+ 1. [Contents](./GPS.md#1-contents)
+  1.1 [Driver characteristics](./GPS.md#11-driver-characteristics)
+  1.2 [Comparison with micropyGPS](./GPS.md#12-comparison-with-micropygps)
+  1.3 [Overview](./GPS.md#13-overview)
+ 2. [Installation](./GPS.md#2-installation)
+  2.1 [Wiring](./GPS.md#21-wiring)
+  2.2 [Library installation](./GPS.md#22-library-installation)
+  2.3 [Dependency](./GPS.md#23-dependency)
+ 3. [Basic Usage](./GPS.md#3-basic-usage)
+  3.1 [Demo programs](./GPS.md#31-demo-programs)
+ 4. [The AS_GPS Class read-only driver](./GPS.md#4-the-as_gps-class-read-only-driver) Base class: a general purpose driver.
+  4.1 [Constructor](./GPS.md#41-constructor)
+      4.1.1 [The fix callback](./GPS.md#411-the-fix-callback) Optional callback-based interface.
+  4.2 [Public Methods](./GPS.md#42-public-methods)
+      4.2.1 [Location](./GPS.md#421-location)
+      4.2.2 [Course](./GPS.md#422-course)
+      4.2.3 [Time and date](./GPS.md#423-time-and-date)
+  4.3 [Public coroutines](./GPS.md#43-public-coroutines)
+      4.3.1 [Data validity](./GPS.md#431-data-validity)
+      4.3.2 [Satellite data](./GPS.md#432-satellite-data)
+  4.4 [Public bound variables and properties](./GPS.md#44-public-bound-variables-and-properties)
+      4.4.1 [Position and course](./GPS.md#441-position-and-course)
+      4.4.2 [Statistics and status](./GPS.md#442-statistics-and-status)
+      4.4.3 [Date and time](./GPS.md#443-date-and-time)
+      4.4.4 [Satellite data](./GPS.md#444-satellite-data)
+  4.5 [Subclass hooks](./GPS.md#45-subclass-hooks)
+  4.6 [Public class variable](./GPS.md#46-public-class-variable)
+ 5. [The GPS class read-write driver](./GPS.md#5-the-gps-class-read-write-driver) Subclass supports changing GPS hardware modes.
+ 5.1 [Test script](./GPS.md#51-test-script) + 5.2 [Usage example](./GPS.md#52-usage-example) + 5.3 [The GPS class constructor](./GPS.md#53-the-gps-class-constructor) + 5.4 [Public coroutines](./GPS.md#54-public-coroutines) +      5.4.1 [Changing baudrate](./GPS.md#5-changing-baudrate) + 5.5 [Public bound variables](./GPS.md#55-public-bound-variables) + 5.6 [The parse method developer note](./GPS.md#56-the-parse-method-developer-note) + 6. [Using GPS for accurate timing](./GPS.md#6-using-gps-for-accurate-timing) + 6.1 [Test scripts](./GPS.md#61-test-scripts) + 6.2 [Usage example](./GPS.md#62-usage-example) + 6.3 [GPS_Timer and GPS_RWTimer classes](./GPS.md#63-gps_timer-and-gps_rwtimer-classes) + 6.4 [Public methods](./GPS.md#64-public-methods) + 6.5 [Public coroutines](./GPS.md#65-public-coroutines) + 6.6 [Absolute accuracy](./GPS.md#66-absolute-accuracy) + 6.7 [Demo program as_GPS_time.py](./GPS.md#67-demo-program-as_gps_time) + 7. [Supported sentences](./GPS.md#7-supported-sentences) + 8. [Developer notes](./GPS.md#8-developer-notes) For those wanting to modify the modules. + 8.1 [Subclassing](./GPS.md#81-subclassing) + 8.2 [Special test programs](./GPS.md#82-special-test-programs) + 9. [Notes on timing](./GPS.md#9-notes-on-timing) + 9.1 [Absolute accuracy](./GPS.md#91-absolute-accuracy) + 10. [Files](./GPS.md#10-files) List of files in the repo. + 10.1 [Basic files](./GPS.md#101-basic-files) + 10.2 [Files for read write operation](./GPS.md#102-files-for-read-write-operation) + 10.3 [Files for timing applications](./GPS.md#103-files-for-timing-applications) + 10.4 [Special test programs](./GPS.md#104-special-test-programs) + +## 1.1 Driver characteristics + + * Asynchronous: UART messaging is handled as a background task allowing the + application to perform other tasks such as handling user interaction. + * The read-only driver is suitable for resource constrained devices and will + work with most GPS devices using a UART for communication. 
+ * Can write `.kml` files for displaying journeys on Google Earth. + * The read-write driver enables altering the configuration of GPS devices + based on the popular MTK3329/MTK3339 chips. + * The above drivers are portable between [MicroPython] and Python 3.8 or above. + * Timing drivers for [MicroPython] only extend the capabilities of the + read-only and read-write drivers to provide accurate sub-ms GPS timing. On + STM-based hosts (e.g. the Pyboard) the RTC may be set from GPS and calibrated + to achieve timepiece-level accuracy. + * Drivers may be extended via subclassing, for example to support additional + sentence types. + +Testing was performed using a [Pyboard] with the Adafruit +[Ultimate GPS Breakout] board. Most GPS devices will work with the read-only +driver as they emit [NMEA-0183] sentences on startup. + +## 1.2 Comparison with micropyGPS + +[NMEA-0183] sentence parsing is based on [micropyGPS] but with significant +changes. + + * As asynchronous drivers they require `asyncio` on [MicroPython] or under + Python 3.8+. + * Sentence parsing is adapted for asynchronous use. + * Rollover of local time into the date value enables worldwide use. + * RAM allocation is cut by various techniques to lessen heap fragmentation. + This improves application reliability on RAM constrained devices. + * Some functionality is devolved to a utility module, reducing RAM usage where + these functions are unused. + * The read/write driver is a subclass of the read-only driver. + * Timing drivers are added offering time measurement with μs resolution and + high absolute accuracy. These are implemented by subclassing these drivers. + * Hooks are provided for user-designed subclassing, for example to parse + additional message types. + +## 1.3 Overview + +The `AS_GPS` object runs a coroutine which receives [NMEA-0183] sentences from +the UART and parses them as they arrive. Valid sentences cause local bound +variables to be updated. 
These can be accessed at any time with minimal latency +to access data such as position, altitude, course, speed, time and date. + +##### [Contents](./GPS.md#1-contents) + +# 2 Installation + +## 2.1 Wiring + +These notes are for the Adafruit Ultimate GPS Breakout. It may be run from 3.3V +or 5V. If running the Pyboard from USB, GPS Vin may be wired to Pyboard V+. If +the Pyboard is run from a voltage >5V the Pyboard 3V3 pin should be used. +Testing on Pico and Pico W used the 3.3V output to power the GPS module. + +| GPS | Pyboard | RP2 | Optional | Use case | +|:----|:-----------|:----|:--------:|:--------------------------------| +| Vin | V+ or 3V3 | 3V3 | | | +| Gnd | Gnd | Gnd | | | +| PPS | X3 | 2 | Y | Precision timing applications. | +| Tx | X2 (U4 rx) | 1 | | | +| Rx | X1 (U4 tx) | 0 | Y | Changing GPS module parameters. | + +Pyboard connections are based on UART 4 as used in the test programs; any UART +may be used. RP2 connections assume UART 0. + +The UART Tx-GPS Rx connection is only necessary if using the read/write driver. +The PPS connection is required only if using the timing driver `as_tGPS.py`. Any +pin may be used. + +On the Pyboard D the 3.3V output is switched. Enable it with the following +(typically in `main.py`): +```python +import time +machine.Pin.board.EN_3V3.value(1) +time.sleep(1) +``` + +## 2.2 Library installation + +The library is implemented as a Python package. To install copy the following +directory and its contents to the target hardware: + * `as_drivers/as_GPS` + +The following directory is required for certain Pyboard-specific test scripts: + * `threadsafe` + +See [section 10.3](./GPS.md#103-files-for-timing-applications). + +On platforms with an underlying OS such as the Raspberry Pi ensure that the +directory is on the Python path and that the Python version is 3.8 or later. +Code samples will need adaptation for the serial port. 
+ +## 2.3 Dependency + +The library requires `asyncio` V3 on MicroPython and `asyncio` on CPython. + +##### [Contents](./GPS.md#1-contents) + +# 3 Basic Usage + +In the example below a UART is instantiated and an `AS_GPS` instance created. +A callback is specified which will run each time a valid fix is acquired. +The test runs for 60 seconds once data has been received. + +Pyboard: +```python +import asyncio +import as_drivers.as_GPS as as_GPS +from machine import UART +def callback(gps, *_): # Runs for each valid fix + print(gps.latitude(), gps.longitude(), gps.altitude) + +uart = UART(4, 9600) +sreader = asyncio.StreamReader(uart) # Create a StreamReader +gps = as_GPS.AS_GPS(sreader, fix_cb=callback) # Instantiate GPS + +async def test(): + print('waiting for GPS data') + await gps.data_received(position=True, altitude=True) + await asyncio.sleep(60) # Run for one minute + +asyncio.run(test()) +``` +RP2: +```python +import asyncio +import as_drivers.as_GPS as as_GPS +from machine import UART, Pin +def callback(gps, *_): # Runs for each valid fix + print(gps.latitude(), gps.longitude(), gps.altitude) + +uart = UART(0, 9600, tx=Pin(0), rx=Pin(1), timeout=5000, timeout_char=5000) +sreader = asyncio.StreamReader(uart) # Create a StreamReader +gps = as_GPS.AS_GPS(sreader, fix_cb=callback) # Instantiate GPS + +async def test(): + print('waiting for GPS data') + await gps.data_received(position=True, altitude=True) + await asyncio.sleep(60) # Run for one minute + +asyncio.run(test()) +``` + +This example achieves the same thing without using a callback: + +```python +import asyncio +import as_drivers.as_GPS as as_GPS +from machine import UART + +uart = UART(4, 9600) +sreader = asyncio.StreamReader(uart) # Create a StreamReader +gps = as_GPS.AS_GPS(sreader) # Instantiate GPS + +async def test(): + print('waiting for GPS data') + await gps.data_received(position=True, altitude=True) + for _ in range(10): + print(gps.latitude(), gps.longitude(), gps.altitude) + await 
asyncio.sleep(2) + +asyncio.run(test()) +``` + +## 3.1 Demo programs + +This assumes a Pyboard 1.x or Pyboard D with GPS connected to UART 4 and prints +received data: +```python +import as_drivers.as_gps.ast_pb +``` + +A simple demo which logs a route travelled to a .kml file which may be +displayed on Google Earth. Logging stops when the user switch is pressed. +Data is logged to `/sd/log.kml` at 10s intervals. +```python +import as_drivers.as_gps.log_kml +``` + +##### [Contents](./GPS.md#1-contents) + +# 4. The AS_GPS Class read-only driver + +Method calls and access to bound variables are nonblocking and return the most +current data. This is updated transparently by a coroutine. In situations where +updates cannot be achieved, for example in buildings or tunnels, values will be +out of date. The action to take (if any) is application dependent. + +Three mechanisms exist for responding to outages. + * Check the `time_since_fix` method [section 2.2.3](./GPS.md#423-time-and-date). + * Pass a `fix_cb` callback to the constructor (see below). + * Cause a coroutine to pause until an update is received: see + [section 4.3.1](./GPS.md#431-data-validity). This ensures current data. + +## 4.1 Constructor + +Mandatory positional arg: + * `sreader` This is a `StreamReader` instance associated with the UART. +Optional positional args: + * `local_offset` Local timezone offset in hours realtive to UTC (GMT). May be + an integer or float. + * `fix_cb` An optional callback. This runs after a valid message of a chosen + type has been received and processed. + * `cb_mask` A bitmask determining which sentences will trigger the callback. + Default `RMC`: the callback will occur on RMC messages only (see below). + * `fix_cb_args` A tuple of args for the callback (default `()`). + +Notes: +`local_offset` will alter the date when time passes the 00.00.00 boundary. +If `sreader` is `None` a special test mode is engaged (see `astests.py`). 
+ +### 4.1.1 The fix callback + +This receives the following positional args: + 1. The GPS instance. + 2. An integer defining the message type which triggered the callback. + 3. Any args provided in `msg_cb_args`. + +Message types are defined by the following constants in `as_GPS.py`: `RMC`, +`GLL`, `VTG`, `GGA`, `GSA` and `GSV`. + +The `cb_mask` constructor argument may be the logical `or` of any of these +constants. In this example the callback will occur after successful processing +of RMC and VTG messages: + +```python +gps = as_GPS.AS_GPS(sreader, fix_cb=callback, cb_mask= as_GPS.RMC | as_GPS.VTG) +``` + +## 4.2 Public Methods + +These are grouped below by the type of data returned. + +### 4.2.1 Location + + * `latitude` Optional arg `coord_format=as_GPS.DD`. Returns the most recent + latitude. + If `coord_format` is `as_GPS.DM` returns a tuple `(degs, mins, hemi)`. + If `as_GPS.DD` is passed returns `(degs, hemi)` where degs is a float. + If `as_GPS.DMS` is passed returns `(degs, mins, secs, hemi)`. + `hemi` is 'N' or 'S'. + + * `longitude` Optional arg `coord_format=as_GPS.DD`. Returns the most recent + longitude. + If `coord_format` is `as_GPS.DM` returns a tuple `(degs, mins, hemi)`. + If `as_GPS.DD` is passed returns `(degs, hemi)` where degs is a float. + If `as_GPS.DMS` is passed returns `(degs, mins, secs, hemi)`. + `hemi` is 'E' or 'W'. + + * `latitude_string` Optional arg `coord_format=as_GPS.DM`. Returns the most + recent latitude in human-readable format. Formats are `as_GPS.DM`, + `as_GPS.DD`, `as_GPS.DMS` or `as_GPS.KML`. + If `coord_format` is `as_GPS.DM` it returns degrees, minutes and hemisphere + ('N' or 'S'). + `as_GPS.DD` returns degrees and hemisphere. + `as_GPS.DMS` returns degrees, minutes, seconds and hemisphere. + `as_GPS.KML` returns decimal degrees, +ve in northern hemisphere and -ve in + southern, intended for logging to Google Earth compatible kml files. + + * `longitude_string` Optional arg `coord_format=as_GPS.DM`. 
Returns the most + recent longitude in human-readable format. Formats are `as_GPS.DM`, + `as_GPS.DD`, `as_GPS.DMS` or `as_GPS.KML`. + If `coord_format` is `as_GPS.DM` it returns degrees, minutes and hemisphere + ('E' or 'W'). + `as_GPS.DD` returns degrees and hemisphere. + `as_GPS.DMS` returns degrees, minutes, seconds and hemisphere. + `as_GPS.KML` returns decimal degrees, +ve in eastern hemisphere and -ve in + western, intended for logging to Google Earth compatible kml files. + +### 4.2.2 Course + + * `speed` Optional arg `unit=as_GPS.KPH`. Returns the current speed in the + specified units. Options: `as_GPS.KPH`, `as_GPS.MPH`, `as_GPS.KNOT`. + + * `speed_string` Optional arg `unit=as_GPS.KPH`. Returns the current speed in + the specified units. Options `as_GPS.KPH`, `as_GPS.MPH`, `as_GPS.KNOT`. + + * `compass_direction` No args. Returns current course as a string e.g. 'ESE' + or 'NW'. Note that this requires the file `as_GPS_utils.py`. + +### 4.2.3 Time and date + + * `time_since_fix` No args. Returns time in milliseconds since last valid fix. + + * `time_string` Optional arg `local=True`. Returns the current time in form + 'hh:mm:ss.sss'. If `local` is `False` returns UTC time. + + * `date_string` Optional arg `formatting=MDY`. Returns the date as + a string. Formatting options: + `as_GPS.MDY` returns 'MM/DD/YY'. + `as_GPS.DMY` returns 'DD/MM/YY'. + `as_GPS.LONG` returns a string of form 'January 1st, 2014'. + Note that this requires the file `as_GPS_utils.py`. + +##### [Contents](./GPS.md#1-contents) + +## 4.3 Public coroutines + +### 4.3.1 Data validity + +On startup after a cold start it may take time before valid data is received. +During and shortly after an outage messages will be absent. To avoid reading +stale data, reception of messages can be checked before accessing data. + + * `data_received` Boolean args: `position`, `course`, `date`, `altitude`. + All default `False`. 
The coroutine will pause until at least one valid message + of each specified types has been received. This example will pause until new + position and altitude messages have been received: + +```python +while True: + await my_gps.data_received(position=True, altitude=True) + # Access these data values now +``` + +No option is provided for satellite data: this functionality is provided by the +`get_satellite_data` coroutine. + +### 4.3.2 Satellite Data + +Satellite data requires multiple sentences from the GPS and therefore requires +a coroutine which will pause execution until a complete set of data has been +acquired. + + * `get_satellite_data` No args. Waits for a set of GSV (satellites in view) + sentences and returns a dictionary. Typical usage in a user coroutine: + +```python + d = await my_gps.get_satellite_data() + print(d.keys()) # List of satellite PRNs + print(d.values()) # [(elev, az, snr), (elev, az, snr)...] +``` + +Dictionary values are (elevation, azimuth, snr) where elevation and azimuth are +in degrees and snr (a measure of signal strength) is in dB in range 0-99. +Higher is better. + +Note that if the GPS module does not support producing GSV sentences this +coroutine will pause forever. It can also pause for arbitrary periods if +satellite reception is blocked, such as in a building. + +## 4.4 Public bound variables and properties + +These are updated whenever a sentence of the relevant type has been correctly +received from the GPS unit. For crucial navigation data the `time_since_fix` +method may be used to determine how current these values are. + +The sentence type which updates a value is shown in brackets e.g. (GGA). + +### 4.4.1 Position and course + + * `course` Track angle in degrees. (VTG). + * `altitude` Metres above mean sea level. (GGA). + * `geoid_height` Height of geoid (mean sea level) in metres above WGS84 + ellipsoid. (GGA). + * `magvar` Magnetic variation. Degrees. -ve == West. 
Current firmware does not + produce this data: it will always read zero. + +### 4.4.2 Statistics and status + +The following are counts since instantiation. + * `crc_fails` Usually 0 but can occur on baudrate change. + * `clean_sentences` Number of sentences received without major failures. + * `parsed_sentences` Sentences successfully parsed. + * `unsupported_sentences` This is incremented if a sentence is received which + has a valid format and checksum, but is not supported by the class. This + value will also increment if these are supported in a subclass. See + [section 8](./GPS.md#8-developer-notes). + +### 4.4.3 Date and time + + * `utc` (property) [hrs: int, mins: int, secs: int] UTC time e.g. + [23, 3, 58]. Note the integer seconds value. The MTK3339 chip provides a float + but its value is always an integer. To achieve accurate subsecond timing see + [section 6](./GPS.md#6-using-gps-for-accurate-timing). + * `local_time` (property) [hrs: int, mins: int, secs: int] Local time. + * `date` (property) [day: int, month: int, year: int] e.g. [23, 3, 18] + * `local_offset` Local time offset in hrs as specified to constructor. + * `epoch_time` Integer. Time in seconds since the epoch. Epoch start depends + on whether running under MicroPython (Y2K) or Python 3.8+ (1970 on Unix). + +The `utc`, `date` and `local_time` properties update on receipt of RMC +messages. If a nonzero `local_offset` value is specified the `date` value will +update when local time passes midnight (local time and date are computed from +`epoch_time`). + +### 4.4.4 Satellite data + + * `satellites_in_view` No. of satellites in view. (GSV). + * `satellites_in_use` No. of satellites in use. (GGA). + * `satellites_used` List of satellite PRN's. (GSA). + * `pdop` Dilution of precision (GSA). + * `hdop` Horizontal dilution of precision (GSA). + * `vdop` Vertical dilution of precision (GSA). + +Dilution of Precision (DOP) values close to 1.0 indicate excellent quality +position data. 
Increasing values indicate decreasing precision. + +## 4.5 Subclass hooks + +The following public methods are null. They are intended for optional +overriding in subclasses. Or monkey patching if you like that sort of thing. + + * `reparse` Called after a supported sentence has been parsed. + * `parse` Called when an unsupported sentence has been received. + +If the received string is invalid (e.g. bad character or incorrect checksum) +these will not be called. + +Both receive as arguments a list of strings, each being a segment of the comma +separated sentence. The '$' character in the first arg and the '*' character +and subsequent characters are stripped from the last. Thus if the string +`b'$GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47\r\n'` +was received `reparse` would see +`['GPGGA','123519','4807.038','N','01131.000','E','1','08','0.9','545.4','M','46.9','M','','']` + +## 4.6 Public class variable + + * `FULL_CHECK` Default `True`. If set `False` disables CRC checking and other + basic checks on received sentences. If GPS is linked directly to the target + (rather than via long cables) these checks are arguably not neccessary. + +##### [Contents](./GPS.md#1-contents) + +# 5. The GPS class read-write driver + +This is a subclass of `AS_GPS` and supports all its public methods, coroutines +and bound variables. It provides support for sending PMTK command packets to +GPS modules based on the MTK3329/MTK3339 chip. These include: + + * Adafruit Ultimate GPS Breakout + * Digilent PmodGPS + * Sparkfun GPS Receiver LS20031 + * 43oh MTK3339 GPS Launchpad Boosterpack + +A subset of the PMTK packet types is supported but this may be extended by +subclassing. + +## 5.1 Test script + +This assumes a Pyboard 1.x or Pyboard D with GPS on UART 4. To run issue: +```python +import as_drivers.as_gps.ast_pbrw +``` + +The test script will pause until a fix has been achieved. 
After that changes +are made for about 1 minute reporting data at the REPL and on the LEDs. On +completion (or `ctrl-c`) a factory reset is performed to restore the default +baudrate. + +LED's: + * Red: Toggles each time a GPS update occurs. + * Green: ON if GPS data is being received, OFF if no data received for >10s. + * Yellow: Toggles each 4s if navigation updates are being received. + +## 5.2 Usage example + +This reduces to 2s the interval at which the GPS sends messages: + +```python +import asyncio +from as_drivers.as_GPS.as_rwGPS import GPS +# Pyboard +#from machine import UART +#uart = UART(4, 9600) +# RP2 +from machine import UART, Pin +uart = UART(0, 9600, tx=Pin(0), rx=Pin(1), timeout=5000, timeout_char=5000) +# +sreader = asyncio.StreamReader(uart) # Create a StreamReader +swriter = asyncio.StreamWriter(uart, {}) +gps = GPS(sreader, swriter) # Instantiate GPS + +async def test(): + print('waiting for GPS data') + await gps.data_received(position=True, altitude=True) + await gps.update_interval(2000) # Reduce message rate + for _ in range(10): + print(gps.latitude(), gps.longitude(), gps.altitude) + await asyncio.sleep(2) + +asyncio.run(test()) +``` +##### [Contents](./GPS.md#1-contents) + +## 5.3 GPS class Constructor + +This takes two mandatory positional args: + * `sreader` This is a `StreamReader` instance associated with the UART. + * `swriter` This is a `StreamWriter` instance associated with the UART. + +Optional positional args: + * `local_offset` Local timezone offset in hours realtive to UTC (GMT). + * `fix_cb` An optional callback which runs each time a valid fix is received. + * `cb_mask` A bitmask determining which sentences will trigger the callback. + Default `RMC`: the callback will occur on RMC messages only (see below). + * `fix_cb_args` A tuple of args for the callback. + * `msg_cb` Optional callback. This will run if any handled message is received + and also for unhandled `PMTK` messages. 
+ * `msg_cb_args` A tuple of args for the above callback. + +If implemented the message callback will receive the following positional args: + 1. The GPS instance. + 2. A list of text strings from the message. + 3. Any args provided in `msg_cb_args`. + +In the case of handled messages the list of text strings has length 2. The +first is 'version', 'enabled' or 'antenna' followed by the value of the +relevant bound variable e.g. `['antenna', 3]`. + +For unhandled messages text strings are as received, processed as per +[section 4.5](./GPS.md#45-subclass-hooks). + +The args presented to the fix callback are as described in +[section 4.1](./GPS.md#41-constructor). + +## 5.4 Public coroutines + + * `baudrate` Arg: baudrate. Must be 4800, 9600, 14400, 19200, 38400, 57600 + or 115200. See below. + * `update_interval` Arg: interval in ms. Default 1000. Must be between 100 + and 10000. If the rate is to be increased see + [notes on timing](GPS.md#9-notes-on-timing). + * `enable` Determine the frequency with which each sentence type is sent. A + value of 0 disables a sentence, a value of 1 causes it to be sent with each + received position fix. A value of N causes it to be sent once every N fixes. + It takes 7 keyword-only integer args, one for each supported sentence. These, + with default values, are: + `gll=0`, `rmc=1`, `vtg=1`, `gga=1`, `gsa=1`, `gsv=5`, `chan=0`. The last + represents GPS channel status. These values are the factory defaults. + * `command` Arg: a command from the following set: + * `as_rwGPS.HOT_START` Use all available data in the chip's NV Store. + * `as_rwGPS.WARM_START` Don't use Ephemeris at re-start. + * `as_rwGPS.COLD_START` Don't use Time, Position, Almanacs and Ephemeris data + at re-start. + * `as_rwGPS.FULL_COLD_START` A 'cold_start', but additionally clear + system/user configurations at re-start. That is, reset the receiver to the + factory status. + * `as_rwGPS.STANDBY` Put into standby mode. Sending any command resumes + operation. 
+ * `as_rwGPS.DEFAULT_SENTENCES` Sets all sentence frequencies to factory + default values as listed under `enable`. + * `as_rwGPS.VERSION` Causes the GPS to report its firmware version. This will + appear as the `version` bound variable when the report is received. + * `as_rwGPS.ENABLE` Causes the GPS to report the enabled status of the various + message types as set by the `enable` coroutine. This will appear as the + `enable` bound variable when the report is received. + * `as_rwGPS.ANTENNA` Causes the GPS to send antenna status messages. The + status value will appear in the `antenna` bound variable each time a report is + received. + * `as_rwGPS.NO_ANTENNA` Turns off antenna messages. + +**Antenna issues** In my testing the antenna functions have issues which +hopefully will be fixed in later firmware versions. The `NO_ANTENNA` message +has no effect. And, while issuing the `ANTENNA` message works, it affects the +response of the unit to subsequent commands. If possible issue it after all +other commands have been sent. I have also observed issues which can only be +cleared by power cycling the GPS. + +### 5.4.1 Changing baudrate + +I have experienced failures on a Pyboard V1.1 at baudrates higher than 19200. +This may be a problem with my GPS hardware (see below). + +Further, there are problems (at least with my GPS firmware build) where setting +baudrates only works for certain rates. This is clearly an issue with the GPS +unit; rates of 19200, 38400, 57600 and 115200 work. Setting 4800 sets 115200. +Importantly 9600 does nothing. Hence the only way to restore the default is to +perform a `FULL_COLD_START`. The test programs do this. 
+ +If you change the GPS baudrate the UART should be re-initialised immediately +after the `baudrate` coroutine terminates: + +```python +async def change_status(gps, uart): + await gps.baudrate(19200) + uart.init(19200) +``` + +At risk of stating the obvious to seasoned programmers, say your application +changes the GPS unit's baudrate. If interrupted (with a bug or `ctrl-c`) the +GPS will still be running at the new baudrate. The application may need to be +designed to reflect this: see `ast_pbrw.py` which uses `try-finally` to reset +the baudrate in the event that the program terminates due to an exception or +otherwise. + +Particular care needs to be used if a backup battery is employed as the GPS +will then remember its baudrate over a power cycle. + +See also [notes on timing](./GPS.md#9-notes-on-timing). + +## 5.5 Public bound variables + +These are updated when a response to a command is received. The time taken for +this to occur depends on the GPS unit. One solution is to implement a message +callback. Alternatively await a coroutine which periodically (in intervals +measured in seconds) polls the value, returning it when it changes. + + * `version` Initially `None`. A list of version strings. + * `enabled` Initially `None`. A dictionary of frequencies indexed by message + type (see `enable` coroutine above). + * `antenna` Initially 0. Values: + 0 No report received. + 1 Antenna fault. + 2 Internal antenna. + 3 External antenna. + +## 5.6 The parse method developer note + +The null `parse` method in the base class is overridden. It intercepts the +single response to `VERSION` and `ENABLE` commands and updates the above bound +variables. The `ANTENNA` command causes repeated messages to be sent. These +update the `antenna` bound variable. These "handled" messages call the message +callback with the `GPS` instance followed by a list of sentence segments +followed by any args specified in the constructor. 
+ +Other `PMTK` messages are passed to the optional message callback as described +[in section 5.3](GPS.md#53-gps-class-constructor). + +##### [Contents](./GPS.md#1-contents) + +# 6. Using GPS for accurate timing + +Many GPS chips (e.g. MTK3339) provide a PPS signal which is a pulse occurring +at 1s intervals whose leading edge is a highly accurate UTC time reference. + +This driver uses this pulse to provide accurate subsecond UTC and local time +values. The driver requires MicroPython because PPS needs a pin interrupt. + +On STM platforms such as the Pyboard it may be used to set and to calibrate the +realtime clock (RTC). This functionality is not currently portable to other +chips. + +See [Absolute accuracy](GPS.md#91-absolute-accuracy) for a discussion of +the absolute accuracy provided by this module (believed to be on the order of ++-70μs). + +Two classes are provided: `GPS_Timer` for read-only access to the GPS device +and `GPS_RWTimer` for read/write access. + +## 6.1 Test scripts + + * `as_GPS_time.py` Test scripts for read only driver. + * `as_rwGPS_time.py` Test scripts for read/write driver. + +On import, these will list available tests. 
Example usage: +```python +import as_drivers.as_GPS.as_GPS_time as test +test.usec() +``` + +## 6.2 Usage example + +Pyboard: +```python +import asyncio +import pyb +import as_drivers.as_GPS.as_tGPS as as_tGPS + +async def test(): + fstr = '{}ms Time: {:02d}:{:02d}:{:02d}:{:06d}' + red = pyb.LED(1) + green = pyb.LED(2) + uart = pyb.UART(4, 9600, read_buf_len=200) + sreader = asyncio.StreamReader(uart) + pps_pin = pyb.Pin('X3', pyb.Pin.IN) + gps_tim = as_tGPS.GPS_Timer(sreader, pps_pin, local_offset=1, + fix_cb=lambda *_: red.toggle(), + pps_cb=lambda *_: green.toggle()) + print('Waiting for signal.') + await gps_tim.ready() # Wait for GPS to get a signal + await gps_tim.set_rtc() # Set RTC from GPS + while True: + await asyncio.sleep(1) + # In a precision app, get the time list without allocation: + t = gps_tim.get_t_split() + print(fstr.format(gps_tim.get_ms(), t[0], t[1], t[2], t[3])) + +asyncio.run(test()) +``` +RP2 (note set_rtc function is Pyboard specific) +```python +import asyncio +from machine import UART, Pin +import as_drivers.as_GPS.as_tGPS as as_tGPS + +async def test(): + fstr = '{}ms Time: {:02d}:{:02d}:{:02d}:{:06d}' + uart = UART(0, 9600, tx=Pin(0), rx=Pin(1), rxbuf=200, timeout=5000, timeout_char=5000) + sreader = asyncio.StreamReader(uart) + pps_pin = Pin(2, Pin.IN) + gps_tim = as_tGPS.GPS_Timer(sreader, pps_pin, local_offset=1, + fix_cb=lambda *_: print("fix"), + pps_cb=lambda *_: print("pps")) + print('Waiting for signal.') + await gps_tim.ready() # Wait for GPS to get a signal + while True: + await asyncio.sleep(1) + # In a precision app, get the time list without allocation: + t = gps_tim.get_t_split() + print(fstr.format(gps_tim.get_ms(), t[0], t[1], t[2], t[3])) + +asyncio.run(test()) +``` + +## 6.3 GPS_Timer and GPS_RWTimer classes + +These classes inherit from `AS_GPS` and `GPS` respectively, with read-only and +read/write access to the GPS hardware. All public methods and bound variables of +the base classes are supported. 
Additional functionality is detailed below. + +### 6.3.1 GPS_Timer class Constructor + +Mandatory positional args: + * `sreader` The `StreamReader` instance associated with the UART. + * `pps_pin` An initialised input `Pin` instance for the PPS signal. + +Optional positional args: + * `local_offset` See [base class](GPS.md#41-constructor) for details of + these args. + * `fix_cb` + * `cb_mask` + * `fix_cb_args` + * `pps_cb` Callback runs when a PPS interrupt occurs. The callback runs in an + interrupt context so it should return quickly and cannot allocate RAM. Default + is a null method. See below for callback args. + * `pps_cb_args` Default `()`. A tuple of args for the callback. The callback + receives the `GPS_Timer` instance as the first arg, followed by any args in + the tuple. + +### 6.3.2 GPS_RWTimer class Constructor + +This takes three mandatory positional args: + * `sreader` The `StreamReader` instance associated with the UART. + * `swriter` The `StreamWriter` instance associated with the UART. + * `pps_pin` An initialised input `Pin` instance for the PPS signal. + +Optional positional args: + * `local_offset` See [base class](GPS.md#41-constructor) for + details of these args. + * `fix_cb` + * `cb_mask` + * `fix_cb_args` + * `msg_cb` + * `msg_cb_args` + * `pps_cb` Callback runs when a PPS interrupt occurs. The callback runs in an + interrupt context so it should return quickly and cannot allocate RAM. Default + is a null method. See below for callback args. + * `pps_cb_args` Default `()`. A tuple of args for the callback. The callback + receives the `GPS_RWTimer` instance as the first arg, followed by any args in + the tuple. + +##### [Contents](./GPS.md#1-contents) + +## 6.4 Public methods + +The methods that return an accurate GPS time of day run as fast as possible. To +achieve this they avoid allocation and dispense with error checking: these +methods should not be called until a valid time/date message and PPS signal +have occurred. 
Await the `ready` coroutine prior to first use. Subsequent calls +may occur without restriction; see usage example above. + +These methods use the MicroPython microsecond timer to interpolate between PPS +pulses. They do not involve the RTC. Hence they should work on any MicroPython +target supporting `machine.ticks_us`. + + * `get_ms` No args. Returns an integer: the period past midnight in ms. + * `get_t_split` No args. Returns time of day in a list of form + `[hrs: int, mins: int, secs: int, μs: int]`. + * `close` No args. Shuts down the PPS pin interrupt handler. Usage is optional + but in test situations avoids the ISR continuing to run after termination. + +See [Absolute accuracy](GPS.md#91-absolute-accuracy) for a discussion of +the accuracy of these methods. + +## 6.5 Public coroutines + +All MicroPython targets: + * `ready` No args. Pauses until a valid time/date message and PPS signal have + occurred. + +STM hosts only: + * `set_rtc` No args. Sets the RTC to GPS time. Coroutine pauses for up + to 1s as it waits for a PPS pulse. + * `delta` No args. Returns no. of μs RTC leads GPS. Coro pauses for up to 1s. + * `calibrate` Arg: integer, no. of minutes to run default 5. Calibrates the + RTC and returns the calibration factor for it. + +The `calibrate` coroutine sets the RTC (with any existing calibration removed) +and measures its drift with respect to the GPS time. This measurement becomes +more precise as time passes. It calculates a calibration value at 10s intervals +and prints progress information. When the calculated calibration factor is +repeatable within one digit (or the specified time has elapsed) it terminates. +Typical run times are on the order of two minutes. + +Achieving an accurate calibration factor takes time but does enable the Pyboard +RTC to achieve timepiece quality results. 
Note that calibration is lost on +power down: solutions are either to use an RTC backup battery or to store the +calibration factor in a file (or in code) and re-apply it on startup. + +Crystal oscillator frequency has a small temperature dependence; consequently +the optimum calibration factor has a similar dependence. For best results allow +the hardware to reach working temperature before calibrating. + +## 6.6 Absolute accuracy + +The claimed absolute accuracy of the leading edge of the PPS signal is +-10ns. +In practice this is dwarfed by errors including latency in the MicroPython VM. +Nevertheless the `get_ms` method can be expected to provide 1 digit (+-1ms) +accuracy and the `get_t_split` method should provide accuracy on the order of +-5μs +65μs (standard deviation). This is based on a Pyboard running at 168MHz. +The reasoning behind this is discussed in +[section 9](./GPS.md#9-notes-on-timing). + +## 6.7 Demo program as_GPS_time + +Run by issuing +```python +import as_drivers.as_GPS.as_GPS_time as test +test.time() # e.g. +``` + +This comprises the following test functions. Reset the chip with ctrl-d between +runs. + * `time(minutes=1)` Print out GPS time values. + * `calibrate(minutes=5)` Determine the calibration factor of the Pyboard RTC. + Set it and calibrate it. + * `drift(minutes=5)` Monitor the drift between RTC time and GPS time. At the + end of the run, print the error in μs/hr and minutes/year. + * `usec(minutes=1)` Measure the accuracy of `utime.ticks_us()` against the PPS + signal. Print basic statistics at the end of the run. Provides an estimate of + some limits to the absolute accuracy of the `get_t_split` method as discussed + above. + +##### [Contents](./GPS.md#1-contents) + +# 7. Supported Sentences + + * GPRMC GP indicates NMEA sentence (US GPS system). + * GLRMC GL indicates GLONASS (Russian system). + * GNRMC GN GNSS (Global Navigation Satellite System). 
+ * GPGLL + * GLGLL + * GPGGA + * GLGGA + * GNGGA + * GPVTG + * GLVTG + * GNVTG + * GPGSA + * GLGSA + * GPGSV + * GLGSV + +##### [Contents](./GPS.md#1-contents) + +# 8 Developer notes + +These notes are for those wishing to adapt these drivers. + +## 8.1 Subclassing + +If support for further sentence types is required the `AS_GPS` class may be +subclassed. If a correctly formed sentence with a valid checksum is received, +but is not supported, the `parse` method is called. By default this is a +`lambda` which ignores args and returns `True`. + +A subclass may override `parse` to parse such sentences. An example of this may be +found in the `as_rwGPS.py` module. + +The `parse` method receives an arg `segs` being a list of strings. These are +the parts of the sentence which were delimited by commas. See +[section 4.5](GPS.md#45-subclass-hooks) for details. + +The `parse` method should return `True` if the sentence was successfully +parsed, otherwise `False`. + +Where a sentence is successfully parsed by the driver, a null `reparse` method +is called. It receives the same string list as `parse`. It may be overridden in +a subclass, possibly to extract further information from the sentence. + +## 8.2 Special test programs + +These tests allow NMEA parsing to be verified in the absence of GPS hardware: + + * `astests_pyb.py` Test with synthetic data on UART. GPS hardware replaced by + a loopback on UART 4. Requires a Pyboard. + * `astests.py` Test with synthetic data. Run on a PC under CPython 3.8+ or + MicroPython. Run from the `v3` directory at the REPL as follows: + +```python +from as_drivers.as_GPS.astests import run_tests +run_tests() +``` +or at the command line: +```bash +$ micropython -m as_drivers.as_GPS.astests +``` + +##### [Contents](./GPS.md#1-contents) + +# 9. Notes on timing + +At the default 1s update rate the GPS hardware emits a PPS pulse followed by a +set of messages. It then remains silent until the next PPS. 
At the default +baudrate of 9600 the UART continued receiving data for 400ms when a set of GPSV +messages came in. This time could be longer depending on data. So if an update +rate higher than the default 1 second is to be used, either the baudrate should +be increased or satellite information messages should be disabled. + +The accuracy of the timing drivers may be degraded if a PPS pulse arrives while +the UART is still receiving. The update rate should be chosen to avoid this. + +The PPS signal on the MTK3339 occurs only when a fix has been achieved. The +leading edge occurs on a 1s boundary with high absolute accuracy. It therefore +follows that the RMC message carrying the time/date of that second arrives +after the leading edge (because of processing and UART latency). It is also +the case that on a one-second boundary minutes, hours and the date may roll +over. + +Further, the local_time offset can affect the date. These drivers aim to handle +these factors. They do this by storing the epoch time (as an integer number of +seconds) as the fundamental time reference. This is updated by the RMC message. +The `utc`, `date` and `localtime` properties convert this to usable values with +the latter two using the `local_offset` value to ensure correct results. + +## 9.1 Absolute accuracy + +Without an atomic clock synchronised to a Tier 1 NTP server, absolute accuracy +(Einstein notwithstanding :-)) is hard to prove. However if the manufacturer's +claim of the accuracy of the PPS signal is accepted, the errors contributed by +MicroPython can be estimated. + +The driver interpolates between PPS pulses using `utime.ticks_us()` to provide +μs precision. The leading edge of PPS triggers an interrupt which records the +arrival time of PPS in the `acquired` bound variable. The ISR also records, to +1 second precision, an accurate datetime derived from the previous RMC message. 
+The time can therefore be estimated by taking the datetime and adding the +elapsed time since the time stored in the `acquired` bound variable. This is +subject to the following errors: + +Sources of fixed lag: + * Latency in the function used to retrieve the time. + * Mean value of the interrupt latency. + +Sources of variable error: + * Variations in interrupt latency (small on Pyboard). + * Inaccuracy in the `ticks_us` timer (significant over a 1 second interval). + +With correct usage when the PPS interrupt occurs the UART will not be receiving +data (this can substantially affect ISR latency variability). Consequently, on +the Pyboard, variations in interrupt latency are small. Using an oscilloscope a +normal latency of 15μs was measured with the `time` test in `as_GPS_time.py` +running. The maximum observed was 17μs. + +The test program `as_GPS_time.py` has a test `usecs` which aims to assess the +sources of variable error. Over a period it repeatedly uses `ticks_us` to +measure the time between PPS pulses. Given that the actual time is effectively +constant the measurement is of error relative to the expected value of 1s. At +the end of the measurement period the test calculates some simple statistics on +the results. On targets other than a 168MHz Pyboard this may be run to estimate +overheads. + +The timing method `get_t_split` measures the time when it is called, which it +records as quickly as possible. Assuming this has a similar latency to the ISR +there is likely to be a 30μs lag coupled with ~+-35μs (SD) jitter largely +caused by inaccuracy of `ticks_us` over a 1 second period. Note that I have +halved the jitter time on the basis that the timing method is called +asynchronously to PPS: the interval will centre on 0.5s. The assumption is that +inaccuracy in the `ticks_us` timer measured in μs is proportional to the +duration over which it is measured. 
+ +##### [Contents](./GPS.md#1-contents) + +# 10 Files + +If space on the filesystem is limited, unneccessary files may be deleted. Many +applications will not need the read/write or timing files. + +## 10.1 Basic files + + * `as_GPS.py` The library. Supports the `AS_GPS` class for read-only access to + GPS hardware. + * `as_GPS_utils.py` Additional formatted string methods for `AS_GPS`. On + RAM-constrained devices this may be omitted in which case the `date_string` + and `compass_direction` methods will be unavailable. + +Demos. Written for Pyboard but readily portable. + * `ast_pb.py` Test/demo program: assumes a Pyboard with GPS connected to UART 4. + * `log_kml.py` A simple demo which logs a route travelled to a .kml file which + may be displayed on Google Earth. + +## 10.2 Files for read/write operation + + * `as_rwGPS.py` Supports the `GPS` class. This subclass of `AS_GPS` enables + writing PMTK packets. + +Demo. Written for Pyboard but readily portable. + * `ast_pbrw.py` + +## 10.3 Files for timing applications + + * `as_tGPS.py` The library. Provides `GPS_Timer` and `GPS_RWTimer` classes. + Cross platform. + +Note that the following are Pyboard specific and require the `threadsafe` +directory to be copied to the target. + + * `as_GPS_time.py` Test scripts for read only driver (Pyboard). + * `as_rwGPS_time.py` Test scripts for read/write driver (Pyboard). + +## 10.4 Special test programs + +These tests allow NMEA parsing to be verified in the absence of GPS hardware. +For those modifying or extending the sentence parsing: + + * `astests.py` Test with synthetic data. Run on PC under CPython 3.8+ or MicroPython. + * `astests_pyb.py` Test with synthetic data on UART. GPS hardware replaced by + a loopback on UART 4. Requires a Pyboard. 
+ + +[MicroPython]:https://micropython.org/ +[frozen module]:https://learn.adafruit.com/micropython-basics-loading-modules/frozen-modules +[NMEA-0183]:http://aprs.gids.nl/nmea/ +[TinyGPS]:http://arduiniana.org/libraries/tinygps/ +[pyboard]:http://docs.micropython.org/en/latest/pyboard/pyboard/quickref.html +[MTK_command]:https://github.com/inmcm/MTK_commands +[Ultimate GPS Breakout]:http://www.adafruit.com/product/746 +[micropyGPS]:https://github.com/inmcm/micropyGPS.git + +##### [Contents](./GPS.md#1-contents) diff --git a/v3/docs/HTU21D.md b/v3/docs/HTU21D.md new file mode 100644 index 0000000..07feb14 --- /dev/null +++ b/v3/docs/HTU21D.md @@ -0,0 +1,87 @@ +# The HTU21D temperature/humidity sensor. + +Breakout boards are available from +[Adafruit](https://www.adafruit.com/product/1899). + +This [Sparkfun board](https://www.sparkfun.com/products/13763) has an Si7021 +chip which, from a look at the datasheet, appears to be a clone of the HTU21D. +The Sparkfun prduct ID is the same as boards which I own: mine have HTU21D +chips. + +This driver was derived from the synchronous Pyboard-specific driver +[here](https://github.com/manitou48/pyboard/blob/master/htu21d.py). It is +designed to be multi-platform and uses `asyncio` to achieve asynchronous (non- +blocking) operation. The driver maintains `temperature` and `humidity` bound +variables as a non-blocking background task. Consequently reading the values is +effectively instantaneous. + +###### [Main V3 README](../README.md) + +# Installation + +Copy the `as_drivers/htu21d` directory and contents to the target hardware. +Copy `primitives` and contents to the target. + +Files: + 1. `htu21d_mc.py` The asynchronous driver. + 2. `htu_test.py` Test/demo program. + +# The test script + +This runs on any Pyboard or ESP32. for other platforms pin numbers will need to +be changed. 
+ +| Pin | Pyboard | ESP32 | +|:----:|:-------:|:-----:| +| gnd | gnd | gnd | +| Vin | 3V3 | 3V3 | +| scl | X9 | 22 | +| sda | X10 | 23 | + +On the Pyboard D the 3.3V supply must be enabled with +```python +machine.Pin.board.EN_3V3.value(1) +``` +This also enables the I2C pullups on the X side. To run the demo issue: +```python +import as_drivers.htu21d.htu_test +``` + +# The driver + +This provides a single class `HTU21D`. + +Constructor. +This takes the following args +* `i2c` (mandatory) An initialised I2C bus instance. +* `read_delay=10`. The frequency (secs) at which data values are updated. +* `address=0x40` I2C address of the chip. + +Public bound values + 1. `temperature` Latest value in Celcius. + 2. `humidity` Latest value of relative humidity (%). + +Initial readings will not be complete until about 120ms after the class is +instantiated. Prior to this the values will be `None`. To avoid such invalid +readings the class is awaitable and may be used as follows. + +```python +import asyncio +from machine import Pin, I2C +from as_drivers.htu21d import HTU21D + +htu = HTU21D(I2C(1)) # Pyboard scl=X9 sda=X10 + +async def main(): + await htu # Wait for device to be ready + while True: + fstr = 'Temp {:5.1f} Humidity {:5.1f}' + print(fstr.format(htu.temperature, htu.humidity)) + await asyncio.sleep(5) + +asyncio.run(main()) +``` + +Thermal inertia of the chip packaging means that there is a lag between the +occurrence of a temperature change and the availability of accurate readings. +There is therefore little practical benefit in reducing the `read_delay`. diff --git a/v3/docs/I2C.md b/v3/docs/I2C.md new file mode 100644 index 0000000..e44ca80 --- /dev/null +++ b/v3/docs/I2C.md @@ -0,0 +1,462 @@ +# A communication link using I2C + +This library implements an asynchronous bidirectional communication link +between MicroPython targets using I2C. It presents a UART-like interface +supporting `StreamReader` and `StreamWriter` classes. 
In doing so, it emulates +the behaviour of a full duplex link despite the fact that the underlying I2C +link is half duplex. + +This version is for `asyncio` V3 which requires firmware V1.13 or later - +until the release of V1.13 a daily build is required. + +One use case is to provide a UART-like interface to an ESP8266 while leaving +the one functional UART free for the REPL. + +The blocking nature of the MicroPython I2C device driver is mitigated by +hardware synchronisation on two wires. This ensures that the slave is +configured for a transfer before the master attempts to access it. + +The Pyboard or similar STM based boards are currently the only targets +supporting I2C slave mode. Consequently at least one end of the interface +(known as the`Initiator`) must be a Pyboard or other board supporting the `pyb` +module. The `Responder` may be any hardware running MicroPython and supporting +`machine`. + +If the `Responder` (typically an ESP8266) crashes the resultant I2C failure is +detected by the `Initiator` which can issue a hardware reboot to the +`Responder` enabling the link to recover. This can occur transparently to the +application and is covered in detail +[in section 5.3](./I2C.md#53-responder-crash-detection). + +## Changes + +V0.18 Apr 2020 Ported to `asyncio` V3. Convert to Python package. Test script +pin numbers changed to be WBUS_DIP28 fiendly. +V0.17 Dec 2018 Initiator: add optional "go" and "fail" user coroutines. +V0.16 Minor improvements and bugfixes. Eliminate `timeout` option which caused +failures where `Responder` was a Pyboard. +V0.15 RAM allocation reduced. Flow control implemented. +V0.1 Initial release. + +###### [Main README](../README.md) + +# Contents + + 1. [Files](./I2C.md#1-files) + 2. [Wiring](./I2C.md#2-wiring) + 3. [Design](./I2C.md#3-design) + 4. 
[API](./I2C.md#4-api) + 4.1 [Channel class](./I2C.md#41-channel-class) + 4.2 [Initiator class](./I2C.md#42-initiator-class) + 4.2.1 [Configuration](./I2C.md#421-configuration) Fine-tuning the interface. + 4.2.2 [Optional coroutines](./I2C.md#422-optional-coroutines) + 4.3 [Responder class](./I2C.md#43-responder-class) + 5. [Limitations](./I2C.md#5-limitations) + 5.1 [Blocking](./I2C.md#51-blocking) + 5.2 [Buffering and RAM usage](./I2C.md#52-buffering-and-ram-usage) + 5.3 [Responder crash detection](./I2C.md#53-responder-crash-detection) + 6. [Hacker notes](./I2C.md#6-hacker-notes) For anyone wanting to hack on + the code. + +# 1. Files + + 1. `asi2c.py` Module for the `Responder` target. + 2. `asi2c_i.py` The `Initiator` target requires this and `asi2c.py`. + 3. `i2c_init.py` Initiator test/demo to run on a Pyboard. + 4. `i2c_resp.py` Responder test/demo to run on a Pyboard. + 5. `i2c_esp.py` Responder test/demo for ESP8266. + +#### Dependency: + 1. `asyncio` Official V3 library. + +#### Installation +Copy the `as_drivers/i2c` directory and contents to the target hardware. + +###### [Main V3 README](../README.md) + +# 2. Wiring + +Pin numbers are for the test programs: these may be changed. I2C pin numbers +may be changed by using soft I2C. In each case except `rs_out`, the two targets +are connected by linking identically named pins. + +ESP pins are labelled reference board pin no./WeMOS D1 Mini pin no. + +| Pyboard | Target | PB | ESP | Comment | +|:-------:|:------:|:---:|:----:|:-------:| +| gnd | gnd | | | | +| sda | sda | Y10 | 2/D4 | I2C | +| scl | scl | Y9 | 0/D3 | I2C | +| syn | syn | Y11 | 5/D1 | Any pin may be used. | +| ack | ack | X6 | 4/D2 | Any pin. | +| rs_out | rst | Y12 | | Optional reset link. | + +The `syn` and `ack` wires provide synchronisation: pins used are arbitrary. In +addition provision may be made for the Pyboard to reset the target if it +crashes and fails to respond. 
If this is required, link a Pyboard pin to the +target's `reset` pin. + +I2C requires the devices to be connected via short links and to share a common +ground. The `sda` and `scl` lines also require pullup resistors. On the Pyboard +V1.x these are fitted. If pins lacking these resistors are used, pullups to +3.3V should be supplied. A typical value is 4.7KΩ. + +On the Pyboard D the 3.3V supply must be enabled with +```python +machine.Pin.board.EN_3V3.value(1) +``` +This also enables the I2C pullups on the X side. + +###### [Contents](./I2C.md#contents) + +# 3. Design + +The I2C specification is asymmetrical: only master devices can initiate +transfers. This library enables slaves to initiate a data exchange by +interrupting the master which then starts the I2C transactions. There is a +timing issue in that the I2C master requires that the slave be ready before it +initiates a transfer. Further, in the MicroPython implementation, a slave which +is ready will block until the transfer is complete. + +To meet the timing constraint the slave must initiate all exchanges; it does +this by interrupting the master. The slave is therefore termed the `Initiator` +and the master `Responder`. The `Initiator` must be a Pyboard or other STM +board supporting slave mode via the `pyb` module. + +To enable `Responder` to start an unsolicited data transfer, `Initiator` +periodically interrupts `Responder` to cause a data exchange. If either +participant has no data to send it sends an empty string. Strings are exchanged +at a fixed rate to limit the interrupt overhead on `Responder`. This implies a +latency on communications in either direction; the rate (maximum latency) is +under application control. By default it is 100ms. + +The module will run under official or `fast_io` builds of `asyncio`. Owing to +the latency discussed above, the choice has little effect on the performance of +this interface. 
+ +A further issue common to most communications protocols is synchronisation: +the devices won't boot simultaneously. Initially, and after the `Initiator` +reboots the `Responder`, both ends run a synchronisation phase. The interface +starts to run once each end has determined that its counterpart is ready. + +The design assumes exclusive use of the I2C interface. Hard or soft I2C may be +used. + +###### [Contents](./I2C.md#contents) + +# 4. API + +Demos and the scripts below assume a Pyboard linked to an ESP8266 as follows: + +| Pyboard | ESP8266 | Notes | +|:-------:|:-------:|:--------:| +| gnd | gnd | | +| Y9 | 0/D3 | I2C scl | +| Y10 | 2/D4 | I2C sda | +| Y11 | 5/D1 | syn | +| Y12 | rst | Optional | +| X6 | 4/D2 | ack | + +#### Running the demos + +On the ESP8266 issue: +```python +import as_drivers.i2c.i2c_esp +``` +and on the Pyboard: +```python +import as_drivers.i2c.i2c_init +``` + +The following scripts demonstrate basic usage. They may be copied and pasted at +the REPL. +On Pyboard: + +```python +import asyncio +from pyb import I2C # Only pyb supports slave mode +from machine import Pin +from as_drivers.i2c.asi2c_i import Initiator + +i2c = I2C(2, mode=I2C.SLAVE) +syn = Pin('Y11') +ack = Pin('X6') +rst = (Pin('Y12'), 0, 200) +chan = Initiator(i2c, syn, ack, rst) + +async def receiver(): + sreader = asyncio.StreamReader(chan) + while True: + res = await sreader.readline() + print('Received', int(res)) + +async def sender(): + swriter = asyncio.StreamWriter(chan, {}) + n = 0 + while True: + await swriter.awrite('{}\n'.format(n)) + n += 1 + await asyncio.sleep_ms(800) + +asyncio.create_task(receiver()) +try: + asyncio.run(sender()) +except KeyboardInterrupt: + print('Interrupted') +finally: + asyncio.new_event_loop() # Still need ctrl-d because of interrupt vector + chan.close() # for subsequent runs +``` + +On ESP8266: + +```python +import asyncio +from machine import Pin, I2C +from as_drivers.i2c.asi2c import Responder + +i2c = I2C(scl=Pin(0),sda=Pin(2)) 
# software I2C +syn = Pin(5) +ack = Pin(4) +chan = Responder(i2c, syn, ack) + +async def receiver(): + sreader = asyncio.StreamReader(chan) + while True: + res = await sreader.readline() + print('Received', int(res)) + +async def sender(): + swriter = asyncio.StreamWriter(chan, {}) + n = 1 + while True: + await swriter.awrite('{}\n'.format(n)) + n += 1 + await asyncio.sleep_ms(1500) + +asyncio.create_task(receiver()) +try: + asyncio.run(sender()) +except KeyboardInterrupt: + print('Interrupted') +finally: + asyncio.new_event_loop() # Still need ctrl-d because of interrupt vector + chan.close() # for subsequent runs +``` + +###### [Contents](./I2C.md#contents) + +## 4.1 Channel class + +This is the base class for `Initiator` and `Responder` subclasses and provides +support for the streaming API. Applications do not instantiate `Channel` +objects. + +Method: + 1. `close` No args. Restores the interface to its power-up state. + +Coroutine: + 1. `ready` No args. Pause until synchronisation has been achieved. + +## 4.2 Initiator class + +##### Constructor args: + 1. `i2c` An `I2C` instance. + 2. `pin` A `Pin` instance for the `syn` signal. + 3. `pinack` A `Pin` instance for the `ack` signal. + 4. `reset=None` Optional tuple defining a reset pin (see below). + 5. `verbose=True` If `True` causes debug messages to be output. + 6. `cr_go=False` Optional coroutine to run at startup. See + [4.2.2](./I2C.md#422-optional-coroutines). + 7. `go_args=()` Optional tuple of args for above coro. + 8. `cr_fail=False` Optional coro to run on ESP8266 fail or reboot. + 9. `f_args=()` Optional tuple of args for above. + +The `reset` tuple consists of (`pin`, `level`, `time`). If provided, and the +`Responder` times out, `pin` will be set to `level` for duration `time` ms. A +Pyboard or ESP8266 target with an active low reset might have: + +```python +(machine.Pin('Y12'), 0, 200) +``` + +If the `Initiator` has no `reset` tuple and the `Responder` times out, an +`OSError` will be raised. 
+ +`Pin` instances passed to the constructor must be instantiated by `machine`. + +##### Class variables: + 1. `t_poll=100` Interval (ms) for `Initiator` polling `Responder`. + 2. `rxbufsize=200` Size of receive buffer. This should exceed the maximum + message length. + +See [Section 4.2.1](./I2C.md#421-configuration). + +##### Instance variables: + +The `Initiator` maintains instance variables which may be used to measure its +peformance. See [Section 4.2.1](./I2C.md#421-configuration). + +##### Coroutine: + 1. `reboot` If a `reset` tuple was provided, reboot the `Responder`. + +## 4.2.1 Configuration + +The `Initiator` class variables determine the behaviour of the interface. Where +these are altered, it should be done before instantiating `Initiator` or +`Responder`. + +`Initiator.t_poll` This defines the polling interval for incoming data. Shorter +values reduce the latency when the `Responder` sends data; at the cost of a +raised CPU overhead (at both ends) in processing `Responder` polling. + +Times are in ms. + +To measure performance when running application code these `Initiator` instance +variables may be read: + 1. `nboots` Number of times `Responder` has failed and been rebooted. + 2. `block_max` Maximum blocking time in μs. + 3. `block_sum` Cumulative total of blocking time (μs). + 4. `block_cnt` Transfer count: mean blocking time is `block_sum/block_cnt`. + +See test program `i2c_init.py` for an example of using the above. + +## 4.2.2 Optional coroutines + +These are intended for applications where the `Responder` may reboot at runtime +either because I2C failure was detected or because the application issues an +explicit reboot command. + +The `cr_go` and `cr_fail` coroutines provide for applications which implement +an application-level initialisation sequence on first and subsequent boots of +the `Responder`. Such applications need to ensure that the initialisation +sequence does not conflict with other coros accessing the channel. 
+ +The `cr_go` coro runs after synchronisation has been achieved. It runs +concurrently with the coro which keeps the link open (`Initiator._run()`), but +should run to completion reasonably quickly. Typically it performs any app +level synchronisation, starts or re-enables application coros, and quits. + +The `cr_fail` routine will prevent the automatic reboot from occurring until +it completes. This may be used to prevent user coros from accessing the channel +until reboot is complete. This may be done by means of locks or task +cancellation. Typically `cr_fail` will terminate when this is done, so that +`cr_go` has unique access to the channel. + +If an explicit `.reboot()` is issued, a reset tuple was provided, and `cr_fail` +exists, it will run and the physical reboot will be postponed until it +completes. + +Typical usage: +```python +from as_drivers.i2c.asi2c_i import Initiator +chan = Initiator(i2c, syn, ack, rst, verbose, self._go, (), self._fail) +``` + +###### [Contents](./I2C.md#contents) + +## 4.3 Responder class + +##### Constructor args: + 1. `i2c` An `I2C` instance. + 2. `pin` A `Pin` instance for the `syn` signal. + 3. `pinack` A `Pin` instance for the `ack` signal. + 4. `verbose=True` If `True` causes debug messages to be output. + +`Pin` instances passed to the constructor must be instantiated by `machine`. + +##### Class variables: + 1. `addr=0x12` Address of I2C slave. If the default address is to be changed, + it should be set before instantiating `Initiator` or `Responder`. `Initiator` + application code must then instantiate the I2C accordingly. + 2. `rxbufsize=200` Size of receive buffer. This should exceed the maximum + message length. Consider reducing this in ESP8266 applications to save RAM. + +###### [Contents](./I2C.md#contents) + +# 5. Limitations + +Currently, on the ESP8266, the code is affected by +[iss 5714](https://github.com/micropython/micropython/issues/5714). 
Unless the +board is repeatedly pinged, the ESP8266 fails periodically and is rebooted by +the Pyboard. + +## 5.1 Blocking + +Exchanges of data occur via `Initiator._sendrx()`, a synchronous method. This +blocks the schedulers at each end for a duration dependent on the number of +bytes being transferred. Tests were conducted with the supplied test scripts +and the official version of `asyncio`. Note that these scripts send short +strings. + +With `Responder` running on a Pyboard V1.1 the duration of the ISR was up to +1.3ms. + +With `Responder` on an ESP8266 running at 80MHz, `Initiator` blocked for up to +10ms with a mean time of 2.7ms; at 160MHz the figures were 7.5ms and 2.1ms. The +ISR uses soft interrupts, and blocking commences as soon as the interrupt pin +is asserted. Consequently the time for which `Initiator` blocks depends on +`Responder`'s interrupt latency; this may be extended by garbage collection. + +Figures are approximate: actual blocking time is dependent on the length of the +strings, the speed of the processors, soft interrupt latency and the behaviour +of other coroutines. If blocking time is critical it should be measured while +running application code. + +## 5.2 Buffering and RAM usage + +The protocol implements flow control: the `StreamWriter` at one end of the link +will pause until the last string transmitted has been read by the corresponding +`StreamReader`. + +Outgoing data is unbuffered. `StreamWriter.awrite` will pause until pending +data has been transmitted. + +Incoming data is stored in a buffer whose length is set by the `rxbufsize` +constructor arg. If an incoming payload is too long to fit the buffer a +`ValueError` will be thrown. + +## 5.3 Responder crash detection + +The `Responder` protocol executes in a soft interrupt context. This means that +the application code might fail (for example executing an infinite loop) while +the ISR continues to run; `Initiator` would therefore see no problem. 
To trap +this condition regular messages should be sent from `Responder`, with +`Initiator` application code timing out on their absence and issuing `reboot`. + +This also has implications when testing. If a `Responder` application is +interrupted with `ctrl-c` the ISR will continue to run. To test crash detection +issue a soft or hard reset to the `Responder`. + +###### [Contents](./I2C.md#contents) + +# 6. Hacker notes + +I tried a variety of approaches before settling on a synchronous method for +data exchange coupled with 2-wire hardware handshaking. The chosen approach +minimises the time for which the schedulers are blocked. Blocking occurs +because of the need to initiate a blocking transfer on the I2C slave before the +master can initiate a transfer. + +A one-wire handshake using open drain outputs is feasible but involves explicit +delays. I took the view that a 2-wire solution is easier should anyone want to +port the `Responder` to a platform such as the Raspberry Pi. The design has no +timing constraints and uses normal push-pull I/O pins. + +I experienced a couple of obscure issues affecting reliability. Calling `pyb` +`I2C` methods with an explicit timeout caused rare failures when the target was +also a Pyboard. Using `micropython.schedule` to defer RAM allocation also +provoked rare failures. This may be the reason why I never achieved reliable +operation with hard IRQ's on ESP8266. + +I created a version which eliminated RAM allocation by the `Responder` ISR to +use hard interrupts. This reduced blocking further. Unfortunately I failed to +achieve reliable operation on an ESP8266 target. This version introduced some +complexity into the code so was abandoned. If anyone feels like hacking, the +branch `i2c_hard_irq` exists. + +The main branch aims to minimise allocation while achieving reliability. + +PR's to reduce allocation and enable hard IRQ's welcome. 
I will expect them to +run the two test programs for >10,000 messages with ESP8266 and Pyboard +targets. Something I haven't yet achieved (with hard IRQ's). diff --git a/v3/docs/INTERRUPTS.md b/v3/docs/INTERRUPTS.md new file mode 100644 index 0000000..a2c684c --- /dev/null +++ b/v3/docs/INTERRUPTS.md @@ -0,0 +1,252 @@ +# Interfacing asyncio to interrupts + +This note aims to provide guidance in resolving common queries about the use of +interrupts in `asyncio` applications. + +# 1. Does the requirement warrant an interrupt? + +Writing an interrupt service routine (ISR) requires care: see the +[official docs](https://docs.micropython.org/en/latest/reference/isr_rules.html). +There are restrictions (detailed below) on the way an ISR can interface with +`asyncio`. Finally, on many platforms interrupts are a limited resource. In +short interrupts are extremely useful but, if a practical alternative exists, +it should be seriously considered. + +Requirements that warrant an interrupt along with a `asyncio` interface are +ones that require a microsecond-level response, followed by later processing. +Examples are: + * Where the event requires an accurate timestamp. + * Where a device supplies data and needs to be rapidly serviced. Data is put + in a pre-allocated buffer for later processing. + +Examples needing great care: + * Where arrival of data triggers an interrupt and subsequent interrupts may + occur after a short period of time. + * Where arrival of an interrupt triggers complex application behaviour: see + notes on [context](./INTERRUPTS.md#32-context). + +# 2. Alternatives to interrupts + +## 2.1 Polling + +An alternative to interrupts is to use polling. For values that change slowly +such as ambient temperature or pressure this simplification is achieved with no +discernible impact on performance. 
+```python +temp = 0 +async def read_temp(): + global temp + while True: + temp = thermometer.read() + await asyncio.sleep(60) +``` +In cases where interrupts arrive at a low frequency it is worth considering +whether there is any gain in using an interrupt rather than polling the +hardware: + +```python +async def read_data(): + while True: + while not device.ready(): + await asyncio.sleep_ms(0) + data = device.read() + # process the data +``` +The overhead of polling is typically low. The MicroPython VM might use +300μs to determine that the device is not ready. This will occur once per +iteration of the scheduler, during which time every other pending task gets a +slice of execution. If there were five tasks, each of which used 5ms of VM time, +the overhead would be `0.3*100/(5*5)=1.2%` - see [latency](./INTERRUPTS.md#31-latency-in-asyncio). + +Devices such as pushbuttons and switches are best polled as, in most +applications, latency of (say) 100ms is barely detectable. Interrupts lead to +difficulties with +[contact bounce](http://www.ganssle.com/debouncing.htm) which is readily +handled using a simple [asyncio driver](./DRIVERS.md). There may be exceptions +which warrant an interrupt such as fast games or cases where switches are +machine-operated such as limit switches. + +## 2.2 The I/O mechanism + +Devices such as UARTs and sockets are supported by the `asyncio` stream +mechanism. The UART driver uses interrupts at a firmware level, but exposes +its interface to `asyncio` by means of the `StreamReader` and `StreamWriter` +classes. These greatly simplify the use of such devices. + +It is also possible to write device drivers in Python enabling the use of the +stream mechanism. This is covered in +[the tutorial](https://github.com/peterhinch/micropython-async/blob/master/v3/docs/TUTORIAL.md#64-writing-streaming-device-drivers). + +# 3. Using interrupts + +This section details some of the issues to consider where interrupts are to be +used with `asyncio`. 
+ +## 3.1 Latency in asyncio + +Consider an application with four continuously running tasks, plus a fifth +which is paused waiting on an interrupt. Each of the four tasks will yield to +the scheduler at intervals. Each task will have a worst-case period +of blocking between yields. Assume that the worst-case times for each task are +50, 30, 25 and 10ms. If the program logic allows it, the situation may arise +where all of these tasks are queued for execution, and all are poised to block +for the maximum period. Assume that at that moment the fifth task is triggered. + +With current `asyncio` design that fifth task will be queued for execution +after the four pending tasks. It will therefore run after +(50+30+25+10) = 115ms +An enhancement to `asyncio` has been discussed that would reduce that to 50ms, +but that is the irreduceable minimum for any cooperative scheduler. + +The key issue with latency is the case where a second interrupt occurs while +the first is still waiting for its `asyncio` handler to be scheduled. If this +is a possibility, mechanisms such as buffering or queueing must be considered. + +## 3.2 Context + +Consider an incremental encoder providing input to a GUI. Owing to the need to +track phase information an interrupt must be used for the encoder's two +signals. An ISR determines the current position of the encoder, and if it has +changed, calls a method in the GUI code. + +The consequences of this can be complex. A widget's visual appearance may +change. User callbacks may be triggered, running arbitrary Python code. +Crucially all of this occurs in an ISR context. This is unacceptable for all +the reasons identified in +[this doc](https://docs.micropython.org/en/latest/reference/isr_rules.html). + +Note that using `micropython.schedule` does not address every issue associated +with ISR context because restictions remain on the use of `asyncio` +operations. This is because such code can pre-empt the `asyncio` scheduler. 
+This is discussed further below. + +A solution to the encoder problem is to have the ISR maintain a value of the +encoder's position, with a `asyncio` task polling this and triggering the GUI +callback. This ensures that the callback runs in a `asyncio` context and can +run any Python code, including `asyncio` operations such as creating and +cancelling tasks. This will work if the position value is stored in a single +word, because changes to a word are atomic (non-interruptible). A more general +solution is to use `asyncio.ThreadSafeFlag`. + +## 3.3 Interfacing an ISR with asyncio + +This should be read in conjunction with the discussion of the `ThreadSafeFlag` +in [the official docs](https://docs.micropython.org/en/latest/library/asyncio.html#asyncio.ThreadSafeFlag) +and [the tutorial](./TUTORIAL.md#36-threadsafeflag). + +Assume a hardware device capable of raising an interrupt when data is +available. The requirement is to read the device fast and subsequently process +the data using a `asyncio` task. An obvious (but wrong) approach is: + +```python +data = bytearray(4) +# isr runs in response to an interrupt from device +def isr(): + device.read_into(data) # Perform a non-allocating read + asyncio.create_task(process_data()) # BUG +``` + +This is incorrect because when an ISR runs, it can pre-empt the `asyncio` +scheduler with the result that `asyncio.create_task()` may disrupt the +scheduler. This applies whether the interrupt is hard or soft and also applies +if the ISR has passed execution to another function via `micropython.schedule`: +as described above, all such code runs in an ISR context. + +The safe way to interface between ISR-context code and `asyncio` is to have a +coroutine with synchronisation performed by `asyncio.ThreadSafeFlag`. 
The +following fragment illustrates the creation of a task in response to an +interrupt: +```python +tsf = asyncio.ThreadSafeFlag() +data = bytearray(4) + +def isr(_): # Interrupt handler + device.read_into(data) # Perform a non-allocating read + tsf.set() # Trigger task creation + +async def check_for_interrupts(): + while True: + await tsf.wait() + asyncio.create_task(process_data()) +``` +It is worth considering whether there is any point in creating a task rather +than using this template: +```python +tsf = asyncio.ThreadSafeFlag() +data = bytearray(4) + +def isr(_): # Interrupt handler + device.read_into(data) # Perform a non-allocating read + tsf.set() # Trigger task creation + +async def process_data(): + while True: + await tsf.wait() + # Process the data here before waiting for the next interrupt +``` +## 3.4 micropython.RingIO + +This is a byte-oriented circular buffer [documented here] +(https://docs.micropython.org/en/latest/library/micropython.html#micropython.RingIO), +which provides an efficient way to return data from an ISR to an `asyncio` task. +It is implemented in C so performance is high, and supports stream I/O. The +following is a usage example: +```py +import asyncio +from machine import Timer +import micropython +micropython.alloc_emergency_exception_buf(100) + +imu = SomeDevice() # Fictional hardware IMU device + +FRAMESIZE = 8 # Count, x, y, z accel +BUFSIZE = 200 # No. of records. Size allows for up to 200ms of asyncio latency. +rio = micropython.RingIO(FRAMESIZE * BUFSIZE + 1) # RingIO requires an extra byte +count = 0x4000 # Bit14 is "Start of frame" marker. Low bits are a frame counter. + +def cb(_): # Timer callback. Runs at 1KHz. 
+ global count # Frame count + imu.get_accel_irq() # Trigger the device + rio.write(chr(count >> 8)) + rio.write(chr(count & 0xff)) + rio.write(imu.accel.ix) # Device returns bytes objects (length 2) + rio.write(imu.accel.iy) + rio.write(imu.accel.iz) + count += 1 + +async def main(nrecs): + t = Timer(freq=1_000, callback=cb) + sreader = asyncio.StreamReader(rio) + rpb = 100 # Records per block + blocksize = FRAMESIZE * rpb + with open('/sd/imudata', 'wb') as f: + swriter = asyncio.StreamWriter(f, {}) + while nrecs: + data = await sreader.readexactly(blocksize) + swriter.write(data) + await swriter.drain() + nrecs -= rpb + t.deinit() + +asyncio.run(main(1_000)) +``` +In this example data is acquired at a timer-controlled rate of 1KHz, with eight +bytes being written to the `RingIO` every tick. The `main()` task reads the data +stream and writes it out to a file. Similar code was tested on a Pyboard 1.1. + +## 3.5 Other Thread Safe Classes + +Other classes capable of being used to interface an ISR with `asyncio` are +discussed [here](https://github.com/peterhinch/micropython-async/blob/master/v3/docs/THREADING.md), +notably the `ThreadSafeQueue`. This ring buffer allows entries to be objects +other than bytes. It supports the asynchronous iterator protocol (rather than +stream I/O) and is written in Python. + +# 4. Conclusion + +The `ThreadSafeFlag` and `RingIO` classes are the official `asyncio` constructs +which can safely be used in an ISR context. Unofficial "thread safe" classes may +also be used. Beware of classes such as `Queue` and `RingbufQueue` which are not +thread safe. + +###### [Main tutorial](./TUTORIAL.md#contents) diff --git a/nec_ir/README.md b/v3/docs/NEC_IR.md similarity index 67% rename from nec_ir/README.md rename to v3/docs/NEC_IR.md index fb4ad2c..794ef39 100644 --- a/nec_ir/README.md +++ b/v3/docs/NEC_IR.md @@ -8,17 +8,39 @@ microcontroller. The driver and test programs run on the Pyboard and ESP8266. 
-# Files +## An alternative solution + +This solution provides an example of an asynchronous device driver. A more +complete IR solution may be found +[here](https://github.com/peterhinch/micropython_ir). This supports other +protocols and IR "blasting". It does not use `asyncio` but is nonblocking and +is compatible with `asyncio` applications. + +# Demo scripts + +The following prints data and address values received from a remote. These +values enable you to respond to individual buttons. +```python +import as_drivers.nec_ir.art +``` + +Control an onboard LED using a remote. The data and address values must be +changed to match your characterised remote. +```python +import as_drivers.nec_ir.art1 +``` - 1. ``aremote.py`` The device driver. - 2. ``art.py`` A test program to characterise a remote. - 3. ``art1.py`` Control an onboard LED using a remote. The data and addresss - values need changing to match your characterised remote. +###### [Main V3 README](../README.md) + +# Installation + +Copy the `as_drivers/nec_ir` directory and contents to the target hardware. +Copy `primitives` and contents to the target. # Dependencies -The driver requires the ``uasyncio`` library and the file ``asyn.py`` from this -repository. +The driver requires the `asyncio` V3 library and the `primitives` package +from this repository. # Usage @@ -36,10 +58,10 @@ Data values are 8 bit. Addresses may be 8 or 16 bit depending on whether the remote uses extended addressing. If a button is held down a repeat code is sent. In this event the driver -returns a data value of ``REPEAT`` and the address associated with the last +returns a data value of `REPEAT` and the address associated with the last valid data block. -To characterise a remote run ``art.py`` and note the data value for each button +To characterise a remote run `art.py` and note the data value for each button which is to be used. If the address is less than 256, extended addressing is not in use. @@ -48,7 +70,7 @@ not in use. 
IR reception is inevitably subject to errors, notably if the remote is operated near the limit of its range, if it is not pointed at the receiver or if its batteries are low. So applications must check for, and usually ignore, errors. -These are flagged by data values < ``REPEAT``. +These are flagged by data values < `REPEAT`. On the ESP8266 there is a further source of errors. This results from the large and variable interrupt latency of the device which can exceed the pulse @@ -62,9 +84,9 @@ Users tend to press the key again if no acknowledgement is received. The constructor takes the following positional arguments. - 1. ``pin`` A ``Pin`` instance for the decoder chip. - 2. ``cb`` The user callback function. - 3. ``extended`` Set ``False`` to enable extra error checking if the remote + 1. `pin` A `Pin` instance for the decoder chip. + 2. `cb` The user callback function. + 3. `extended` Set `False` to enable extra error checking if the remote returns an 8 bit address. 4. Further arguments, if provided, are passed to the callback. @@ -72,11 +94,11 @@ The callback receives the following positional arguments: 1. The data value returned from the remote. 2. The address value returned from the remote. - 3. Any further arguments provided to the ``NEC_IR`` constructor. + 3. Any further arguments provided to the `NEC_IR` constructor. Negative data values are used to signal repeat codes and transmission errors. -The test program ``art1.py`` provides an example of a minimal application. +The test program `art1.py` provides an example of a minimal application. # How it works @@ -103,7 +125,7 @@ interrupt in a burst sets an event, passing the time of the state change. A coroutine waits on the event, yields for the duration of a data burst, then decodes the stored data before calling the user-specified callback. 
-Passing the time to the ``Event`` instance enables the coro to compensate for +Passing the time to the `Event` instance enables the coro to compensate for any asyncio latency when setting its delay period. The algorithm promotes interrupt handler speed over RAM use: the 276 bytes used @@ -115,19 +137,25 @@ in the interrupt service routine. Data values passed to the callback are normally positive. Negative values indicate a repeat code or an error. -``REPEAT`` A repeat code was received. +`REPEAT` A repeat code was received. -Any data value < ``REPEAT`` denotes an error. In general applications do not +Any data value < `REPEAT` denotes an error. In general applications do not need to decode these, but they may be of use in debugging. For completeness they are listed below. -``BADSTART`` A short (<= 4ms) start pulse was received. May occur due to IR +`BADSTART` A short (<= 4ms) start pulse was received. May occur due to IR interference, e.g. from fluorescent lights. The TSOP4838 is prone to producing 200µs pulses on occasion, especially when using the ESP8266. -``BADBLOCK`` A normal data block: too few edges received. Occurs on the ESP8266 +`BADBLOCK` A normal data block: too few edges received. Occurs on the ESP8266 owing to high interrupt latency. -``BADREP`` A repeat block: an incorrect number of edges were received. -``OVERRUN`` A normal data block: too many edges received. -``BADDATA`` Data did not match check byte. -``BADADDR`` Where ``extended`` is ``False`` the 8-bit address is checked +`BADREP` A repeat block: an incorrect number of edges were received. +`OVERRUN` A normal data block: too many edges received. +`BADDATA` Data did not match check byte. +`BADADDR` Where `extended` is `False` the 8-bit address is checked against the check byte. This code is returned on failure. + +# Files + + 1. `aremote.py` The device driver. + 2. `art.py` A test program to characterise a remote. + 3. 
`art1.py` diff --git a/v3/docs/PRIMITIVES.md b/v3/docs/PRIMITIVES.md new file mode 100644 index 0000000..30e6e97 --- /dev/null +++ b/v3/docs/PRIMITIVES.md @@ -0,0 +1 @@ +### For historical reasons documentation for primitives may be found [here](./DRIVERS.md). diff --git a/v3/docs/SCHEDULE.md b/v3/docs/SCHEDULE.md new file mode 100644 index 0000000..38bf0a6 --- /dev/null +++ b/v3/docs/SCHEDULE.md @@ -0,0 +1,635 @@ +# 0. Contents + + 1. [Scheduling tasks](./SCHEDULE.md#1-scheduling-tasks) + 2. [Overview](./SCHEDULE.md#2-overview) + 3. [Installation](./SCHEDULE.md#3-installation) + 4. [The schedule coroutine](./SCHEDULE.md#4-the-schedule-coroutine) The primary interface for asyncio. + 4.1 [Time specifiers](./SCHEDULE.md#41-time-specifiers) + 4.2 [Calendar behaviour](./SCHEDULE.md#42-calendar-behaviour) Calendars can be tricky... +      4.2.1 [Behaviour of mday and wday values](./SCHEDULE.md#421-behaviour-of-mday-and-wday-values) +      4.2.2 [Time causing month rollover](./SCHEDULE.md#422-time-causing-month-rollover) + 4.3 [Limitations](./SCHEDULE.md#43-limitations) + 4.4 [The Unix build](./SCHEDULE.md#44-the-unix-build) + 4.5 [Callback interface](./SCHEDULE.md#45-callback-interface) Alternative interface using callbacks. + 4.6 [Event interface](./SCHEDULE.md#46-event-interface) Alternative interface using Event instances. +5. [The cron object](./SCHEDULE.md#5-the-cron-object) The rest of this doc is for hackers and synchronous coders. + 5.1 [The time to an event](./SCHEDULE.md#51-the-time-to-an-event) + 5.2 [How it works](./SCHEDULE.md#52-how-it-works) + 6. [Hardware timing limitations](./SCHEDULE.md#6-hardware-timing-limitations) + 7. [Use in synchronous code](./SCHEDULE.md#7-use-in-synchronous-code) If you really must. + 7.1 [Initialisation](./SCHEDULE.md#71-initialisation)__ + 8. [The simulate script](./SCHEDULE.md#8-the-simulate-script) Rapidly test sequences. + 9. 
[Daylight saving time](./SCHEDULE.md#9-daylight-saving-time) Notes on timezone and DST when running under an OS. + +Release note: +7th Sep 2024 Document timezone and DST behaviour under Unix build. +11th Dec 2023 Document astronomy module, allowing scheduling based on Sun and +Moon rise and set times. +23rd Nov 2023 Add asynchronous iterator interface. +3rd April 2023 Fix issue #100. Where an iterable is passed to `secs`, triggers +must now be at least 10s apart (formerly 2s). + +##### [Tutorial](./TUTORIAL.md#contents) +##### [Main V3 README](../README.md) + +# 1. Scheduling tasks + +A common requirement is to schedule tasks to occur at specific times in the +future. This module facilitates this. The module can accept wildcard values +enabling tasks to be scheduled in a flexible manner. For example you might want +a callback to run at 3.10 am on every month which has an "r" in the name. + +It is partly inspired by the Unix cron table, also by the +[Python schedule](https://github.com/dbader/schedule) module. Compared to the +latter it is less capable but is small, fast and designed for microcontroller +use. Repetitive and one-shot events may be created. + +It is ideally suited for use with `asyncio` and basic use requires minimal +`asyncio` knowledge. Example code is provided offering various ways of +responding to timing triggers including running callbacks. The module can be +also be used in synchronous code and an example is provided. + +It is cross-platform and has been tested on Pyboard, Pyboard D, ESP8266, ESP32 +and the Unix build. + +The `astronomy` module extends this to enable tasks to be scheduled at times +related to Sun and Moon rise and set times. This is documented +[here](https://github.com/peterhinch/micropython-samples/blob/master/astronomy/README.md). + +# 2. Overview + +The `schedule` coroutine (`sched/sched.py`) is the interface for use with +`asyncio`. 
Three interface alternatives are offered which vary in the behaviour +which occurs when a scheduled trigger occurs: +1. An asynchronous iterator is triggered. +2. A user defined `Event` is set. +3. A user defined callback or coroutine is launched. + +One or more `schedule` tasks may be assigned to a `Sequence` instance. This +enables an `async for` statement to be triggered whenever any of the `schedule` +tasks is triggered. + +Under the hood the `schedule` function instantiates a `cron` object (in +`sched/cron.py`). This is the core of the scheduler: it is a closure created +with a time specifier and returning the time to the next scheduled event. Users +of `asyncio` do not need to deal with `cron` instances. This library can also be +used in synchronous code, in which case `cron` instances must explicitly be +created. + +##### [Top](./SCHEDULE.md#0-contents) + +# 3. Installation + +The `sched` directory and contents must be copied to the target's filesystem. +This may be done with the official +[mpremote](https://docs.micropython.org/en/latest/reference/mpremote.html): +```bash +$ mpremote mip install "github:peterhinch/micropython-async/v3/as_drivers/sched" +``` +On networked platforms it may be installed with [mip](https://docs.micropython.org/en/latest/reference/packages.html). +```py +>>> mip.install("github:peterhinch/micropython-async/v3/as_drivers/sched") +``` +Currently these tools install to `/lib` on the built-in Flash memory. To install +to a Pyboard's SD card [rshell](https://github.com/dhylands/rshell) may be used. +Move to `as_drivers` on the PC, run `rshell` and issue: +``` +> rsync sched /sd/sched +``` + +The following files are installed in the `sched` directory. + 1. `cron.py` Computes time to next event. + 2. `sched.py` The `asyncio` `schedule` function: schedule a callback or coro. + 3. `primitives/__init__.py` Necessary for `sched.py`. + 4. `asynctest.py` Demo of asynchronous scheduling. + 5. `synctest.py` Synchronous scheduling demo. 
For `asyncio` phobics only. + 6. `crontest.py` A test for `cron.py` code. + 7. `simulate.py` A simple script which may be adapted to prove that a `cron` + instance will behave as expected. See [The simulate script](./SCHEDULE.md#8-the-simulate-script). + 8. `__init__.py` Empty file for Python package. + +The `crontest` script is only of interest to those wishing to adapt `cron.py`. +It will run on any MicroPython target. + +The [astronomy](https://github.com/peterhinch/micropython-samples/blob/master/astronomy/README.md) +module may be installed with +```bash +$ mpremote mip install "github:peterhinch/micropython-samples/astronomy" +``` + +# 4. The schedule coroutine + +This enables a response to be triggered at intervals. The response can be +specified to occur forever, once only or a fixed number of times. `schedule` +is a coroutine and is typically run as a background task as follows: +```python +asyncio.create_task(schedule(foo, 'every 4 mins', hrs=None, mins=range(0, 60, 4))) +``` + +Positional args: + 1. `func` This may be a callable (callback or coroutine) to run, a user defined + `Event` or an instance of a `Sequence`. + 2. Any further positional args are passed to the callable or the `Sequence`; + these args can be used to enable the triggered object to determine the source + of the trigger. + +Keyword-only args. Args 1..6 are +[Time specifiers](./SCHEDULE.md#41-time-specifiers): a variety of data types +may be passed, but all ultimately produce integers (or `None`). Valid numbers +are shown as inclusive ranges. + 1. `secs=0` Seconds (0..59). + 2. `mins=0` Minutes (0..59). + 3. `hrs=3` Hours (0..23). + 4. `mday=None` Day of month (1..31). + 5. `month=None` Months (1..12). + 6. `wday=None` Weekday (0..6 Mon..Sun). + 7. `times=None` If an integer `n` is passed the callable will be run at the + next `n` scheduled times. Hence a value of 1 specifies a one-shot event. + +The `schedule` function only terminates if `times` is not `None`. 
In this case +termination occurs after the last run of the callable and the return value is +the value returned by that run of the callable. + +Because `schedule` does not terminate promptly it is usually started with +`asyncio.create_task`, as in the following example. The event-based interface +can be simpler than using callables. + +The remainder of this section describes the asynchronous iterator interface as +this is the simplest to use. The other interfaces are discussed in +* [4.5 Callback interface](./SCHEDULE.md#45-callback-interface) +* [4.6 Event interface](./SCHEDULE.md#46-event-interface) + +One or more `schedule` instances are collected in a `Sequence` object. This +supports the asynchronous iterator interface: +```python +import asyncio +from sched.sched import schedule, Sequence +from time import localtime + +async def main(): + print("Asynchronous test running...") + seq = Sequence() # A Sequence comprises one or more schedule instances + asyncio.create_task(schedule(seq, 'every 4 mins', hrs=None, mins=range(0, 60, 4))) + asyncio.create_task(schedule(seq, 'every 5 mins', hrs=None, mins=range(0, 60, 5))) + asyncio.create_task(schedule(seq, 'every 3 mins', hrs=None, mins=range(0, 60, 3))) + # A one-shot trigger + asyncio.create_task(schedule(seq, 'one shot', hrs=None, mins=range(0, 60, 2), times=1)) + async for args in seq: + yr, mo, md, h, m, s, wd = localtime()[:7] + print(f"Event {h:02d}:{m:02d}:{s:02d} on {md:02d}/{mo:02d}/{yr} args: {args}") + +try: + asyncio.run(main()) +finally: + _ = asyncio.new_event_loop() +``` +Note that the asynchronous iterator produces a `tuple` of the args passed to the +`schedule` that triggered it. This enables the code to determine the source of +the trigger. + +##### [Top](./SCHEDULE.md#0-contents) + +## 4.1 Time specifiers + +The args may be of the following types. + 1. `None` This is a wildcard matching any value. 
Do not use for `secs`. + 2. An integer. + 3. An object supporting the Python iterator protocol and iterating over + integers. For example `hrs=(3, 17)` will cause events to occur at 3am and 5pm, + `wday=range(0, 5)` specifies weekdays. Tuples, lists, ranges or sets may be + passed. + +Legal integer values are listed above. Basic validation is done as soon as +`schedule` is run. + +Note the implications of the `None` wildcard. Setting `mins=None` will schedule +the event to occur on every minute (equivalent to `*` in a Unix cron table). +Setting `secs=None` will cause a `ValueError`. + +Passing an iterable to `secs` is not recommended: this library is intended for +scheduling relatively long duration events. For rapid sequencing, schedule a +coroutine which awaits `asyncio` `sleep` or `sleep_ms` routines. If an +iterable is passed, triggers must be at least ten seconds apart or a +`ValueError` will result. + +Default values schedule an event every day at 03.00.00. + +## 4.2 Calendar behaviour + +Specifying a day in the month which exceeds the length of a specified month +(e.g. `month=(2, 6, 7), mday=30`) will produce a `ValueError`. February is +assumed to have 28 days. + +### 4.2.1 Behaviour of mday and wday values + +The following describes how to schedule something for (say) the second Sunday +in a month, plus limitations of doing this. + +If a month is specified which differs from the current month, the day in the +month defaults to the 1st. This can be overridden with `mday` and `wday`, so +you can specify the 21st (`mday=21`) or the first Sunday in the month +(`wday=6`). If `mday` and `wday` are both specified, `mday` is applied first. +This enables the Nth instance of a day to be defined. To specify the second +Sunday in the month specify `mday=8` to skip the first week, and set `wday=6` +to specify Sunday. Unfortunately you can't specify the last (say) Tuesday in +the month. 
+ +Specifying `wday=d` and `mday=n` where n > 22 could result in a day beyond the +end of the month. It's not obvious what constitutes rational behaviour in this +pathological corner case. Validation will throw a `ValueError` in this case. + +### 4.2.2 Time causing month rollover + +The following describes behaviour which I consider correct. + +On the last day of the month there are circumstances where a time specifier can +cause a day rollover. Consider application start. If a callback is scheduled +with a time specifier offering only times prior to the current time, its month +increments and the day changes to the 1st. This is the soonest that the event +can occur at the specified time. + +Consider the case where the next month is disallowed. In this case the month +will change to the next valid month. This code, run at 9am on 31st July, would +aim to run `foo` at 1.59 on 1st October. +```python +asyncio.create_task(schedule(foo, month=(2, 7, 10), hrs=1, mins=59)) +``` + +##### [Top](./SCHEDULE.md#0-contents) + +## 4.3 Limitations + +The underlying `cron` code has a resolution of 1 second. The library is +intended for scheduling infrequent events (`asyncio` has its own approach to +fast scheduling). + +Specifying `secs=None` will cause a `ValueError`. The minimum interval between +scheduled events is 2 seconds. Attempts to schedule events with a shorter gap +will raise a `ValueError`. + +A `cron` call typically takes 270 to 520μs on a Pyboard, but the upper bound +depends on the complexity of the time specifiers. + +On hardware platforms the MicroPython `time` module does not handle daylight +saving time. Scheduled times are relative to system time. Under the Unix build, +where the locale uses daylight saving, its effects should be considered. See +[Daylight saving time](./SCHEDULE.md#9-daylight-saving-time). + +## 4.4 The Unix build + +Asynchronous use requires `asyncio` V3, so ensure this is installed on the +Linux target. 
This may be checked with: +```py +import asyncio +asyncio.__version__ +``` +The module uses local time. When running under an OS, local time is affected by +geographical longitude (timezone - TZ) and daylight saving time (DST). The use +of local time avoids TZ issues but has consequences when the underlying time +source changes due to crossing a DST boundary. + +This is explained in detail in [Daylight saving time](./SCHEDULE.md#9-daylight-saving-time). + +##### [Top](./SCHEDULE.md#0-contents) + +## 4.5 Callback interface + +In this instance a user defined `callable` is passed as the first `schedule` arg. +A `callable` may be a function or a coroutine. It is possible for multiple +`schedule` instances to call the same callback, as in the example below. The +code is included in the library as `sched/asynctest.py` and may be run as below. +```python +import sched.asynctest +``` +This is the demo code. +```python +import asyncio +from sched.sched import schedule +from time import localtime + +def foo(txt): # Demonstrate callback + yr, mo, md, h, m, s, wd = localtime()[:7] + fst = 'Callback {} {:02d}:{:02d}:{:02d} on {:02d}/{:02d}/{:02d}' + print(fst.format(txt, h, m, s, md, mo, yr)) + +async def bar(txt): # Demonstrate coro launch + yr, mo, md, h, m, s, wd = localtime()[:7] + fst = 'Coroutine {} {:02d}:{:02d}:{:02d} on {:02d}/{:02d}/{:02d}' + print(fst.format(txt, h, m, s, md, mo, yr)) + await asyncio.sleep(0) + +async def main(): + print('Asynchronous test running...') + asyncio.create_task(schedule(foo, 'every 4 mins', hrs=None, mins=range(0, 60, 4))) + asyncio.create_task(schedule(foo, 'every 5 mins', hrs=None, mins=range(0, 60, 5))) + # Launch a coroutine + asyncio.create_task(schedule(bar, 'every 3 mins', hrs=None, mins=range(0, 60, 3))) + # Launch a one-shot task + asyncio.create_task(schedule(foo, 'one shot', hrs=None, mins=range(0, 60, 2), times=1)) + await asyncio.sleep(900) # Quit after 15 minutes + +try: + asyncio.run(main()) +finally: + _ = 
asyncio.new_event_loop() +``` +##### [Top](./SCHEDULE.md#0-contents) + +## 4.6 Event interface + +In this instance a user defined `Event` is passed as the first `schedule` arg. +It is possible for multiple `schedule` instances to trigger the same `Event`. +The user is responsible for clearing the `Event`. This interface has a drawback +in that extra positional args passed to `schedule` are lost. +```python +import asyncio +from sched.sched import schedule +from time import localtime + +async def main(): + print("Asynchronous test running...") + evt = asyncio.Event() + asyncio.create_task(schedule(evt, hrs=10, mins=range(0, 60, 4))) + while True: + await evt.wait() # Multiple tasks may wait on an Event + evt.clear() # It must be cleared. + yr, mo, md, h, m, s, wd = localtime()[:7] + print(f"Event {h:02d}:{m:02d}:{s:02d} on {md:02d}/{mo:02d}/{yr}") + +try: + asyncio.run(main()) +finally: + _ = asyncio.new_event_loop() +``` +See [tutorial](https://github.com/peterhinch/micropython-async/blob/master/v3/docs/TUTORIAL.md#32-event). +Also [this doc](https://github.com/peterhinch/micropython-async/blob/master/v3/docs/EVENTS.md) +for a discussion of event-based programming. + +##### [Top](./SCHEDULE.md#0-contents) + +# 5. The cron object + +This is the core of the scheduler. Users of `asyncio` do not need to concern +themselves with it. It is documented for those wishing to modify the code and +for those wanting to perform scheduling in synchronous code. + +It is a closure whose creation accepts a time specification for future +triggers. When called it is passed a time value in seconds since the epoch. It +returns the number of seconds to wait for the next trigger to occur. It stores +no state. + +It takes the following keyword-only args. A flexible set of data types are +accepted namely [time specifiers](./SCHEDULE.md#41-time-specifiers). Valid +numbers are shown as inclusive ranges. + 1. `secs=0` Seconds (0..59). + 2. `mins=0` Minutes (0..59). + 3. `hrs=3` Hours (0..23). 
+ 4. `mday=None` Day of month (1..31). + 5. `month=None` Months (1..12). + 6. `wday=None` Weekday (0..6 Mon..Sun). + +## 5.1 The time to an event + +When the `cron` instance is run, it must be passed a time value (normally the +time now as returned by `time.time()`). The instance returns the number of +seconds to the first event matching the specifier. + +```python +from sched.cron import cron +cron1 = cron(hrs=None, mins=range(0, 60, 15)) # Every 15 minutes of every day +cron2 = cron(mday=25, month=12, hrs=9) # 9am every Christmas day +cron3 = cron(wday=(0, 4)) # 3am every Monday and Friday +now = int(time.time()) # Unix build returns a float here +tnext = min(cron1(now), cron2(now), cron3(now)) # Seconds until 1st event +``` + +##### [Top](./SCHEDULE.md#0-contents) + +## 5.2 How it works + +When a cron instance is run it seeks a future time and date relative to the +passed time value. This will be the soonest matching the specifier. A `cron` +instance is a conventional function and does not store state. Repeated calls +will return the same value if passed the same time value (`now` in the above +example). + +##### [Top](./SCHEDULE.md#0-contents) + +# 6. Hardware timing limitations + +The code has been tested on Pyboard 1.x, Pyboard D, RP2, ESP32 and ESP8266. All +except ESP8266 have good timing performance. Pyboards can be calibrated to +timepiece precision using a cheap DS3231 and +[this utility](https://github.com/peterhinch/micropython-samples/tree/master/DS3231). + +The ESP8266 has poor time stability so is not well suited to long term timing +applications. On my reference board timing drifted by 1.4mins/hr, an error of +2.3%. + +Boards with internet connectivity can periodically synchronise to an NTP server +but this carries a risk of sudden jumps in the system time which may disrupt +`asyncio` and the scheduler. + +##### [Top](./SCHEDULE.md#0-contents) + +# 7. Use in synchronous code + +It is possible to use the `cron` closure in synchronous code. 
This involves +the mildly masochistic task of writing an event loop, an example of which is +illustrated below. In this example a task list entry is a tuple with the +following contents. + 1. The `cron` instance. + 2. The callback to run. + 3. A tuple of arguments for the callback. + 4. A boolean, `True` if the callback is to be run once only. + 5. A boolean, `True` if the task has been put on the pending queue. + +The code below may be found in `sched/synctest.py` and may be run by issuing +```python +import sched.synctest +``` +This is the demo code. +```python +from .cron import cron +from time import localtime, sleep, time + +def foo(txt): + yr, mo, md, h, m, s, wd = localtime()[:7] + fst = "{} {:02d}:{:02d}:{:02d} on {:02d}/{:02d}/{:02d}" + print(fst.format(txt, h, m, s, md, mo, yr)) + +def main(): + print('Synchronous test running...') + tasks = [] # Entries: cron, callback, args, one_shot + cron4 = cron(hrs=None, mins=range(0, 60, 4)) + tasks.append([cron4, foo, ('every 4 mins',), False, False]) + cron5 = cron(hrs=None, mins=range(0, 60, 5)) + tasks.append([cron5, foo, ('every 5 mins',), False, False]) + cron3 = cron(hrs=None, mins=range(0, 60, 3)) + tasks.append([cron3, foo, ('every 3 mins',), False, False]) + cron2 = cron(hrs=None, mins=range(0, 60, 2)) + tasks.append([cron2, foo, ('one shot',), True, False]) + to_run = [] + while True: + now = time() # Ensure constant: get once per iteration. + tasks.sort(key=lambda x:x[0](now)) + to_run.clear() # Pending tasks + deltat = tasks[0][0](now) # Time to pending task(s) + for task in (t for t in tasks if t[0](now) == deltat): # Tasks with same delta t + to_run.append(task) + task[4] = True # Has been scheduled + # Remove one-shot tasks which have been scheduled + tasks = [t for t in tasks if not (t[3] and t[4])] + sleep(deltat) + for tsk in to_run: + tsk[1](*tsk[2]) + sleep(2) # Ensure seconds have rolled over + +main() +``` + +In my opinion the asynchronous version is cleaner and easier to understand. 
It +is also more versatile because the advanced features of `asyncio` are +available to the application including cancellation of scheduled tasks. The +above code is incompatible with `asyncio` because of the blocking calls to +`time.sleep()`. + +## 7.1 Initialisation + +Where a time specifier is an iterator (e.g. `mins=range(0, 60, 15)`) and there +are additional constraints (e.g. `hrs=3`) it may be necessary to delay the +start. The problem is specific to scheduling a sequence at a future time, and +there is a simple solution (which the asynchronous version implements +transparently). + +A `cron` object searches forwards from the current time. Assume the above case. +If the code starts at 7:05 it picks the first later minute in the `range`, +i.e. `mins=15`, then picks the hour. This means that the first trigger occurs +at 3:15. Subsequent behaviour will be correct, but the first trigger would be +expected at 3:00. The solution is to delay start until the minutes value is in +the range `45 < mins <= 59`. The general solution is to delay until just before +the first expected callback: + +```python +def wait_for(**kwargs): + tim = mktime(localtime()[:3] + (0, 0, 0, 0, 0)) # Midnight last night + now = round(time()) + scron = cron(**kwargs) # Cron instance for search. + while tim < now: # Find first event in sequence + # Defensive. scron should never return 0, but if it did the loop would never quit + tim += max(scron(tim), 1) + twait = tim - now - 2 # Wait until 2 secs before first trigger + if twait > 0: + sleep(twait) + while True: + now = round(time()) + tw = scron(now) + sleep(tw + 2) +``` + +##### [Top](./SCHEDULE.md#0-contents) + +# 8. The simulate script + +In `sched/simulate.py`. This enables the behaviour of sets of args to `schedule` +to be rapidly checked. The `sim` function should be adapted to reflect the +application specifics. 
The default is: +```python +def sim(*args): + set_time(*args) + cs = cron(hrs = 0, mins = 59) + wait(cs) + cn = cron(wday=(0, 5), hrs=(1, 10), mins = range(0, 60, 15)) + for _ in range(10): + wait(cn) + print("Run payload.\n") + +sim(2023, 3, 29, 15, 20, 0) # Start time: year, month, mday, hrs, mins, secs +``` +The `wait` function returns immediately, but prints the length of the delay and +the value of system time when the delay ends. In this instance the start of a +sequence is delayed to ensure that the first trigger occurs at 01:00. + +##### [Top](./SCHEDULE.md#0-contents) + +# 9. Daylight saving time + +Thanks are due to @rhermanklink for raising this issue. + +This module is primarily intended for use on a microcontroller, where the time +source is a hardware RTC. This is usually set to local time, and must not change +for daylight saving time (DST); on a microcontroller neither this module nor +`asyncio` will work correctly if system time is changed at runtime. Under an OS, +some kind of thaumaturgy enables `asyncio` to tolerate this behaviour. + +Internally the module uses local time (`time.time()` and `time.localtime()`) to +retrieve the current time. Under an OS, in a locale where DST is used, the time +returned by these methods does not increase monotonically but is subject to +sudden changes at a DST boundary. + +A `cron` instance accepts "time now" measured in seconds from the epoch, and +returns the time to wait for the first scheduled event. This wait time is +calculated on the basis of a monotonic local time. Assume that the time is +10:00:00 on 1st August, and the first scheduled event is at 10:00:00 on 1st +November. The `cron` instance will return the time to wait. The application task +waits for that period, but local clocks will have changed so that the time reads +9:00:00. + +The primary application for this module is on microcontrollers. 
Further, there +are alternatives such as [Python schedule](https://github.com/dbader/schedule) +which are designed to run under an OS. Fixing this would require a timezone +solution; in many cases the application can correct for DST. Consequently this +behaviour has been deemed to be in the "document, don't fix" category. + +The following notes are general observations which may be of interest. + +### The epoch + +The Python `time.time()` method returns the number of seconds since the epoch. +This is computed relative to the system clock; consecutive calls around a DST +change will yield a sudden change (+3600 secs for a +one hour change). +This value may be converted to a time tuple with `time.gmtime(secs)` or with +`time.localtime(secs)`. If UTC and local time differ, for the same value of +`secs` these will produce UTC-relative and localtime-relative tuples. + +Consider `time.mktime()`. This converts a time tuple to a number of seconds +since the epoch. The time difference between a specified time and the epoch is +independent of timezone and DST. The specified time and the epoch are assumed to +be defined in the same (unknown, unspecified) time system. Consequently, if a +delay is defined by the difference between two `mktime()` values, that delay +will be unaffected if a DST change occurs between those two values. 
This may be +verified with the following script: +```py +from time import mktime, gmtime, localtime +from sys import implementation +cpython = implementation.name == 'cpython' +if cpython: + from time import struct_time + +start = [2024, 9, 5, 11, 49, 2, 3, 249, 1] +sept = round(mktime(struct_time(start))) if cpython else mktime(start) + +end = start[:] +end[1] += 2 # Same time + 2months Crosses DST boundary in the UK + +november = round(mktime(struct_time(end))) if cpython else mktime(end) +print(november - sept) + +if november - sept == 5270400: + print('PASS') +else: + print('FAIL') +``` +This test passes on the Unix build, under CPython, and on MicroPython on a +microcontroller. It also passes under an OS if the system's local time differs +substantially from UTC. + +The `cron` module returns a time difference between a passed time value and one +produced by `mktime()`: accordingly `cron` takes no account of local time or +DST. If local time is changed while waiting for the period specified by `cron`, +at the end of the delay, clocks measuring local time will indicate an incorrect +time. + +This is only an issue when running under an OS: if it is considered an error, it +should be addressed in application code. diff --git a/syncom_as/README.md b/v3/docs/SYNCOM.md similarity index 63% rename from syncom_as/README.md rename to v3/docs/SYNCOM.md index e32a943..4a9e50e 100644 --- a/syncom_as/README.md +++ b/v3/docs/SYNCOM.md @@ -5,7 +5,7 @@ MicroPython, where a UART cannot be used. An example is where one device is an ESP8266 board. While this has one bidirectional UART, this may be in use either as a REPL console, for viewing debug output, or for other puposes. -It is intended for use in asynchronous programs and uses uasyncio. +It is intended for use in asynchronous programs and uses asyncio. The module offers a bidirectional full duplex communication channel between two hardware devices. 
Its unit of communication is an arbitrary Python object @@ -21,14 +21,14 @@ indefinitely. The module will run on devices with minimal features and makes no assumptions about processing performance: at a physical level the interface is synchronous. If each device has two pins which can be used for output and two for input and -supports uasyncio it should work. +supports asyncio it should work. ###### [Main README](./README.md) ## Example usage ```python -import uasyncio as asyncio +import asyncio from syncom import SynCom from machine import Pin @@ -44,14 +44,13 @@ mrx = Pin(13, Pin.IN) mckin = Pin(12, Pin.IN) channel = SynCom(True, mckin, mckout, mrx, mtx) -loop = asyncio.get_event_loop() -loop.create_task(channel.start(passive_task)) try: - loop.run_forever() + asyncio.run(channel.start(passive_task)) except KeyboardInterrupt: pass finally: mckout(0) # For a subsequent run + _ = asyncio.new_event_loop() ``` ## Advantages @@ -88,7 +87,7 @@ message. Further such blocking calls are incompatible with asynchronous programming. -The two ends of the link are defined as ``initiator`` and ``passive``. These +The two ends of the link are defined as `initiator` and `passive`. These describe their roles in initialisation. Once running the protocol is symmetrical and the choice as to which unit to assign to each role is arbitrary: the test programs assume that the Pyboard is the initiator. @@ -103,15 +102,13 @@ arbitrary: the test programs assume that the Pyboard is the initiator. # Hardware connections -Each device has the following logical connections, ``din``, ``dout``, ``ckin``, -``ckout``. The ``din`` (data in) of one device is linked to ``dout`` (data out) -of the other, and vice versa. Likewise the clock signals ``ckin`` and ``ckout``. +Each device has the following logical connections, `din`, `dout`, `ckin`, +`ckout`. The `din` (data in) of one device is linked to `dout` (data out) +of the other, and vice versa. Likewise the clock signals `ckin` and `ckout`. 
To enable a response to crash detection a pin on the Pyboard is connected to -the Reset pin on the target. The polarity of the reset pulse is definable in -code by virtue of the ``Signal`` object. The pins below are those used in the -test programs. - +the Reset pin on the target. The polarity of the reset pulse is assumed to be +active low. | Initiator | Passive | Pyboard | ESP8266 | |:-----------:|:-----------:|:-------:|:-------:| @@ -122,15 +119,9 @@ test programs. | ckin (i/p) | ckout (o/p) | Y8 | 12 | -# Dependency - -Unless using string mode the Pickle module is required. - -[pickle.py](https://github.com/micropython/micropython-lib/tree/master/pickle) - # class SynCom -A SynCom instance is idle until its ``start`` task is scheduled. The driver +A SynCom instance is idle until its `start` task is scheduled. The driver causes the host device to resets the target and wait for synchronisation. When the interface is running the passed user task is launched; unless an error occurs this runs forever using the interface as required by the application. If @@ -142,51 +133,50 @@ interface to re-synchronise. The user task is then re-launched. Positional arguments: - 1. ``passive`` Boolean. One end of the link sets this ``True``, the other - ``False``. - 2. ``ckin`` An initialised input ``Pin`` instance. - 3. ``ckout`` An initialised output ``Pin`` instance. It should be set to zero. - 4. ``din`` An initialised input ``Pin`` instance. - 5. ``dout`` An initialised output ``Pin`` instance. - 6. ``sig_reset`` (optional) default ``None``. A ``Signal`` instance. Should be - configured so that when ``True`` the target will be reset. - 7. ``timeout`` (optional) default 0. Units ms. See below. - 8. ``string_mode`` (optional) default ``False``. See String Mode below. - 9. ``verbose`` (optional) default ``True``. If set, debug messages will be + 1. `passive` Boolean. One end of the link sets this `True`, the other + `False`. + 2. `ckin` An initialised input `Pin` instance. + 3. 
`ckout` An initialised output `Pin` instance. It should be set to zero. + 4. `din` An initialised input `Pin` instance. + 5. `dout` An initialised output `Pin` instance. + 6. `sig_reset` (optional) default `None`. A `Pin` instance. + 7. `timeout` (optional) default 0. Units ms. See below. + 8. `string_mode` (optional) default `False`. See String Mode below. + 9. `verbose` (optional) default `True`. If set, debug messages will be output to the REPL. ## Synchronous Methods - * ``send`` Argument an arbitrary Python object (or a string in string mode). + * `send` Argument an arbitrary Python object (or a string in string mode). Puts the item on the queue for transmission. - * ``any`` No args. + * `any` No args. Returns the number of received objects on the receive queue. - * ``running`` No args. - Returns ``True`` if the channel is running, ``False`` if the target has timed + * `running` No args. + Returns `True` if the channel is running, `False` if the target has timed out. ## Asynchronous Methods (tasks) - * ``await_obj`` Argument ``t_ms`` default 10ms. See below. + * `await_obj` Argument `t_ms` default 10ms. See below. Wait for reception of a Python object or string and return it. If the - interface times out (because the target has crashed) return ``None``. - * ``start`` Optional args ``user_task``, ``fail_delay``. + interface times out (because the target has crashed) return `None`. + * `start` Optional args `user_task`, `fail_delay`. Starts the interface. If a user_task is provided this will be launched when synchronisation is achived. The user task should return if a timeout is - detected (by ``await_obj`` returning ``None``). On return the driver will wait - for ``fail_delay`` (see below) before asserting the reset signal to reset the + detected (by `await_obj` returning `None`). On return the driver will wait + for `fail_delay` (see below) before asserting the reset signal to reset the target. 
The user task will be re-launched when synchronisation is achieved. The user_task is passed a single argument: the SynCom instance. If the user task is a bound method it should therefore be declared as taking two args: - ``self`` and the channel. + `self` and the channel. -The ``fail_delay`` (in seconds) is a convenience to allow user tasks to +The `fail_delay` (in seconds) is a convenience to allow user tasks to terminate before the user task is restarted. On detection of a timeout an application should set a flag to cause tasks instantiated by the user task to -terminate, then issue ``return``. This avoids unlimited growth of the task +terminate, then issue `return`. This avoids unlimited growth of the task queue. -The ``t_ms`` argument to ``await_obj`` determines how long the task pauses +The `t_ms` argument to `await_obj` determines how long the task pauses between checks for received data. Longer intervals increase latency but (possibly) improve raw throughput. @@ -194,27 +184,24 @@ between checks for received data. Longer intervals increase latency but ## Synchronisation -When the host launches the ``start`` coroutine it runs forever. It resets the -target which instantiates a SynCom object and launches its ``start`` coroutine. -The two then synchronise by repeatedly transmitting a ``_SYN`` character. Once +When the host launches the `start` coroutine it runs forever. It resets the +target which instantiates a SynCom object and launches its `start` coroutine. +The two then synchronise by repeatedly transmitting a `_SYN` character. Once this has been received the link is synchronised and the user task is launched. The user task runs forever on the target. On the host it may return if a target -timeout is detected. In this instance the host's ``start`` task waits for the -optional ``fail_delay`` before resetting the target and re-synchronising the +timeout is detected. 
In this instance the host's `start` task waits for the +optional `fail_delay` before resetting the target and re-synchronising the interface. The user task, which ran to completion, is re-launched. ## String Mode -On resource constrained platforms the pickle module can be problematic: the -method used to convert a string to an arbitrary Python object involves invoking -the compiler which demands significant amounts of RAM. This can be avoided by -sending only strings to the resource constrained platform, which must then -interpret the strings as required by the application. The protocol places some -restrictions. The bytes must not include 0, and they are limited to 7 bits. The -latter limitation can be removed (with small performance penalty) by changing -the value of ``_BITS_PER_CH`` to 8. The limitations allow for normal UTF8 -strings. +By default `ujson` is used to serialise data. This can be avoided by sending +strings to the remote platform, which must then interpret the strings as +required by the application. The protocol places some restrictions. The bytes +must not include 0, and they are limited to 7 bits. The latter limitation can +be removed (with small performance penalty) by changing the value of +`_BITS_PER_CH` to 8. The limitations allow for normal UTF8 strings. ## Timing @@ -230,13 +217,3 @@ ESP8266. This includes encoding the object as a string, transmitting it, decoding and modifying it, followed by similar processing to send it back. Hence converting the figures to bps will produce a lower figure (on the order of 656bps at 160MHz). - -## The Pickle module - -In normal mode the library uses the Python pickle module for object -serialisation. This has some restrictions, notably on the serialisation of user -defined class instances. See the Python documentation. Currently there is a -MicroPython issue #2280 where a memory leak occurs if you pass a string which -varies regularly. 
Pickle saves a copy of the string (if it hasn't already -occurred) each time until RAM is exhausted. The workround is to use any data -type other than strings or bytes objects; or to use string mode. diff --git a/v3/docs/THREADING.md b/v3/docs/THREADING.md new file mode 100644 index 0000000..04c6b34 --- /dev/null +++ b/v3/docs/THREADING.md @@ -0,0 +1,825 @@ +# Linking asyncio and other contexts + +This document is primarily for those wishing to interface `asyncio` code with +that running under the `_thread` module. It presents classes for that purpose +which may also find use for communicating between threads and in interrupt +service routine (ISR) applications. It provides an overview of the problems +implicit in pre-emptive multi tasking. + +It is not an introduction into ISR coding. For this see +[the official docs](http://docs.micropython.org/en/latest/reference/isr_rules.html) +and [this doc](https://github.com/peterhinch/micropython-async/blob/master/v3/docs/INTERRUPTS.md) +which provides specific guidance on interfacing `asyncio` with ISR's. + +Because of [this issue](https://github.com/micropython/micropython/issues/7965) +the `ThreadSafeFlag` class does not work under the Unix build. The classes +presented here depend on this: none can be expected to work on Unix until this +is fixed. + +To install the threadsafe classes discussed here, connect the target hardware +to WiFi and issue: +```python +import mip +mip.install("github:peterhinch/micropython-async/v3/threadsafe") +``` +On any target `mpremote` may be used: +```bash +$ mpremote mip install github:peterhinch/micropython-async/v3/threadsafe +``` + +###### [Main README](../README.md) +###### [Tutorial](./TUTORIAL.md) + +# Contents + + 1. [Introduction](./THREADING.md#1-introduction) The various types of pre-emptive code. 
+ 1.1 [Hard Interrupt Service Routines](./THREADING.md#11-hard-interrupt-service-routines)
+ 1.2 [Soft Interrupt Service Routines](./THREADING.md#12-soft-interrupt-service-routines) Also code scheduled by micropython.schedule()
+ 1.3 [Threaded code on one core](./THREADING.md#13-threaded-code-on-one-core)
+ 1.4 [Threaded code on multiple cores](./THREADING.md#14-threaded-code-on-multiple-cores)
+ 1.5 [Globals](./THREADING.md#15-globals)
+ 1.6 [Allocation](./THREADING.md#16-allocation)
+ 1.7 [Debugging](./THREADING.md#17-debugging)
+ 2. [Sharing data](./THREADING.md#2-sharing-data)
+  2.1 [A pool](./THREADING.md#21-a-pool) Sharing a set of variables.
+  2.2 [ThreadSafeQueue](./THREADING.md#22-threadsafequeue)
+      2.2.1 [Blocking](./THREADING.md#221-blocking)
+      2.2.2 [Object ownership](./THREADING.md#222-object-ownership)
+      2.2.3 [A complete example](./THREADING.md#223-a-complete-example)
+ 3. [Synchronisation](./THREADING.md#3-synchronisation)
+  3.1 [Threadsafe Event](./THREADING.md#31-threadsafe-event)
+  3.2 [Message](./THREADING.md#32-message) A threadsafe event with data payload.
+ 4. [Taming blocking functions](./THREADING.md#4-taming-blocking-functions) Enabling asyncio to handle blocking code.
+  4.1 [Basic approach](./THREADING.md#41-basic-approach)
+  4.2 [More general solution](./THREADING.md#42-more-general-solution)
+ 5. [Sharing a stream device](./THREADING.md#5-sharing-a-stream-device)
+ 6. [Glossary](./THREADING.md#6-glossary) Terminology of realtime coding.
+
+# 1. Introduction
+
+Various issues arise when `asyncio` applications interface with code running
+in a different context. Supported contexts are:
+ 1. A hard interrupt service routine (ISR).
+ 2. A soft ISR. This includes code scheduled by `micropython.schedule()`.
+ 3. Another thread running on the same core.
+ 4. Code running on a different core (currently only supported on RP2).
+
+In all these cases the contexts share a common VM (the virtual machine which
+executes Python bytecode). This enables the contexts to share global state. The
+contexts differ in their use of the GIL [see glossary](./THREADING.md#6-glossary).
+
+This section compares the characteristics of the four contexts. Consider this
+function which updates a global dictionary `d` from a hardware device. The
+dictionary is shared with an `asyncio` task. (The function serves to illustrate
+concurrency issues: it is not the most efficient way to transfer data.)
+```python
+def update_dict():
+    d["x"] = read_data(0)
+    d["y"] = read_data(1)
+    d["z"] = read_data(2)
+```
+This might be called in a hard or soft ISR, in a thread running on the same
+core as `asyncio`, or in a thread running on a different core. Each of these
+contexts has different characteristics, outlined below. In all these cases
+"thread safe" constructs are needed to interface `asyncio` tasks with code
+running in these contexts. The official `ThreadSafeFlag`, or the classes
+documented here, may be used.
+
+Beware that some apparently obvious ways to interface an ISR to `asyncio`
+introduce subtle bugs discussed in
+[this doc](https://github.com/peterhinch/micropython-async/blob/master/v3/docs/INTERRUPTS.md)
+referenced above. The only reliable interface is via a thread safe class,
+usually `ThreadSafeFlag`.
+
+## 1.1 Hard Interrupt Service Routines
+
+ 1. The ISR sees the GIL state of the main program: if the latter has locked
+ the GIL, the ISR will still run. This renders the GIL, as seen by the ISR,
+ ineffective. Built in Python objects (`list`, `dict` etc.) will not be
+ corrupted if an ISR runs while the object's contents are being modified as
+ these updates are atomic. This guarantee is limited: the code will not crash,
+ but there may be consistency problems. See **consistency** below.
The lack of GIL + functionality means that failure can occur if the object's _structure_ is + modified, for example by the main program adding or deleting a dictionary + entry. This results in issues for [globals](./THREADING.md#15-globals). + 2. An ISR will run to completion before the main program regains control. This + means that if the ISR updates multiple items, when the main program resumes, + those items will be mutually consistent. The above code fragment will provide + mutually consistent data (but see **consistency** below). + 3. The fact that ISR code runs to completion means that it must run fast to + avoid disrupting the main program or delaying other ISR's. ISR code should not + call blocking routines. It should not wait on locks because there is no way + for the interrupted code to release the lock. See locks below. + 4. If a burst of interrupts can occur faster than `asyncio` can schedule the + handling task, data loss can occur. Consider using a `ThreadSafeQueue`. Note + that if this high rate is sustained something will break: the overall design + needs review. It may be necessary to discard some data items. + +#### locks + +There is a valid case where a hard ISR checks the status of a lock, aborting if +the lock is set. + +#### consistency + +Consider this code fragment: +```python +a = [0, 0, 0] +b = [0, 0, 0] +def hard_isr(): + a[0] = read_data(0) + b[0] = read_data(1) + +async def foo(): + while True: + await process(a + b) +``` +A hard ISR can occur during the execution of a bytecode. This means that the +combined list passed to `process()` might comprise old a + new b. 
Even though +the ISR produces consistent data, the fact that it can preempt the main code +at any time means that to read consistent data interrupts must be disabled: +```python +async def foo(): + while True: + state = machine.disable_irq() + d = a + b # Disable for as short a time as possible + machine.enable_irq(state) + await process(d) +``` + +## 1.2 Soft Interrupt Service Routines + +This also includes code scheduled by `micropython.schedule()` which is assumed +to have been called from a hard ISR. + + 1. A soft ISR can only run at certain bytecode boundaries, not during + execution of a bytecode. It cannot interrupt garbage collection; this enables + soft ISR code to allocate. + 2. As per hard ISR's. + 3. A soft ISR should still be designed to complete quickly. While it won't + delay hard ISR's it nevertheless pre-empts the main program. In principle it + can wait on a lock, but only if the lock is released by a hard ISR or another + hard context (a thread or code on another core). + 4. As per hard ISR's. + +## 1.3 Threaded code on one core + + 1. The common GIL ensures that built-in Python objects (`list`, `dict` etc.) + will not be corrupted if a read on one thread occurs while the object's + contents or the object's structure are being updated. + 2. This protection does not extend to user defined data structures. The fact + that a dictionary won't be corrupted by concurrent access does not imply that + its contents will be mutually consistent. In the code sample in section 1, if + the application needs mutual consistency between the dictionary values, a lock + is needed to ensure that a read cannot be scheduled while an update is in + progress. + 3. The above means that, for example, calling `asyncio.create_task` from a + thread is unsafe as it can destroy the mutual consistency of `asyncio` data + structures. + 4. 
Code running on a thread other than that running `asyncio` may block for + as long as necessary (an application of threading is to handle blocking calls + in a way that allows `asyncio` to continue running). + +## 1.4 Threaded code on multiple cores + +Currently this applies to RP2 and Unix ports, although as explained above the +thread safe classes offered here do not yet support Unix. + + 1. There is no common GIL. This means that under some conditions Python built + in objects can be corrupted. + 2. In the code sample there is a risk of the `asyncio` task reading the dict + at the same moment as it is being written. Updating a dictionary data entry is + atomic: there is no risk of corrupt data being read. In the code sample a lock + is only required if mutual consistency of the three values is essential. + 3. In the absence of a GIL some operations on built-in objects are not thread + safe. For example adding or deleting items in a `dict`. This extends to global + variables because these are implemented as a `dict`. See [Globals](./THREADING.md#15-globals). + 4. The observations in 1.3 re user defined data structures and `asyncio` + interfacing apply. + 5. Code running on a core other than that running `asyncio` may block for + as long as necessary. + +[See this reference from @jimmo](https://github.com/orgs/micropython/discussions/10135#discussioncomment-4309865). + +## 1.5 Globals + +Globals are implemented as a `dict`. Adding or deleting an entry is unsafe in +the main program if there is a context which accesses global data and does not +use the GIL. This means hard ISR's and code running on another core. The +following guidelines should be followed. + +Note that [PR 11604](https://github.com/micropython/micropython/pull/11604) +aims to fix this issue. Once merged, the use of globals will be threadsafe. + +All globals should be declared in the main program before an ISR starts to run, +and before code on another core is started. 
It is valid to insert placeholder +data, as updates to `dict` data are atomic. In the example below, a pointer to +the `None` object is replaced by a pointer to a class instance: a pointer +update is atomic so can occur while globals are accessed by code in other +contexts. +```python +display_driver = None +# Start code on other core +# It's now valid to do +display_driver = DisplayDriverClass(args) +``` +The hazard with globals can occur in other ways. The following would present a +hazard if `foo` were run for the first time while globals were being accessed: +```python +def foo(): + global bar + bar = 42 +``` +The hazard is avoided by instantiating `bar` in global scope (populated with a +placeholder) before allowing other contexts to run. + +If globals must be created or destroyed dynamically, a lock must be used. + +## 1.6 Allocation + +Memory allocation must be prevented from occurring while a garbage collection +(GC) is in progress. Normally this is handled transparently by the GIL; where +there is no GIL a lock is used. The one exception is the case of a hard ISR. It +is invalid to have a hard ISR waiting on a lock. Consequently hard ISR's are +disallowed from allocating and an exception is thrown if this is attempted. + +Consequently code running in all other contexts is free to allocate. + +## 1.7 Debugging + +A key practical point is that coding errors in synchronising threads can be +hard to locate: consequences can be extremely rare bugs or (in the case of +multi-core systems) crashes. It is vital to be careful in the way that +communication between the contexts is achieved. This doc aims to provide some +guidelines and code to assist in this task. + +There are two fundamental problems: data sharing and synchronisation. + +###### [Contents](./THREADING.md#contents) + +# 2. Sharing data + +## 2.1 A pool + +The simplest case is a shared pool of data. 
It is possible to share an `int` or
+`bool` because at machine code level writing an `int` is "atomic": it cannot be
+interrupted. A shared global `dict` might be replaced in its entirety by one
+process and read by another. This is safe because the shared variable is a
+pointer, and replacing a pointer is atomic. Problems arise when multiple fields
+are updated by one process and read by another, as the read might occur while
+the write operation is in progress.
+
+One approach is to use locking. This example solves data sharing, but does not
+address synchronisation:
+```python
+lock = _thread.allocate_lock()
+values = { "X": 0, "Y": 0, "Z": 0}
+def producer():
+    while True:
+        lock.acquire()
+        values["X"] = sensor_read(0)
+        values["Y"] = sensor_read(1)
+        values["Z"] = sensor_read(2)
+        lock.release()
+        time.sleep_ms(100)
+
+_thread.start_new_thread(producer, ())
+
+async def consumer():
+    while True:
+        lock.acquire()
+        await process(values) # Do something with the data
+        lock.release()
+        await asyncio.sleep_ms(0) # Ensure producer has time to grab the lock
+```
+Consider also this code:
+```python
+def consumer():
+    send(d["x"].height()) # d is a global dict
+    send(d["x"].width()) # d["x"] is an instance of a class
+```
+In this instance if the producer, running in a different context, changes
+`d["x"]` between the two `send()` calls, different objects will be accessed. A
+lock should be used.
+
+Locking is recommended where the producer runs in a different thread from
+`asyncio`. However the consumer might hold the lock for some time: in the
+first sample it will take time for the scheduler to execute the `process()`
+call, and the call itself will take time to run. In cases where the duration
+of a lock is problematic a `ThreadSafeQueue` is more appropriate than a locked
+pool as it decouples producer and consumer code.
+
+As stated above, if the producer is an ISR a lock is normally unusable.
+Producer code would follow this pattern: +```python +values = { "X": 0, "Y": 0, "Z": 0} +def producer(): + values["X"] = sensor_read(0) + values["Y"] = sensor_read(1) + values["Z"] = sensor_read(2) +``` +and the ISR would run to completion before `asyncio` resumed. However the ISR +might run while the `asyncio` task was reading the values: to ensure mutual +consistency of the dict values the consumer should disable interrupts while the +read is in progress. + +###### [Contents](./THREADING.md#contents) + +## 2.2 ThreadSafeQueue + +This queue is designed to interface between one `asyncio` task and a single +thread running in a different context. This can be an interrupt service routine +(ISR), code running in a different thread or code on a different core. See +[section 2.2.3](./THREADING.md#223-a-complete-example) for a complete usage +example. + +Any Python object may be placed on a `ThreadSafeQueue`. If bi-directional +communication is required between the two contexts, two `ThreadSafeQueue` +instances are required. + +Attributes of `ThreadSafeQueue`: + 1. It is of fixed capacity defined on instantiation. + 2. It uses a pre-allocated buffer of user selectable type (`Queue` uses a + dynamically allocated `list`). + 3. It is an asynchronous iterator allowing retrieval with `async for`. + 4. It provides synchronous "put" and "get" methods. If the queue becomes full + (put) or empty (get), behaviour is user definable. The method either blocks or + raises an `IndexError`. + +Constructor mandatory arg: + * `buf` Buffer for the queue, e.g. list, bytearray or array. If an integer is + passed, a list of this size is created. A buffer of size `N` can hold a + maximum of `N-1` items. Note that, where items on the queue are suitably + limited, bytearrays or arrays are more efficient than lists. + +Synchronous methods. + * `qsize` No arg. Returns the number of items in the queue. + * `empty` No arg. Returns `True` if the queue is empty. + * `full` No arg. 
Returns `True` if the queue is full.
+ * `get_sync` Arg `block=False`. Returns an object from the queue. Raises
+ `IndexError` if the queue is empty, unless `block==True` in which case the
+ method blocks until the `asyncio` tasks put an item on the queue.
+ * `put_sync` Args: the object to put on the queue, `block=False`. Raises
+ `IndexError` if the queue is full unless `block==True` in which case the
+ method blocks until the `asyncio` tasks remove an item from the queue.
+
+See the note below re blocking methods.
+
+Asynchronous methods:
+ * `put` Arg: the object to put on the queue. If the queue is full, it will
+ block until space is available.
+ * `get` No arg. Returns an object from the queue. If the queue is empty, it
+ will block until an object is put on the queue. Normal retrieval is with
+ `async for` but this method provides an alternative.
+
+In use as a data consumer the `asyncio` code will use `async for` to retrieve
+items from the queue. If it is a data provider it will use `put` to place
+objects on the queue.
+
+Data consumer:
+```python
+async def handle_queued_data(q):
+    async for obj in q:
+        # Process obj
+```
+Data provider:
+```python
+async def feed_queue(q):
+    while True:
+        data = await data_source()
+        await q.put(data)
+```
+The alternate thread will use synchronous methods.
+
+Data provider (throw if full):
+```python
+while True:
+    data = data_source()
+    try:
+        q.put_sync(data)
+    except IndexError:
+        # Queue is full
+```
+Data consumer (block while empty):
+```python
+while True:
+    data = q.get_sync(block=True) # May take a while if the asyncio side is slow
+    process(data) # Do something with it
+```
+
+###### [Contents](./THREADING.md#contents)
+
+### 2.2.1 Blocking
+
+These methods, called with `block=False`, produce an immediate return. To
+avoid an `IndexError` the user should check for full or empty status before
+calling.
+
+The synchronous `get_sync` and `put_sync` methods have blocking modes invoked
+by passing `block=True`.
Blocking modes are primarily intended for use in the
+non-`asyncio` context. If invoked in an `asyncio` task they must not be
+allowed to block because it would lock up the scheduler. Nor should they be
+allowed to block in an ISR where blocking can have unpredictable consequences.
+
+###### [Contents](./THREADING.md#contents)
+
+### 2.2.2 Object ownership
+
+Any Python object can be placed on a queue, but the user should be aware that
+once the producer puts an object on the queue it loses ownership of the object
+until the consumer has finished using it. In this sample the producer reads X,
+Y and Z values from a sensor, puts them in a list or array and places the
+object on a queue:
+```python
+def get_coordinates(q):
+    while True:
+        lst = [axis(0), axis(1), axis(2)] # Read sensors and put into list
+        q.put_sync(lst, block=True)
+```
+This is valid because a new list is created each time. The following will not
+work:
+```python
+def get_coordinates(q):
+    a = array.array("I", (0,0,0))
+    while True:
+        a[0], a[1], a[2] = [axis(0), axis(1), axis(2)]
+        q.put_sync(a, block=True)
+```
+The problem here is that the array is modified after being put on the queue. If
+the queue is capable of holding 10 objects, 10 array instances are required. Re
+using objects requires the producer to be notified that the consumer has
+finished with the item. In general it is simpler to create new objects and let
+the MicroPython garbage collector delete them as per the first sample.
+
+###### [Contents](./THREADING.md#contents)
+
+### 2.2.3 A complete example
+
+This demonstrates an echo server running on core 2. The `sender` task sends
+consecutive integers to the server, which echoes them back on a second queue.
+To install the threadsafe primitives, the `threadsafe` directory and its
+contents should be copied to the MicroPython target.
+
+```python
+import asyncio
+from threadsafe import ThreadSafeQueue
+import _thread
+from time import sleep_ms
+
+def core_2(getq, putq):  # Run on core 2
+    buf = []
+    while True:
+        while getq.qsize():  # Ensure no exception when queue is empty
+            buf.append(getq.get_sync())
+        for x in buf:
+            putq.put_sync(x, block=True)  # Wait if queue fills.
+        buf.clear()
+        sleep_ms(30)
+
+async def sender(to_core2):
+    x = 0
+    while True:
+        await to_core2.put(x := x + 1)
+
+async def main():
+    to_core2 = ThreadSafeQueue([0 for _ in range(10)])
+    from_core2 = ThreadSafeQueue([0 for _ in range(10)])
+    _thread.start_new_thread(core_2, (to_core2, from_core2))
+    asyncio.create_task(sender(to_core2))
+    n = 0
+    async for x in from_core2:
+        if not x % 1000:
+            print(f"Received {x} queue items.")
+        n += 1
+        assert x == n
+
+asyncio.run(main())
+```
+###### [Contents](./THREADING.md#contents)
+
+# 3. Synchronisation
+
+The principal means of synchronising `asyncio` code with that running in
+another context is the `ThreadSafeFlag`. This is discussed in the
+[official docs](https://docs.micropython.org/en/latest/library/asyncio.html#asyncio.ThreadSafeFlag)
+and [tutorial](https://github.com/peterhinch/micropython-async/blob/master/v3/docs/TUTORIAL.md#36-threadsafeflag).
+In essence a single `asyncio` task waits on a shared `ThreadSafeFlag`. Code
+running in another context sets the flag. When the scheduler regains control
+and other pending tasks have run, the waiting task resumes.
+
+## 3.1 Threadsafe Event
+
+The `ThreadSafeFlag` has a limitation in that only a single task can wait on
+it. The `ThreadSafeEvent` overcomes this. It is subclassed from `Event` and
+presents the same interface. The `set` method may be called from an ISR or from
+code running on another core. Any number of tasks may wait on it. To install
+the threadsafe primitives, the `threadsafe` directory and its contents should
+be copied to the MicroPython target. 
+ +The following Pyboard-specific code demos its use in a hard ISR: +```python +import asyncio +from threadsafe import ThreadSafeEvent +from pyb import Timer + +async def waiter(n, evt): + try: + await evt.wait() + print(f"Waiter {n} got event") + except asyncio.CancelledError: + print(f"Waiter {n} cancelled") + +async def can(task): + await asyncio.sleep_ms(100) + task.cancel() + +async def main(): + evt = ThreadSafeEvent() + tim = Timer(4, freq=1, callback=lambda t: evt.set()) + nt = 0 + while True: + tasks = [asyncio.create_task(waiter(n + 1, evt)) for n in range(4)] + asyncio.create_task(can(tasks[nt])) + await asyncio.gather(*tasks, return_exceptions=True) + evt.clear() + print("Cleared event") + nt = (nt + 1) % 4 + +asyncio.run(main()) +``` +## 3.2 Message + +The `Message` class uses [ThreadSafeFlag](./TUTORIAL.md#36-threadsafeflag) to +provide an object similar to `Event` with the following differences: + + * `.set()` has an optional data payload. + * `.set()` can be called from another thread, another core, or from an ISR. + * It is an awaitable class. + * Payloads may be retrieved in an asynchronous iterator. + * Multiple tasks can wait on a single `Message` instance. + +Constructor: + * No args. + +Synchronous methods: + * `set(data=None)` Trigger the `Message` with optional payload (may be any + Python object). + * `is_set()` Returns `True` if the `Message` is set, `False` if `.clear()` has + been issued. + * `clear()` Clears the triggered status. At least one task waiting on the + message should issue `clear()`. + * `value()` Return the payload. + +Asynchronous Method: + * `wait()` Pause until message is triggered. You can also `await` the message + as per the examples. + +The `.set()` method can accept an optional data value of any type. The task +waiting on the `Message` can retrieve it by means of `.value()` or by awaiting +the `Message` as below. A `Message` can provide a means of communication from +an interrupt handler and a task. 
The handler services the hardware and issues +`.set()` which causes the waiting task to resume (in relatively slow time). + +To install the threadsafe primitives, the `threadsafe` directory and its +contents should be copied to the MicroPython target. This illustrates basic +usage: +```python +import asyncio +from threadsafe import Message + +async def waiter(msg): + print('Waiting for message') + res = await msg + print('waiter got', res) + msg.clear() + +async def main(): + msg = Message() + asyncio.create_task(waiter(msg)) + await asyncio.sleep(1) + msg.set('Hello') # Optional arg + await asyncio.sleep(1) + +asyncio.run(main()) +``` +The following example shows multiple tasks awaiting a `Message`. +```python +from threadsafe import Message +import asyncio + +async def bar(msg, n): + while True: + res = await msg + msg.clear() + print(n, res) + # Pause until other coros waiting on msg have run and before again + # awaiting a message. + await asyncio.sleep_ms(0) + +async def main(): + msg = Message() + for n in range(5): + asyncio.create_task(bar(msg, n)) + k = 0 + while True: + k += 1 + await asyncio.sleep_ms(1000) + msg.set('Hello {}'.format(k)) + +asyncio.run(main()) +``` +Receiving messages in an asynchronous iterator: +```python +import asyncio +from threadsafe import Message + +async def waiter(msg): + async for text in msg: + print(f"Waiter got {text}") + msg.clear() + +async def main(): + msg = Message() + task = asyncio.create_task(waiter(msg)) + for text in ("Hello", "This is a", "message", "goodbye"): + msg.set(text) + await asyncio.sleep(1) + task.cancel() + await asyncio.sleep(1) + print("Done") + +asyncio.run(main()) +``` +The `Message` class does not have a queue: if the instance is set, then set +again before it is accessed, the first data item will be lost. + +###### [Contents](./THREADING.md#contents) + +# 4. Taming blocking functions + +Blocking functions or methods have the potential of stalling the `asyncio` +scheduler. 
Short of rewriting them to work properly the only way to tame them
+is to run them in another thread. Any function to be run in this way must
+conform to the guidelines above, notably with regard to side effects.
+
+## 4.1 Basic approach
+
+The following is a way to "unblock" a single function or method.
+```python
+async def unblock(func, *args, **kwargs):
+    def wrap(func, message, args, kwargs):
+        message.set(func(*args, **kwargs))  # Run the blocking function.
+    msg = Message()
+    _thread.start_new_thread(wrap, (func, msg, args, kwargs))
+    return await msg
+```
+Given a blocking function `blocking` taking two positional and two keyword args
+it may be awaited in an `asyncio` task with
+```python
+    res = await unblock(blocking, 1, 2, c = 3, d = 4)
+```
+The function runs "in the background" with other tasks running; only the
+calling task is paused. Note how the args are passed. There is a "gotcha" which
+is cancellation. It is not valid to cancel the `unblock` task because the
+underlying thread will still be running. There is no general solution to this.
+If the specific blocking function has a means of interrupting it or of forcing
+a timeout then it may be possible to code a solution.
+
+The following is a complete example where blocking is demonstrated with
+`time.sleep`.
+```python
+import asyncio
+from threadsafe import Message
+import _thread
+from time import sleep
+
+def slow_add(a, b, *, c, d):  # Blocking function.
+    sleep(5)
+    return a + b + c + d
+
+# Convert a blocking function to a nonblocking one using threading.
+async def unblock(func, *args, **kwargs):
+    def wrap(func, message, args, kwargs):
+        message.set(func(*args, **kwargs))  # Run the blocking function.
+    msg = Message()
+    _thread.start_new_thread(wrap, (func, msg, args, kwargs))
+    return await msg
+
+async def busywork():  # Prove asyncio is running. 
+
+    while True:
+        print("#", end="")
+        await asyncio.sleep_ms(200)
+
+async def main():
+    bw = asyncio.create_task(busywork())
+    res = await unblock(slow_add, 1, 2, c = 3, d = 4)
+    bw.cancel()
+    print(f"\nDone. Result = {res}")
+
+asyncio.run(main())
+```
+###### [Contents](./THREADING.md#contents)
+
+## 4.2 More general solution
+
+This provides a queueing mechanism. A task can assign a blocking function to a
+core even if the core is already busy. Further it allows for multiple cores or
+threads; these are defined as `Context` instances. Typical use:
+```python
+from threadsafe import Context
+
+core1 = Context() # Has an instance of _thread, so a core on RP2
+
+def rats(t, n): # Arbitrary blocking function or method
+    time.sleep(t)
+    return n * n
+
+async def some_task():
+    await core1.assign(rats, t=3, n=99) # rats() runs on other core
+```
+#### Context class
+
+Constructor arg:
+ * `qsize=10` Size of function queue.
+
+Asynchronous method:
+ * `assign(func, *args, **kwargs)` Accepts a synchronous function with optional
+ args. These are placed on a queue for execution in the `Context` instance. The
+ method pauses until execution is complete, returning the function's return
+ value.
+
+The `Context` class constructor spawns a thread which waits on the `Context`
+queue. The `assign` method accepts a function and creates a `Job` instance.
+This includes a `ThreadSafeFlag` along with the function and its args. The
+`assign` method places the `Job` on the queue and waits on the `ThreadSafeFlag`.
+
+The thread removes a `Job` from the queue and executes it. When complete it
+assigns the return value to the `Job` and sets the `ThreadSafeFlag`.
+
+# 5. Sharing a stream device
+
+Typical stream devices are a UART or a socket. These are typically employed to
+exchange multi-byte messages between applications running on different systems. 
+ +When sharing a stream device between concurrent functions, similar issues arise +whether the functions are `asyncio` tasks or code with hard concurrency. In +the case of transmission of multi-character messages a lock must be used to +ensure that transmitted characters cannot become interleaved. + +In theory a lock can also be used for reception, but in practice it is rarely +feasible. Synchronising multiple receiving tasks is hard. This is because the +receiving processes seldom have precise control over the timing of the +(remote) transmitting device. It is therefore hard to determine when to +initiate each receiving process. If there is a requirement to handle +communication errors, the difficulties multiply. + +The usual approach is to design the message format to enable the intended +receiving process to be determined from the message contents. The application +has a single receiving task. This parses incoming messages and routes them to +the appropriate destination. Routing may be done by the data sharing mechanisms +discussed above. Error handling may be done by the receiving process or passed +on to the message destination. + +###### [Contents](./THREADING.md#contents) + +# 6. Glossary + +### ISR + +An Interrupt Service Routine: code that runs in response to an interrupt. Hard +ISR's offer very low latency but require careful coding - see +[official docs](http://docs.micropython.org/en/latest/reference/isr_rules.html). + +### Context + +In MicroPython terms a `context` may be viewed as a stream of bytecodes. An +`asyncio` program comprises a single context: execution is passed between +tasks and the scheduler as a single stream of code. By contrast code in an ISR +can preempt the main stream to run its own stream. This is also true of threads +which can preempt each other at arbitrary times, and code on another core +which runs independently albeit under the same VM. + +### GIL + +MicroPython has a Global Interpreter Lock. 
The purpose of this is to ensure +that multi-threaded programs cannot cause corruption in the event that two +contexts simultaneously modify an instance of a Python built-in class. It does +not protect user defined objects. + +### micropython.schedule + +The relevance of this is that it is normally called in a hard ISR. In this +case the scheduled code runs in a different context to the main program. See +[official docs](http://docs.micropython.org/en/latest/library/micropython.html#micropython.schedule). + +### VM + +In MicroPython terms a VM is the Virtual Machine that executes bytecode. Code +running in different contexts share a common VM which enables the contexts to +share global objects. + +### Atomic + +An operation is described as "atomic" if it can be guaranteed to proceed to +completion without being preempted. Writing an integer is atomic at the machine +code level. Updating a dictionary value is atomic at bytecode level. Adding or +deleting a dictionary key is not. diff --git a/v3/docs/TUTORIAL.md b/v3/docs/TUTORIAL.md new file mode 100644 index 0000000..b818d58 --- /dev/null +++ b/v3/docs/TUTORIAL.md @@ -0,0 +1,2910 @@ +# MicroPython asyncio: a tutorial + +This tutorial is intended for users having varying levels of experience with +asyncio and includes a section for complete beginners. It is based on the +current version of `asyncio`, V3.0.0. Most code samples are complete scripts +which can be cut and pasted at the REPL. + +See [this overview](../README.md) for a summary of resources for `asyncio` +including device drivers, debugging aids, and documentation. + +The name of the module was formerly `uasyncio`. To run the demo scripts on old +firmware please use +```python +import uasyncio as asyncio +``` + +# Contents + + 0. [Introduction](./TUTORIAL.md#0-introduction) + 0.1 [Installing asyncio primitives](./TUTORIAL.md#01-installing-asyncio-primitives) Extensions used in the demos. + 1. 
[Cooperative scheduling](./TUTORIAL.md#1-cooperative-scheduling) + 1.1 [Modules](./TUTORIAL.md#11-modules) +      1.1.1 [Primitives](./TUTORIAL.md#111-primitives) +      1.1.2 [Demo programs](./TUTORIAL.md#112-demo-programs) +      1.1.3 [Device drivers](./TUTORIAL.md#113-device-drivers) + 2. [asyncio concept](./TUTORIAL.md#2-asyncio-concept) + 2.1 [Program structure](./TUTORIAL.md#21-program-structure) + 2.2 [Coroutines and Tasks](./TUTORIAL.md#22-coroutines-and-tasks) +      2.2.1 [Queueing a task for scheduling](./TUTORIAL.md#221-queueing-a-task-for-scheduling) +      2.2.2 [Running a callback function](./TUTORIAL.md#222-running-a-callback-function) +      2.2.3 [Notes](./TUTORIAL.md#223-notes) Coros as bound methods. Returning values. +      2.2.4 [A typical firmware app](./TUTORIAL.md#224-a-typical-firmware-app) Avoiding a minor error + 2.3 [Delays](./TUTORIAL.md#23-delays) + 3. [Synchronisation](./TUTORIAL.md#3-synchronisation) + 3.1 [Lock](./TUTORIAL.md#31-lock) + 3.2 [Event](./TUTORIAL.md#32-event) +      3.2.1 [Wait on multiple events](./TUTORIAL.md#321-wait-on-multiple-events) Pause until 1 of N events is set. + 3.3 [Coordinating multiple tasks](./TUTORIAL.md#33-coordinating-multiple-tasks) +      3.3.1 [gather](./TUTORIAL.md#331-gather) + 3.4 [Semaphore](./TUTORIAL.md#34-semaphore) +      3.4.1 [BoundedSemaphore](./TUTORIAL.md#341-boundedsemaphore) + 3.5 [Queue](./TUTORIAL.md#35-queue) + 3.6 [ThreadSafeFlag](./TUTORIAL.md#36-threadsafeflag) Synchronisation with asynchronous events and interrupts. +      3.6.1 [Querying a ThreadSafeFlag](./TUTORIAL.md#361-querying-a-threadsafeflag) Check its state without blocking. + 3.7 [Barrier](./TUTORIAL.md#37-barrier) + 3.8 [Delay_ms](./TUTORIAL.md#38-delay_ms-class) Software retriggerable delay. + 3.9 [Message](./TUTORIAL.md#39-message) + 3.10 [Message broker](./TUTORIAL.md#310-message-broker) A publish-subscribe model of messaging and control. 
+ 3.11 [Synchronising to hardware](./TUTORIAL.md#311-synchronising-to-hardware) + Debouncing switches, pushbuttons, ESP32 touchpads and encoder knobs. Taming ADC's. + 4. [Designing classes for asyncio](./TUTORIAL.md#4-designing-classes-for-asyncio) + 4.1 [Awaitable classes](./TUTORIAL.md#41-awaitable-classes) +      4.1.1 [Use in context managers](./TUTORIAL.md#411-use-in-context-managers) +      4.1.2 [Portable code](./TUTORIAL.md#412-portable-code) + 4.2 [Asynchronous iterators](./TUTORIAL.md#42-asynchronous-iterators) + 4.3 [Asynchronous context managers](./TUTORIAL.md#43-asynchronous-context-managers) + 4.4 [Object scope](./TUTORIAL.md#44-object-scope) What happens when an object goes out of scope. + 5. [Exceptions timeouts and cancellation](./TUTORIAL.md#5-exceptions-timeouts-and-cancellation) + 5.1 [Exceptions](./TUTORIAL.md#51-exceptions) +      5.1.1 [Global exception handler](./TUTORIAL.md#511-global-exception-handler) +      5.1.2 [Keyboard interrupts](./TUTORIAL.md#512-keyboard-interrupts) + 5.2 [Cancellation and Timeouts](./TUTORIAL.md#52-cancellation-and-timeouts) +      5.2.1 [Task cancellation](./TUTORIAL.md#521-task-cancellation) +      5.2.2 [Tasks with timeouts](./TUTORIAL.md#522-tasks-with-timeouts) +      5.2.3 [Cancelling running tasks](./TUTORIAL.md#523-cancelling-running-tasks) A "gotcha". + 6. [Interfacing hardware](./TUTORIAL.md#6-interfacing-hardware) + 6.1 [Timing issues](./TUTORIAL.md#61-timing-issues) + 6.2 [Polling hardware with a task](./TUTORIAL.md#62-polling-hardware-with-a-task) + 6.3 [Using the stream mechanism](./TUTORIAL.md#63-using-the-stream-mechanism) +      6.3.1 [A UART driver example](./TUTORIAL.md#631-a-uart-driver-example) + 6.4 [Writing streaming device drivers](./TUTORIAL.md#64-writing-streaming-device-drivers) + 6.5 [A complete example: aremote.py](./TUTORIAL.md#65-a-complete-example-aremotepy) + A driver for an IR remote control receiver. 
+ 6.6 [Driver for HTU21D](./TUTORIAL.md#66-htu21d-environment-sensor) A + temperature and humidity sensor. + 7. [Hints and tips](./TUTORIAL.md#7-hints-and-tips) + 7.1 [Program hangs](./TUTORIAL.md#71-program-hangs) + 7.2 [asyncio retains state](./TUTORIAL.md#72-asyncio-retains-state) + 7.3 [Garbage Collection](./TUTORIAL.md#73-garbage-collection) + 7.4 [Testing](./TUTORIAL.md#74-testing) + 7.5 [A common error](./TUTORIAL.md#75-a-common-error) This can be hard to find. + 7.6 [Socket programming](./TUTORIAL.md#76-socket-programming) +      7.6.1 [WiFi issues](./TUTORIAL.md#761-wifi-issues) + 7.7 [CPython compatibility and the event loop](./TUTORIAL.md#77-cpython-compatibility-and-the-event-loop) Compatibility with CPython 3.5+ + 7.8 [Race conditions](./TUTORIAL.md#78-race-conditions) + 7.9 [Undocumented asyncio features](./TUTORIAL.md#79-undocumented-asyncio-features) + 8. [Notes for beginners](./TUTORIAL.md#8-notes-for-beginners) + 8.1 [Problem 1: event loops](./TUTORIAL.md#81-problem-1:-event-loops) + 8.2 [Problem 2: blocking methods](./TUTORIAL.md#8-problem-2:-blocking-methods) + 8.3 [The asyncio approach](./TUTORIAL.md#83-the-asyncio-approach) + 8.4 [Scheduling in asyncio](./TUTORIAL.md#84-scheduling-in-asyncio) + 8.5 [Why cooperative rather than pre-emptive?](./TUTORIAL.md#85-why-cooperative-rather-than-pre-emptive) + 8.6 [Communication](./TUTORIAL.md#86-communication) + 9. [Polling vs Interrupts](./TUTORIAL.md#9-polling-vs-interrupts) A common + source of confusion. + 10. [Interfacing threaded code](./TUTORIAL.md#10-interfacing-threaded-code) Taming blocking functions. Multi core coding. + + +###### [Main README](../README.md) + +# 0. Introduction + +Most of this document assumes some familiarity with asynchronous programming. +For those new to it an introduction may be found +[in section 8](./TUTORIAL.md#8-notes-for-beginners). + +The MicroPython `asyncio` library comprises a subset of Python's `asyncio` +library. It is designed for use on microcontrollers. 
As such it has a small RAM +footprint and fast context switching with zero RAM allocation. This document +describes its use with a focus on interfacing hardware devices. The aim is to +design drivers in such a way that the application continues to run while the +driver is awaiting a response from the hardware. The application remains +responsive to events such as user interaction. + +Another major application area for asyncio is in network programming: many +guides to this may be found online. + +Note that MicroPython is based on Python 3.4 with additions from later versions. +This version of `asyncio` supports a subset of CPython 3.8 `asyncio`. This +document identifies supported features. Except where stated program samples run +under MicroPython and CPython 3.8. + +This tutorial aims to present a consistent programming style compatible with +CPython V3.8 and above. + +## 0.1 Installing asyncio primitives + +This repository has optional unofficial primitives and extensions. To install +these, connect the target hardware to WiFi and issue: +```python +import mip +mip.install("github:peterhinch/micropython-async/v3/primitives") +mip.install("github:peterhinch/micropython-async/v3/threadsafe") +``` +For non-networked targets use `mpremote` as described in +[the official docs](http://docs.micropython.org/en/latest/reference/packages.html#installing-packages-with-mpremote). +```bash +$ mpremote mip install github:peterhinch/micropython-async/v3/primitives +$ mpremote mip install github:peterhinch/micropython-async/v3/threadsafe +``` + +###### [Main README](../README.md) + +# 1. Cooperative scheduling + +The technique of cooperative multi-tasking is widely used in embedded systems. +It offers lower overheads than pre-emptive scheduling and avoids many of the +pitfalls associated with truly asynchronous threads of execution. 
+ +###### [Contents](./TUTORIAL.md#contents) + +## 1.1 Modules + +### 1.1.1 Primitives + +The directory `primitives` contains a Python package containing the following: + * Synchronisation primitives: "micro" versions of CPython's classes. + * Additional Python primitives including a software retriggerable delay class + and a MicroPython optimised `ringbuf_queue`. + * Primitives for interfacing hardware. These comprise classes for debouncing + switches and pushbuttons, an `Encoder` class and an asynchronous ADC class. + These are documented [here](./DRIVERS.md). + * Primitives for event-based coding which aims to reduce the use of callbacks + and is discussed [here](./EVENTS.md). + +The directory `threadsafe` includes primitives designed to interface `asyncio` +tasks to code running on other threads. These are documented +[here](./THREADING.md). + +See above for installation. + +### 1.1.2 Demo programs + +The directory `as_demos` contains various demo programs implemented as a Python +package. Copy the directory and its contents to the target hardware. + +The first two are the most immediately rewarding as they produce visible +results by accessing Pyboard hardware. With all demos, issue ctrl-d between +runs to soft reset the hardware. + + 1. [aledflash.py](../as_demos/aledflash.py) Flashes three Pyboard LEDs + asynchronously for 10s. Requires any Pyboard. + 2. [apoll.py](../as_demos/apoll.py) A device driver for the Pyboard + accelerometer. Demonstrates the use of a task to poll a device. Runs for 20s. + Requires a Pyboard V1.x. + 3. [roundrobin.py](../as_demos/roundrobin.py) Demo of round-robin scheduling. + Also a benchmark of scheduling performance. Runs for 5s on any target. + 4. [auart.py](../as_demos/auart.py) Demo of streaming I/O via a Pyboard UART. + Requires a link between X1 and X2. + 5. [auart_hd.py](../as_demos/auart_hd.py) Use of the Pyboard UART to communicate + with a device using a half-duplex protocol e.g. 
devices such as those using + the 'AT' modem command set. Link X1-X4, X2-X3. + 6. [gather.py](../as_demos/gather.py) Use of `gather`. Any target. + 7. [iorw.py](../as_demos/iorw.py) Demo of a read/write device driver using the + stream I/O mechanism. Requires a Pyboard. + 8. [rate.py](../as_demos/rate.py) Benchmark for asyncio. Any target. + +Demos are run using this pattern: +```python +import as_demos.aledflash +``` + +### 1.1.3 Device drivers + +These are installed by copying the `as_drivers` directory and contents to the +target. They have their own documentation as follows: + + 1. [A driver for GPS modules](./GPS.md) Runs a background task to + read and decode NMEA sentences, providing constantly updated position, course, + altitude and time/date information. + 2. [HTU21D](./HTU21D.md) An I2C temperature and humidity sensor. A task + periodically queries the sensor maintaining constantly available values. + 3. [NEC IR](./NEC_IR.md) A decoder for NEC IR remote controls. A callback occurs + whenever a valid signal is received. + 4. [HD44780](./hd44780.md) Driver for common character based LCD displays + based on the Hitachi HD44780 controller + 5. [I2C](./I2C.md) Use Pyboard I2C slave mode to implement a UART-like + asynchronous stream interface. Uses: communication with ESP8266, + or (with coding) interface a Pyboard to I2C masters. + +###### [Contents](./TUTORIAL.md#contents) + +# 2. asyncio concept + +The asyncio concept is of cooperative multi-tasking based on coroutines +(coros). A coro is similar to a function but is intended to run concurrently +with other coros. The illusion of concurrency is achieved by periodically +yielding to the scheduler, enabling other coros to be scheduled. 
+ +## 2.1 Program structure + +Consider the following example: + +```python +import asyncio +async def bar(): + count = 0 + while True: + count += 1 + print(count) + await asyncio.sleep(1) # Pause 1s + +asyncio.run(bar()) +``` + +Program execution proceeds normally until the call to `asyncio.run(bar())`. At +this point, execution is controlled by the scheduler. A line after +`asyncio.run(bar())` would never be executed. The scheduler runs `bar` +because this has been placed on the scheduler's queue by `asyncio.run(bar())`. +In this trivial example, there is only one task: `bar`. If there were others, +the scheduler would schedule them in periods when `bar` was paused: + +```python +import asyncio +async def bar(x): + count = 0 + while True: + count += 1 + print('Instance: {} count: {}'.format(x, count)) + await asyncio.sleep(1) # Pause 1s + +async def main(): + tasks = [None] * 3 # For CPython compaibility must store a reference see 2.2 Note + for x in range(3): + tasks[x] = asyncio.create_task(bar(x)) + await asyncio.sleep(10) + +asyncio.run(main()) +``` +In this example, three instances of `bar` run concurrently. The +`asyncio.create_task` method returns immediately but schedules the passed coro +for execution. When `main` sleeps for 10s the `bar` instances are scheduled in +turn, each time they yield to the scheduler with `await asyncio.sleep(1)`. + +In this instance `main()` terminates after 10s. This is atypical of embedded +`asyncio` systems. Normally the application is started at power up by a one +line `main.py` and runs forever. + +###### [Contents](./TUTORIAL.md#contents) + +## 2.2 Coroutines and Tasks + +The fundamental building block of `asyncio` is a coro. This is defined with +`async def` and usually contains at least one `await` statement. 
This minimal +example waits 1 second before printing a message: + +```python +async def bar(): + await asyncio.sleep(1) + print('Done') +``` +Just as a function does nothing until called, a coro does nothing until awaited +or converted to a `Task`. The `create_task` method takes a coro as its argument +and returns a `Task` instance, which is scheduled for execution. In +```python +async def foo(): + await coro +``` +`coro` is run with `await` pausing until `coro` has completed. Sometimes coros +and tasks are interchangeable: the CPython docs refer to them as `awaitable`, +because either may be the target of an `await`. Consider this: + +```python +import asyncio +async def bar(t): + print('Bar started: waiting {}secs'.format(t)) + await asyncio.sleep(t) + print('Bar done') + +async def main(): + await bar(1) # Pauses here until bar is complete + task = asyncio.create_task(bar(5)) + await asyncio.sleep(0) # bar has now started + print('Got here: bar running') # Can run code here + await task # Now we wait for the bar task to complete + print('All done') +asyncio.run(main()) +``` +There is a crucial difference between `create_task` and `await`: the former +is synchronous code and returns immediately, with the passed coro being +converted to a `Task` and queued to run "in the background". By contrast, +`await` causes the passed `Task` or coro to run to completion before the next +line executes. Consider these lines of code: + +```python +await asyncio.sleep(delay_secs) +await asyncio.sleep(0) +``` + +The first causes the code to pause for the duration of the delay, with other +tasks being scheduled for this duration. A delay of 0 causes any pending tasks +to be scheduled in round-robin fashion before the following line is run. See +the `roundrobin.py` example. + +If a `Task` is run concurrently with `.create_task` it may be cancelled. The +`.create_task` method returns the `Task` instance which may be saved for status +checking or cancellation. See note below. 
+ +In the following code sample three `Task` instances are created and scheduled +for execution. The "Tasks are running" message is immediately printed. The +three instances of the task `bar` appear to run concurrently. In fact, when one +pauses, the scheduler grants execution to the next, giving the illusion of +concurrency: + +```python +import asyncio +async def bar(x): + count = 0 + while True: + count += 1 + print('Instance: {} count: {}'.format(x, count)) + await asyncio.sleep(1) # Pause 1s + +async def main(): + tasks = [None] * 3 # For CPython compaibility must store a reference see 2.2 Note + for x in range(3): + tasks[x] = asyncio.create_task(bar(x)) + print('Tasks are running') + await asyncio.sleep(10) + +asyncio.run(main()) +``` +### Note on CPython compatibility + +The CPython [docs](https://docs.python.org/3/library/asyncio-task.html#creating-tasks) +have a warning that a reference to the task instance should be saved for the +task's duration. This was raised in +[this issue](https://github.com/micropython/micropython/issues/12299). +MicroPython `asyncio` does not suffer from this bug, but writers of code which +must work in CPython and MicroPython should take note. Code samples in this doc +are CPython-compatible, but following version is valid in MicroPython: +```python +import asyncio +async def bar(x): + count = 0 + while True: + count += 1 + print('Instance: {} count: {}'.format(x, count)) + await asyncio.sleep(1) # Pause 1s + +async def main(): + for x in range(3): + asyncio.create_task(bar(x)) # No reference stored + print('Tasks are running') + await asyncio.sleep(10) + +asyncio.run(main()) +``` + +###### [Contents](./TUTORIAL.md#contents) + +### 2.2.1 Queueing a task for scheduling + + * `asyncio.create_task` Arg: the coro to run. The scheduler converts the coro + to a `Task` and queues the task to run ASAP. Return value: the `Task` + instance. It returns immediately. 
The coro arg is specified with function call + syntax with any required arguments passed. + * `asyncio.run` Arg: the coro to run. Return value: any value returned by the + passed coro. The scheduler queues the passed coro to run ASAP. The coro arg is + specified with function call syntax with any required arguments passed. In the + current version the `run` call returns when the task terminates. However, under + CPython, the `run` call does not terminate. + * `await` Arg: the task or coro to run. If a coro is passed it must be + specified with function call syntax. Starts the task ASAP. The awaiting task + blocks until the awaited one has run to completion. As described + [in section 2.2](./TUTORIAL.md#22-coroutines-and-tasks), it is possible to + `await` a task which has already been started. In this instance, the `await` is + on the `task` object (function call syntax is not used). + +The above are compatible with CPython 3.8 or above. + +###### [Contents](./TUTORIAL.md#contents) + +### 2.2.2 Running a callback function + +Callbacks should be Python functions designed to complete in a short period of +time. This is because tasks will have no opportunity to run for the +duration. If it is necessary to schedule a callback to run after `t` seconds, +it may be done as follows: +```python +async def schedule(cb, t, *args, **kwargs): + await asyncio.sleep(t) + cb(*args, **kwargs) +``` +In this example the callback runs after three seconds: +```python +import asyncio + +async def schedule(cbk, t, *args, **kwargs): + await asyncio.sleep(t) + cbk(*args, **kwargs) + +def callback(x, y): + print('x={} y={}'.format(x, y)) + +async def bar(): + asyncio.create_task(schedule(callback, 3, 42, 100)) + for count in range(6): + print(count) + await asyncio.sleep(1) # Pause 1s + +asyncio.run(bar()) +``` + +###### [Contents](./TUTORIAL.md#contents) + +### 2.2.3 Notes + +Coros may be bound methods. 
A coro usually contains at least one `await` +statement, but nothing will break (in MicroPython or CPython 3.8) if it has +none. + +Similarly to a function or method, a coro can contain a `return` statement. To +retrieve the returned data issue: + +```python +result = await my_task() +``` + +It is possible to await completion of a set of multiple asynchronously running +tasks, accessing the return value of each. This is done by +[asyncio.gather](./TUTORIAL.md#33-gather) which launches the tasks and pauses +until the last terminates. It returns a list containing the data returned by +each task: +```python +import asyncio + +async def bar(n): + for count in range(n): + await asyncio.sleep_ms(200 * n) # Pause by varying amounts + print('Instance {} stops with count = {}'.format(n, count)) + return count * count + +async def main(): + tasks = (bar(2), bar(3), bar(4)) + print('Waiting for gather...') + res = await asyncio.gather(*tasks) + print(res) + +asyncio.run(main()) +``` + +###### [Contents](./TUTORIAL.md#contents) + +### 2.2.4 A typical firmware app + +Most firmware applications run forever. This requires the coro passed to +`asyncio.run()` to `await` a non-terminating coro. + +To ease debugging, and for CPython compatibility, some "boilerplate" code is +suggested in the sample below. + +By default, an exception in a task will not stop the application as a whole from +running. This can make debugging difficult. The fix shown below is discussed +[in 5.1.1](./TUTORIAL.md#511-global-exception-handler). + +It is bad practice to create a task prior to issuing `asyncio.run()`. CPython +will throw an exception in this case. MicroPython +[does not](https://github.com/micropython/micropython/issues/6174), but it's +wise to avoid doing this. + +Lastly, `asyncio` retains state. This means that, by default, you need to +reboot between runs of an application. This can be fixed with the +`new_event_loop` method discussed +[in 7.2](./TUTORIAL.md#72-asyncio-retains-state). 
+ +These considerations suggest the following application structure: +```python +import asyncio +from my_app import MyClass + +def set_global_exception(): + def handle_exception(loop, context): + import sys + sys.print_exception(context["exception"]) + sys.exit() + loop = asyncio.get_event_loop() + loop.set_exception_handler(handle_exception) + +async def main(): + set_global_exception() # Debug aid + my_class = MyClass() # Constructor might create tasks + task = asyncio.create_task(my_class.foo()) # Or you might do this + await my_class.run_forever() # Non-terminating method +try: + asyncio.run(main()) +finally: + asyncio.new_event_loop() # Clear retained state +``` + +###### [Contents](./TUTORIAL.md#contents) + +## 2.3 Delays + +Where a delay is required in a task there are two options. For longer delays and +those where the duration need not be precise, the following should be used: + +```python +async def foo(delay_secs, delay_ms): + await asyncio.sleep(delay_secs) + print('Hello') + await asyncio.sleep_ms(delay_ms) +``` + +While these delays are in progress the scheduler will schedule other tasks. +This is generally highly desirable, but it does introduce uncertainty in the +timing as the calling routine will only be rescheduled when the one running at +the appropriate time has yielded. The amount of latency depends on the design +of the application, but is likely to be on the order of tens or hundreds of ms; +this is discussed further in [Section 6](./TUTORIAL.md#6-interfacing-hardware). + +Very precise delays may be issued by using the `utime` functions `sleep_ms` +and `sleep_us`. These are best suited for short delays as the scheduler will +be unable to schedule other tasks while the delay is in progress. + +###### [Contents](./TUTORIAL.md#contents) + +# 3 Synchronisation + +There is often a need to provide synchronisation between tasks. 
A common
+example is to avoid what are known as "race conditions" where multiple tasks
+compete to access a single resource. These are discussed
+[in section 7.8](./TUTORIAL.md#78-race-conditions). Another hazard is the
+"deadly embrace" where two tasks each wait on the other's completion.
+
+Another synchronisation issue arises with producer and consumer tasks. The
+producer generates data which the consumer uses. Asyncio provides the `Queue`
+object. The producer puts data onto the queue while the consumer waits for its
+arrival (with other tasks getting scheduled for the duration). The `Queue`
+guarantees that items are removed in the order in which they were received.
+Alternatively, a `Barrier` instance can be used if the producer must wait
+until the consumer is ready to access the data.
+
+In simple applications, communication may be achieved with global flags or bound
+variables. A more elegant approach is to use synchronisation primitives.
+CPython provides the following classes:
+ * `Lock` - already incorporated in new `asyncio`.
+ * `Event` - already incorporated.
+ * `asyncio.gather` - already incorporated.
+ * `Semaphore` In this repository.
+ * `BoundedSemaphore`. In this repository.
+ * `Condition`. In this repository.
+ * `Queue`. In this repository.
+
+As the table above indicates, not all are yet officially supported. In the
+interim, implementations may be found in the `primitives` directory. The
+following classes, which are non-standard, are also in that directory:
+ * `Message` An ISR-friendly `Event` with an optional data payload.
+ * `Barrier` Based on a Microsoft class, enables multiple coros to synchronise
+ in a similar (but not identical) way to `gather`.
+ * `Delay_ms` A useful software-retriggerable monostable, akin to a watchdog.
+ Calls a user callback if not cancelled or regularly retriggered.
+ * `RingbufQueue` a MicroPython-optimised queue.
+ * `Broker` a means of messaging and control based on a publish/subscribe model. 
+ +A further set of primitives for synchronising hardware are detailed in +[section 3.9](./TUTORIAL.md#39-synchronising-to-hardware). + +To install the primitives, copy the `primitives` directory and contents to the +target. A primitive is loaded by issuing (for example): +```python +from primitives import Semaphore, BoundedSemaphore +from primitives import Queue +``` +When `asyncio` acquires official versions of the CPython primitives, the +invocation lines alone should be changed. E.g.: +```python +from asyncio import Semaphore, BoundedSemaphore +from asyncio import Queue +``` +##### Note on CPython compatibility + +CPython will throw a `RuntimeError` on first use of a synchronisation primitive +that was instantiated prior to starting the scheduler. By contrast, +`MicroPython` allows instantiation in synchronous code executed before the +scheduler is started. Early instantiation can be advantageous in low resource +environments. For example, a class might have a large buffer and bound `Event` +instances. Such a class should be instantiated early, before RAM fragmentation +sets in. + +The following provides a discussion of the primitives. + +###### [Contents](./TUTORIAL.md#contents) + +## 3.1 Lock + +This describes the use of the official `Lock` primitive. This guarantees unique +access to a shared resource. +```python +from asyncio import Lock +lock = Lock() +``` +Synchronous methods: + * `locked` No args. Returns `True` if locked. + * `release` No args. Releases the lock. See note below. + +Asynchronous method: + * `acquire` No args. Pauses until the lock has been acquired. Use by executing + `await lock.acquire()`. + +A task waiting on a lock may be cancelled or may be run subject to a timeout. +The normal way to use a `Lock` is in a context manager. In the following code +sample a `Lock` instance `lock` has been created and is passed to all tasks +wishing to access the shared resource. 
Each task attempts to acquire the lock,
+pausing execution until it succeeds.
+```python
+import asyncio
+from asyncio import Lock
+
+async def task(i, lock):
+    while 1:
+        async with lock:
+            print("Acquired lock in task", i)
+            await asyncio.sleep(0.5)
+
+async def main():
+    lock = Lock()  # The Lock instance
+    tasks = [None] * 3  # For CPython compatibility must store a reference see 2.2 Note
+    for n in range(1, 4):
+        tasks[n - 1] = asyncio.create_task(task(n, lock))
+    await asyncio.sleep(10)
+
+asyncio.run(main())  # Run for 10s
+```
+Use of a context manager is strongly recommended - otherwise an application must
+ensure that `.release` is only ever called when that same task has called
+`.locked`. Calling `.release` on an unlocked `Lock` will raise a `ValueError`.
+Calling it on a `Lock` which has been locked by another task will cause that
+second task to produce a `ValueError` when it attempts to release the `Lock` or
+when its context manager exits. Context managers avoid these issues.
+
+For the brave the following illustrates use without a CM.
+```python
+import asyncio
+from asyncio import Lock
+
+async def task(i, lock):
+    while 1:
+        await lock.acquire()
+        print("Acquired lock in task", i)
+        await asyncio.sleep(0.5)
+        lock.release()
+
+async def main():
+    lock = Lock()  # The Lock instance
+    tasks = [None] * 3  # For CPython compatibility must store a reference see 2.2 Note
+    for n in range(1, 4):
+        tasks[n - 1] = asyncio.create_task(task(n, lock))
+    await asyncio.sleep(10)
+
+asyncio.run(main())  # Run for 10s
+```
+###### [Contents](./TUTORIAL.md#contents)
+
+## 3.2 Event
+
+This describes the use of the official `Event` primitive.
+
+This provides a way for one or more tasks to pause until another one flags them to
+continue. 
An `Event` object is instantiated and made accessible to all tasks +using it: + +```python +import asyncio +from asyncio import Event + +async def waiter(event): + print('Waiting for event') + await event.wait() # Pause here until event is set + print('Waiter got event.') + event.clear() # Flag caller and enable re-use of the event + +async def main(): + event = Event() + task = asyncio.create_task(waiter(event)) + await asyncio.sleep(2) + print('Setting event') + event.set() + await asyncio.sleep(1) + # Caller can check if event has been cleared + print('Event is {}'.format('set' if event.is_set() else 'clear')) + +asyncio.run(main()) +``` +Constructor: no args. +Synchronous Methods: + * `set` Initiates the event. + * `clear` No args. Clears the event. + * `is_set` No args. Returns `True` if the event is set. + +Asynchronous Method: + * `wait` Pause until event is set. + +Tasks wait on the event by issuing `await event.wait()`; execution pauses until +another one issues `event.set()`. This causes all tasks waiting on the `Event` to +be queued for execution. Note that the synchronous sequence +```python +event.set() +event.clear() +``` +will cause any tasks waiting on the event to resume in round-robin order. In +general, the waiting task should clear the event, as in the `waiter` example +above. This caters for the case where the waiting task has not reached the +event at the time when it is triggered. In this instance, by the time the task +reaches the event, the task will find it clear and will pause. This can lead to +non-deterministic behaviour if timing is marginal. + +The `Event` class is an efficient and effective way to synchronise tasks, but +firmware applications often have multiple tasks running `while True:` loops. +The number of `Event` instances required to synchronise these can multiply. +Consider the case of one producer task feeding N consumers. 
The producer sets +an `Event` to tell the consumer that data is ready; it then needs to wait until +all consumers have completed before triggering them again. Consider these +approaches: + 1. Each consumer sets an `Event` on completion. Producer waits until all + `Event`s are set before clearing them and setting its own `Event`. + 2. Consumers do not loop, running to completion. Producer uses `gather` to + instantiate consumer tasks and wait on their completion. + 3. `Event` instances are replaced with a single [Barrier](./TUTORIAL.md#37-barrier) + instance. + +Solution 1 suffers a proliferation of `Event`s and suffers an inefficient +busy-wait where the producer waits on N events. Solution 2 is inefficient with +constant creation of tasks. Arguably the `Barrier` class is the best approach. + +**WARNING** +`Event` methods must not be called from an interrupt service routine (ISR). The +`Event` class is not thread safe. See [ThreadSafeFlag](./TUTORIAL.md#36-threadsafeflag). + +### 3.2.1 Wait on multiple events + +The `WaitAny` primitive allows a task to wait on a list of events. When one +of the events is triggered, the task continues. It is effectively a logical +`or` of events. +```python +from primitives import WaitAny +evt1 = Event() +evt2 = Event() +# Launch tasks that might trigger these events +evt = await WaitAny((evt1, evt2)).wait() +# One or other was triggered +if evt is evt1: + evt1.clear() + # evt1 was triggered +else: + evt2.clear() + # evt2 was triggered +``` +The `WaitAll` primitive is similar except that the calling task will pause +until all passed `Event`s have been set: +```python +from primitives import WaitAll +evt1 = Event() +evt2 = Event() +wa = WaitAll((evt1, evt2)).wait() +# Launch tasks that might trigger these events +await wa +# Both were triggered +``` +Awaiting `WaitAll` or `WaitAny` may be cancelled or subject to a timeout. 
These
+primitives are documented in
+[event based programming](https://github.com/peterhinch/micropython-async/blob/master/v3/docs/EVENTS.md).
+
+###### [Contents](./TUTORIAL.md#contents)
+
+## 3.3 Coordinating multiple tasks
+
+Several tasks may be launched together with the launching task pausing until
+all have completed. The `gather` mechanism is supported by CPython and
+MicroPython. CPython 3.11 adds a `TaskGroup` class which is particularly
+suited to applications where runtime exceptions may be encountered. It is not
+yet officially supported by MicroPython.
+
+### 3.3.1 gather
+
+This official `asyncio` asynchronous method causes a number of awaitables to
+run, pausing until all have either run to completion or been terminated by
+cancellation or timeout. It returns a list of the return values of each task.
+
+Its call signature is
+```python
+res = await asyncio.gather(*awaitables, return_exceptions=False)
+```
+`awaitables` may comprise tasks or coroutines, the latter being converted to
+tasks.
+
+The keyword-only boolean arg `return_exceptions` determines the behaviour in
+the event of a cancellation or timeout of tasks. If `False`, the `gather`
+terminates immediately, raising the relevant exception which should be trapped
+by the caller. If `True`, the `gather` continues to pause until all have either
+run to completion or been terminated by cancellation or timeout. In this case,
+tasks which have been terminated will return the exception object in the list
+of return values. 
+ +The following script may be used to demonstrate this behaviour: + +```python +import asyncio + +async def barking(n): + print('Start barking') + for _ in range(6): + await asyncio.sleep(1) + print('Done barking.') + return 2 * n + +async def foo(n): + print('Start timeout coro foo()') + while True: + await asyncio.sleep(1) + n += 1 + return n + +async def bar(n): + print('Start cancellable bar()') + while True: + await asyncio.sleep(1) + n += 1 + return n + +async def do_cancel(task): + await asyncio.sleep(5) + print('About to cancel bar') + task.cancel() + +async def main(): + tasks = [asyncio.create_task(bar(70))] + tasks.append(barking(21)) + tasks.append(asyncio.wait_for(foo(10), 7)) + can = asyncio.create_task(do_cancel(tasks[0])) + res = None + try: + res = await asyncio.gather(*tasks, return_exceptions=True) + except asyncio.TimeoutError: # These only happen if return_exceptions is False + print('Timeout') # With the default times, cancellation occurs first + except asyncio.CancelledError: + print('Cancelled') + print('Result: ', res) + +asyncio.run(main()) +``` + +## 3.4 Semaphore + +This is currently an unofficial implementation. Its API is as per CPython +asyncio. + +A semaphore limits the number of tasks which can access a resource. It can be +used to limit the number of instances of a particular task which can run +concurrently. It performs this using an access counter which is initialised by +the constructor and decremented each time a task acquires the semaphore. + +Constructor: Optional arg `value` default 1. Number of permitted concurrent +accesses. + +Synchronous method: + * `release` No args. Increments the access counter. + +Asynchronous method: + * `acquire` No args. If the access counter is greater than 0, decrements it + and terminates. Otherwise waits for it to become greater than 0 before + decrementing it and terminating. + +The easiest way to use it is with an asynchronous context manager. 
The
+following illustrates tasks accessing a resource one at a time:
+
+```python
+import asyncio
+from primitives import Semaphore
+
+async def foo(n, sema):
+    print('foo {} waiting for semaphore'.format(n))
+    async with sema:
+        print('foo {} got semaphore'.format(n))
+        await asyncio.sleep_ms(200)
+
+async def main():
+    sema = Semaphore()
+    tasks = [None] * 3  # For CPython compatibility must store a reference see 2.2 Note
+    for num in range(3):
+        tasks[num] = asyncio.create_task(foo(num, sema))
+    await asyncio.sleep(2)
+
+asyncio.run(main())
+```
+
+There is a difference between a `Semaphore` and a `Lock`. A `Lock` instance is
+owned by the coro which locked it: only that coro can release it. A
+`Semaphore` can be released by any coro which acquired it.
+
+###### [Contents](./TUTORIAL.md#contents)
+
+### 3.4.1 BoundedSemaphore
+
+This is currently an unofficial implementation. Its API is as per CPython
+asyncio.
+
+This works identically to the `Semaphore` class except that if the `release`
+method causes the access counter to exceed its initial value, a `ValueError`
+is raised.
+
+###### [Contents](./TUTORIAL.md#contents)
+
+## 3.5 Queue
+
+Queue objects provide a means of synchronising producer and consumer tasks: the
+producer puts data items onto the queue with the consumer removing them. If the
+queue becomes full, the producer task will block, likewise if the queue becomes
+empty the consumer will block. Some queue implementations allow producer and
+consumer to run in different contexts: for example where one runs in an
+interrupt service routine or on a different thread or core from the `asyncio`
+application. Such a queue is termed "thread safe".
+
+The `Queue` class is an unofficial implementation whose API is a subset of that
+of CPython's `asyncio.Queue`. Like `asyncio.Queue` this class is not thread
+safe. A queue class optimised for MicroPython is presented in
+[Ringbuf Queue](./DRIVERS.md#7-ringbuf-queue). 
A thread safe version is +documented in [ThreadSafeQueue](./THREADING.md#22-threadsafequeue). + +Constructor: +Optional arg `maxsize=0`. If zero, the queue can grow without limit subject to +heap size. If `maxsize>0` the queue's size will be constrained. + +Synchronous methods (immediate return): + * `qsize` No arg. Returns the number of items in the queue. + * `empty` No arg. Returns `True` if the queue is empty. + * `full` No arg. Returns `True` if the queue is full. + * `put_nowait` Arg: the object to put on the queue. Raises an exception if the + queue is full. + * `get_nowait` No arg. Returns an object from the queue. Raises an exception + if the queue is empty. + * `task_done` No arg. Indicate that a task associated with a dequeued item is complete. + +Asynchronous methods: + * `put` Arg: the object to put on the queue. If the queue is full, it will + block until space is available. + * `get` No arg. Returns an object from the queue. If the queue is empty, it + will block until an object is put on the queue. + * `join` No arg. Block until all items in the queue have been received and + processed (indicated via task_done). + +```python +import asyncio +from primitives import Queue + +async def slow_process(): + await asyncio.sleep(2) + return 42 + +async def produce(queue): + print('Waiting for slow process.') + result = await slow_process() + print('Putting result onto queue') + await queue.put(result) # Put result on queue + +async def consume(queue): + print("Running consume()") + result = await queue.get() # Blocks until data is ready + print('Result was {}'.format(result)) + +async def queue_go(delay): + queue = Queue() + t1 = asyncio.create_task(consume(queue)) + t2 = asyncio.create_task(produce(queue)) + await asyncio.sleep(delay) + print("Done") + +asyncio.run(queue_go(4)) +``` + +###### [Contents](./TUTORIAL.md#contents) + +## 3.6 ThreadSafeFlag + +See also [Interfacing asyncio to interrupts](./INTERRUPTS.md). 
Because of +[this issue](https://github.com/micropython/micropython/issues/7965) the +`ThreadSafeFlag` class does not work under the Unix build. + +This official class provides an efficient means of synchronising a task with a +truly asynchronous event such as a hardware interrupt service routine or code +running in another thread or on another core. It operates in a similar way to +`Event` with the following key differences: + * It is thread safe: the `set` event may be called from asynchronous code. + * It is self-clearing. + * Only one task may wait on the flag. + +Synchronous methods: + * `set` Triggers the flag. Like issuing `set` then `clear` to an `Event`. + * `clear` Unconditionally clear down the flag. + +Asynchronous method: + * `wait` Wait for the flag to be set. If the flag is already set then it + returns immediately. + +Typical usage is having a `asyncio` task wait on a hard ISR. Only one task +should wait on a `ThreadSafeFlag`. The hard ISR services the interrupting +device, sets the `ThreadSafeFlag`, and quits. A single task waits on the flag. +This design conforms with the self-clearing behaviour of the `ThreadSafeFlag`. +Each interrupting device has its own `ThreadSafeFlag` instance and its own +waiting task. +```python +import asyncio +from pyb import Timer + +tsf = asyncio.ThreadSafeFlag() + +def cb(_): + tsf.set() + +async def foo(): + while True: + await tsf.wait() + # Could set an Event here to trigger multiple tasks + print('Triggered') + +tim = Timer(1, freq=1, callback=cb) + +asyncio.run(foo()) +``` +An example [based on one posted by Damien](https://github.com/micropython/micropython/pull/6886#issuecomment-779863757) +Link pins X1 and X2 to test. 
+```python +from machine import Pin, Timer +import asyncio + +class AsyncPin: + def __init__(self, pin, trigger): + self.pin = pin + self.flag = asyncio.ThreadSafeFlag() + self.pin.irq(lambda pin: self.flag.set(), trigger, hard=True) + + async def wait_edge(self): + await self.flag.wait() + +async def foo(): + pin_in = Pin('X1', Pin.IN) + async_pin = AsyncPin(pin_in, Pin.IRQ_RISING) + pin_out = Pin('X2', Pin.OUT) # Toggle pin to test + t = Timer(-1, period=500, callback=lambda _: pin_out(not pin_out())) + await asyncio.sleep(0) + while True: + await async_pin.wait_edge() + print('Got edge.') + +asyncio.run(foo()) +``` + +The current implementation provides no performance benefits against polling the +hardware: other pending tasks may be granted execution first in round-robin +fashion. However the `ThreadSafeFlag` uses the I/O mechanism. There is a plan +to provide a means to reduce the latency such that selected I/O devices are +polled every time the scheduler acquires control. This will provide the highest +possible level of performance as discussed in +[Polling vs Interrupts](./TUTORIAL.md#9-polling-vs-interrupts). + +Regardless of performance issues, a key use for `ThreadSafeFlag` is where a +hardware device requires the use of an ISR for a μs level response. Having +serviced the device, the ISR flags an asynchronous routine, typically +processing received data. + +See [Threadsafe Event](./THREADING.md#31-threadsafe-event) for a thread safe +class which allows multiple tasks to wait on it. + +### 3.6.1 Querying a ThreadSafeFlag + +The `ThreadSafeFlag` class has no equivalent to `Event.is_set`. 
A synchronous +function which returns the state of a `ThreadSafeFlag` instance may be created +as follows: +```python +import asyncio +from select import poll, POLLIN +from time import ticks_us, ticks_diff + +async def foo(tsf): # Periodically set the ThreadSafeFlag + while True: + await asyncio.sleep(1) + tsf.set() + +def ready(tsf, poller): # Return a function which returns tsf status + r = (tsf, POLLIN) + poller.register(*r) + + def is_rdy(): + return r in poller.ipoll(0) # Immediate return + + return is_rdy + +async def test(): + tsf = asyncio.ThreadSafeFlag() + tsk = asyncio.create_task(foo(tsf)) + mpoll = poll() + tsf_ready = ready(tsf, mpoll) # Create a ready function + for _ in range(25): # Run for 5s + if tsf_ready(): + print("tsf ready") + t = ticks_us() + await tsf.wait() + print(f"got tsf in {ticks_diff(ticks_us(), t)}us") + else: + print("Not ready") + await asyncio.sleep_ms(200) + +asyncio.run(test()) +``` +The `ready` closure returns a nonblocking function which tests the status of a +passed flag. In this example `.wait()` is not called until the flag has been +set, consequently `.wait()` returns rapidly. + +The `select.poll` mechanism works because `ThreadSafeFlag` is subclassed from +`io.IOBase` and has an `ioctl` method. + +###### [Contents](./TUTORIAL.md#contents) + +## 3.7 Barrier + +This is an unofficial implementation of a primitive supported in +[CPython 3.11](https://docs.python.org/3.11/library/asyncio-sync.html#asyncio.Barrier). +While similar in purpose to `gather` there are differences described below. + +Its principal purpose is to cause multiple coros to rendezvous at a particular +point. For example producer and consumer coros can synchronise at a point where +the producer has data available and the consumer is ready to use it. At that +point in time, the `Barrier` can optionally run a callback before releasing the +barrier to allow all waiting coros to continue. 
+ +Secondly, it can allow a task to pause until one or more other tasks have +terminated or passed a particular point. For example an application might want +to shut down various peripherals before starting a sleep period. The task +wanting to sleep initiates several shut down tasks and waits until they have +triggered the barrier to indicate completion. This use case may also be served +by `gather`. + +The key difference between `Barrier` and `gather` is symmetry: `gather` is +asymmetrical. One task owns the `gather` and awaits completion of a set of +tasks. By contrast, `Barrier` can be used symmetrically with member tasks +pausing until all have reached the barrier. This makes it suited for use in +the `while True:` constructs common in firmware applications. Use of `gather` +would imply instantiating a set of tasks on every pass of the loop. + +`gather` provides access to return values; irrelevant to `Barrier` because +passing a barrier does not imply return. `Barrier` now has an efficient +implementation using `Event` to suspend waiting tasks. + +The following is a typical usage example. A data provider acquires data from +some hardware and transmits it concurrently on a number of interfaces. These +run at different speeds. The `Barrier` synchronises these loops. This can run +on a Pyboard. 
+
+```python
+import asyncio
+from primitives import Barrier
+from machine import UART
+import ujson
+
+data = None
+async def provider(barrier):
+    global data
+    n = 0
+    while True:
+        n += 1  # Get data from some source
+        data = ujson.dumps([n, 'the quick brown fox jumps over the lazy dog'])
+        print('Provider triggers senders')
+        await barrier  # Free sender tasks
+        print('Provider waits for last sender to complete')
+        await barrier
+
+async def sender(barrier, swriter, n):
+    while True:
+        await barrier  # Provider has got data
+        swriter.write(data)
+        await swriter.drain()
+        print('UART', n, 'sent', data)
+        await barrier  # Trigger provider when last sender has completed
+
+async def main():
+    sw1 = asyncio.StreamWriter(UART(1, 9600), {})
+    sw2 = asyncio.StreamWriter(UART(2, 1200), {})
+    barrier = Barrier(3)
+    tasks = [None] * 2  # For CPython compatibility must store a reference see 2.2 Note
+    for n, sw in enumerate((sw1, sw2)):
+        tasks[n] = asyncio.create_task(sender(barrier, sw, n + 1))
+    await provider(barrier)
+
+asyncio.run(main())
+```
+
+Constructor.
+Mandatory arg:
+ * `participants` The number of coros which will use the barrier.
+Optional args:
+ * `func` Callback or coroutine to run. Default `None`.
+ * `args` Tuple of args for the callback. Default `()`.
+
+Public synchronous methods:
+ * `busy` No args. Returns `True` if at least one task is waiting on the
+ barrier.
+ * `trigger` No args. The barrier records that the coro has passed the critical
+ point. Returns "immediately".
+ * `result` No args. If a callback was provided, returns the return value from
+ the callback. If a coro, returns the `Task` instance. See below.
+
+The callback can be a function or a coro. Typically a function will be used; it
+must run to completion before the barrier is released. A coro will be promoted
+to a `Task` and run asynchronously. The `Task` may be retrieved (e.g. for
+cancellation) using the `result` method. 
+
+If a coro waits on a barrier, it should issue an `await` prior to accessing the
+`result` method. To guarantee that the callback has run it is necessary to wait
+until all participant coros have passed the barrier.
+
+Participant coros issue `await my_barrier` whereupon execution pauses until all
+other participants are also waiting on it. At this point any callback will run
+and then each participant will re-commence execution. See `barrier_test` and
+`semaphore_test` in `asyntest.py` for example usage.
+
+A special case of `Barrier` usage is where some coros are allowed to pass the
+barrier, registering the fact that they have done so. At least one coro must
+wait on the barrier. That coro will pause until all non-waiting coros have
+passed the barrier, and all waiting coros have reached it. At that point all
+waiting coros will resume. A non-waiting coro issues `barrier.trigger()` to
+indicate that it has passed the critical point.
+
+###### [Contents](./TUTORIAL.md#contents)
+
+## 3.8 Delay_ms class
+
+This implements the software equivalent of a retriggerable monostable or a
+watchdog timer. On timeout it can launch a callback or coroutine. It exposes an
+`Event` allowing a task to pause until a timeout occurs. The delay period may be
+altered dynamically.
+
+It may be found in the `primitives` directory and is documented in
+[Delay_ms class](./DRIVERS.md#8-delay_ms-class).
+
+## 3.9 Message
+
+The `Message` class uses [ThreadSafeFlag](./TUTORIAL.md#36-threadsafeflag) to
+provide an object similar to `Event` with the following differences:
+
+ * `.set()` has an optional data payload.
+ * `.set()` can be called from another thread, another core, or from an ISR.
+ * It is an awaitable class.
+ * Payloads may be retrieved in an asynchronous iterator.
+ * Multiple tasks can wait on a single `Message` instance.
+
+It may be found in the `threadsafe` directory and is documented
+[here](./THREADING.md#32-message). 
+
+## 3.10 Message broker
+
+A `Broker` is a means of communicating data and/or control within or between
+modules. It is typically a single global object, and uses a publish-subscribe
+model. A publication comprises a `topic` and a `message`; the latter may be any
+Python object. Tasks subscribe to a `topic` via an `agent` object. Whenever a
+publication occurs, all `agent` instances currently subscribed to that topic are
+triggered.
+
+An `agent` may be an instance of various types including a function, a coroutine
+or a queue.
+
+A benefit of this approach is that the design of publishing tasks can proceed
+independently from that of the subscribers; `agent` instances can be subscribed
+and unsubscribed at run time with no effect on the publisher. The publisher
+neither knows nor cares about the type or number of subscribing `agent`s.
+
+This is [documented here](https://github.com/peterhinch/micropython-async/blob/master/v3/docs/DRIVERS.md#9-message-broker).
+
+## 3.11 Synchronising to hardware
+
+The following hardware-related classes are documented [here](./DRIVERS.md):
+ * `ESwitch` A debounced switch with an `Event` interface.
+ * `Switch` A debounced switch which can trigger open and close user callbacks.
+ * `EButton` Debounced pushbutton with `Event` instances for pressed, released,
+ long press or double-press.
+ * `Pushbutton` Debounced pushbutton with callbacks for pressed, released, long
+ press or double-press.
+ * `ESP32Touch` Extends `Pushbutton` class to support ESP32 touchpads.
+ * `Keyboard` Interface a crosspoint array of buttons e.g. keypads.
+ * `SwArray` Interface a crosspoint array of pushbuttons or switches.
+ * `Encoder` An asynchronous interface for control knobs with switch contacts
+ configured as a quadrature encoder.
+ * `AADC` Asynchronous ADC. A task can pause until the value read from an ADC
+ goes outside defined bounds. Bounds can be absolute or relative to the current
+ value. 
+ +###### [Contents](./TUTORIAL.md#contents) + +# 4 Designing classes for asyncio + +In the context of device drivers, the aim is to ensure nonblocking operation. +The design should ensure that other tasks get scheduled in periods while the +driver is waiting for the hardware. For example, a task awaiting data arriving +on a UART or a user pressing a button should allow other tasks to be scheduled +until the event occurs. + +###### [Contents](./TUTORIAL.md#contents) + +## 4.1 Awaitable classes + +A task can pause execution by waiting on an `awaitable` object. There is a +difference between CPython and MicroPython in the way an `awaitable` class is +defined: see [Portable code](./TUTORIAL.md#412-portable-code) for a way to +write a portable class. This section describes a simpler MicroPython specific +solution. + +In the following code sample, the `__iter__` special method runs for a period. +The calling coro blocks, but other coros continue to run. The key point is that +`__iter__` uses `yield from` to yield execution to another coro, blocking until +it has completed. + +```python +import asyncio + +class Foo(): + def __iter__(self): + for n in range(5): + print('__iter__ called') + yield from asyncio.sleep(1) # Other tasks get scheduled here + return 42 + +async def bar(): + foo = Foo() # Foo is an awaitable class + print('waiting for foo') + res = await foo # Retrieve result + print('done', res) + +asyncio.run(bar()) +``` + +### 4.1.1 Use in context managers + +Awaitable objects can be used in synchronous or asynchronous CM's by providing +the necessary special methods. The syntax is: + +```python +with await awaitable as a: # The 'as' clause is optional + # code omitted +async with awaitable as a: # Asynchronous CM (see below) + # do something +``` + +To achieve this, the `__await__` generator should return `self`. This is passed +to any variable in an `as` clause and also enables the special methods to work. 
+ +###### [Contents](./TUTORIAL.md#contents) + +### 4.1.2 Portable code + +The Python language requires that `__await__` is a generator function. In +MicroPython generators and tasks are identical, so the solution is to use +`yield from task(args)`. + +This tutorial aims to offer code portable to CPython 3.8 or above. In CPython +tasks and generators are distinct. CPython tasks have an `__await__` special +method which retrieves a generator. This is portable and was tested under +CPython 3.8: + +```python +import sys +up = sys.implementation.name == "micropython" +import asyncio + +async def times_two(n): # Coro to await + await asyncio.sleep(1) + return 2 * n + +class Foo(): + def __await__(self): + res = 1 + for n in range(5): + print('__await__ called') + if up: # MicroPython + res = yield from times_two(res) + else: # CPython + res = yield from times_two(res).__await__() + return res + + __iter__ = __await__ + +async def bar(): + foo = Foo() # foo is awaitable + print('waiting for foo') + res = await foo # Retrieve value + print('done', res) + +asyncio.run(bar()) +``` + +In `__await__`, `yield from asyncio.sleep(1)` was allowed in CPython 3.6. In +V3.8 it produces a syntax error. It must now be put in the task as in the above +example. + +###### [Contents](./TUTORIAL.md#contents) + +## 4.2 Asynchronous iterators + +These provide a means of returning a finite or infinite sequence of values +and could be used as a means of retrieving successive data items as they arrive +from a read-only device. An asynchronous iterable calls asynchronous code in +its `next` method. The class must conform to the following requirements: + + * It has an `__aiter__` method returning the asynchronous iterator. + * It has an ` __anext__` method which is a task - i.e. defined with + `async def` and containing at least one `await` statement. To stop + the iteration, it must raise a `StopAsyncIteration` exception. 
+ +Successive values are retrieved with `async for` as below: + +```python +import asyncio +class AsyncIterable: + def __init__(self): + self.data = (1, 2, 3, 4, 5) + self.index = 0 + + def __aiter__(self): # See note below + return self + + async def __anext__(self): + data = await self.fetch_data() + if data: + return data + else: + raise StopAsyncIteration + + async def fetch_data(self): + await asyncio.sleep(0.1) # Other tasks get to run + if self.index >= len(self.data): + return None + x = self.data[self.index] + self.index += 1 + return x + +async def run(): + ai = AsyncIterable() + async for x in ai: + print(x) +asyncio.run(run()) +``` +The `__aiter__` method was formerly an asynchronous method. CPython 3.6 accepts +synchronous or asynchronous methods. CPython 3.8 and MicroPython require +synchronous code [ref](https://github.com/micropython/micropython/pull/6272). + +Asynchronous comprehensions [PEP530](https://www.python.org/dev/peps/pep-0530/), +supported in CPython 3.6, are not yet supported in MicroPython. + +###### [Contents](./TUTORIAL.md#contents) + +## 4.3 Asynchronous context managers + +Classes can be designed to support asynchronous context managers. These are +CM's having enter and exit procedures which are tasks. An example is the `Lock` +class. Such a class has an `__aenter__` task which is logically required to run +asynchronously. To support the asynchronous CM protocol its `__aexit__` method +also must be a task. Such classes are accessed from within a task with the +following syntax: +```python +async def bar(lock): + async with lock as obj: # "as" clause is optional, no real point for a lock + print('In context manager') +``` +As with normal context managers an exit method is guaranteed to be called when +the context manager terminates, whether normally or via an exception. To +achieve this, the special methods `__aenter__` and `__aexit__` must be +defined, both being tasks waiting on a task or `awaitable` object. 
This example +comes from the `Lock` class: +```python + async def __aenter__(self): + await self.acquire() # a coro defined with async def + return self + + async def __aexit__(self, *args): + self.release() # A synchronous method +``` +If the `async with` has an `as variable` clause the variable receives the +value returned by `__aenter__`. The following is a complete example: +```python +import asyncio + +class Foo: + def __init__(self): + self.data = 0 + + async def acquire(self): + await asyncio.sleep(1) + return 42 + + async def __aenter__(self): + print('Waiting for data') + self.data = await self.acquire() + return self + + def close(self): + print('Exit') + + async def __aexit__(self, *args): + print('Waiting to quit') + await asyncio.sleep(1) # Can run asynchronous + self.close() # or synchronous methods + +async def bar(): + foo = Foo() + async with foo as f: + print('In context manager') + res = f.data + print('Done', res) + +asyncio.run(bar()) +``` + +###### [Contents](./TUTORIAL.md#contents) + +## 4.4 Object scope + +If an object launches a task and that object goes out of scope, the task will +continue to be scheduled. The task will run to completion or until cancelled. +If this is undesirable consider writing a `deinit` method to cancel associated +running tasks. Applications can call `deinit`, for example in a `try...finally` +block or in a context manager. + +###### [Contents](./TUTORIAL.md#contents) + +# 5 Exceptions timeouts and cancellation + +These topics are related: `asyncio` enables the cancellation of tasks, and the +application of a timeout to a task, by throwing an exception to the task. + +## 5.1 Exceptions + +Consider a task `foo` created with `asyncio.create_task(foo())`. This task +might `await` other tasks, with potential nesting. If an exception occurs, it +will propagate up the chain until it reaches `foo`. This behaviour is as per +function calls: the exception propagates up the call chain until trapped. 
If +the exception is not trapped, the `foo` task stops with a traceback. Crucially +other tasks continue to run. + +This does not apply to the main task started with `asyncio.run`. If an +exception propagates to that task, the scheduler will stop. This can be +demonstrated as follows: + +```python +import asyncio + +async def bar(): + await asyncio.sleep(0) + 1/0 # Crash + +async def foo(): + await asyncio.sleep(0) + print('Running bar') + await bar() + print('Does not print') # Because bar() raised an exception + +async def main(): + task = asyncio.create_task(foo()) + for _ in range(5): + print('Working') # Carries on after the exception + await asyncio.sleep(0.5) + 1/0 # Stops the scheduler + await asyncio.sleep(0) + print('This never happens') + await asyncio.sleep(0) + +asyncio.run(main()) +``` +If `main` issued `await foo()` rather than `create_task(foo())` the exception +would propagate to `main`. Being untrapped, the scheduler, and hence the script, +would stop. + +#### Warning + +Using `throw` or `close` to throw an exception to a task is unwise. It subverts +`asyncio` by forcing the task to run, and possibly terminate, when it is still +queued for execution. + +### 5.1.1 Global exception handler + +During development, it is often best if untrapped exceptions stop the program +rather than merely halting a single task. This can be achieved by setting a +global exception handler. 
This debug aid is not CPython compatible: +```python +import asyncio +import sys + +def _handle_exception(loop, context): + print('Global handler') + sys.print_exception(context["exception"]) + #loop.stop() + sys.exit() # Drastic - loop.stop() does not work when used this way + +async def bar(): + await asyncio.sleep(0) + 1/0 # Crash + +async def main(): + loop = asyncio.get_event_loop() + loop.set_exception_handler(_handle_exception) + task = asyncio.create_task(bar()) + for _ in range(5): + print('Working') + await asyncio.sleep(0.5) + +asyncio.run(main()) +``` + +### 5.1.2 Keyboard interrupts + +There is a "gotcha" illustrated by the following code sample. If allowed to run +to completion, it works as expected. + +```python +import asyncio +async def foo(): + await asyncio.sleep(3) + print('About to throw exception.') + 1/0 + +async def bar(): + try: + await foo() + except ZeroDivisionError: + print('foo was interrupted by zero division') # Happens + raise # Force shutdown to run by propagating to loop. + except KeyboardInterrupt: + print('foo was interrupted by ctrl-c') # NEVER HAPPENS + raise + +async def shutdown(): + print('Shutdown is running.') # Happens in both cases + await asyncio.sleep(1) + print('done') + +try: + asyncio.run(bar()) +except ZeroDivisionError: + asyncio.run(shutdown()) +except KeyboardInterrupt: + print('Keyboard interrupt at loop level.') + asyncio.run(shutdown()) +``` + +However, issuing a keyboard interrupt causes the exception to go to the +outermost scope. This is because `asyncio.sleep` causes execution to be +transferred to the scheduler. Consequently, applications requiring cleanup code +in response to a keyboard interrupt should trap the exception at the outermost +scope. + +###### [Contents](./TUTORIAL.md#contents) + +## 5.2 Cancellation and Timeouts + +Cancellation and timeouts work by throwing an exception to the task. This is +unlike a normal exception. 
If a task cancels another, the running task
+continues to execute until it yields to the scheduler. Task cancellation occurs
+at that point, whether or not the cancelled task is scheduled for execution: a
+task waiting on (say) an `Event` or a `sleep` will be cancelled.
+
+For tasks launched with `.create_task` the exception is transparent to the
+user: the task simply stops as described above. It is possible to trap the
+exception, for example to perform cleanup code, typically in a `finally`
+clause. The exception thrown to the task is `asyncio.CancelledError` in both
+cancellation and timeout. There is no way for the task to distinguish between
+these two cases.
+
+As stated above, for a task launched with `.create_task`, trapping the error is
+optional. Where a task is `await`ed, to avoid a halt it must be trapped within
+the task, within the `await`ing scope, or both. In the last case, the task must
+re-raise the exception after trapping so that the error can again be trapped in
+the outer scope.
+
+### 5.2.1 Task cancellation
+
+The `Task` class has a `cancel` method. This throws a `CancelledError` to the
+task. This works with nested tasks. Usage is as follows:
+```python
+import asyncio
+async def printit():
+    print('Got here')
+    await asyncio.sleep(1)
+
+async def foo():
+    while True:
+        await printit()
+        print('In foo')
+
+async def bar():
+    foo_task = asyncio.create_task(foo())  # Create task from task
+    await asyncio.sleep(4)  # Show it running
+    foo_task.cancel()
+    await asyncio.sleep(0)
+    print('foo is now cancelled.')
+    await asyncio.sleep(4)  # Proof!
+
+asyncio.run(bar())
+```
+The exception may be trapped as follows:
+```python
+import asyncio
+async def printit():
+    print('Got here')
+    await asyncio.sleep(1)
+
+async def foo():
+    try:
+        while True:
+            await printit()
+    except asyncio.CancelledError:
+        print('Trapped cancelled error.')
+        raise  # Enable check in outer scope
+    finally:  # Usual way to do cleanup
+        print('Cancelled - finally')
+
+async def bar():
+    foo_task = asyncio.create_task(foo())
+    await asyncio.sleep(4)
+    foo_task.cancel()
+    await asyncio.sleep(0)
+    print('Task is now cancelled')
+asyncio.run(bar())
+```
+As of firmware V1.18, the `current_task()` method is supported. This enables a
+task to pass itself to other tasks, enabling them to cancel it. It also
+facilitates the following pattern:
+
+```python
+class Foo:
+    async def run(self):
+        self.task = asyncio.current_task()
+        # code omitted
+
+    def cancel(self):
+        self.task.cancel()
+```
+
+###### [Contents](./TUTORIAL.md#contents)
+
+### 5.2.2 Tasks with timeouts
+
+Timeouts are implemented by means of `asyncio` methods `.wait_for()` and
+`.wait_for_ms()`. These take as arguments a task and a timeout in seconds or ms
+respectively. If the timeout expires, an `asyncio.CancelledError` is thrown to
+the task, while the caller receives a `TimeoutError`. Trapping the exception in
+the task is optional. The caller must trap the `TimeoutError`, otherwise the
+exception will interrupt program execution.
+
+```python
+import asyncio
+
+async def forever():
+    try:
+        print('Starting')
+        while True:
+            await asyncio.sleep_ms(300)
+            print('Got here')
+    except asyncio.CancelledError:  # Task sees CancelledError
+        print('Trapped cancelled error.')
+        raise
+    finally:  # Usual way to do cleanup
+        print('forever timed out')
+
+async def foo():
+    try:
+        await asyncio.wait_for(forever(), 3)
+    except asyncio.TimeoutError:  # Mandatory error trapping
+        print('foo got timeout')  # Caller sees TimeoutError
+    await asyncio.sleep(2)
+
+asyncio.run(foo())
+```
+
+### 5.2.3 Cancelling running tasks
+
+This useful technique can provoke counter intuitive behaviour. Consider a task
+`foo` created using `create_task`. Then tasks `bar`, `cancel_me` (and possibly
+others) are created with code like:
+```python
+async def bar():
+    await foo
+    # more code
+```
+All will pause waiting for `foo` to terminate. If any one of the waiting tasks
+is cancelled, the cancellation will propagate to `foo`. This would be expected
+behaviour if `foo` were a coro. The fact that it is a running task means that
+the cancellation impacts the tasks waiting on it; it actually causes their
+cancellation. Again, if `foo` were a coro and a task or coro was waiting on it,
+cancelling `foo` would be expected to propagate to the caller. In the context
+of running tasks, this may be unwelcome.
+
+The behaviour is "correct": CPython `asyncio` behaves identically. Ref
+[this forum thread](https://forum.micropython.org/viewtopic.php?f=2&t=8158).
+
+###### [Contents](./TUTORIAL.md#contents)
+
+# 6 Interfacing hardware
+
+At heart, all interfaces between `asyncio` and external asynchronous events
+rely on polling. This is because of the cooperative nature of `asyncio`
+scheduling: the task which is expected to respond to the event can only acquire
+control after another task has relinquished it. There are two ways to handle
+this.
+ * Implicit polling: when a task yields and the scheduler acquires control, the + scheduler checks for an event. If it has occurred it schedules a waiting task. + This is the approach used by `ThreadSafeFlag`. + * Explicit polling: a user task does busy-wait polling on the hardware. + +At its simplest, explicit polling may consist of code like this: +```python +async def poll_my_device(): + global my_flag # Set by device ISR + while True: + if my_flag: + my_flag = False + # service the device + await asyncio.sleep(0) +``` + +In place of a global, an instance variable or an instance of an awaitable class +might be used. Explicit polling is discussed further +[below](./TUTORIAL.md#62-polling-hardware-with-a-task). + +Implicit polling is more efficient and may gain further from planned +improvements to I/O scheduling. Aside from the use of `ThreadSafeFlag`, it is +possible to write code which uses the same technique. This is by designing the +driver to behave like a stream I/O device such as a socket or UART, using +`stream I/O`. This polls devices using Python's `select.poll` system: because +polling is done in C it is faster and more efficient than explicit polling. The +use of `stream I/O` is discussed +[here](./TUTORIAL.md#63-using-the-stream-mechanism). + +Owing to its efficiency, implicit polling most benefits fast I/O device drivers: +streaming drivers can be written for many devices not normally considered as +streaming devices [section 6.4](./TUTORIAL.md#64-writing-streaming-device-drivers). + +There are hazards involved with approaches to interfacing ISR's which appear to +avoid polling. It is invalid to issue `create_task` or to trigger an `Event` in +an ISR as these can cause a race condition in the scheduler. + +###### [Contents](./TUTORIAL.md#contents) + +## 6.1 Timing issues + +Both explicit and implicit polling are currently based on round-robin +scheduling. 
Assume I/O is operating concurrently with N user tasks each of +which yields with a zero delay. When I/O has been serviced it will next be +polled once all user tasks have been scheduled. The implied latency needs to be +considered in the design. I/O channels may require buffering, with an ISR +servicing the hardware in real time from buffers and tasks filling or +emptying the buffers in slower time. + +The possibility of overrun also needs to be considered: this is the case where +something being polled by a task occurs more than once before the task is +actually scheduled. + +Another timing issue is the accuracy of delays. If a task issues + +```python + await asyncio.sleep_ms(t) + # next line +``` + +the scheduler guarantees that execution will pause for at least `t`ms. The +actual delay may be greater depending on the system state when `t` expires. +If, at that time, all other tasks are waiting on nonzero delays, the next line +will immediately be scheduled. But if other tasks are pending execution (either +because they issued a zero delay or because their time has also elapsed) they +may be scheduled first. This introduces a timing uncertainty into the `sleep()` +and `sleep_ms()` functions. The worst-case value for this overrun may be +calculated by summing, for every other task, the worst-case execution time +between yielding to the scheduler. + +###### [Contents](./TUTORIAL.md#contents) + +## 6.2 Polling hardware with a task + +This is a simple approach, but is most appropriate to hardware which may be +polled at a relatively low rate. This is primarily because polling with a short +(or zero) polling interval may cause the task to consume more processor time +than is desirable. + +The example `apoll.py` demonstrates this approach by polling the Pyboard +accelerometer at 100ms intervals. It performs some simple filtering to ignore +noisy samples and prints a message every two seconds if the board is not moved. 
+ +Further examples may be found in the primitives directory, notably `switch.py` +and `pushbutton.py`: drivers for switch and pushbutton devices. + +An example of a driver for a device capable of reading and writing is shown +below. For ease of testing Pyboard UART 4 emulates the notional device. The +driver implements a `RecordOrientedUart` class, where data is supplied in +variable length records consisting of bytes instances. The object appends a +delimiter before sending and buffers incoming data until the delimiter is +received. This is a demo and is an inefficient way to use a UART compared to +stream I/O. + +For the purpose of demonstrating asynchronous transmission we assume the +device being emulated has a means of checking that transmission is complete +and that the application requires that we wait on this. Neither assumption is +true in this example but the code fakes it with `await asyncio.sleep(0.1)`. + +Link pins X1 and X2 to run. + +```python +import asyncio +from pyb import UART + +class RecordOrientedUart(): + DELIMITER = b'\0' + def __init__(self): + self.uart = UART(4, 9600) + self.data = b'' + + def __iter__(self): # Not __await__ issue #2678 + data = b'' + while not data.endswith(self.DELIMITER): + yield from asyncio.sleep(0) # Necessary because: + while not self.uart.any(): + yield from asyncio.sleep(0) # timing may mean this is never called + data = b''.join((data, self.uart.read(self.uart.any()))) + self.data = data + + async def send_record(self, data): + data = b''.join((data, self.DELIMITER)) + self.uart.write(data) + await self._send_complete() + + # In a real device driver we would poll the hardware + # for completion in a loop with await asyncio.sleep(0) + async def _send_complete(self): + await asyncio.sleep(0.1) + + def read_record(self): # Synchronous: await the object before calling + return self.data[0:-1] # Discard delimiter + +async def run(): + foo = RecordOrientedUart() + rx_data = b'' + await foo.send_record(b'A line of 
text.') + for _ in range(20): + await foo # Other tasks are scheduled while we wait + rx_data = foo.read_record() + print('Got: {}'.format(rx_data)) + await foo.send_record(rx_data) + rx_data = b'' + +asyncio.run(run()) +``` + +###### [Contents](./TUTORIAL.md#contents) + +## 6.3 Using the stream mechanism + +A stream is an abstraction of a device interface which consists of a realtime +source of bytes. Examples include UARTs, I2S devices and sockets. Many streams +are continuous: an I2S microphone will source data until switched off and the +interface is closed. Streams are supported by `asyncio.StreamReader` and +`asyncio.StreamWriter` classes. + +This section applies to platforms other than the Unix build. The latter handles +stream I/O in a different way described +[here](https://github.com/micropython/micropython/issues/7965#issuecomment-960259481). +Code samples may not run under the Unix build until it is made more compatible +with other platforms. + +The stream mechanism can be illustrated using a Pyboard UART. This code sample +demonstrates concurrent I/O on one UART. To run, link Pyboard pins X1 and X2 +(UART Txd and Rxd). + +```python +import asyncio +from machine import UART +uart = UART(4, 9600, timeout=0) # timeout=0 prevents blocking at low baudrates + +async def sender(): + swriter = asyncio.StreamWriter(uart, {}) + while True: + swriter.write('Hello uart\n') + await swriter.drain() # Transmission starts now. + await asyncio.sleep(2) + +async def receiver(): + sreader = asyncio.StreamReader(uart) + while True: + res = await sreader.readline() + print('Received', res) + +async def main(): + rx = asyncio.create_task(receiver()) + tx = asyncio.create_task(sender()) + await asyncio.sleep(10) + print('Quitting') + tx.cancel() + rx.cancel() + await asyncio.sleep(1) + print('Done') + +asyncio.run(main()) +``` +The `.readline` method will pause until `\n` is received. + +##### StreamWriter write methods + +Writing to a `StreamWriter` occurs in two stages. 
The synchronous `.write`
+method concatenates data for later transmission. The asynchronous `.drain`
+causes transmission. To avoid allocation call `.drain` after each call to
+`.write`. If multiple tasks are to write to the same `StreamWriter`, the best
+solution is to implement a shared `Queue`. Each task writes to the `Queue` and
+a single task waits on it, issuing `.write` and `.drain` whenever data is
+queued. Do not have multiple tasks calling `.drain` concurrently: this can
+result in data corruption for reasons detailed
+[here](https://github.com/micropython/micropython/issues/6621).
+
+The mechanism works because the device driver (written in C) implements the
+following methods: `ioctl`, `read`, `readline` and `write`. See
+[Writing streaming device drivers](./TUTORIAL.md#64-writing-streaming-device-drivers)
+for details on how such drivers may be written in Python.
+
+##### StreamReader read methods
+
+The `StreamReader` read methods fall into two categories depending on whether
+they wait for a specific end condition. Thus `.readline` pauses until a newline
+byte has been received, `.read(-1)` waits for EOF, and `readexactly` waits for
+a precise number of bytes. Other methods return the number of bytes available
+at the time they are called (up to a maximum). Consider the following fragment:
+```python
+async def foo(device):
+    sr = StreamReader(device)
+    data = await sr.read(20)
+```
+When `read` is issued, task `foo` is descheduled. Other tasks are scheduled,
+resulting in a delay. During that period, depending on the stream source, bytes
+may be received. The hardware or the device driver may buffer the data, at some
+point flagging their availability. When the concurrent tasks permit, asyncio
+polls the device. If data is available `foo` is rescheduled and pending data is
+returned. It should be evident that the number of bytes returned and the
+duration of the pause are variable.
+ +There are also implications for application and device driver design: in the +period while the task is descheduled, incoming data must be buffered to avoid +data loss. For example in the case of a UART an interrupt service routine +buffers incoming characters. To avoid data loss the size of the read buffer +should be set based on the maximum latency caused by other tasks along with the +baudrate. The buffer size can be reduced if hardware flow control is available. + +##### StreamReader read timeout + +It is possible to apply a timeout to a stream. One approach is to subclass +`StreamReader` as follows: +```python +class StreamReaderTo(asyncio.StreamReader): + def __init__(self, source): + super().__init__(source) + self._delay_ms = Delay_ms() # Allocate once only + + # Task cancels itself if timeout elapses without a byte being received + async def readintotim(self, buf: bytearray, toms: int) -> int: # toms: timeout in ms + mvb = memoryview(buf) + timer = self._delay_ms + timer.callback(asyncio.current_task().cancel) + timer.trigger(toms) # Start cancellation timer + n = 0 + nbytes = len(buf) + try: + while n < nbytes: + n += await super().readinto(mvb[n:]) + timer.trigger(toms) # Retrigger when bytes received + except asyncio.CancelledError: + pass + timer.stop() + return n +``` +This adds a `.readintotim` asynchronous method. Like `.readinto` it reads into a +supplied buffer but the read is subject to a timeout `to` in ms. The read pauses +until either the buffer is full or until bytes stop arriving for a time longer +than `to`. The method returns the number of bytes received. If fewer bytes were +received than would fill the buffer, a timeout occurred. The script +[stream_to.py](../as_demos/stream_to.py) demonstrates this. + +### 6.3.1 A UART driver example + +The program [auart_hd.py](../as_demos/auart_hd.py) illustrates a method of +communicating with a half duplex device such as one responding to the modem +'AT' command set. 
Half duplex means that the device never sends unsolicited +data: its transmissions are always in response to a command from the master. + +The device is emulated, enabling the test to be run on a Pyboard with two wire +links. + +The (highly simplified) emulated device responds to any command by sending four +lines of data with a pause between each, to simulate slow processing. + +The master sends a command, but does not know in advance how many lines of data +will be returned. It starts a retriggerable timer, which is retriggered each +time a line is received. When the timer times out it is assumed that the device +has completed transmission, and a list of received lines is returned. + +The case of device failure is also demonstrated. This is done by omitting the +transmission before awaiting a response. After the timeout an empty list is +returned. See the code comments for more details. + +###### [Contents](./TUTORIAL.md#contents) + +## 6.4 Writing streaming device drivers + +The `stream I/O` mechanism is provided to support I/O to stream devices. Its +typical use is to support streaming I/O devices such as UARTs and sockets. The +mechanism may be employed by drivers of any device which needs to be polled: +the polling is delegated to the scheduler which uses `select` to schedule the +handlers for any devices which are ready. This is more efficient than running +multiple tasks each polling a device, partly because `select` is written in C +but also because the task performing the polling is descheduled until the +`poll` object returns a ready status. + +A device driver capable of employing the stream I/O mechanism may support +`StreamReader`, `StreamWriter` instances or both. A readable device must +provide at least one of the following methods. Note that these are synchronous +methods. The `ioctl` method (see below) ensures that they are only called if +data is available. The methods should return as fast as possible with as much +data as is available. 
+ +`readline()` Return as many characters as are available up to and including any +newline character. Required if you intend to use `StreamReader.readline()`. +It should return a maximum of one line. +`read(n)` Return as many characters as are available but no more than `n`. +Required to use `StreamReader.read()` or `StreamReader.readexactly()` + +A writeable driver must provide this synchronous method: +`write` Arg `buf`: the buffer to write. This can be a `memoryview`. +It should return immediately. The return value is the number of characters +actually written (may well be 1 if the device is slow). The `ioctl` method +ensures that this is only called if the device is ready to accept data. + +Note that this has changed relative to `asyncio` V2. Formerly `write` had +two additional mandatory args. Existing code will fail because `Stream.drain` +calls `write` with a single arg (which can be a `memoryview`). + +All devices must provide an `ioctl` method which polls the hardware to +determine its ready status. A typical example for a read/write driver is: + +```python +import io +MP_STREAM_POLL_RD = const(1) +MP_STREAM_POLL_WR = const(4) +MP_STREAM_POLL = const(3) +MP_STREAM_ERROR = const(-1) + +class MyIO(io.IOBase): + # Methods omitted + def ioctl(self, req, arg): # see ports/stm32/uart.c + ret = MP_STREAM_ERROR + if req == MP_STREAM_POLL: + ret = 0 + if arg & MP_STREAM_POLL_RD: + if hardware_has_at_least_one_char_to_read: + ret |= MP_STREAM_POLL_RD + if arg & MP_STREAM_POLL_WR: + if hardware_can_accept_at_least_one_write_character: + ret |= MP_STREAM_POLL_WR + return ret +``` + +The following is a complete awaitable delay class. 
+```python +import asyncio +import utime +import io +MP_STREAM_POLL_RD = const(1) +MP_STREAM_POLL = const(3) +MP_STREAM_ERROR = const(-1) + +class MillisecTimer(io.IOBase): + def __init__(self): + self.end = 0 + self.sreader = asyncio.StreamReader(self) + + def __iter__(self): + await self.sreader.read(1) + + def __call__(self, ms): + self.end = utime.ticks_add(utime.ticks_ms(), ms) + return self + + def read(self, _): + return "a" + + def ioctl(self, req, arg): + ret = MP_STREAM_ERROR + if req == MP_STREAM_POLL: + ret = 0 + if arg & MP_STREAM_POLL_RD: + if utime.ticks_diff(utime.ticks_ms(), self.end) >= 0: + ret |= MP_STREAM_POLL_RD + return ret + +async def timer_test(n): + timer = MillisecTimer() + for x in range(n): + await timer(100) # Pause 100ms + print(x) + +asyncio.run(timer_test(20)) +``` + +This currently confers no benefit over `await asyncio.sleep_ms()`, however if +`asyncio` implements fast I/O scheduling it will be capable of more precise +timing. This is because I/O will be tested on every scheduler call. Currently +it is polled once per complete pass, i.e. when all other pending tasks have run +in round-robin fashion. + +It is possible to use I/O scheduling to associate an event with a callback. +This is more efficient than a polling loop because the task doing the polling +is descheduled until `ioctl` returns a ready status. The following runs a +callback when a pin changes state. 
Once again latency can be high: if implemented, fast I/O scheduling will
improve this.
## 7.2 asyncio retains state

If an `asyncio` application terminates, the state is retained. Embedded code seldom
terminates, but in testing, it is useful to re-run a script without the need for
a soft reset. This may be done as follows:
The purpose of this is discussed +[here](http://docs.micropython.org/en/latest/reference/constrained.html#the-heap). + +###### [Contents](./TUTORIAL.md#contents) + +## 7.4 Testing + +It's advisable to test that a device driver yields control when you intend it +to. This can be done by running one or more instances of a dummy task which +runs a loop printing a message, and checking that it runs in the periods when +the driver is blocking: + +```python +async def rr(n): + while True: + print('Roundrobin ', n) + await asyncio.sleep(0) +``` + +As an example of the type of hazard which can occur, in the `RecordOrientedUart` +example above, the `__await__` method was originally written as: + +```python + def __await__(self): + data = b'' + while not data.endswith(self.DELIMITER): + while not self.uart.any(): + yield from asyncio.sleep(0) + data = b''.join((data, self.uart.read(self.uart.any()))) + self.data = data +``` + +In testing, this hogged execution until an entire record was received. This was +because `uart.any()` always returned a nonzero quantity. By the time it was +called, characters had been received. The solution was to yield execution in +the outer loop: + +```python + def __await__(self): + data = b'' + while not data.endswith(self.DELIMITER): + yield from asyncio.sleep(0) # Necessary because: + while not self.uart.any(): + yield from asyncio.sleep(0) # timing may mean this is never called + data = b''.join((data, self.uart.read(self.uart.any()))) + self.data = data +``` + +It is perhaps worth noting that this error would not have been apparent had +data been sent to the UART at a slow rate rather than via a loopback test. +Welcome to the joys of realtime programming. + +###### [Contents](./TUTORIAL.md#contents) + +## 7.5 A common error + +If a function or method is defined with `async def` and subsequently called as +if it were a regular (synchronous) callable, MicroPython does not issue an +error message. 
async def main():
    foo()  # Should read: await foo()
[This repo](https://github.com/peterhinch/micropython-mqtt.git) offers a
resilient asynchronous MQTT client which ensures message integrity over WiFi
outages.
+ +There is an event loop method `run_forever` which takes no args and causes the +event loop to run. This is supported by `asyncio`. This has use cases, notably +when all of an application's tasks are instantiated in other modules. + +## 7.8 Race conditions + +These occur when coroutines compete for access to a resource, each using the +resource in a mutually incompatible manner. + +This behaviour can be demonstrated by running [the switch test](./primitives/tests/switches.py). +In `test_sw()` coroutines are scheduled by events. If the switch is cycled +rapidly the LED behaviour may seem surprising. This is because each time the +switch is closed, a coro is launched to flash the red LED, and on each open event, +a coro is launched for the green LED. With rapid cycling a new coro instance will +commence while one is still running against the same LED. This race condition +leads to the LED behaving erratically. + +This is a hazard of asynchronous programming. In some situations, it is +desirable to launch a new instance on each button press or switch closure, even +if other instances are still incomplete. In other cases it can lead to a race +condition, leading to the need to code an interlock to ensure that the desired +behaviour occurs. The programmer must define the desired behaviour. + +In the case of this test program it might be to ignore events while a similar +one is running, or to extend the timer to prolong the LED illumination. +Alternatively a subsequent button press might be required to terminate the +illumination. The "right" behaviour is application dependent. + +## 7.9 Undocumented asyncio features + +These may be subject to change. + +A `Task` instance has a `.done()` method that returns `True` if the task has +terminated (by running to completion, by throwing an exception or by being +cancelled). + +If a task has completed, a `.data` bound variable holds any result which was +returned by the task. 
If the task throws an exception or is cancelled `.data` +holds the exception (or `CancelledError`). + +###### [Contents](./TUTORIAL.md#contents) + +# 8 Notes for beginners + +These notes are intended for those new to asynchronous code. They start by +outlining the problems which schedulers seek to solve, and give an overview of +the `asyncio` approach to a solution. + +[Section 8.5](./TUTORIAL.md#85-why-cooperative-rather-than-pre-emptive) +discusses the relative merits of `asyncio` and the `_thread` module and why +you may prefer to use cooperative (`asyncio`) over pre-emptive (`_thread`) +scheduling. + +###### [Contents](./TUTORIAL.md#contents) + +## 8.1 Problem 1: event loops + +A typical firmware application runs continuously and is required to respond to +external events. These might include a voltage change on an ADC, the arrival of +a hard interrupt, a character arriving on a UART, or data being available on a +socket. These events occur asynchronously and the code must be able to respond +regardless of the order in which they occur. Further the application may be +required to perform time-dependent tasks such as flashing LED's. + +The obvious way to do this is with an event loop. The following is not +practical code but serves to illustrate the general form of an event loop. + +```python +def event_loop(): + led_1_time = 0 + led_2_time = 0 + switch_state = switch.state() # Current state of a switch + while True: + time_now = utime.time() + if time_now >= led_1_time: # Flash LED #1 + led1.toggle() + led_1_time = time_now + led_1_period + if time_now >= led_2_time: # Flash LED #2 + led2.toggle() + led_2_time = time_now + led_2_period + # Handle LEDs 3 upwards + + if switch.value() != switch_state: + switch_state = switch.value() + # do something + if uart.any(): + # handle UART input +``` + +This works for simple examples but event loops rapidly become unwieldy as the +number of events increases. 
They also violate the principles of object oriented +programming by lumping much of the program logic in one place rather than +associating code with the object being controlled. We want to design a class +for an LED capable of flashing which could be put in a module and imported. An +OOP approach to flashing an LED might look like this: + +```python +import pyb +class LED_flashable(): + def __init__(self, led_no): + self.led = pyb.LED(led_no) + + def flash(self, period): + while True: + self.led.toggle() + # somehow wait for period but allow other + # things to happen at the same time +``` + +A cooperative scheduler such as `asyncio` enables classes such as this to be +created. + +###### [Contents](./TUTORIAL.md#contents) + +## 8.2 Problem 2: blocking methods + +Assume you need to read a number of bytes from a socket. If you call +`socket.read(n)` with a default blocking socket it will "block" (i.e. fail to +return) until `n` bytes have been received. During this period the application +will be unresponsive to other events. + +With `asyncio` and a non-blocking socket you can write an asynchronous read +method. The task requiring the data will (necessarily) block until it is +received but during that period other tasks will be scheduled enabling the +application to remain responsive. + +## 8.3 The asyncio approach + +The following class provides for an LED which can be turned on and off, and +which can also be made to flash at an arbitrary rate. A `LED_async` instance +has a `run` method which can be considered to run continuously. The LED's +behaviour can be controlled by methods `on()`, `off()` and `flash(secs)`. 
async def main():
    leds = [LED_async(n) for n in range(1, 5)]  # All four Pyboard LEDs
    for n, led in enumerate(leds):
        led.flash(0.7 + n/4)
    sw = pyb.Switch()
    while not sw.value():
        await asyncio.sleep_ms(100)
+ +```python +async def hello(): + for _ in range(10): + print('Hello world.') + await asyncio.sleep(1) +``` + +This function prints the message ten times at one second intervals. While the +function is paused pending the time delay `asyncio` will schedule other tasks, +providing an illusion of concurrency. + +When a task issues `await asyncio.sleep_ms()` or `await asyncio.sleep()` the +current task pauses: it is placed on a queue which is ordered on time due, and +execution passes to the task at the top of the queue. The queue is designed so +that even if the specified sleep is zero other due tasks will run before the +current one is resumed. This is "fair round-robin" scheduling. It is common +practice to issue `await asyncio.sleep(0)` in loops to ensure a task doesn't +hog execution. The following shows a busy-wait loop which waits for another +task to set the global `flag`. Alas it monopolises the CPU preventing other +tasks from running: + +```python +async def bad_code(): + global flag + while not flag: + pass + flag = False + # code omitted +``` + +The problem here is that while the `flag` is `False` the loop never yields to +the scheduler so no other task will get to run. The correct approach is: + +```python +async def good_code(): + global flag + while not flag: + await asyncio.sleep(0) + flag = False + # code omitted +``` + +For the same reason it's bad practice to issue delays like `utime.sleep(1)` +because that will lock out other tasks for 1s; use `await asyncio.sleep(1)`. +Note that the delays implied by `asyncio` methods `sleep` and `sleep_ms` can +overrun the specified time. This is because while the delay is in progress +other tasks will run. When the delay period completes, execution will not +resume until the running task issues `await` or terminates. A well-behaved task +will always issue `await` at regular intervals. Where a precise delay is +required, especially one below a few ms, it may be necessary to use +`utime.sleep_us(us)`. 
+ +###### [Contents](./TUTORIAL.md#contents) + +## 8.5 Why cooperative rather than pre-emptive? + +The initial reaction of beginners to the idea of cooperative multi-tasking is +often one of disappointment. Surely pre-emptive is better? Why should I have to +explicitly yield control when the Python virtual machine can do it for me? + +My background is in hardware interfacing: I am not a web developer. I found +[this video](https://www.youtube.com/watch?v=kdzL3r-yJZY) to be an interesting +beginner-level introduction to asynchronous web programming which discusses the +relative merits of cooperative and pre-emptive scheduling in that environment. + +When it comes to embedded systems the cooperative model has two advantages. +Firstly, it is lightweight. It is possible to have large numbers of tasks +because unlike descheduled threads, paused tasks contain little state. +Secondly it avoids some of the subtle problems associated with pre-emptive +scheduling. In practice, cooperative multi-tasking is widely used, notably in +user interface applications. + +To make a case for the defence a pre-emptive model has one advantage: if +someone writes + +```python +for x in range(1000000): + # do something time consuming +``` + +it won't lock out other threads. Under cooperative schedulers, the loop must +explicitly yield control every so many iterations e.g. by putting the code in +a task and periodically issuing `await asyncio.sleep(0)`. + +Alas this benefit of pre-emption pales into insignificance compared to the +drawbacks. Some of these are covered in the documentation on writing +[interrupt handlers](http://docs.micropython.org/en/latest/reference/isr_rules.html). +In a pre-emptive model every thread can interrupt every other thread, changing +data which might be used in other threads. 
It is generally much easier to find +and fix a lockup resulting from a task which fails to yield than locating the +sometimes deeply subtle and rarely occurring bugs which can occur in +pre-emptive code. + +To put this in simple terms, if you write a MicroPython task, you can be +sure that variables won't suddenly be changed by another task: your task has +complete control until it issues `await asyncio.sleep(0)`. + +Bear in mind that interrupt handlers are pre-emptive. This applies to both hard +and soft interrupts, either of which can occur at any point in your code. + +An eloquent discussion of the evils of threading may be found +[in threads are bad](https://glyph.twistedmatrix.com/2014/02/unyielding.html). + +###### [Contents](./TUTORIAL.md#contents) + +## 8.6 Communication + +In non-trivial applications, tasks need to communicate. Conventional Python +techniques can be employed. These include the use of global variables or +declaring tasks as object methods: these can then share instance variables. +Alternatively a mutable object may be passed as a task argument. + +Pre-emptive systems mandate specialist classes to achieve "thread safe" +communications; in a cooperative system these are seldom required. + +###### [Contents](./TUTORIAL.md#contents) + +# 9. Polling vs Interrupts + +The role of interrupts in cooperative systems has proved to be a source of +confusion in the forum. The merit of an interrupt service routine (ISR) is that +it runs very soon after the event causing it. On a Pyboard, Python code may be +running 15μs after a hardware change, enabling prompt servicing of hardware and +accurate timing of signals. + +The question arises whether it is possible to use interrupts to cause a task to +be scheduled at reduced latency. It is easy to show that, in a cooperative +scheduler, interrupts offer no latency benefit compared to polling the hardware +directly. 
+ +The reason for this is that a cooperative scheduler only schedules tasks when +another task has yielded control. Consider a system with a number of concurrent +tasks, where the longest any task blocks before yielding to the scheduler is +`N`ms. In such a system, even with an ideal scheduler, the worst-case latency +between a hardware event occurring and its handling task being scheduled is +`N`ms, assuming that the mechanism for detecting the event adds no latency of +its own. + +In practice, `N` is likely to be on the order of many ms. On fast hardware there +will be a negligible performance difference between polling the hardware and +polling a flag set by an ISR. On hardware such as ESP8266 and ESP32 the ISR +approach will probably be slower owing to the long and variable interrupt +latency of these platforms. + +Using an ISR to set a flag is probably best reserved for situations where an +ISR is already needed for other reasons. + +The above comments refer to an ideal scheduler. Currently `asyncio` is not in +this category, with worst-case latency being > `N`ms. The conclusions remain +valid. + +This, along with other issues, is discussed in +[Interfacing asyncio to interrupts](./INTERRUPTS.md). + +###### [Contents](./TUTORIAL.md#contents) + +# 10. Interfacing threaded code + +In the context of a `asyncio` application, the `_thread` module has two main +uses: + 1. Defining code to run on another core (currently restricted to RP2). + 2. Handling blocking functions. The technique assigns the blocking function to + another thread. The `asyncio` system continues to run, with a single task + paused pending the result of the blocking method. + +These techniques, and thread-safe classes to enable their use, are presented in +[this doc](./THREADING.md). + +###### [Contents](./TUTORIAL.md#contents) diff --git a/v3/docs/hd44780.md b/v3/docs/hd44780.md new file mode 100644 index 0000000..f0de432 --- /dev/null +++ b/v3/docs/hd44780.md @@ -0,0 +1,106 @@ +# 1. 
Driver for character-based LCD displays + +This driver is for displays based on the Hitachi HD44780 driver: these are +widely available, typically in 16 character x 2 rows format. This version is +for `asyncio` V3 which requires firmware V1.13 or above; at the time of +writing this has not been released and a daily build is required. + +###### [Main README](../README.md) + +# 2. Files + +The driver and test program are implemented as a Python package. To install +copy the directory `as_drivers/hd44780` and contents to the target's filesystem. + +Files: + * `alcd.py` Driver, includes connection details. + * `alcdtest.py` Test/demo script. + +To run the demo issue: +```python +import as_drivers.hd44780.alcdtest +``` + +# 3. Typical wiring + +The driver uses 4-bit mode to economise on pins and wiring. Pins are arbitrary +but this configuration was used in testing: + +| LCD |Board | +|:----:|:----:| +| Rs | Y1 | +| E | Y2 | +| D7 | Y3 | +| D6 | Y4 | +| D5 | Y5 | +| D4 | Y6 | + +# 4. LCD Class + +## 4.1 Constructor + +This takes the following positional args: + * `pinlist` A tuple of 6 strings, being the Pyboard pins used for signals + `Rs`, `E`, `D4`, `D5`, `D6`, `D7` e.g. `('Y1','Y2','Y6','Y5','Y4','Y3')`. + * `cols` The number of horizontal characters in the display (typically 16). + * `rows` Default 2. Number of rows in the display. + +The driver uses the `machine` library. For non-Pyboard targets with numeric pin +ID's `pinlist` should be a tuple of integers. + +## 4.2 Display updates + +The class has no public properties or methods. The display is represented as an +array of strings indexed by row. The row contents is replaced in its entirety, +replacing all previous contents regardless of length. 
This is illustrated by +the test program: + +```python +import asyncio +import utime as time +from as_drivers.hd44780 import LCD, PINLIST + +lcd = LCD(PINLIST, cols = 16) + +async def lcd_task(): + for secs in range(20, -1, -1): + lcd[0] = 'MicroPython {}'.format(secs) + lcd[1] = "{:11d}uS".format(time.ticks_us()) + await asyncio.sleep(1) + +asyncio.run(lcd_task()) +``` + +The row contents may be read back by issuing + +```python +row0 = lcd[0] +``` + +# 5. Display Formatting + +The driver represents an LCD display as an array indexed by row. Assigning a +string to a row causes that row to be updated. To write text to a specific +column of the display it is recommended to use the Python string `format` +method. + +For example this function formats a string such that it is left-padded with +spaces to a given column and right-padded to the specified width (typically the +width of the display). Right padding is not necessary but is included to +illustrate how right-justified formatting can be achieved: + +```python +def print_at(st, col, width=16): + return '{:>{col}s}{:{t}s}'.format(st,'', col=col+len(st), t = width-(col+len(st))) +``` + +``` +>>> print_at('cat', 2) +' cat ' +>>> len(_) +16 +>>> +``` + +This use of the `format` method may be extended to achieve more complex +tabulated data layouts. 
diff --git a/v3/docs/images/isolate.png b/v3/docs/images/isolate.png new file mode 100644 index 0000000..0c6f368 Binary files /dev/null and b/v3/docs/images/isolate.png differ diff --git a/v3/docs/images/keypad.png b/v3/docs/images/keypad.png new file mode 100644 index 0000000..2c8fdbd Binary files /dev/null and b/v3/docs/images/keypad.png differ diff --git a/v3/primitives/__init__.py b/v3/primitives/__init__.py new file mode 100644 index 0000000..ceaad77 --- /dev/null +++ b/v3/primitives/__init__.py @@ -0,0 +1,72 @@ +# __init__.py Common functions for uasyncio primitives + +# Copyright (c) 2018-2024 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +import asyncio + + +async def _g(): + pass + + +type_coro = type(_g()) + +# If a callback is passed, run it and return. +# If a coro is passed initiate it and return. +# coros are passed by name i.e. not using function call syntax. +def launch(func, tup_args): + res = func(*tup_args) + if isinstance(res, type_coro): + res = asyncio.create_task(res) + return res + + +def set_global_exception(): + def _handle_exception(loop, context): + import sys + + sys.print_exception(context["exception"]) + sys.exit() + + loop = asyncio.get_event_loop() + loop.set_exception_handler(_handle_exception) + + +_attrs = { + "AADC": "aadc", + "Barrier": "barrier", + "Condition": "condition", + "Delay_ms": "delay_ms", + "Encoder": "encoder", + "Pushbutton": "pushbutton", + "ESP32Touch": "pushbutton", + "Queue": "queue", + "Semaphore": "semaphore", + "BoundedSemaphore": "semaphore", + "Switch": "switch", + "WaitAll": "events", + "WaitAny": "events", + "ELO": "events", + "ESwitch": "events", + "EButton": "events", + "RingbufQueue": "ringbuf_queue", + "Keyboard": "sw_array", + "SwArray": "sw_array", + "Broker": "broker", + "broker": "broker", + "Agent": "broker", + "RegExp": "broker", +} + +# Copied from asyncio.__init__.py +# Lazy loader, effectively does: +# global attr +# from .mod import attr +def __getattr__(attr): + 
mod = _attrs.get(attr, None) + if mod is None: + raise AttributeError(attr) + value = getattr(__import__(mod, None, None, True, 1), attr) + globals()[attr] = value + return value diff --git a/v3/primitives/aadc.py b/v3/primitives/aadc.py new file mode 100644 index 0000000..eb88641 --- /dev/null +++ b/v3/primitives/aadc.py @@ -0,0 +1,68 @@ +# aadc.py AADC (asynchronous ADC) class + +# Copyright (c) 2020 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +import asyncio +import io + +MP_STREAM_POLL_RD = const(1) +MP_STREAM_POLL = const(3) +MP_STREAM_ERROR = const(-1) + + +class AADC(io.IOBase): + def __init__(self, adc): + self._adc = adc + self._lower = 0 + self._upper = 65535 + self._pol = True + self._last = None + self._sreader = asyncio.StreamReader(self) + + def __iter__(self): + b = yield from self._sreader.read(2) + return int.from_bytes(b, "little") + + def _adcread(self): + self._last = self._adc.read_u16() + return self._last + + def read(self, n): # For use by StreamReader only + return int.to_bytes(self._last, 2, "little") + + def ioctl(self, req, arg): + ret = MP_STREAM_ERROR + if req == MP_STREAM_POLL: + ret = 0 + if arg & MP_STREAM_POLL_RD: + if self._pol ^ (self._lower <= self._adcread() <= self._upper): + ret |= MP_STREAM_POLL_RD + return ret + + # *** API *** + + # If normal will pause until ADC value is in range + # Otherwise will pause until value is out of range + def sense(self, normal): + self._pol = normal + + def read_u16(self, last=False): + if last: + return self._last + return self._adcread() + + # Call syntax: set limits for trigger + # lower is None: leave limits unchanged. + # upper is None: treat lower as relative to current value. + # both have values: treat as absolute limits. 
+ def __call__(self, lower=None, upper=None): + if lower is not None: + if upper is None: # Relative limit + r = self._adcread() if self._last is None else self._last + self._lower = r - lower + self._upper = r + lower + else: # Absolute limits + self._lower = lower + self._upper = upper + return self diff --git a/v3/primitives/barrier.py b/v3/primitives/barrier.py new file mode 100644 index 0000000..2e4b201 --- /dev/null +++ b/v3/primitives/barrier.py @@ -0,0 +1,51 @@ +# barrier.py +# Copyright (c) 2018-2020 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +# Now uses Event rather than polling. + +import asyncio + +from . import launch + +# A Barrier synchronises N coros. Each issues await barrier. +# Execution pauses until all other participant coros are waiting on it. +# At that point the callback is executed. Then the barrier is 'opened' and +# execution of all participants resumes. + + +class Barrier: + def __init__(self, participants, func=None, args=()): + self._participants = participants + self._count = participants + self._func = func + self._args = args + self._res = None + self._evt = asyncio.Event() + + def __await__(self): + if self.trigger(): + return # Other tasks have already reached barrier + await self._evt.wait() # Wait until last task reaches it + + __iter__ = __await__ + + def result(self): + return self._res + + def trigger(self): + self._count -= 1 + if self._count < 0: + raise ValueError("Too many tasks accessing Barrier") + if self._count > 0: + return False # At least 1 other task has not reached barrier + # All other tasks are waiting + if self._func is not None: + self._res = launch(self._func, self._args) + self._count = self._participants + self._evt.set() # Release others + self._evt.clear() + return True + + def busy(self): + return self._count < self._participants diff --git a/v3/primitives/broker.py b/v3/primitives/broker.py new file mode 100644 index 0000000..73072bb --- /dev/null +++ 
class Agent:  # ABC for user agent
    """Subclass and implement .put(topic, message, *args) to receive messages."""

    pass


class RegExp:
    """Wraps a compiled regex so Broker can tell pattern topics from str topics."""

    def __init__(self, re_str):
        self.re = re.compile(re_str)

    def matching(self, topic):
        """True if topic matches the pattern (anchored at start, per re.match)."""
        return re.match(self.re, topic) is not None


def _validate(a):
    # An agent is an Event, Queue, RingbufQueue, Agent subclass or callable.
    # NB: the isinstance tests must short-circuit in this order.
    return (
        isinstance(a, asyncio.Event)
        or isinstance(a, Queue)
        or isinstance(a, RingbufQueue)
        or isinstance(a, Agent)
        or callable(a)
    )


class Broker(dict):
    """Message broker: maps topics (str or RegExp) to sets of (agent, args)."""

    Verbose = True  # Set False to silence diagnostics

    def subscribe(self, topic, agent, *args):
        """Register agent (with optional args) to be fired when topic is published."""
        if not _validate(agent):
            raise ValueError("Invalid agent:", agent)
        entry = (agent, args)
        subscribers = self.get(topic, False)
        if not subscribers:  # New topic
            self[topic] = {entry}
            return
        if entry in subscribers and Broker.Verbose:
            print(f"Duplicate agent {entry} in topic {topic}.")
        subscribers.add(entry)

    def unsubscribe(self, topic, agent, *args):
        """Remove agent from topic; drop the topic once no subscribers remain."""
        entry = (agent, args)
        if topic not in self:
            if Broker.Verbose:
                print(f"Unsubscribe topic {topic} fail: topic not subscribed.")
            return
        if entry in self[topic]:
            self[topic].remove(entry)
        elif Broker.Verbose:
            print(f"Unsubscribe agent {entry} from topic {topic} fail: agent not subscribed.")
        if len(self[topic]) == 0:
            del self[topic]

    def publish(self, topic, message=None):
        """Fire every agent subscribed to topic, including RegExp subscriptions."""
        matched = set()  # Agents which are triggered by this topic
        if isinstance(topic, str):  # Only str topics can match RegExp keys
            for key in [k for k in self.keys() if isinstance(k, RegExp)]:
                if key.matching(topic):
                    matched.update(self[key])  # Append matching agents
        matched.update(self.get(topic, []))  # Exact match
        for agent, args in matched:
            if isinstance(agent, asyncio.Event):
                agent.set()
            elif isinstance(agent, Agent):  # User class: must support .put
                agent.put(topic, message, *args)
            elif isinstance(agent, (Queue, RingbufQueue)):
                item = (topic, message, args)
                try:
                    agent.put_nowait(item if args else item[:2])
                except Exception:  # Queue discards current message. RingbufQueue discards oldest
                    Broker.Verbose and print(f"Message lost topic {topic} message {message}")
            else:
                # agent is function, method, coroutine or bound coroutine
                res = agent(topic, message, *args)
                if isinstance(res, type_coro):
                    asyncio.create_task(res)


broker = Broker()
class Delay_ms:
    """Retriggerable millisecond delay.

    .trigger() (hard-ISR safe) starts/restarts the delay. On expiry the
    timeout Event is set and an optional callback/coro is launched.
    NOTE(review): MicroPython-only — uses ThreadSafeFlag, sleep_ms and
    utime tick arithmetic.
    """

    class DummyTimer:  # Stand-in for the timer class. Can be cancelled.
        def cancel(self):
            pass

    _fake = DummyTimer()  # Shared no-op placeholder: ._ttask is always cancellable

    def __init__(self, func=None, args=(), duration=1000):
        self._func = func
        self._args = args
        self._durn = duration  # Default duration
        self._retn = None  # Return value of launched callable
        self._tend = None  # Stop time (absolute ms).
        self._busy = False
        self._trig = asyncio.ThreadSafeFlag()  # Set by .trigger(), possibly from hard ISR
        self._tout = asyncio.Event()  # Timeout event
        self.wait = self._tout.wait  # Allow: await wait_ms.wait()
        self.clear = self._tout.clear
        self.set = self._tout.set
        self._ttask = self._fake  # Timer task
        self._mtask = asyncio.create_task(self._run())  # Main task

    async def _run(self):
        # Main task: each trigger replaces the pending timer task with a
        # fresh one timed to the (possibly updated) absolute end time.
        while True:
            await self._trig.wait()  # Await a trigger
            self._ttask.cancel()  # Cancel and replace
            await asyncio.sleep_ms(0)  # Allow cancellation to be processed
            dt = max(ticks_diff(self._tend, ticks_ms()), 0)  # Beware already elapsed.
            self._ttask = asyncio.create_task(self._timer(dt))

    async def _timer(self, dt):
        await asyncio.sleep_ms(dt)
        self._tout.set()  # Only gets here if not cancelled.
        self._busy = False
        if self._func is not None:
            self._retn = launch(self._func, self._args)  # Callback or scheduled coro

    # API
    # trigger may be called from hard ISR.
    def trigger(self, duration=0):  # Update absolute end time, 0-> ctor default
        if self._mtask is None:
            raise RuntimeError("Delay_ms.deinit() has run.")
        self._tend = ticks_add(ticks_ms(), duration if duration > 0 else self._durn)
        self._retn = None  # Default in case cancelled.
        self._busy = True
        self._trig.set()

    def stop(self):
        """Cancel a running delay without firing the timeout."""
        self._ttask.cancel()
        self._ttask = self._fake
        self._busy = False
        self._tout.clear()

    def __call__(self):  # Current running status
        return self._busy

    running = __call__  # Alias: d.running() == d()

    def rvalue(self):
        """Return value of the last completed callback (None if cancelled/pending)."""
        return self._retn

    def callback(self, func=None, args=()):
        """Replace (or with no args, remove) the expiry callback."""
        self._func = func
        self._args = args

    def deinit(self):
        # Idempotent teardown; after this .trigger() raises RuntimeError.
        if self._mtask is not None:  # https://github.com/peterhinch/micropython-async/issues/98
            self.stop()
            self._mtask.cancel()
            self._mtask = None
class Encoder:
    """Asynchronous driver for an incremental quadrature encoder.

    Edges are counted in (preferably hard) pin IRQ handlers; a background
    task applies division, limits and modular wrap, runs the user callback
    and feeds the asynchronous iterator. MicroPython-only (machine.Pin,
    ThreadSafeFlag, sleep_ms).
    """

    def __init__(
        self,
        pin_x,
        pin_y,
        v=0,  # Initial value
        div=1,  # Divisor: hardware counts per reported unit (e.g. detent)
        vmin=None,  # Optional lower limit on reported value
        vmax=None,  # Optional upper limit
        mod=None,  # Optional modulus for wrapping (e.g. 360)
        callback=lambda a, b: None,  # cb(value, delta, *args)
        args=(),
        delay=100,
    ):
        self._pin_x = pin_x
        self._pin_y = pin_y
        self._x = pin_x()
        self._y = pin_y()
        self._v = v * div  # Initialise hardware value
        self._cv = v  # Current (divided) value
        self.delay = delay  # Pause (ms) for motion to stop/limit callback frequency
        self._trig = asyncio.Event()

        if ((vmin is not None) and v < vmin) or ((vmax is not None) and v > vmax):
            raise ValueError("Incompatible args: must have vmin <= v <= vmax")
        self._tsf = asyncio.ThreadSafeFlag()  # Set from ISR context
        trig = Pin.IRQ_RISING | Pin.IRQ_FALLING
        try:
            xirq = pin_x.irq(trigger=trig, handler=self._x_cb, hard=True)
            yirq = pin_y.irq(trigger=trig, handler=self._y_cb, hard=True)
        except TypeError:  # hard arg is unsupported on some hosts
            xirq = pin_x.irq(trigger=trig, handler=self._x_cb)
            yirq = pin_y.irq(trigger=trig, handler=self._y_cb)
        asyncio.create_task(self._run(vmin, vmax, div, mod, callback, args))

    # Hardware IRQ's. Duration 36μs on Pyboard 1 ~50μs on ESP32.
    # IRQ latency: 2nd edge may have occured by the time ISR runs, in
    # which case there is no movement.
    def _x_cb(self, pin_x):
        # X edge: direction from the quadrature relationship x ^ y.
        if (x := pin_x()) != self._x:
            self._x = x
            self._v += 1 if x ^ self._pin_y() else -1
            self._tsf.set()

    def _y_cb(self, pin_y):
        # Y edge: sign is inverted relative to the X handler.
        if (y := pin_y()) != self._y:
            self._y = y
            self._v -= 1 if y ^ self._pin_x() else -1
            self._tsf.set()

    async def _run(self, vmin, vmax, div, mod, cb, args):
        """Background task: convert raw counts to the user-facing value."""
        pv = self._v  # Prior hardware value
        pcv = self._cv  # Prior divided value passed to callback
        lcv = pcv  # Current value after limits applied
        plcv = pcv  # Previous value after limits applied
        delay = self.delay
        while True:
            self._tsf.clear()
            await self._tsf.wait()  # Wait for an edge. A stopped encoder waits here.
            await asyncio.sleep_ms(delay)  # Optional rate limit for callback/trig.
            hv = self._v  # Sample hardware (atomic read).
            if hv == pv:  # A change happened but was negated before
                continue  # this got scheduled. Nothing to do.
            pv = hv
            cv = round(hv / div)  # cv is divided value.
            if not (dv := cv - pcv):  # dv is change in divided value.
                continue  # No change
            lcv += dv  # lcv: divided value with limits/mod applied
            lcv = lcv if vmax is None else min(vmax, lcv)
            lcv = lcv if vmin is None else max(vmin, lcv)
            lcv = lcv if mod is None else lcv % mod
            self._cv = lcv  # update ._cv for .value() before CB.
            if lcv != plcv:
                cb(lcv, lcv - plcv, *args)  # Run user CB in uasyncio context
                self._trig.set()  # Enable async iterator
            pcv = cv
            plcv = lcv

    def __aiter__(self):
        return self

    async def __anext__(self):
        # Yields the new value after each change (rate limited by .delay).
        await self._trig.wait()
        self._trig.clear()
        return self._cv

    def value(self):
        """Current divided/limited value (synchronous)."""
        return self._cv
+class WaitAll: + def __init__(self, events): + self.events = events + + async def wait(self): + async def wt(event): + await event.wait() + + tasks = (asyncio.create_task(wt(event)) for event in self.events) + try: + await asyncio.gather(*tasks) + finally: # May be subject to timeout or cancellation + for task in tasks: + task.cancel() + + def clear(self): + for evt in (x for x in self.events if hasattr(x, "clear")): + evt.clear() + + +# Convert to an event-like object: either a running task or a coro with args. +# Motivated by a suggestion from @sandyscott iss #116 +class ELO_x: + def __init__(self, coro, *args, **kwargs): + self._coro = coro + self._args = args + self._kwargs = kwargs + self._task = None # Current running task (or exception) + + async def wait(self): + cr = self._coro + istask = isinstance(cr, asyncio.Task) # Instantiated with a Task + if istask and isinstance(self._task, asyncio.CancelledError): + return # Previously awaited and was cancelled/timed out + self._task = cr if istask else asyncio.create_task(cr(*self._args, **self._kwargs)) + try: + await self._task + except asyncio.CancelledError as e: + self._task = e # Let WaitAll or WaitAny complete + + # User can retrieve task/coro results by awaiting .task() (even if task had + # run to completion). If task was cancelled CancelledError is returned. + # If .task() is called before .wait() returns None or result of prior .wait() + # Caller issues isinstance(task, CancelledError) + def task(self): + return self._task + + +# Convert to an event-like object: either a running task or a coro with args. 
# Minimal switch class having an Event based interface
class ESwitch:
    """Debounced switch exposing .open and .close Events.

    pin should be initialised as an input with a pullup; lopen is the
    logic level in the "open" state (default 1: n/o switch to gnd).
    """

    debounce_ms = 50

    def __init__(self, pin, lopen=1):  # Default is n/o switch returned to gnd
        self._pin = pin  # Should be initialised for input with pullup
        self._lopen = lopen  # Logic level in "open" state
        self.open = asyncio.Event()
        self.close = asyncio.Event()
        self._state = self._pin() ^ self._lopen  # Get initial state
        # Bug fix: keep a reference to the poll task so deinit() can cancel
        # it. Previously the task was discarded and deinit() called
        # .cancel() on the _poll bound method, raising AttributeError.
        self._task = asyncio.create_task(self._poll(ESwitch.debounce_ms))

    async def _poll(self, dt):  # Poll the switch
        while True:
            if (s := self._pin() ^ self._lopen) != self._state:  # 15μs
                self._state = s
                self._cf() if s else self._of()
            await asyncio.sleep_ms(dt)  # Wait out bounce

    def _of(self):
        self.open.set()

    def _cf(self):
        self.close.set()

    # ***** API *****
    # Return current logical state of switch (1 == closed)
    def __call__(self):
        return self._state

    def deinit(self):
        """Cancel polling and clear both events."""
        self._task.cancel()
        self.open.clear()
        self.close.clear()
    async def _poll(self, dt):  # Poll the button
        """Debounce loop: sample logical state every dt ms and act on changes."""
        while True:
            if (s := self.rawstate()) != self._state:
                self._state = s
                self._pf() if s else self._rf()
            await asyncio.sleep_ms(dt)  # Wait out bounce

    def _pf(self):  # Button press
        if not self._supp:
            self.press.set()  # User event
        if self._dtim():  # Press occurred while _dtim is running
            self.double.set()  # User event
            self._dtim.stop()  # _dtim's Event is only used if suppress
        else:  # Single press or 1st of a double pair.
            self._dtim.trigger()
            self._ltim.trigger()  # Trigger long timer on 1st press of a double pair

    def _rf(self):  # Button release
        self._ltim.stop()
        if not self._supp or not self._dtim():  # If dtim running postpone release otherwise it
            self.release.set()  # is set before press

    async def _ltf(self):  # Long timeout
        # Runs forever: fires .long each time the long-press timer expires.
        while True:
            await self._ltim.wait()
            self._ltim.clear()  # Clear the event
            self.long.set()  # User event

    # Runs if suppress set. Delay response to single press until sure it is a single short pulse.
    async def _dtf(self):
        while True:
            await self._dtim.wait()  # Double click has timed out
            self._dtim.clear()  # Clear the event
            if not self._ltim():  # Button was released: not a long press.
                self.press.set()  # User events
                self.release.set()

    # ****** API ******
    # Current non-debounced logical button state: True == pressed
    def rawstate(self):
        return bool(self._pin() ^ self._sense)

    # Current debounced state of button (True == pressed)
    def __call__(self):
        return self._state

    def deinit(self):
        """Cancel all helper tasks and clear all user events."""
        for task in self._tasks:
            task.cancel()
        for evt in (self.press, self.double, self.long, self.release):
            evt.clear()
class Pushbutton:
    """Debounced pushbutton with callback/coro launch on press, release,
    double-click and long-press events.

    Logical state is True when pressed regardless of wiring; sense is the
    electrical level when the button is not pressed (default: sampled at
    construction).
    """

    debounce_ms = 50  # Class attributes: may be adjusted by user
    long_press_ms = 1000
    double_click_ms = 400

    def __init__(self, pin, suppress=False, sense=None):
        self._pin = pin  # Initialise for input
        self._supp = suppress  # Suppress press/release around double/long events
        self._dblpend = False  # Doubleclick waiting for 2nd click
        self._dblran = False  # Doubleclick executed user function
        self._tf = False  # Press ("touch") function
        self._ff = False  # Release ("free") function
        self._df = False  # Double-click function
        self._ld = False  # Delay_ms instance for long press
        self._dd = False  # Ditto for doubleclick
        # Convert from electrical to logical value
        self._sense = pin.value() if sense is None else sense
        self._state = self.rawstate()  # Initial state
        self._run = asyncio.create_task(self._go())  # Thread runs forever

    async def _go(self):
        # Poll loop: all event logic is driven from _check().
        while True:
            self._check(self.rawstate())
            # Ignore state changes until switch has settled. Also avoid hogging CPU.
            # See https://github.com/peterhinch/micropython-async/issues/69
            await asyncio.sleep_ms(Pushbutton.debounce_ms)

    def _check(self, state):
        """Act on a debounced logical state change (core state machine)."""
        if state == self._state:
            return
        # State has changed: act on it now.
        self._state = state
        if state:  # Button pressed: launch pressed func
            if self._tf:
                launch(self._tf, self._ta)
            # If there's a long func: start long press delay if no double click running
            # (case where a short click is rapidly followed by a long one, iss 101).
            if self._ld and not (self._df and self._dd()):
                self._ld.trigger(Pushbutton.long_press_ms)
            if self._df:
                if self._dd():  # Second click: timer running
                    self._dd.stop()
                    self._dblpend = False
                    self._dblran = True  # Prevent suppressed launch on release
                    launch(self._df, self._da)
                else:
                    # First click: start doubleclick timer
                    self._dd.trigger(Pushbutton.double_click_ms)
                    self._dblpend = True  # Prevent suppressed launch on release
        else:  # Button release. Is there a release func?
            if self._ff:
                if self._supp:
                    d = self._ld
                    # If long delay exists, is running and doubleclick status is OK
                    if not self._dblpend and not self._dblran:
                        if (d and d()) or not d:
                            launch(self._ff, self._fa)
                else:
                    launch(self._ff, self._fa)
            if self._ld:
                self._ld.stop()  # Avoid interpreting a second click as a long push
            self._dblran = False

    def _ddto(self):  # Doubleclick timeout: no doubleclick occurred
        self._dblpend = False
        if self._ff and self._supp and not self._state:
            if not self._ld or (self._ld and not self._ld()):
                launch(self._ff, self._fa)

    # ****** API ******
    def press_func(self, func=False, args=()):
        """Set function to run on press; func=None -> expose a .press Event."""
        if func is None:
            self.press = asyncio.Event()
        self._tf = self.press.set if func is None else func
        self._ta = args

    def release_func(self, func=False, args=()):
        """Set function to run on release; func=None -> expose a .release Event."""
        if func is None:
            self.release = asyncio.Event()
        self._ff = self.release.set if func is None else func
        self._fa = args

    def double_func(self, func=False, args=()):
        """Set double-click function; func=None -> .double Event; func=False -> disable."""
        if func is None:
            self.double = asyncio.Event()
            func = self.double.set
        self._df = func
        self._da = args
        if func:  # If double timer already in place, leave it
            if not self._dd:
                self._dd = Delay_ms(self._ddto)
        else:
            self._dd = False  # Clearing down double func

    def long_func(self, func=False, args=()):
        """Set long-press function; func=None -> .long Event; func=False -> disable."""
        if func is None:
            self.long = asyncio.Event()
            func = self.long.set
        if func:
            if self._ld:
                self._ld.callback(func, args)
            else:
                self._ld = Delay_ms(func, args)
        else:
            self._ld = False

    # Current non-debounced logical button state: True == pressed
    def rawstate(self):
        return bool(self._pin() ^ self._sense)

    # Current debounced state of button (True == pressed)
    def __call__(self):
        return self._state

    def deinit(self):
        self._run.cancel()


class ESP32Touch(Pushbutton):
    """Pushbutton variant driven by an ESP32 capacitive TouchPad."""

    thresh = (80 << 8) // 100  # Class default: 80% of the untouched reading

    @classmethod
    def threshold(cls, val):
        """Set detection threshold as a percentage (1-99) of untouched value."""
        if not (isinstance(val, int) and 0 < val < 100):
            raise ValueError("Threshold must be in range 1-99")
        cls.thresh = (val << 8) // 100

    def __init__(self, pin, suppress=False):
        self._thresh = 0  # Detection threshold
        self._rawval = 0  # Highest (untouched) reading seen so far
        try:
            self._pad = TouchPad(pin)
        except ValueError:
            raise ValueError(pin)  # Let's have a bit of information :)
        super().__init__(pin, suppress, False)

    # Current logical button state: True == touched
    def rawstate(self):
        rv = self._pad.read()  # ~220μs
        if rv > self._rawval:  # Either initialisation or pad was touched
            self._rawval = rv  # when initialised and has now been released
            self._thresh = (rv * ESP32Touch.thresh) >> 8
            return False  # Untouched
        return rv < self._thresh
+class QueueFull(Exception): + pass + + +class Queue: + def __init__(self, maxsize=0): + self.maxsize = maxsize + self._queue = [] + self._evput = asyncio.Event() # Triggered by put, tested by get + self._evget = asyncio.Event() # Triggered by get, tested by put + + self._jncnt = 0 + self._jnevt = asyncio.Event() + self._upd_jnevt(0) # update join event + + def _get(self): + self._evget.set() # Schedule all tasks waiting on get + self._evget.clear() + return self._queue.pop(0) + + async def get(self): # Usage: item = await queue.get() + while self.empty(): # May be multiple tasks waiting on get() + # Queue is empty, suspend task until a put occurs + # 1st of N tasks gets, the rest loop again + await self._evput.wait() + return self._get() + + def get_nowait(self): # Remove and return an item from the queue. + # Return an item if one is immediately available, else raise QueueEmpty. + if self.empty(): + raise QueueEmpty() + return self._get() + + def _put(self, val): + self._upd_jnevt(1) # update join event + self._evput.set() # Schedule tasks waiting on put + self._evput.clear() + self._queue.append(val) + + async def put(self, val): # Usage: await queue.put(item) + while self.full(): + # Queue full + await self._evget.wait() + # Task(s) waiting to get from queue, schedule first Task + self._put(val) + + def put_nowait(self, val): # Put an item into the queue without blocking. + if self.full(): + raise QueueFull() + self._put(val) + + def qsize(self): # Number of items in the queue. + return len(self._queue) + + def empty(self): # Return True if the queue is empty, False otherwise. + return len(self._queue) == 0 + + def full(self): # Return True if there are maxsize items in the queue. + # Note: if the Queue was initialized with maxsize=0 (the default) or + # any negative number, then full() is never True. 
+ return self.maxsize > 0 and self.qsize() >= self.maxsize + + def _upd_jnevt(self, inc: int): # #Update join count and join event + self._jncnt += inc + if self._jncnt <= 0: + self._jnevt.set() + else: + self._jnevt.clear() + + def task_done(self): # Task Done decrements counter + self._upd_jnevt(-1) + + async def join(self): # Wait for join event + await self._jnevt.wait() diff --git a/v3/primitives/ringbuf_queue.py b/v3/primitives/ringbuf_queue.py new file mode 100644 index 0000000..eaf7ad3 --- /dev/null +++ b/v3/primitives/ringbuf_queue.py @@ -0,0 +1,78 @@ +# ringbuf_queue.py Provides RingbufQueue class + +# Copyright (c) 2022-2023 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +# API differs from CPython +# Uses pre-allocated ring buffer: can use list or array +# Asynchronous iterator allowing consumer to use async for +# put_nowait QueueFull exception can be ignored allowing oldest data to be discarded - +# this is not thread safe. Nor is the class as a whole TS because of its use of +# Event objects. + +import asyncio + + +class RingbufQueue: # MicroPython optimised + def __init__(self, buf): + self._q = [0 for _ in range(buf)] if isinstance(buf, int) else buf + self._size = len(self._q) + self._wi = 0 + self._ri = 0 + self._evput = asyncio.Event() # Triggered by put, tested by get + self._evget = asyncio.Event() # Triggered by get, tested by put + + def full(self): + return ((self._wi + 1) % self._size) == self._ri + + def empty(self): + return self._ri == self._wi + + def qsize(self): + return (self._wi - self._ri) % self._size + + def get_nowait(self): # Remove and return an item from the queue. + # Return an item if one is immediately available, else raise QueueEmpty. 
+ if self.empty(): + raise IndexError + r = self._q[self._ri] + self._ri = (self._ri + 1) % self._size + self._evget.set() # Schedule all tasks waiting on ._evget + self._evget.clear() + return r + + def peek(self): # Return oldest item from the queue without removing it. + # Return an item if one is immediately available, else raise QueueEmpty. + if self.empty(): + raise IndexError + return self._q[self._ri] + + def put_nowait(self, v): + self._q[self._wi] = v + self._evput.set() # Schedule any tasks waiting on get + self._evput.clear() + self._wi = (self._wi + 1) % self._size + if self._wi == self._ri: # Would indicate empty + self._ri = (self._ri + 1) % self._size # Discard a message + raise IndexError # Caller can ignore if overwrites are OK + + async def put(self, val): # Usage: await queue.put(item) + while self.full(): # Queue full + await self._evget.wait() # May be >1 task waiting on ._evget + # Task(s) waiting to get from queue, schedule first Task + self.put_nowait(val) + + def __aiter__(self): + return self + + async def __anext__(self): + return await self.get() + + async def get(self): + while self.empty(): # Empty. May be more than one task waiting on ._evput + await self._evput.wait() + r = self._q[self._ri] + self._ri = (self._ri + 1) % self._size + self._evget.set() # Schedule all tasks waiting on ._evget + self._evget.clear() + return r diff --git a/v3/primitives/semaphore.py b/v3/primitives/semaphore.py new file mode 100644 index 0000000..86b4395 --- /dev/null +++ b/v3/primitives/semaphore.py @@ -0,0 +1,46 @@ +# semaphore.py + +# Copyright (c) 2018-2020 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +import asyncio + +# A Semaphore is typically used to limit the number of coros running a +# particular piece of code at once. The number is defined in the constructor. 
+class Semaphore: + def __init__(self, value=1): + self._count = value + self._event = asyncio.Event() + + async def __aenter__(self): + await self.acquire() + return self + + async def __aexit__(self, *args): + self.release() + await asyncio.sleep(0) + + async def acquire(self): + self._event.clear() + while self._count == 0: # Multiple tasks may be waiting for + await self._event.wait() # a release + self._event.clear() + # When we yield, another task may succeed. In this case + await asyncio.sleep(0) # the loop repeats + self._count -= 1 + + def release(self): + self._event.set() + self._count += 1 + + +class BoundedSemaphore(Semaphore): + def __init__(self, value=1): + super().__init__(value) + self._initial_value = value + + def release(self): + if self._count < self._initial_value: + super().release() + else: + raise ValueError("Semaphore released more than acquired") diff --git a/v3/primitives/sw_array.py b/v3/primitives/sw_array.py new file mode 100644 index 0000000..edad126 --- /dev/null +++ b/v3/primitives/sw_array.py @@ -0,0 +1,158 @@ +# sw_array.py A crosspoint array of pushbuttons + +# Copyright (c) 2023 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +import asyncio +from . import RingbufQueue +from time import ticks_ms, ticks_diff + +# A crosspoint array of pushbuttons +# Tuples/lists of pins. 
class Keyboard(RingbufQueue):
    """Crosspoint pushbutton array: queues scan codes of newly pressed keys.

    rowpins are outputs (driven low one at a time), colpins are inputs.
    Scan codes are queued via the RingbufQueue base; the current debounced
    state of any key is available by indexing.
    """

    def __init__(self, rowpins, colpins, *, bufsize=10, db_delay=50):
        # bufsize may be an int (bytes allocated) or a pre-built buffer.
        super().__init__(bytearray(bufsize) if isinstance(bufsize, int) else bufsize)
        self.rowpins = rowpins
        self.colpins = colpins
        self._state = 0  # State of all keys as bitmap
        for opin in self.rowpins:  # Initialise output pins
            opin(1)  # Quiescent state: rows high
        self._run = asyncio.create_task(self.scan(len(rowpins) * len(colpins), db_delay))

    def __getitem__(self, scan_code):
        # Debounced state of one key: True == pressed.
        return bool(self._state & (1 << scan_code))

    async def scan(self, nkeys, db_delay):
        """Background task: scan the matrix, queue new presses, debounce."""
        while True:
            cur = 0  # Current bitmap of logical key states
            for opin in self.rowpins:
                opin(0)  # Assert output
                for ipin in self.colpins:
                    cur <<= 1
                    cur |= ipin() ^ 1  # Convert physical to logical
                opin(1)
            if pressed := (cur & ~self._state):  # 1's are newly pressed button(s)
                for sc in range(nkeys):
                    if pressed & 1:
                        try:
                            self.put_nowait(sc)
                        except IndexError:  # q full. Overwrite oldest
                            pass
                    pressed >>= 1
            changed = cur ^ self._state  # Any new press or release
            self._state = cur
            await asyncio.sleep_ms(db_delay if changed else 0)  # Wait out bounce

    def deinit(self):
        """Cancel the scan task."""
        self._run.cancel()


CLOSE = const(1)  # cfg comprises the OR of these constants
OPEN = const(2)
LONG = const(4)
DOUBLE = const(8)
SUPPRESS = const(16)  # Disambiguate: see docs.
+class SwArray(RingbufQueue):
+    """Crosspoint array of switches/buttons with optional timed events.
+
+    Queue entries are (scan_code, event) tuples, where event is an OR of
+    CLOSE/OPEN/LONG/DOUBLE. Only events enabled in the cfg constructor arg
+    are reported. Subscript access returns debounced state of one button.
+    """
+
+    debounce_ms = 50  # Attributes can be varied by user
+    long_press_ms = 1000
+    double_click_ms = 400
+
+    def __init__(self, rowpins, colpins, cfg, *, bufsize=10):
+        super().__init__(bufsize)
+        self._rowpins = rowpins
+        self._colpins = colpins
+        self._cfg = cfg
+        self._state = 0  # State of all buttons as bitmap
+        self._flags = 0  # Busy bitmap
+        self._basic = not bool(cfg & (SUPPRESS | LONG | DOUBLE))  # Basic mode
+        self._suppress = bool(cfg & SUPPRESS)
+        for opin in self._rowpins:  # Initialise output pins
+            opin(1)  # open circuit
+        self._run = asyncio.create_task(self._scan(len(rowpins) * len(colpins)))
+
+    def __getitem__(self, scan_code):
+        # Debounced logical state of one button: True == closed
+        return bool(self._state & (1 << scan_code))
+
+    def _put(self, sc, evt):
+        # Queue an event, but only if enabled in cfg
+        if evt & self._cfg:  # Only if user has requested it
+            try:
+                self.put_nowait((sc, evt))
+            except IndexError:  # q full. Overwrite oldest
+                pass
+
+    def _timeout(self, ts, condition):
+        # True if the LONG or DOUBLE interval has elapsed since ts
+        t = SwArray.long_press_ms if condition == LONG else SwArray.double_click_ms
+        return ticks_diff(ticks_ms(), ts) > t
+
+    def _busy(self, sc, v):
+        # Set/clear busy flag for a scan code; return its prior value
+        of = self._flags  # Return prior state
+        if v:
+            self._flags |= 1 << sc
+        else:
+            self._flags &= ~(1 << sc)
+        return (of >> sc) & 1
+
+    async def _finish(self, sc):  # Tidy up. If necessary await a contact open
+        while self[sc]:
+            await asyncio.sleep_ms(0)
+        self._put(sc, OPEN)
+        self._busy(sc, False)
+
+    def keymap(self):  # Return a bitmap of debounced state of all buttons/switches
+        return self._state
+
+    # Handle long, double. Switch has closed.
+    async def _defer(self, sc):
+        """Disambiguate CLOSE/LONG/DOUBLE for one closed switch.
+
+        Runs as a task per busy scan code. In suppress mode the initial
+        CLOSE is withheld until the press type is known.
+        """
+        # Wait for contact closure to be registered: let calling loop complete
+        await asyncio.sleep_ms(0)
+        ts = ticks_ms()
+        if not self._suppress:
+            self._put(sc, CLOSE)
+        while self[sc]:  # Pressed
+            await asyncio.sleep_ms(0)
+            if self._timeout(ts, LONG):
+                self._put(sc, LONG)
+                await self._finish(sc)
+                return
+        if not self._suppress:
+            self._put(sc, OPEN)
+        while not self[sc]:
+            await asyncio.sleep_ms(0)
+            if self._timeout(ts, DOUBLE):  # No second closure
+                self._put(sc, CLOSE)  # Single press. Report CLOSE
+                await self._finish(sc)  # then OPEN
+                return
+        self._put(sc, DOUBLE)
+        await self._finish(sc)
+
+    async def _scan(self, nkeys):
+        """Continuously scan the matrix, dispatching events for state changes."""
+        db_delay = SwArray.debounce_ms
+        while True:
+            cur = 0  # Current bitmap of logical button states (1 == pressed)
+            for opin in self._rowpins:
+                opin(0)  # Assert output
+                for ipin in self._colpins:
+                    cur <<= 1
+                    cur |= ipin() ^ 1  # Convert physical to logical
+                opin(1)
+            curb = cur  # Copy current bitmap
+            if changed := (cur ^ self._state):  # 1's are newly changed button(s)
+                for sc in range(nkeys):
+                    if changed & 1:  # Current button has changed state
+                        if self._basic:  # No timed behaviour
+                            self._put(sc, CLOSE if cur & 1 else OPEN)
+                        elif cur & 1:  # Closed
+                            if not self._busy(sc, True):  # Currently not busy
+                                asyncio.create_task(self._defer(sc))  # Q is handled asynchronously
+                    changed >>= 1
+                    cur >>= 1
+            changed = curb ^ self._state  # Any new press or release
+            self._state = curb
+            await asyncio.sleep_ms(db_delay if changed else 0)  # Wait out bounce
+
+    def deinit(self):
+        # Stop the background scan task
+        self._run.cancel()
diff --git a/v3/primitives/switch.py b/v3/primitives/switch.py
new file mode 100644
index 0000000..fe80231
--- /dev/null
+++ b/v3/primitives/switch.py
@@ -0,0 +1,51 @@
+# switch.py
+
+# Copyright (c) 2018-2022 Peter Hinch
+# Released under the MIT License (MIT) - see LICENSE file
+
+import asyncio
+import utime as time
+from .
import launch
+
+
+class Switch:
+    """Debounced interface to a normally-open switch wired pin-to-gnd.
+
+    Runs user callbacks or coroutines (via launch) on contact closure
+    and/or opening. Pin should be initialised as input with pullup.
+    Calling the instance returns the debounced physical state (0 == closed).
+    """
+
+    debounce_ms = 50  # Class attribute: debounce period, may be varied by user
+
+    def __init__(self, pin):
+        self.pin = pin  # Should be initialised for input with pullup
+        self._open_func = False
+        self._close_func = False
+        self.switchstate = self.pin.value()  # Get initial state
+        self._run = asyncio.create_task(self.switchcheck())  # Thread runs forever
+
+    def open_func(self, func, args=()):
+        # Register function/coro to run on contact open; func None binds an Event
+        if func is None:
+            self.open = asyncio.Event()
+        self._open_func = self.open.set if func is None else func
+        self._open_args = args
+
+    def close_func(self, func, args=()):
+        # Register function/coro to run on contact closure; func None binds an Event
+        if func is None:
+            self.close = asyncio.Event()
+        self._close_func = self.close.set if func is None else func
+        self._close_args = args
+
+    # Return current state of switch (0 = pressed)
+    def __call__(self):
+        return self.switchstate
+
+    async def switchcheck(self):
+        """Poll the pin forever, launching callbacks on debounced state changes."""
+        while True:
+            state = self.pin.value()
+            if state != self.switchstate:
+                # State has changed: act on it now.
+                self.switchstate = state
+                if state == 0 and self._close_func:
+                    launch(self._close_func, self._close_args)
+                elif state == 1 and self._open_func:
+                    launch(self._open_func, self._open_args)
+            # Ignore further state changes until switch has settled
+            await asyncio.sleep_ms(Switch.debounce_ms)
+
+    def deinit(self):
+        # Stop the background polling task
+        self._run.cancel()
diff --git a/v3/primitives/tests/__init__.py b/v3/primitives/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/v3/primitives/tests/adctest.py b/v3/primitives/tests/adctest.py
new file mode 100644
index 0000000..fcb21ce
--- /dev/null
+++ b/v3/primitives/tests/adctest.py
@@ -0,0 +1,54 @@
+# adctest.py
+
+# Copyright (c) 2020 Peter Hinch
+# Released under the MIT License (MIT) - see LICENSE file
+
+import asyncio
+from machine import ADC
+import pyb
+from primitives import AADC
+
+
+async def signal():  # Could use write_timed but this prints values
+    dac = pyb.DAC(1, bits=12, buffering=True)
+    v = 0
+    while True:
+        if not v & 0xF:
+            print("write", v << 4)  # Make value u16 as per ADC read
+        dac.write(v)
+ v += 1 + v %= 4096 + await asyncio.sleep_ms(50) + + +async def adctest(): + asyncio.create_task(signal()) + adc = AADC(ADC(pyb.Pin.board.X1)) + await asyncio.sleep(0) + adc.sense(normal=False) # Wait until ADC gets to 5000 + value = await adc(5000, 10000) + print("Received", value, adc.read_u16(True)) # Reduce to 12 bits + adc.sense(normal=True) # Now print all changes > 2000 + while True: + value = await adc(2000) # Trigger if value changes by 2000 + print("Received", value, adc.read_u16(True)) + + +st = """This test requires a Pyboard with pins X1 and X5 linked. +A sawtooth waveform is applied to the ADC. Initially the test waits +until the ADC value reaches 5000. It then reports whenever the value +changes by 2000. +Issue test() to start. +""" +print(st) + + +def test(): + try: + asyncio.run(adctest()) + except KeyboardInterrupt: + print("Interrupted") + finally: + asyncio.new_event_loop() + print() + print(st) diff --git a/v3/primitives/tests/asyntest.py b/v3/primitives/tests/asyntest.py new file mode 100644 index 0000000..f1dbd72 --- /dev/null +++ b/v3/primitives/tests/asyntest.py @@ -0,0 +1,812 @@ +# asyntest.py Test/demo of the 'micro' Event, Barrier and Semaphore classes +# Test/demo of official asyncio library and official Lock class + +# Copyright (c) 2017-2022 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +# CPython 3.8 compatibility +# (ignore RuntimeWarning: coroutine '_g' was never awaited) +# To run: +# from primitives.tests.asyntest import test + +try: + import asyncio +except ImportError: + import asyncio +import sys + +unix = "linux" in sys.implementation._machine + +from primitives import Barrier, Semaphore, BoundedSemaphore, Condition, Queue, RingbufQueue + +try: + from threadsafe import Message +except: + pass + + +def print_tests(): + st = """Available functions: +test(0) Print this list. +test(1) Test message acknowledge. +test(2) Test Message and Lock objects. +test(3) Test the Barrier class with callback. 
+test(4) Test the Barrier class with coroutine. +test(5) Test Semaphore +test(6) Test BoundedSemaphore. +test(7) Test the Condition class. +test(8) Test the Queue class. +test(9) Test the RingbufQueue class. +test(10) Test the Queue task_done/join behavior. +""" + print("\x1b[32m") + print(st) + print("\x1b[39m") + + +print_tests() + + +def printexp(exp, runtime=0): + print("Expected output:") + print("\x1b[32m") + print(exp) + print("\x1b[39m") + if runtime: + print("Running (runtime = {}s):".format(runtime)) + else: + print("Running (runtime < 1s):") + + +# ************ Test Message class ************ +# Demo use of acknowledge message + + +async def message_wait(message, ack_message, n): + try: + await message + print(f"message_wait {n} got message: {message.value()}") + if ack_message is not None: + ack_message.set() + except asyncio.CancelledError: + print(f"message_wait {n} cancelled") + + +async def run_ack(n): + message = Message() + ack1 = Message() + ack2 = Message() + for count in range(n): + t0 = asyncio.create_task(message_wait(message, ack1, 1)) + t1 = asyncio.create_task(message_wait(message, ack2, 2)) + message.set(count) + print("message was set") + await ack1 + ack1.clear() + print("Cleared ack1") + await ack2 + ack2.clear() + print("Cleared ack2") + message.clear() + print("Cleared message") + await asyncio.sleep(1) + t0.cancel() + t1.cancel() + + +async def msg_send(msg, items): + for item in items: + await asyncio.sleep_ms(400) + msg.set(item) + + +async def msg_recv(msg): # Receive using asynchronous iterator + async for data in msg: + print("Got", data) + msg.clear() + + +async def ack_coro(): + print("Test multiple tasks waiting on a message.") + await run_ack(3) + print() + print("Test asynchronous iterator.") + msg = Message() + asyncio.create_task(msg_send(msg, (1, 2, 3))) + try: + await asyncio.wait_for(msg_recv(msg), 3) + except asyncio.TimeoutError: + pass + await asyncio.sleep(1) + print() + print("Test cancellation of first waiting 
task.") + t1 = asyncio.create_task(message_wait(msg, None, 1)) + t2 = asyncio.create_task(message_wait(msg, None, 2)) + await asyncio.sleep(1) + t1.cancel() + await asyncio.sleep(1) + print("Setting message") + msg.set("Test message") + await asyncio.sleep(1) # Tasks have ended or been cancelled + msg.clear() + print() + print("Test cancellation of second waiting task.") + t1 = asyncio.create_task(message_wait(msg, None, 1)) + t2 = asyncio.create_task(message_wait(msg, None, 2)) + await asyncio.sleep(1) + t2.cancel() + await asyncio.sleep(1) + print("Setting message") + msg.set("Test message") + await asyncio.sleep(1) + msg.clear() + + print("I've seen attack ships burn on the shoulder of Orion...") + print("Time to die...") + + +def ack_test(): + if unix: + print("Message class is incompatible with Unix build.") + return + printexp( + """Running (runtime = 12s): +Test multiple tasks waiting on a message. +message was set +message_wait 1 got message: 0 +message_wait 2 got message: 0 +Cleared ack1 +Cleared ack2 +Cleared message +message was set +message_wait 1 got message: 1 +message_wait 2 got message: 1 +Cleared ack1 +Cleared ack2 +Cleared message +message was set +message_wait 1 got message: 2 +message_wait 2 got message: 2 +Cleared ack1 +Cleared ack2 +Cleared message + +Test asynchronous iterator. +Got 1 +Got 2 +Got 3 + +Test cancellation of first waiting task. +message_wait 1 cancelled +Setting message +message_wait 2 got message: Test message + +Test cancellation of second waiting task. +message_wait 2 cancelled +Setting message +message_wait 1 got message: Test message +I've seen attack ships burn on the shoulder of Orion... +Time to die... 
+""", + 12, + ) + asyncio.run(ack_coro()) + + +# ************ Test Lock and Message classes ************ + + +async def run_lock(n, lock): + print("run_lock {} waiting for lock".format(n)) + await lock.acquire() + print("run_lock {} acquired lock".format(n)) + await asyncio.sleep(1) # Delay to demo other coros waiting for lock + lock.release() + print("run_lock {} released lock".format(n)) + + +async def messageset(message): + print("Waiting 5 secs before setting message") + await asyncio.sleep(5) + message.set() + print("message was set") + + +async def messagewait(message): + print("waiting for message") + await message + print("got message") + message.clear() + + +async def run_message_test(): + print("Test Lock class") + lock = asyncio.Lock() + asyncio.create_task(run_lock(1, lock)) + asyncio.create_task(run_lock(2, lock)) + asyncio.create_task(run_lock(3, lock)) + print("Test Message class") + message = Message() + asyncio.create_task(messageset(message)) + await messagewait(message) # run_message_test runs fast until this point + print("Message status {}".format("Incorrect" if message.is_set() else "OK")) + print("Tasks complete") + + +def msg_test(): + if unix: + print("Message class is incompatible with Unix build.") + return + printexp( + """Test Lock class +Test Message class +waiting for message +run_lock 1 waiting for lock +run_lock 1 acquired lock +run_lock 2 waiting for lock +run_lock 3 waiting for lock +Waiting 5 secs before setting message +run_lock 1 released lock +run_lock 2 acquired lock +run_lock 2 released lock +run_lock 3 acquired lock +run_lock 3 released lock +message was set +got message +Message status OK +Tasks complete +""", + 5, + ) + asyncio.run(run_message_test()) + + +# ************ Barrier test ************ + + +async def killer(duration): + await asyncio.sleep(duration) + + +def callback(text): + print(text) + + +async def report(barrier): + for i in range(5): + print("{} ".format(i), end="") + await barrier + + +async def 
do_barrier_test(): + barrier = Barrier(3, callback, ("Synch",)) + for _ in range(2): + for _ in range(3): + asyncio.create_task(report(barrier)) + await asyncio.sleep(1) + print() + await asyncio.sleep(1) + + +def barrier_test(): + printexp( + """Running (runtime = 3s): +0 0 0 Synch +1 1 1 Synch +2 2 2 Synch +3 3 3 Synch +4 4 4 Synch + +1 1 1 Synch +2 2 2 Synch +3 3 3 Synch +4 4 4 Synch +""", + 3, + ) + asyncio.run(do_barrier_test()) + + +# ************ Barrier test 1 ************ + + +async def my_coro(text): + try: + await asyncio.sleep_ms(0) + while True: + await asyncio.sleep(1) + print(text) + except asyncio.CancelledError: + print("my_coro was cancelled.") + + +async def report1(barrier, x): + await asyncio.sleep(x) + print("report instance", x, "waiting") + await barrier + print("report instance", x, "done") + + +async def bart(): + barrier = Barrier(4, my_coro, ("my_coro running",)) + for x in range(3): + asyncio.create_task(report1(barrier, x)) + await asyncio.sleep(4) + assert barrier.busy() + await barrier + await asyncio.sleep(0) + assert not barrier.busy() + # Must yield before reading result(). Here we wait long enough for + await asyncio.sleep_ms(1500) # coro to print + barrier.result().cancel() + await asyncio.sleep(2) + + +def barrier_test1(): + printexp( + """Running (runtime = 5s): +report instance 0 waiting +report instance 1 waiting +report instance 2 waiting +report instance 2 done +report instance 1 done +report instance 0 done +my_coro running +my_coro was cancelled. + +Exact report instance done sequence may vary, but 3 instances should report +done before my_coro runs. 
+""", + 5, + ) + asyncio.run(bart()) + + +# ************ Semaphore test ************ + + +async def run_sema(n, sema, barrier): + print("run_sema {} trying to access semaphore".format(n)) + async with sema: + print("run_sema {} acquired semaphore".format(n)) + # Delay demonstrates other coros waiting for semaphore + await asyncio.sleep(1 + n / 10) # n/10 ensures deterministic printout + print("run_sema {} has released semaphore".format(n)) + barrier.trigger() + + +async def run_sema_test(bounded): + num_coros = 5 + barrier = Barrier(num_coros + 1) + if bounded: + semaphore = BoundedSemaphore(3) + else: + semaphore = Semaphore(3) + for n in range(num_coros): + asyncio.create_task(run_sema(n, semaphore, barrier)) + await barrier # Quit when all coros complete + try: + semaphore.release() + except ValueError: + print("Bounded semaphore exception test OK") + + +def semaphore_test(bounded=False): + if bounded: + exp = """run_sema 0 trying to access semaphore +run_sema 0 acquired semaphore +run_sema 1 trying to access semaphore +run_sema 1 acquired semaphore +run_sema 2 trying to access semaphore +run_sema 2 acquired semaphore +run_sema 3 trying to access semaphore +run_sema 4 trying to access semaphore +run_sema 0 has released semaphore +run_sema 4 acquired semaphore +run_sema 1 has released semaphore +run_sema 3 acquired semaphore +run_sema 2 has released semaphore +run_sema 4 has released semaphore +run_sema 3 has released semaphore +Bounded semaphore exception test OK + +Exact sequence of acquisition may vary when 3 and 4 compete for semaphore.""" + else: + exp = """run_sema 0 trying to access semaphore +run_sema 0 acquired semaphore +run_sema 1 trying to access semaphore +run_sema 1 acquired semaphore +run_sema 2 trying to access semaphore +run_sema 2 acquired semaphore +run_sema 3 trying to access semaphore +run_sema 4 trying to access semaphore +run_sema 0 has released semaphore +run_sema 3 acquired semaphore +run_sema 1 has released semaphore +run_sema 4 acquired 
semaphore +run_sema 2 has released semaphore +run_sema 3 has released semaphore +run_sema 4 has released semaphore + +Exact sequence of acquisition may vary when 3 and 4 compete for semaphore.""" + printexp(exp, 3) + asyncio.run(run_sema_test(bounded)) + + +# ************ Condition test ************ + +cond = Condition() +tim = 0 + + +async def cond01(): + while True: + await asyncio.sleep(2) + with await cond: + cond.notify(2) # Notify 2 tasks + + +async def cond03(): # Maintain a count of seconds + global tim + await asyncio.sleep(0.5) + while True: + await asyncio.sleep(1) + tim += 1 + + +async def cond02(n, barrier): + with await cond: + print("cond02", n, "Awaiting notification.") + await cond.wait() + print("cond02", n, "triggered. tim =", tim) + barrier.trigger() + + +def predicate(): + return tim >= 8 # 12 + + +async def cond04(n, barrier): + with await cond: + print("cond04", n, "Awaiting notification and predicate.") + await cond.wait_for(predicate) + print("cond04", n, "triggered. tim =", tim) + barrier.trigger() + + +async def cond_go(): + ntasks = 7 + barrier = Barrier(ntasks + 1) + t1 = asyncio.create_task(cond01()) + t3 = asyncio.create_task(cond03()) + for n in range(ntasks): + asyncio.create_task(cond02(n, barrier)) + await barrier # All instances of cond02 have completed + # Test wait_for + barrier = Barrier(2) + asyncio.create_task(cond04(99, barrier)) + await barrier + # cancel continuously running coros. + t1.cancel() + t3.cancel() + await asyncio.sleep_ms(0) + print("Done.") + + +def condition_test(): + printexp( + """cond02 0 Awaiting notification. +cond02 1 Awaiting notification. +cond02 2 Awaiting notification. +cond02 3 Awaiting notification. +cond02 4 Awaiting notification. +cond02 5 Awaiting notification. +cond02 6 Awaiting notification. +cond02 5 triggered. tim = 1 +cond02 6 triggered. tim = 1 +cond02 3 triggered. tim = 3 +cond02 4 triggered. tim = 3 +cond02 1 triggered. tim = 5 +cond02 2 triggered. tim = 5 +cond02 0 triggered. 
tim = 7 +cond04 99 Awaiting notification and predicate. +cond04 99 triggered. tim = 9 +Done. +""", + 13, + ) + asyncio.run(cond_go()) + + +# ************ Queue test ************ + + +async def slow_process(): + await asyncio.sleep(2) + return 42 + + +async def bar(q): + print("Waiting for slow process.") + result = await slow_process() + print("Putting result onto queue") + await q.put(result) # Put result on q + + +async def foo(q): + print("Running foo()") + result = await q.get() + print("Result was {}".format(result)) + + +async def q_put(n, q): + for x in range(8): + obj = (n, x) + await q.put(obj) + await asyncio.sleep(0) + + +async def q_get(n, q): + for x in range(8): + await q.get() + await asyncio.sleep(0) + + +async def putter(q): + # put some item, then sleep + for _ in range(20): + await q.put(1) + await asyncio.sleep_ms(50) + + +async def getter(q): + # checks for new items, and relies on the "blocking" of the get method + for _ in range(20): + await q.get() + + +async def queue_go(): + q = Queue(10) + asyncio.create_task(foo(q)) + asyncio.create_task(bar(q)) + await asyncio.sleep(3) + for n in range(4): + asyncio.create_task(q_put(n, q)) + await asyncio.sleep(1) + assert q.qsize() == 10 + await q.get() + await asyncio.sleep(0.1) + assert q.qsize() == 10 + while not q.empty(): + await q.get() + await asyncio.sleep(0.1) + assert q.empty() + print("Competing put tasks test complete") + + for n in range(4): + asyncio.create_task(q_get(n, q)) + await asyncio.sleep(1) + x = 0 + while not q.full(): + await q.put(x) + await asyncio.sleep(0.3) + x += 1 + assert q.qsize() == 10 + print("Competing get tasks test complete") + await asyncio.gather(putter(q), getter(q)) + print("Queue tests complete") + print("I've seen attack ships burn off the shoulder of Orion...") + print("Time to die...") + + +def queue_test(): + printexp( + """Running (runtime = 20s): +Running foo() +Waiting for slow process. 
+Putting result onto queue +Result was 42 +Competing put tasks test complete +Competing get tasks test complete +Queue tests complete + + +I've seen attack ships burn off the shoulder of Orion... +Time to die... + +""", + 20, + ) + asyncio.run(queue_go()) + + +# ************ RingbufQueue test ************ + + +async def qread(q, lst, twr): + async for item in q: + lst.append(item) + await asyncio.sleep_ms(twr) + + +async def read(q, t, twr=0): + lst = [] + try: + await asyncio.wait_for(qread(q, lst, twr), t) + except asyncio.TimeoutError: + pass + return lst + + +async def put_list(q, lst, twp=0): + for item in lst: + await q.put(item) + await asyncio.sleep_ms(twp) + + +async def rbq_go(): + q = RingbufQueue([0 for _ in range(10)]) # 10 elements + pl = [n for n in range(15)] + print("Read waits on slow write.") + asyncio.create_task(put_list(q, pl, 100)) + rl = await read(q, 2) + assert pl == rl + print("done") + print("Write waits on slow read.") + asyncio.create_task(put_list(q, pl)) + rl = await read(q, 2, 100) + assert pl == rl + print("done") + print("Testing full, empty and qsize methods.") + assert q.empty() + assert q.qsize() == 0 + assert not q.full() + await put_list(q, (1, 2, 3)) + assert not q.empty() + assert q.qsize() == 3 + assert not q.full() + print("Done") + print("Testing put_nowait and overruns.") + nfail = 0 + for x in range(4, 15): + try: + q.put_nowait(x) + except IndexError: + nfail += 1 + assert nfail == 5 + assert q.full() + rl = await read(q, 2) + assert rl == [6, 7, 8, 9, 10, 11, 12, 13, 14] + print("Testing get_nowait.") + await q.put(1) + assert q.get_nowait() == 1 + err = 0 + try: + q.get_nowait() + except IndexError: + err = 1 + assert err == 1 + print("Tests complete.") + print("I've seen attack ships burn off the shoulder of Orion...") + print("Time to die...") + + +def rbq_test(): + printexp( + """Running (runtime = 6s): +Read waits on slow write. +done +Write waits on slow read. +done +Testing full, empty and qsize methods. 
+Done +Testing put_nowait and overruns. +Testing get_nowait. +Tests complete. +I've seen attack ships burn off the shoulder of Orion... +Time to die... + +""", + 6, + ) + asyncio.run(rbq_go()) + + +# ************ Queue task_done/join test ************ +async def q_task_done_join_consumer(q): + while True: + r = await q.get() + print("consumer", "got/processing {}".format(r)) + await asyncio.sleep(0.5) + q.task_done() + + +async def q_task_done_join_waiter(q): + print("waiter", "await q.join") + await q.join() + print("waiter", "joined!", "task done!") + + +async def q_task_done_join_go(): + q = Queue() + + # empty queue should not block join + print("test", "await empty q.join") + await q.join() + print("test", "pass") + + consumer_task = asyncio.create_task(q_task_done_join_consumer(q)) + waiter_task = asyncio.create_task(q_task_done_join_waiter(q)) + + # add jobs + for x in range(10): + await q.put(x) + + print("test", "await q.join") + await q.join() + print("test", "all jobs done!") + + await asyncio.sleep(0) + print("test", "waiter_task.done()?", waiter_task.done()) + + consumer_task.cancel() + await asyncio.gather(consumer_task, return_exceptions=True) + + print("test", "DONE") + + +def q_task_done_join_test(): + printexp( + """Test Queue task_done/join behaviors +test await empty q.join +test pass +test await q.join +consumer got/processing 0 +waiter await q.join +consumer got/processing 1 +consumer got/processing 2 +consumer got/processing 3 +consumer got/processing 4 +consumer got/processing 5 +consumer got/processing 6 +consumer got/processing 7 +consumer got/processing 8 +consumer got/processing 9 +test all jobs done! +waiter joined! task done! +test waiter_task.done()? True +test DONE +""", + 5, + ) + asyncio.run(q_task_done_join_go()) + + +# ************ ************ +def test(n): + try: + if n == 1: + ack_test() # Test message acknowledge. + elif n == 2: + msg_test() # Test Messge and Lock objects. 
+ elif n == 3: + barrier_test() # Test the Barrier class. + elif n == 4: + barrier_test1() # Test the Barrier class. + elif n == 5: + semaphore_test(False) # Test Semaphore + elif n == 6: + semaphore_test(True) # Test BoundedSemaphore. + elif n == 7: + condition_test() # Test the Condition class. + elif n == 8: + queue_test() # Test the Queue class. + elif n == 9: + rbq_test() # Test the RingbufQueue class. + elif n == 10: + q_task_done_join_test() # Test the Queue task_done/join behavior. + except KeyboardInterrupt: + print("Interrupted") + finally: + asyncio.new_event_loop() + print_tests() diff --git a/v3/primitives/tests/broker_test.py b/v3/primitives/tests/broker_test.py new file mode 100644 index 0000000..ad1357e --- /dev/null +++ b/v3/primitives/tests/broker_test.py @@ -0,0 +1,129 @@ +# broker_test.py Test various types of subscriber + +# import primitives.tests.broker_test + +import asyncio +from primitives import broker, Queue, RingbufQueue, RegExp + +# Periodically publish messages to two topics +async def test(t): + for x in range(t): + await asyncio.sleep(1) + broker.publish("foo_topic", f"dogs {x}") + broker.publish("bar_topic", f"rats {x}") + + +# Suscribe via coroutine +async def subs(topic, message): + await asyncio.sleep_ms(100) + print("coroutine", topic, message) + + +# Subscribe via function +def func(topic, message): + print("function", topic, message) + + +# Subscribe via Event + +event = asyncio.Event() + + +async def event_test(): + while True: + await event.wait() + event.clear() + print("Event triggered") + + +class TestClass: + async def fetch_data(self, topic, message, arg1, arg2): + await asyncio.sleep_ms(100) + print("bound coro", topic, message, arg1, arg2) + + def get_data(self, topic, message): + print("bound method", topic, message) + + +async def print_queue(q): + while True: + topic, message = await asyncio.wait_for(q.get(), 2) + print(topic, message) + + +async def print_ringbuf_q(q): + while True: + topic, message, args = await 
asyncio.wait_for(q.get(), 2) + print(topic, message, args) + + +async def main(): + Broker.Verbose = False # Suppress q full messages + tc = TestClass() + q = Queue(10) + rq = RingbufQueue(10) + print("Subscribing Event, coroutine, Queue, RingbufQueue and bound coroutine.") + broker.subscribe("foo_topic", tc.fetch_data, 1, 42) # Bound coroutine + broker.subscribe("bar_topic", subs) # Coroutine + broker.subscribe("bar_topic", event) + broker.subscribe("foo_topic", q) + broker.subscribe("bar_topic", rq, "args", "added") + + asyncio.create_task(test(30)) # Publish to topics for 30s + asyncio.create_task(event_test()) + await asyncio.sleep(5) + print() + print("Unsubscribing coroutine") + broker.unsubscribe("bar_topic", subs) + await asyncio.sleep(5) + print() + print("Unsubscribing Event") + broker.unsubscribe("bar_topic", event) + print() + print("Subscribing function") + broker.subscribe("bar_topic", func) + await asyncio.sleep(5) + print() + print("Unsubscribing function") + broker.unsubscribe("bar_topic", func) + print() + print("Unsubscribing bound coroutine") + broker.unsubscribe("foo_topic", tc.fetch_data, 1, 42) # Async method + print() + print("Subscribing method") + broker.subscribe("foo_topic", tc.get_data) # Sync method + await asyncio.sleep(5) + print() + print("Unsubscribing method") + broker.unsubscribe("foo_topic", tc.get_data) # Async method + print("Retrieving foo_topic messages from Queue") + print("Retrieving bar_topic messages from RingbufQueue") + await asyncio.gather(print_queue(q), print_ringbuf_q(rq), return_exceptions=True) + # Queues are now empty + print() + print("*** Unsubscribing queues ***") + broker.unsubscribe("foo_topic", q) + broker.unsubscribe("bar_topic", rq, "args", "added") + print() + + print("*** Testing error reports and exception ***") + print() + Broker.Verbose = True + print("*** Produce warning messages on invalid unsubscribe ***") + broker.unsubscribe("rats", "more rats") # Invalid topic + broker.unsubscribe("foo_topic", 
"rats") # Invalid agent + print("*** Check exception on invalid subscribe ***") + try: + broker.subscribe("foo_topic", "rubbish_agent") + print("Test FAIL") + except ValueError: + print("Test PASS") + print() + print("*** Test wildcard subscribe ***") + broker.subscribe(RegExp(".*_topic"), func) + broker.publish("FAIL", func) # No match + asyncio.create_task(test(5)) + await asyncio.sleep(10) + + +asyncio.run(main()) diff --git a/v3/primitives/tests/delay_test.py b/v3/primitives/tests/delay_test.py new file mode 100644 index 0000000..16babdd --- /dev/null +++ b/v3/primitives/tests/delay_test.py @@ -0,0 +1,238 @@ +# delay_test.py Tests for Delay_ms class + +# Copyright (c) 2020 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +import asyncio +import micropython +from primitives.delay_ms import Delay_ms + +micropython.alloc_emergency_exception_buf(100) + + +def printexp(exp, runtime=0): + print("Expected output:") + print("\x1b[32m") + print(exp) + print("\x1b[39m") + if runtime: + print("Running (runtime = {}s):".format(runtime)) + else: + print("Running (runtime < 1s):") + + +async def ctor_test(): # Constructor arg + s = """ +Trigger 5 sec delay +Retrigger 5 sec delay +Callback should run +cb callback +Done +""" + printexp(s, 12) + + def cb(v): + print("cb", v) + + d = Delay_ms(cb, ("callback",), duration=5000) + + print("Trigger 5 sec delay") + d.trigger() + await asyncio.sleep(4) + print("Retrigger 5 sec delay") + d.trigger() + await asyncio.sleep(4) + print("Callback should run") + await asyncio.sleep(2) + print("Done") + + +async def launch_test(): + s = """ +Trigger 5 sec delay +Coroutine should run: run to completion. +Coroutine starts +Coroutine ends +Coroutine should run: test cancellation. +Coroutine starts +Coroutine should run: test awaiting. 
+Coroutine starts +Coroutine ends +Done +""" + printexp(s, 20) + + async def cb(v, ms): + print(v, "starts") + await asyncio.sleep_ms(ms) + print(v, "ends") + + d = Delay_ms(cb, ("coroutine", 1000)) + + print("Trigger 5 sec delay") + d.trigger(5000) # Test extending time + await asyncio.sleep(4) + print("Coroutine should run: run to completion.") + await asyncio.sleep(3) + d = Delay_ms(cb, ("coroutine", 3000)) + d.trigger(5000) + await asyncio.sleep(4) + print("Coroutine should run: test cancellation.") + await asyncio.sleep(2) + coro = d.rvalue() + coro.cancel() + d.trigger(5000) + await asyncio.sleep(4) + print("Coroutine should run: test awaiting.") + await asyncio.sleep(2) + coro = d.rvalue() + await coro + print("Done") + + +async def reduce_test(): # Test reducing a running delay + s = """ +Trigger 5 sec delay +Callback should run +cb callback +Callback should run +cb callback +Done +""" + printexp(s, 11) + + def cb(v): + print("cb", v) + + d = Delay_ms(cb, ("callback",)) + + print("Trigger 5 sec delay") + d.trigger(5000) # Test extending time + await asyncio.sleep(4) + print("Callback should run") + await asyncio.sleep(2) + d.trigger(10000) + await asyncio.sleep(1) + d.trigger(3000) + await asyncio.sleep(2) + print("Callback should run") + await asyncio.sleep(2) + print("Done") + + +async def stop_test(): # Test the .stop and .running methods + s = """ +Trigger 5 sec delay +Running +Callback should run +cb callback +Callback returned 42 +Callback should not run +Done + """ + printexp(s, 12) + + def cb(v): + print("cb", v) + return 42 + + d = Delay_ms(cb, ("callback",)) + + print("Trigger 5 sec delay") + d.trigger(5000) # Test extending time + await asyncio.sleep(4) + if d(): + print("Running") + print("Callback should run") + await asyncio.sleep(2) + print("Callback returned", d.rvalue()) + d.trigger(3000) + await asyncio.sleep(1) + d.stop() + await asyncio.sleep(1) + if d(): + print("Running") + print("Callback should not run") + await asyncio.sleep(4) + 
 print("Done") + + +async def isr_test(): # Test trigger from hard ISR + from pyb import Timer + + s = """ +Timer holds off cb for 5 secs +cb should now run +cb callback +Done +""" + printexp(s, 6) + + def cb(v): + print("cb", v) + + d = Delay_ms(cb, ("callback",)) + + def timer_cb(_): + d.trigger(200) + + tim = Timer(1, freq=10, callback=timer_cb) + + print("Timer holds off cb for 5 secs") + await asyncio.sleep(5) + tim.deinit() + print("cb should now run") + await asyncio.sleep(1) + print("Done") + + +async def err_test(): # Test triggering de-initialised timer + s = """ +Running (runtime = 3s): +Trigger 1 sec delay +cb callback +Success: error was raised. +Done + """ + printexp(s, 3) + + def cb(v): + print("cb", v) + return 42 + + d = Delay_ms(cb, ("callback",)) + + print("Trigger 1 sec delay") + d.trigger(1000) + await asyncio.sleep(2) + d.deinit() + try: + d.trigger(1000) + except RuntimeError: + print("Success: error was raised.") + print("Done") + + +av = """ +Run a test by issuing +delay_test.test(n) +where n is a test number. Available tests: +\x1b[32m +0 Test triggering from a hard ISR (Pyboard only) +1 Test the .stop method and callback return value. 
+2 Test reducing the duration of a running timer +3 Test delay defined by constructor arg +4 Test triggering a Task +5 Attempt to trigger de-initialised instance +\x1b[39m +""" +print(av) + +tests = (isr_test, stop_test, reduce_test, ctor_test, launch_test, err_test) + + +def test(n=0): + try: + asyncio.run(tests[n]()) + finally: + asyncio.new_event_loop() diff --git a/v3/primitives/tests/elo_test.py b/v3/primitives/tests/elo_test.py new file mode 100644 index 0000000..8ebe4bd --- /dev/null +++ b/v3/primitives/tests/elo_test.py @@ -0,0 +1,100 @@ +# elo_test.py Test ELO class + +# Copyright (c) 2024 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +# from primitives.tests.elo_test import test +# test() + +import asyncio +from primitives import WaitAny, WaitAll, ELO + +evt = asyncio.Event() + + +def set_after(t): + async def ta(t): + await asyncio.sleep(t) + print("set") + evt.set() + evt.clear() + + asyncio.create_task(ta(t)) + + +def can_after(elo, t): + async def ca(elo, t): + await asyncio.sleep(t) + elo().cancel() + + asyncio.create_task(ca(elo, t)) + + +async def foo(t, n=42): + await asyncio.sleep(t) + return n + + +async def main(): + txt = """\x1b[32m +Expected output: + +Test cancellation. +Canned +Test return of value. +Result: 42 +Instantiate with running task +Result: 99 +Delayed return of value. 
+Result: 88 +\x1b[39m +""" + print(txt) + entries = (evt, elo := ELO(foo, 5)) + print("Test cancellation.") + can_after(elo, 1) + await WaitAny(entries).wait() + task = elo() + if isinstance(task, asyncio.CancelledError): + print("Canned") + + print("Test return of value.") + entries = (evt, elo := ELO(foo, 5)) + await WaitAny(entries).wait() + res = await elo() + print(f"Result: {res}") + + print("Instantiate with running task") + elo = ELO(task := asyncio.create_task(foo(3, 99))) + await WaitAny((elo, evt)).wait() + res = await task + print(f"Result: {res}") + + print("Delayed return of value.") + entries = (evt, elo := ELO(foo, 5, 88)) + await WaitAny(entries).wait() + set_after(1) # Early exit + res = await elo() # Pause until complete + print(f"Result: {res}") + + +def tests(): + txt = """ +\x1b[32m +Issue: +from primitives.tests.elo_test import test +test() +\x1b[39m +""" + print(txt) + + +def test(): + try: + asyncio.run(main()) + finally: + asyncio.new_event_loop() + tests() + + +tests() diff --git a/v3/primitives/tests/encoder_stop.py b/v3/primitives/tests/encoder_stop.py new file mode 100644 index 0000000..b091c8b --- /dev/null +++ b/v3/primitives/tests/encoder_stop.py @@ -0,0 +1,44 @@ +# encoder_stop.py Demo of callback which occurs after motion has stopped. + +from machine import Pin +import asyncio +from primitives.encoder import Encoder +from primitives.delay_ms import Delay_ms + +px = Pin("X1", Pin.IN, Pin.PULL_UP) +py = Pin("X2", Pin.IN, Pin.PULL_UP) + +tim = Delay_ms(duration=400) # High value for test +d = 0 + + +def tcb(pos, delta): # User callback gets args of encoder cb + global d + d = 0 + print(pos, delta) + + +def cb(pos, delta): # Encoder callback occurs rapidly + global d + tim.trigger() # Postpone the user callback + tim.callback(tcb, (pos, d := d + delta)) # and update its args + + +async def main(): + while True: + await asyncio.sleep(1) + + +def test(): + print("Running encoder test. 
Press ctrl-c to terminate.") + Encoder.delay = 0 # No need for this delay + enc = Encoder(px, py, callback=cb) + try: + asyncio.run(main()) + except KeyboardInterrupt: + print("Interrupted") + finally: + asyncio.new_event_loop() + + +test() diff --git a/v3/primitives/tests/encoder_test.py b/v3/primitives/tests/encoder_test.py new file mode 100644 index 0000000..7848ea0 --- /dev/null +++ b/v3/primitives/tests/encoder_test.py @@ -0,0 +1,35 @@ +# encoder_test.py Test for asynchronous driver for incremental quadrature encoder. + +# Copyright (c) 2021-2022 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +from machine import Pin +import asyncio +from primitives.encoder import Encoder + + +px = Pin(33, Pin.IN, Pin.PULL_UP) +py = Pin(25, Pin.IN, Pin.PULL_UP) + + +def cb(pos, delta): + print(pos, delta) + + +async def main(): + while True: + await asyncio.sleep(1) + + +def test(): + print("Running encoder test. Press ctrl-c to terminate.") + enc = Encoder(px, py, v=0, vmin=0, vmax=100, callback=cb) + try: + asyncio.run(main()) + except KeyboardInterrupt: + print("Interrupted") + finally: + asyncio.new_event_loop() + + +test() diff --git a/v3/primitives/tests/event_test.py b/v3/primitives/tests/event_test.py new file mode 100644 index 0000000..17ac5e4 --- /dev/null +++ b/v3/primitives/tests/event_test.py @@ -0,0 +1,243 @@ +# event_test.py Test WaitAll, WaitAny, ESwitch, EButton + +# Copyright (c) 2022 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +# from primitives.tests.event_test import * + +import asyncio +from primitives import Delay_ms, WaitAny, ESwitch, WaitAll, EButton +from pyb import Pin + +events = [asyncio.Event() for _ in range(4)] + + +async def set_events(*ev): + for n in ev: + await asyncio.sleep(1) + print("Setting", n) + events[n].set() + + +def clear(msg): + print(msg) + for e in events: + e.clear() + + +async def can(obj, tim): + await asyncio.sleep(tim) + print("About to cancel") + obj.cancel() + + +async 
def foo(tsk): + print("Waiting") + await tsk + + +async def wait_test(): + msg = """ +\x1b[32m +Expected output: +Setting 0 +Tested WaitAny 0 +Setting 1 +Tested WaitAny 1 +Setting 2 +Setting 3 +Tested WaitAll 2, 3 +Setting 0 +Setting 3 +Tested WaitAny 0, 3 +Cancel in 3s +Setting 0 +Setting 1 +About to cancel +Cancelled. +Waiting for 4s +Timeout +done +\x1b[39m +""" + print(msg) + wa = WaitAny((events[0], events[1], WaitAll((events[2], events[3])))) + asyncio.create_task(set_events(0)) + await wa.wait() + clear("Tested WaitAny 0") + asyncio.create_task(set_events(1)) + await wa.wait() + clear("Tested WaitAny 1") + asyncio.create_task(set_events(2, 3)) + await wa.wait() + clear("Tested WaitAll 2, 3") + wa = WaitAll((WaitAny((events[0], events[1])), WaitAny((events[2], events[3])))) + asyncio.create_task(set_events(0, 3)) + await wa.wait() + clear("Tested WaitAny 0, 3") + task = asyncio.create_task(wa.wait()) + asyncio.create_task(set_events(0, 1)) # Does nothing + asyncio.create_task(can(task, 3)) + print("Cancel in 3s") + try: + await task + except asyncio.CancelledError: # TODO why must we trap this? 
+ print("Cancelled.") + print("Waiting for 4s") + try: + await asyncio.wait_for(wa.wait(), 4) + except asyncio.TimeoutError: + print("Timeout") + print("done") + + +val = 0 +fail = False +pout = None +polarity = 0 + + +async def monitor(evt, v, verbose): + global val + while True: + await evt.wait() + evt.clear() + val += v + verbose and print("Got", hex(v), hex(val)) + + +async def pulse(ms=100): + pout(1 ^ polarity) + await asyncio.sleep_ms(ms) + pout(polarity) + + +def expect(v, e): + global fail + if v == e: + print("Pass") + else: + print(f"Fail: expected 0x{e:04x} got 0x{v:04x}") + fail = True + + +async def btest(btn, verbose, supp): + global val, fail + val = 0 + events = btn.press, btn.release, btn.double, btn.long + tasks = [] + for n, evt in enumerate(events): # Each event has a 3-bit event counter + tasks.append(asyncio.create_task(monitor(evt, 1 << 3 * n, verbose))) + await asyncio.sleep(1) + print("Start short press test") + await pulse() + await asyncio.sleep(1) + verbose and print("Test of short press", hex(val)) + expect(val, 0x09) + + val = 0 + await asyncio.sleep(1) + print("Start long press test") + await pulse(2000) + await asyncio.sleep(4) + verbose and print("Long press", hex(val)) + exp = 0x208 if supp else 0x209 + expect(val, exp) + + val = 0 + await asyncio.sleep(1) + print("Start double press test") + await pulse() + await asyncio.sleep_ms(100) + await pulse() + await asyncio.sleep(4) + verbose and print("Double press", hex(val)) + exp = 0x48 if supp else 0x52 + expect(val, exp) + + val = 0 + await asyncio.sleep(1) + print("Start double press, 2nd press long, test") + await pulse() + await asyncio.sleep_ms(100) + await pulse(2000) + await asyncio.sleep(4) + verbose and print("Double press", hex(val)) + exp = 0x48 if supp else 0x52 + expect(val, exp) + for task in tasks: + task.cancel() + + +async def stest(sw, verbose): + global val, fail + val = 0 + events = sw.open, sw.close + tasks = [] + for n, evt in enumerate(events): + 
 tasks.append(asyncio.create_task(monitor(evt, 1 << 3 * n, verbose))) + asyncio.create_task(pulse(2000)) + print("Switch closure") + await asyncio.sleep(1) + expect(val, 0x08) + await asyncio.sleep(4) # Wait for any spurious events + print("Switch open") + verbose and print("Switch close and open", hex(val)) + expect(val, 0x09) + for task in tasks: + task.cancel() + + +async def switch_test(pol, verbose): + global val, pout, polarity + polarity = pol + pin = Pin("Y1", Pin.IN) + pout = Pin("Y2", Pin.OUT, value=pol) + print("Testing EButton.") + print("Testing with suppress == False") + btn = EButton(pin) + await btest(btn, verbose, False) + print() + print("Testing with suppress == True") + btn.deinit() + btn = EButton(pin, suppress=True) + await btest(btn, verbose, True) + print() + print("Testing ESwitch") + sw = ESwitch(pin, pol) + await stest(sw, verbose) + print("Failures occurred.") if fail else print("All tests passed.") + + +def tests(): + txt = """ + \x1b[32m + Available tests: + 1. test_switches(polarity=1, verbose=False) Test the ESwitch and EButton classes. + 2. test_wait() Test the WaitAny and WaitAll primitives. + + Switch tests assume a Pyboard with a link between Y1 and Y2. + \x1b[39m + """ + print(txt) + + +tests() + + +def test_switches(polarity=1, verbose=False): + try: + asyncio.run( + switch_test(polarity, verbose) + ) # polarity 1/0 is normal (off) electrical state. + finally: + asyncio.new_event_loop() + tests() + + +def test_wait(): + try: + asyncio.run(wait_test()) + finally: + asyncio.new_event_loop() + tests() diff --git a/v3/primitives/tests/switches.py b/v3/primitives/tests/switches.py new file mode 100644 index 0000000..8d93c9b --- /dev/null +++ b/v3/primitives/tests/switches.py @@ -0,0 +1,271 @@ +# Test/demo programs for Switch and Pushbutton classes +# Tested on Pyboard but should run on other microcontroller platforms +# running MicroPython with uasyncio library. 
+ +# Copyright (c) 2018-2022 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file +# Now executes .deinit() + +# To run: +# from primitives.tests.switches import * +# test_sw() # For example + +from machine import Pin +from pyb import LED +from primitives import Switch, Pushbutton +import asyncio + +helptext = """ +Test using switch or pushbutton between X1 and gnd. +Ground pin X2 to terminate test. + +""" +tests = """ +\x1b[32m +Available tests: +test_sw Switch test. +test_swcb Switch with callback. +test_sw_event Switch with event. +test_btn Pushbutton launching coros. +test_btncb Pushbutton launching callbacks. +btn_dynamic Change coros launched at runtime. +btn_event Pushbutton event interface. +\x1b[39m +""" +print(tests) + +# Pulse an LED (coroutine) +async def pulse(led, ms): + led.on() + await asyncio.sleep_ms(ms) + led.off() + + +# Pulse an LED when an event triggered +async def evt_pulse(event, led): + while True: + event.clear() + await event.wait() + led.on() + await asyncio.sleep_ms(500) + led.off() + + +# Toggle an LED (callback) +def toggle(led): + led.toggle() + + +# Quit test by connecting X2 to ground +async def killer(obj): + pin = Pin("X2", Pin.IN, Pin.PULL_UP) + while pin.value(): + await asyncio.sleep_ms(50) + obj.deinit() + await asyncio.sleep_ms(0) + + +def run(obj): + try: + asyncio.run(killer(obj)) + except KeyboardInterrupt: + print("Interrupted") + finally: + asyncio.new_event_loop() + print(tests) + + +# Test for the Switch class passing coros +def test_sw(): + s = """ +close pulses green +open pulses red +""" + print("Test of switch scheduling coroutines.") + print(helptext) + print(s) + pin = Pin("X1", Pin.IN, Pin.PULL_UP) + red = LED(1) + green = LED(2) + sw = Switch(pin) + # Register coros to launch on contact close and open + sw.close_func(pulse, (green, 1000)) + sw.open_func(pulse, (red, 1000)) + run(sw) + + +# Test for the switch class with a callback +def test_swcb(): + s = """ +close toggles red +open toggles 
green +""" + print("Test of switch executing callbacks.") + print(helptext) + print(s) + pin = Pin("X1", Pin.IN, Pin.PULL_UP) + red = LED(1) + green = LED(2) + sw = Switch(pin) + # Register a coro to launch on contact close + sw.close_func(toggle, (red,)) + sw.open_func(toggle, (green,)) + run(sw) + + +# Test for the Switch class (events) +async def do_sw_event(): + pin = Pin("X1", Pin.IN, Pin.PULL_UP) + sw = Switch(pin) + sw.open_func(None) + sw.close_func(None) + tasks = [] + for event, led in ((sw.close, 1), (sw.open, 2)): + tasks.append(asyncio.create_task(evt_pulse(event, LED(led)))) + await killer(sw) + for task in tasks: + task.cancel() + + +def test_sw_event(): + s = """ +close pulse red +open pulses green +""" + print("Test of switch triggering events.") + print(helptext) + print(s) + try: + asyncio.run(do_sw_event()) + except KeyboardInterrupt: + print("Interrupted") + finally: + asyncio.new_event_loop() + print(tests) + + +# Test for the Pushbutton class (coroutines) +# Pass True to test suppress +def test_btn(suppress=False, lf=True, df=True): + s = """ +press pulses red +release pulses green +double click pulses yellow +long press pulses blue +""" + print("Test of pushbutton scheduling coroutines.") + print(helptext) + print(s) + pin = Pin("X1", Pin.IN, Pin.PULL_UP) + red = LED(1) + green = LED(2) + yellow = LED(3) + blue = LED(4) + pb = Pushbutton(pin, suppress) + pb.press_func(pulse, (red, 1000)) + pb.release_func(pulse, (green, 1000)) + if df: + print("Doubleclick enabled") + pb.double_func(pulse, (yellow, 1000)) + if lf: + print("Long press enabled") + pb.long_func(pulse, (blue, 1000)) + run(pb) + + +# Test for the Pushbutton class (callbacks) +def test_btncb(): + s = """ +press toggles red +release toggles green +double click toggles yellow +long press toggles blue +""" + print("Test of pushbutton executing callbacks.") + print(helptext) + print(s) + pin = Pin("X1", Pin.IN, Pin.PULL_UP) + red = LED(1) + green = LED(2) + yellow = LED(3) + blue = 
LED(4) + pb = Pushbutton(pin) + pb.press_func(toggle, (red,)) + pb.release_func(toggle, (green,)) + pb.double_func(toggle, (yellow,)) + pb.long_func(toggle, (blue,)) + run(pb) + + +# Test for the Pushbutton class where callback coros change dynamically +def setup(pb, press, release, dbl, lng, t=1000): + s = """ +Functions are changed: +LED's pulse for 2 seconds +press pulses blue +release pulses red +double click pulses green +long pulses yellow +""" + pb.press_func(pulse, (press, t)) + pb.release_func(pulse, (release, t)) + pb.double_func(pulse, (dbl, t)) + if lng is not None: + pb.long_func(pulse, (lng, t)) + print(s) + + +def btn_dynamic(): + s = """ +press pulses red +release pulses green +double click pulses yellow +long press changes button functions. +""" + print("Test of pushbutton scheduling coroutines.") + print(helptext) + print(s) + pin = Pin("X1", Pin.IN, Pin.PULL_UP) + red = LED(1) + green = LED(2) + yellow = LED(3) + blue = LED(4) + pb = Pushbutton(pin) + setup(pb, red, green, yellow, None) + pb.long_func(setup, (pb, blue, red, green, yellow, 2000)) + run(pb) + + +# Test for the Pushbutton class (events) +async def do_btn_event(): + pin = Pin("X1", Pin.IN, Pin.PULL_UP) + pb = Pushbutton(pin) + pb.press_func(None) + pb.release_func(None) + pb.double_func(None) + pb.long_func(None) + tasks = [] + for event, led in ((pb.press, 1), (pb.release, 2), (pb.double, 3), (pb.long, 4)): + tasks.append(asyncio.create_task(evt_pulse(event, LED(led)))) + await killer(pb) + for task in tasks: + task.cancel() + + +def btn_event(): + s = """ +press pulse red +release pulses green +double click pulses yellow +long press pulses blue +""" + print("Test of pushbutton triggering events.") + print(helptext) + print(s) + try: + asyncio.run(do_btn_event()) + except KeyboardInterrupt: + print("Interrupted") + finally: + asyncio.new_event_loop() + print(tests) diff --git a/v3/threadsafe/__init__.py b/v3/threadsafe/__init__.py new file mode 100644 index 0000000..a60c707 --- 
/dev/null +++ b/v3/threadsafe/__init__.py @@ -0,0 +1,23 @@ +# __init__.py Common functions for uasyncio threadsafe primitives + +# Copyright (c) 2022 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +_attrs = { + "ThreadSafeEvent": "threadsafe_event", + "ThreadSafeQueue": "threadsafe_queue", + "Message": "message", + "Context": "context", +} + +# Copied from uasyncio.__init__.py +# Lazy loader, effectively does: +# global attr +# from .mod import attr +def __getattr__(attr): + mod = _attrs.get(attr, None) + if mod is None: + raise AttributeError(attr) + value = getattr(__import__(mod, None, None, True, 1), attr) + globals()[attr] = value + return value diff --git a/v3/threadsafe/context.py b/v3/threadsafe/context.py new file mode 100644 index 0000000..1fc305d --- /dev/null +++ b/v3/threadsafe/context.py @@ -0,0 +1,33 @@ +# context.py: Run functions or methods on another core or in another thread + +import asyncio +import _thread +from threadsafe import ThreadSafeQueue + +# Object describing a job to be run on another core +class Job: + def __init__(self, func, args, kwargs): + self.kwargs = kwargs + self.args = args + self.func = func + self.rval = None # Return value + self.done = asyncio.ThreadSafeFlag() # "done" indicator + + +def worker(q): # Runs forever on a core executing jobs as they arrive + while True: + job = q.get_sync(True) # Block until a Job arrives + job.rval = job.func(*job.args, **job.kwargs) + job.done.set() + + +class Context: + def __init__(self, qsize=10): + self.q = ThreadSafeQueue(qsize) + _thread.start_new_thread(worker, (self.q,)) + + async def assign(self, func, *args, **kwargs): + job = Job(func, args, kwargs) + await self.q.put(job) # Will pause if q is full. 
+ await job.done.wait() # Pause until function has run + return job.rval diff --git a/v3/threadsafe/message.py b/v3/threadsafe/message.py new file mode 100644 index 0000000..ba74848 --- /dev/null +++ b/v3/threadsafe/message.py @@ -0,0 +1,73 @@ +# message.py +# Now uses ThreadSafeFlag for efficiency + +# Copyright (c) 2018-2022 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +# Usage: +# from primitives.message import Message +# See https://github.com/micropython/micropython/issues/7965 for code below +import sys + +ok = hasattr(sys.implementation, "_machine") # MicroPython +if ok: + ok = "linux" not in sys.implementation._machine +if ok: + import asyncio +else: + print("Message is MicroPython only, and not on Unix build.") + sys.exit(1) + +# A coro waiting on a message issues await message +# A coro or hard/soft ISR raising the message issues.set(payload) +# .clear() should be issued by at least one waiting task and before +# next event. + + +class Message(asyncio.Event): + def __init__(self): + super().__init__() + self._waiting_on_tsf = False + self._tsf = asyncio.ThreadSafeFlag() + self._data = None # Message + + def clear(self): # At least one task must call clear when scheduled + super().clear() + + def __iter__(self): + yield from self.wait() + return self._data + + async def _waiter(self): # Runs if 1st task is cancelled + await self._tsf.wait() + super().set() + self._waiting_on_tsf = False + + async def wait(self): + if self._waiting_on_tsf == False: + self._waiting_on_tsf = True + await asyncio.sleep(0) # Ensure other tasks see updated flag + try: + await self._tsf.wait() + super().set() + self._waiting_on_tsf = False + except asyncio.CancelledError: + asyncio.create_task(self._waiter()) + raise # Pass cancellation to calling code + else: + await super().wait() + return self._data + + def set(self, data=None): # Can be called from a hard ISR + self._data = data + super().set() + self._tsf.set() + + def __aiter__(self): + return 
 self + + async def __anext__(self): + return await self + + def value(self): + return self._data diff --git a/v3/threadsafe/package.json b/v3/threadsafe/package.json new file mode 100644 index 0000000..39d51d0 --- /dev/null +++ b/v3/threadsafe/package.json @@ -0,0 +1,10 @@ +{ + "urls": [ + ["threadsafe/__init__.py", "github:peterhinch/micropython-async/v3/threadsafe/__init__.py"], + ["threadsafe/message.py", "github:peterhinch/micropython-async/v3/threadsafe/message.py"], + ["threadsafe/threadsafe_event.py", "github:peterhinch/micropython-async/v3/threadsafe/threadsafe_event.py"], + ["threadsafe/threadsafe_queue.py", "github:peterhinch/micropython-async/v3/threadsafe/threadsafe_queue.py"], + ["threadsafe/context.py", "github:peterhinch/micropython-async/v3/threadsafe/context.py"] + ], + "version": "0.1" +} diff --git a/v3/threadsafe/threadsafe_event.py b/v3/threadsafe/threadsafe_event.py new file mode 100644 index 0000000..a8b25a3 --- /dev/null +++ b/v3/threadsafe/threadsafe_event.py @@ -0,0 +1,35 @@ +# threadsafe_event.py Provides ThreadSafeEvent class + +# Copyright (c) 2022 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +import asyncio + + +class ThreadSafeEvent(asyncio.Event): + def __init__(self): + super().__init__() + self._waiting_on_tsf = False + self._tsf = asyncio.ThreadSafeFlag() + + def set(self): + self._tsf.set() + + async def _waiter(self): + await self._tsf.wait() + super().set() + self._waiting_on_tsf = False + + async def wait(self): + if self._waiting_on_tsf == False: + self._waiting_on_tsf = True + await asyncio.sleep_ms(0) + try: + await self._tsf.wait() + super().set() + self._waiting_on_tsf = False + except asyncio.CancelledError: + asyncio.create_task(self._waiter()) + raise + else: + await super().wait() diff --git a/v3/threadsafe/threadsafe_queue.py b/v3/threadsafe/threadsafe_queue.py new file mode 100644 index 0000000..d2bdf61 --- /dev/null +++ b/v3/threadsafe/threadsafe_queue.py @@ -0,0 +1,66 @@ +# 
threadsafe_queue.py Provides ThreadsafeQueue class + +# Copyright (c) 2022 Peter Hinch +# Released under the MIT License (MIT) - see LICENSE file + +# Uses pre-allocated ring buffer: can use list or array +# Asynchronous iterator allowing consumer to use async for + +import asyncio + + +class ThreadSafeQueue: # MicroPython optimised + def __init__(self, buf): + self._q = [0 for _ in range(buf)] if isinstance(buf, int) else buf + self._size = len(self._q) + self._wi = 0 + self._ri = 0 + self._evput = asyncio.ThreadSafeFlag() # Triggered by put, tested by get + self._evget = asyncio.ThreadSafeFlag() # Triggered by get, tested by put + + def full(self): + return ((self._wi + 1) % self._size) == self._ri + + def empty(self): + return self._ri == self._wi + + def qsize(self): + return (self._wi - self._ri) % self._size + + def get_sync(self, block=False): # Remove and return an item from the queue. + if not block and self.empty(): + raise IndexError # Not allowed to block + while self.empty(): # Block until an item appears + pass + r = self._q[self._ri] + self._ri = (self._ri + 1) % self._size + self._evget.set() + return r + + def put_sync(self, v, block=False): + self._q[self._wi] = v + self._evput.set() # Schedule task waiting on get + if not block and self.full(): + raise IndexError + while self.full(): + pass # can't bump ._wi until an item is removed + self._wi = (self._wi + 1) % self._size + + async def put(self, val): # Usage: await queue.put(item) + while self.full(): # Queue full + await self._evget.wait() + self.put_sync(val) + + def __aiter__(self): + return self + + async def __anext__(self): + return await self.get() + + async def get(self): + while self.empty(): + await self._evput.wait() + r = self._q[self._ri] + self._ri = (self._ri + 1) % self._size + self._evget.set() # Schedule task waiting on ._evget + return r