uint8_t p2;
uint16_t p3;
int16_t prio;
+ uint16_t flags; /* TDMA_IFLG_xxx */
};
/* A bucket inside the TDMA scheduler */
/* Schedule a set of items starting from 'frame_offset' TDMA frames in the future */
int tdma_schedule_set(uint8_t frame_offset, const struct tdma_sched_item *item_set, uint16_t p3);
+/* Scan current frame scheduled items for flags */
+uint16_t tdma_sched_flag_scan(void);
+
/* Execute pre-scheduled events for current frame */
int tdma_sched_execute(void);
extern int tdma_end_set(uint8_t p1, uint8_t p2, uint16_t p3);
-#define SCHED_ITEM(x, p, y, z) { .cb = x, .p1 = y, .p2 = z, .prio = p }
+#define SCHED_ITEM(x, p, y, z) { .cb = x, .p1 = y, .p2 = z, .prio = p, .flags = 0 }
#define SCHED_END_FRAME() { .cb = NULL, .p1 = 0, .p2 = 0 }
#define SCHED_END_SET() { .cb = &tdma_end_set, .p1 = 0, .p2 = 0 }
* generated by TPU once every TDMA frame */
static void l1_sync(void)
{
+ uint16_t sched_flags;
+
putchart('+');
check_lost_frame();
}
/* execute the sched_items that have been scheduled for this
- * TDMA frame */
+ * TDMA frame (including setup/cleanup steps) */
+ sched_flags = tdma_sched_flag_scan();
+
tdma_sched_execute();
if (dsp_api.r_page_used) {
sched->cur_bucket = next_bucket;
}
+/* Scan current frame scheduled items for flags
+ *
+ * Returns the bitwise OR of the TDMA_IFLG_xxx flags of every item in
+ * the scheduler's current bucket.  This is a read-only scan: no item
+ * callback is invoked here (that happens in tdma_sched_execute()). */
+uint16_t tdma_sched_flag_scan(void)
+{
+	struct tdma_scheduler *sched = &l1s.tdma_sched;
+	struct tdma_sched_bucket *bucket;
+	int i;
+	uint16_t flags = 0;
+
+	/* determine current bucket */
+	bucket = &sched->bucket[sched->cur_bucket];
+
+	/* iterate over items in this bucket and accumulate their flags;
+	 * callbacks are deliberately NOT called from this scan */
+	for (i=0; i<bucket->num_items; i++) {
+		struct tdma_sched_item *item = &bucket->item[i];
+		flags |= item->flags;
+	}
+
+	return flags;
+}
+
/* Sort a bucket entries by priority */
static void _tdma_sched_bucket_sort(struct tdma_sched_bucket *bucket, int *seq)
{