author | Dimitri Staessens <[email protected]> | 2022-03-27 11:09:43 +0200
committer | Sander Vrijders <[email protected]> | 2022-03-30 15:05:05 +0200
commit | 02b3893b1ec392f1b3ca030a03267c31eb1dc290 (patch)
tree | f7cebdb5ef2c4994bc1e675e838bc8922cbae950 /src/ipcpd/unicast/psched.c
parent | 56654f2cd1813d87d32695f126939bbfaad52385 (diff)
download | ouroboros-02b3893b1ec392f1b3ca030a03267c31eb1dc290.tar.gz ouroboros-02b3893b1ec392f1b3ca030a03267c31eb1dc290.zip
lib: Add np1_flow_read and np1_flow_write calls
Reading from and writing to (N + 1) flows from the IPCP used a raw QoS
flow to bypass some functions in the ipcp_flow_read call, but that call
was broken for keepalive packets. Fixing ipcp_flow_read for (N - 1)
flows causes the IPCPs to drop 0-byte keepalive packets coming from
(N + 1) client flows.
From now on, there is a dedicated call for (N + 1) reads/writes from
the IPCPs that is more efficient and cleaner. The internal QoS of an
(N + 1) flow now also defaults to a qos_np1 qosspec, instead of
tampering with the qosspec requested by the (N + 1) client.
Signed-off-by: Dimitri Staessens <[email protected]>
Signed-off-by: Sander Vrijders <[email protected]>
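The diff below makes the scheduler's read function pluggable instead of hard-coding ipcp_flow_read. As a rough sketch of the shape of that hook, assuming read_fn_t mirrors the (fd, &sdb) signature used by the sched->read() call in the diff (the real typedef lives in the ouroboros psched header, not on this page):

/*
 * Sketch only: assumed shape of the read hook, inferred from the
 * sched->read(fd, &sdb) call in the diff below.
 */
typedef int (* read_fn_t)(int                   fd,
                          struct shm_du_buff ** sdb);

With such a hook, each IPCP component can pass the reader that matches its flows, e.g. the new np1_flow_read for (N + 1) client flows, when it creates its packet scheduler.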
Diffstat (limited to 'src/ipcpd/unicast/psched.c')
-rw-r--r-- | src/ipcpd/unicast/psched.c | 7
1 file changed, 5 insertions, 2 deletions
diff --git a/src/ipcpd/unicast/psched.c b/src/ipcpd/unicast/psched.c
index 33ac5afe..bb452726 100644
--- a/src/ipcpd/unicast/psched.c
+++ b/src/ipcpd/unicast/psched.c
@@ -50,6 +50,7 @@ static int qos_prio [] = {
 struct psched {
         fset_t *         set[QOS_CUBE_MAX];
         next_packet_fn_t callback;
+        read_fn_t        read;
         pthread_t        readers[QOS_CUBE_MAX * IPCP_SCHED_THR_MUL];
 };
@@ -101,7 +102,7 @@ static void * packet_reader(void * o)
                         notifier_event(NOTIFY_DT_FLOW_UP, &fd);
                         break;
                 case FLOW_PKT:
-                        if (ipcp_flow_read(fd, &sdb))
+                        if (sched->read(fd, &sdb) < 0)
                                 continue;

                         sched->callback(fd, qc, sdb);
@@ -117,7 +118,8 @@ static void * packet_reader(void * o)
         return (void *) 0;
 }

-struct psched * psched_create(next_packet_fn_t callback)
+struct psched * psched_create(next_packet_fn_t callback,
+                              read_fn_t        read)
 {
         struct psched *     psched;
         struct sched_info * infos[QOS_CUBE_MAX * IPCP_SCHED_THR_MUL];
@@ -131,6 +133,7 @@ struct psched * psched_create(next_packet_fn_t callback)
                 goto fail_malloc;

         psched->callback = callback;
+        psched->read     = read;

         for (i = 0; i < QOS_CUBE_MAX; ++i) {
                 psched->set[i] = fset_create();
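For context, a hypothetical caller-side sketch of how an IPCP component might hand the new np1_flow_read call to its packet scheduler after this change. Only the two-argument psched_create() signature is taken from the diff above; the header locations, the qoscube_t callback signature and the function names below are assumptions:

/* Hypothetical usage sketch; not part of this commit. */
#include <ouroboros/ipcp-dev.h>   /* assumed home of np1_flow_read() */

#include "psched.h"

static void packet_handler(int                  fd,
                           qoscube_t            qc,
                           struct shm_du_buff * sdb)
{
        /* A real handler would process or forward sdb and release it. */
        (void) fd;
        (void) qc;
        (void) sdb;
}

static struct psched * sched;

static int component_start(void)
{
        /* (N + 1) client flows are now read with the dedicated call. */
        sched = psched_create(packet_handler, np1_flow_read);
        if (sched == NULL)
                return -1;

        return 0;
}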