author     Dimitri Staessens <[email protected]>    2018-10-04 18:06:32 +0200
committer  Sander Vrijders <[email protected]>      2018-10-05 09:07:47 +0200
commit     b802b25ddfe6f1b6ecabe3ba70e3dac2e99e7a50 (patch)
tree       94e787f0f0ca1f0254b3728b0156b2e3283d8518 /src/ipcpd/normal
parent     937adca2a718b160b6d42bb8a3f28d96321fdb49 (diff)
lib: Pass qosspec at flow allocation
The flow allocator now passes the full QoS specification to the endpoint, instead of just a cube. This is a more flexible architecture, as it makes QoS cubes internal to the layers. Adds endianness transforms for the flow allocator protocol in the normal IPCP.

Signed-off-by: Dimitri Staessens <[email protected]>
Signed-off-by: Sander Vrijders <[email protected]>
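For illustration, the standalone sketch below is not part of this commit: it shows the idea behind the new wire format, copying QoS parameters field by field into a packed message in network byte order and back. It uses htonl()/htobe64()-style helpers from <arpa/inet.h> and <endian.h> (glibc) as stand-ins for the hton32()/hton64()/ntoh32()/ntoh64() macros from <ouroboros/endian.h>, and ad-hoc structs in place of the real qosspec_t and fa_msg; the field names follow the diff below.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>  /* htonl(), ntohl() */
#include <endian.h>     /* htobe64(), be64toh() (glibc) */

struct qos {                    /* stand-in for qosspec_t */
        uint32_t delay;
        uint64_t bandwidth;
        uint8_t  availability;
        uint32_t loss;
        uint32_t ber;
        uint8_t  in_order;
        uint32_t max_gap;
};

struct wire_msg {               /* stand-in for the QoS part of fa_msg */
        uint32_t delay;
        uint64_t bandwidth;
        uint8_t  availability;
        uint32_t loss;
        uint32_t ber;
        uint8_t  in_order;
        uint32_t max_gap;
} __attribute__((packed));

/* Pack host-order QoS parameters into a network-order wire message. */
static void qos_pack(const struct qos * qs, struct wire_msg * msg)
{
        msg->delay        = htonl(qs->delay);
        msg->bandwidth    = htobe64(qs->bandwidth);
        msg->availability = qs->availability;       /* single byte: no swap */
        msg->loss         = htonl(qs->loss);
        msg->ber          = htonl(qs->ber);
        msg->in_order     = qs->in_order;
        msg->max_gap      = htonl(qs->max_gap);
}

/* Unpack a network-order wire message back into host order. */
static void qos_unpack(const struct wire_msg * msg, struct qos * qs)
{
        qs->delay        = ntohl(msg->delay);
        qs->bandwidth    = be64toh(msg->bandwidth);
        qs->availability = msg->availability;
        qs->loss         = ntohl(msg->loss);
        qs->ber          = ntohl(msg->ber);
        qs->in_order     = msg->in_order;
        qs->max_gap      = ntohl(msg->max_gap);
}

int main(void)
{
        struct qos in = { 10, 1000000, 99, 1, 1000000000, 1, 100 };
        struct qos out;
        struct wire_msg m;

        qos_pack(&in, &m);
        qos_unpack(&m, &out);

        printf("delay %u -> %u, bandwidth %llu -> %llu\n",
               in.delay, out.delay,
               (unsigned long long) in.bandwidth,
               (unsigned long long) out.bandwidth);

        return 0;
}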
Diffstat (limited to 'src/ipcpd/normal')
-rw-r--r--  src/ipcpd/normal/dt.c |  2
-rw-r--r--  src/ipcpd/normal/fa.c | 57
-rw-r--r--  src/ipcpd/normal/fa.h |  2
3 files changed, 42 insertions, 19 deletions
diff --git a/src/ipcpd/normal/dt.c b/src/ipcpd/normal/dt.c
index c3f8f198..a350e4be 100644
--- a/src/ipcpd/normal/dt.c
+++ b/src/ipcpd/normal/dt.c
@@ -31,8 +31,6 @@
#define DT "dt"
#define OUROBOROS_PREFIX DT
-/* FIXME: fix #defines and remove endian.h include. */
-#include <ouroboros/endian.h>
#include <ouroboros/bitmap.h>
#include <ouroboros/errno.h>
#include <ouroboros/logs.h>
diff --git a/src/ipcpd/normal/fa.c b/src/ipcpd/normal/fa.c
index 10f0a863..4c82e0e0 100644
--- a/src/ipcpd/normal/fa.c
+++ b/src/ipcpd/normal/fa.c
@@ -57,8 +57,15 @@ struct fa_msg {
uint32_t r_eid;
uint32_t s_eid;
uint8_t code;
- uint8_t qc;
int8_t response;
+ /* QoS parameters from spec, aligned */
+ uint8_t availability;
+ uint8_t in_order;
+ uint32_t delay;
+ uint64_t bandwidth;
+ uint32_t loss;
+ uint32_t ber;
+ uint32_t max_gap;
} __attribute__((packed));
struct {
@@ -100,6 +107,7 @@ static void fa_post_sdu(void * comp,
int fd;
uint8_t * buf;
struct fa_msg * msg;
+ qosspec_t qs;
(void) comp;
@@ -142,10 +150,18 @@ static void fa_post_sdu(void * comp,
assert(ipcpi.alloc_id == -1);
+ qs.delay = ntoh32(msg->delay);
+ qs.bandwidth = ntoh64(msg->bandwidth);
+ qs.availability = msg->availability;
+ qs.loss = ntoh32(msg->loss);
+ qs.ber = ntoh32(msg->ber);
+ qs.in_order = msg->in_order;
+ qs.max_gap = ntoh32(msg->max_gap);
+
fd = ipcp_flow_req_arr(getpid(),
(uint8_t *) (msg + 1),
ipcp_dir_hash_len(),
- msg->qc);
+ qs);
if (fd < 0) {
pthread_mutex_unlock(&ipcpi.alloc_lock);
log_err("Failed to get fd for flow.");
@@ -155,8 +171,8 @@ static void fa_post_sdu(void * comp,
pthread_rwlock_wrlock(&fa.flows_lock);
- fa.r_eid[fd] = msg->s_eid;
- fa.r_addr[fd] = msg->s_addr;
+ fa.r_eid[fd] = ntoh32(msg->s_eid);
+ fa.r_addr[fd] = ntoh64(msg->s_addr);
pthread_rwlock_unlock(&fa.flows_lock);
@@ -169,14 +185,14 @@ static void fa_post_sdu(void * comp,
case FLOW_REPLY:
pthread_rwlock_wrlock(&fa.flows_lock);
- fa.r_eid[msg->r_eid] = msg->s_eid;
+ fa.r_eid[ntoh32(msg->r_eid)] = ntoh32(msg->s_eid);
- ipcp_flow_alloc_reply(msg->r_eid, msg->response);
+ ipcp_flow_alloc_reply(ntoh32(msg->r_eid), msg->response);
if (msg->response < 0)
- destroy_conn(msg->r_eid);
+ destroy_conn(ntoh32(msg->r_eid));
else
- sdu_sched_add(fa.sdu_sched, msg->r_eid);
+ sdu_sched_add(fa.sdu_sched, ntoh32(msg->r_eid));
pthread_rwlock_unlock(&fa.flows_lock);
@@ -227,11 +243,12 @@ void fa_stop(void)
int fa_alloc(int fd,
const uint8_t * dst,
- qoscube_t qc)
+ qosspec_t qs)
{
struct fa_msg * msg;
uint64_t addr;
struct shm_du_buff * sdb;
+ qoscube_t qc;
addr = dir_query(dst);
if (addr == 0)
@@ -240,14 +257,22 @@ int fa_alloc(int fd,
if (ipcp_sdb_reserve(&sdb, sizeof(*msg) + ipcp_dir_hash_len()))
return -1;
- msg = (struct fa_msg *) shm_du_buff_head(sdb);
- msg->code = FLOW_REQ;
- msg->qc = qc;
- msg->s_eid = fd;
- msg->s_addr = ipcpi.dt_addr;
+ msg = (struct fa_msg *) shm_du_buff_head(sdb);
+ msg->code = FLOW_REQ;
+ msg->s_eid = hton32(fd);
+ msg->s_addr = hton64(ipcpi.dt_addr);
+ msg->delay = hton32(qs.delay);
+ msg->bandwidth = hton64(qs.bandwidth);
+ msg->availability = qs.availability;
+ msg->loss = hton32(qs.loss);
+ msg->ber = hton32(qs.ber);
+ msg->in_order = qs.in_order;
+ msg->max_gap = hton32(qs.max_gap);
memcpy(msg + 1, dst, ipcp_dir_hash_len());
+ qc = qos_spec_to_cube(qs);
+
if (dt_write_sdu(addr, qc, fa.fd, sdb)) {
ipcp_sdb_release(sdb);
return -1;
@@ -302,8 +327,8 @@ int fa_alloc_resp(int fd,
msg = (struct fa_msg *) shm_du_buff_head(sdb);
msg->code = FLOW_REPLY;
- msg->r_eid = fa.r_eid[fd];
- msg->s_eid = fd;
+ msg->r_eid = hton32(fa.r_eid[fd]);
+ msg->s_eid = hton32(fd);
msg->response = response;
if (response < 0) {
diff --git a/src/ipcpd/normal/fa.h b/src/ipcpd/normal/fa.h
index 87819d6f..a98d834a 100644
--- a/src/ipcpd/normal/fa.h
+++ b/src/ipcpd/normal/fa.h
@@ -36,7 +36,7 @@ void fa_stop(void);
int fa_alloc(int fd,
const uint8_t * dst,
- qoscube_t qos);
+ qosspec_t qs);
int fa_alloc_resp(int fd,
int response);
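For completeness, a hypothetical caller of the changed fa_alloc() interface; this is not part of the commit, the qosspec_t field names come from the diff above, and the values and destination hash are made up.

/* Hypothetical usage sketch (not in this commit): request a flow with a
 * full QoS specification instead of a qoscube_t. Field names follow the
 * diff above; values and the destination hash are illustrative. */
static int request_flow_example(int fd, const uint8_t * dst_hash)
{
        qosspec_t qs;

        qs.delay        = 20;
        qs.bandwidth    = 1000000;
        qs.availability = 99;
        qs.loss         = 1;
        qs.ber          = 1000000000;
        qs.in_order     = 1;
        qs.max_gap      = 100;

        return fa_alloc(fd, dst_hash, qs);
}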