path: root/src/ipcpd/normal/fmgr.c
author     Sander Vrijders <[email protected]>    2017-03-03 10:31:03 +0000
committer  dimitri staessens <[email protected]>  2017-03-03 10:31:03 +0000
commit     b1b59cc4642faa99514f2288ba1bb5324a79850f (patch)
tree       cec27d3c2064f0c0bcb564060d9d9012f819b22f /src/ipcpd/normal/fmgr.c
parent     46c2f9d5363cdff2d99cf1b1c4a41c5bf97d2c03 (diff)
parent     a409fd81dfc6d22f9a287f15394b86490dea5273 (diff)
download   ouroboros-b1b59cc4642faa99514f2288ba1bb5324a79850f.tar.gz
           ouroboros-b1b59cc4642faa99514f2288ba1bb5324a79850f.zip
Merged in sandervrijders/ouroboros/be-ae-conn (pull request #396)
ipcpd: normal: Refactor application entities and add neighbors struct
Diffstat (limited to 'src/ipcpd/normal/fmgr.c')
-rw-r--r--  src/ipcpd/normal/fmgr.c  283
1 file changed, 171 insertions, 112 deletions
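
Note on the change: this commit drops the flow manager's ad-hoc nm1 flow list and fixed next-hop fd in favour of a neighbors struct with a registered notifier callback (fmgr_neighbor_event) and per-QoS-cube PFF/routing instances that resolve the next hop with pff_nhop(). As orientation before the diff, below is a minimal, self-contained C sketch of that notifier pattern. The types and function names (nb_event, nb_notifier, nbs_reg_notifier) mirror those in the diff but are redefined locally as stand-ins, so this is an illustration of the pattern rather than the actual Ouroboros API.

/* Minimal sketch of the neighbor-notifier pattern introduced by this commit.
 * All types here are local stand-ins: the real fmgr registers
 * fmgr_neighbor_event() with nbs_reg_notifier() and adds/removes the flow
 * descriptor from the flow set of the matching QoS cube. */
#include <stdio.h>

enum nb_event { NEIGHBOR_ADDED, NEIGHBOR_REMOVED };

struct conn { int fd; };                     /* stand-in for struct conn */

struct nb_notifier {                         /* stand-in for struct nb_notifier */
        int (* notify_call)(enum nb_event event, struct conn conn);
};

static struct nb_notifier * registered;      /* single registered observer */

static int nbs_reg_notifier(struct nb_notifier * notifier)
{
        registered = notifier;               /* real API keeps a list of notifiers */
        return 0;
}

static int on_neighbor_event(enum nb_event event, struct conn conn)
{
        switch (event) {
        case NEIGHBOR_ADDED:                 /* fmgr would flow_set_add() here */
                printf("neighbor added on fd %d\n", conn.fd);
                break;
        case NEIGHBOR_REMOVED:               /* fmgr would flow_set_del() here */
                printf("neighbor removed on fd %d\n", conn.fd);
                break;
        }
        return 0;
}

int main(void)
{
        struct nb_notifier notifier = { .notify_call = on_neighbor_event };
        struct conn conn = { .fd = 42 };

        nbs_reg_notifier(&notifier);

        /* The neighbors component invokes the callback when flows come and go. */
        registered->notify_call(NEIGHBOR_ADDED, conn);
        registered->notify_call(NEIGHBOR_REMOVED, conn);

        return 0;
}
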
diff --git a/src/ipcpd/normal/fmgr.c b/src/ipcpd/normal/fmgr.c
index 34724ddd..b7a99f6c 100644
--- a/src/ipcpd/normal/fmgr.c
+++ b/src/ipcpd/normal/fmgr.c
@@ -31,12 +31,16 @@
#include <ouroboros/cacep.h>
#include <ouroboros/rib.h>
+#include "connmgr.h"
#include "fmgr.h"
#include "frct.h"
#include "ipcp.h"
#include "shm_pci.h"
-#include "gam.h"
#include "ribconfig.h"
+#include "pff.h"
+#include "neighbors.h"
+#include "gam.h"
+#include "routing.h"
#include <stdlib.h>
#include <stdbool.h>
@@ -48,19 +52,7 @@ typedef FlowAllocMsg flow_alloc_msg_t;
#define FD_UPDATE_TIMEOUT 100000 /* nanoseconds */
-struct nm1_flow {
- struct list_head next;
- int fd;
- qosspec_t qs;
- struct conn_info * info;
-};
-
struct {
- flow_set_t * nm1_set[QOS_CUBE_MAX];
- fqueue_t * nm1_fqs[QOS_CUBE_MAX];
- struct list_head nm1_flows;
- pthread_rwlock_t nm1_flows_lock;
-
flow_set_t * np1_set[QOS_CUBE_MAX];
fqueue_t * np1_fqs[QOS_CUBE_MAX];
pthread_rwlock_t np1_flows_lock;
@@ -69,15 +61,43 @@ struct {
int np1_cep_id_to_fd[IPCPD_MAX_CONNS];
pthread_t np1_sdu_reader;
+
+ flow_set_t * nm1_set[QOS_CUBE_MAX];
+ fqueue_t * nm1_fqs[QOS_CUBE_MAX];
pthread_t nm1_sdu_reader;
- pthread_t nm1_flow_wait;
- /* FIXME: Replace with PFF */
- int fd;
+ struct pff * pff[QOS_CUBE_MAX];
+ struct routing * routing[QOS_CUBE_MAX];
struct gam * gam;
+ struct nbs * nbs;
+ struct ae * ae;
+
+ struct nb_notifier nb_notifier;
} fmgr;
+static int fmgr_neighbor_event(enum nb_event event,
+ struct conn conn)
+{
+ qoscube_t cube;
+
+ /* We are only interested in neighbors being added and removed. */
+ switch (event) {
+ case NEIGHBOR_ADDED:
+ ipcp_flow_get_qoscube(conn.flow_info.fd, &cube);
+ flow_set_add(fmgr.nm1_set[cube], conn.flow_info.fd);
+ break;
+ case NEIGHBOR_REMOVED:
+ ipcp_flow_get_qoscube(conn.flow_info.fd, &cube);
+ flow_set_del(fmgr.nm1_set[cube], conn.flow_info.fd);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static void * fmgr_np1_sdu_reader(void * o)
{
struct shm_du_buff * sdb;
@@ -171,12 +191,20 @@ void * fmgr_nm1_sdu_reader(void * o)
continue;
}
- /*
- * FIXME: Dropping for now, since
- * we don't have a PFF yet
- */
- ipcp_flow_del(sdb);
- continue;
+ fd = pff_nhop(fmgr.pff[i], pci.dst_addr);
+ if (fd < 0) {
+ log_err("No next hop for %lu",
+ pci.dst_addr);
+ ipcp_flow_del(sdb);
+ continue;
+ }
+
+ if (ipcp_flow_write(fd, sdb)) {
+ log_err("Failed to write SDU to fd %d.",
+ fd);
+ ipcp_flow_del(sdb);
+ continue;
+ }
}
shm_pci_shrink(sdb);
@@ -192,49 +220,6 @@ void * fmgr_nm1_sdu_reader(void * o)
return (void *) 0;
}
-static void * fmgr_nm1_flow_wait(void * o)
-{
- qoscube_t cube;
- struct conn_info * info;
- int fd;
- qosspec_t qs;
- struct nm1_flow * flow;
-
- (void) o;
-
- while (true) {
- if (gam_flow_wait(fmgr.gam, &fd, &info, &qs)) {
- log_err("Failed to get next flow descriptor.");
- continue;
- }
-
- ipcp_flow_get_qoscube(fd, &cube);
- flow_set_add(fmgr.nm1_set[cube], fd);
-
- /* FIXME: Temporary, until we have a PFF */
- fmgr.fd = fd;
-
- pthread_rwlock_wrlock(&fmgr.nm1_flows_lock);
- flow = malloc(sizeof(*flow));
- if (flow == NULL) {
- free(info);
- pthread_rwlock_unlock(&fmgr.nm1_flows_lock);
- continue;
- }
-
- flow->info = info;
- flow->fd = fd;
- flow->qs = qs;
-
- list_head_init(&flow->next);
- list_add(&flow->next, &fmgr.nm1_flows);
-
- pthread_rwlock_unlock(&fmgr.nm1_flows_lock);
- }
-
- return (void *) 0;
-}
-
static void fmgr_destroy_flows(void)
{
int i;
@@ -247,12 +232,29 @@ static void fmgr_destroy_flows(void)
}
}
-int fmgr_init(void)
+static void fmgr_destroy_routing(void)
{
- enum pol_gam pg;
+ int i;
+
+ for (i = 0; i < QOS_CUBE_MAX; ++i)
+ routing_destroy(fmgr.routing[i]);
+}
+static void fmgr_destroy_pff(void)
+{
int i;
+ for (i = 0; i < QOS_CUBE_MAX; ++i)
+ pff_destroy(fmgr.pff[i]);
+}
+
+int fmgr_init(void)
+{
+ enum pol_gam pg;
+ int i;
+ int j;
+ struct conn_info info;
+
for (i = 0; i < AP_MAX_FLOWS; ++i)
fmgr.np1_fd_to_cep_id[i] = INVALID_CEP_ID;
@@ -288,63 +290,116 @@ int fmgr_init(void)
if (rib_read(BOOT_PATH "/dt/gam/type", &pg, sizeof(pg))
!= sizeof(pg)) {
log_err("Failed to read policy for ribmgr gam.");
+ fmgr_destroy_flows();
return -1;
}
- fmgr.gam = gam_create(pg);
- if (fmgr.gam == NULL) {
- log_err("Failed to create graph adjacency manager.");
+ strcpy(info.ae_name, DT_AE);
+ strcpy(info.protocol, FRCT_PROTO);
+ info.pref_version = 1;
+ info.pref_syntax = PROTO_FIXED;
+ info.addr = ipcpi.dt_addr;
+
+ fmgr.ae = connmgr_ae_create(info);
+ if (fmgr.ae == NULL) {
+ log_err("Failed to create AE struct.");
fmgr_destroy_flows();
return -1;
}
- list_head_init(&fmgr.nm1_flows);
+ fmgr.nbs = nbs_create();
+ if (fmgr.nbs == NULL) {
+ log_err("Failed to create neighbors struct.");
+ fmgr_destroy_flows();
+ connmgr_ae_destroy(fmgr.ae);
+ return -1;
+ }
- pthread_rwlock_init(&fmgr.nm1_flows_lock, NULL);
- pthread_rwlock_init(&fmgr.np1_flows_lock, NULL);
+ fmgr.nb_notifier.notify_call = fmgr_neighbor_event;
+ if (nbs_reg_notifier(fmgr.nbs, &fmgr.nb_notifier)) {
+ log_err("Failed to register notifier.");
+ nbs_destroy(fmgr.nbs);
+ fmgr_destroy_flows();
+ connmgr_ae_destroy(fmgr.ae);
+ return -1;
+ }
+
+ if (pthread_rwlock_init(&fmgr.np1_flows_lock, NULL)) {
+ gam_destroy(fmgr.gam);
+ nbs_unreg_notifier(fmgr.nbs, &fmgr.nb_notifier);
+ nbs_destroy(fmgr.nbs);
+ fmgr_destroy_flows();
+ connmgr_ae_destroy(fmgr.ae);
+ return -1;
+ }
+
+ for (i = 0; i < QOS_CUBE_MAX; ++i) {
+ fmgr.pff[i] = pff_create();
+ if (fmgr.pff[i] == NULL) {
+ for (j = 0; j < i; ++j)
+ pff_destroy(fmgr.pff[j]);
+ pthread_rwlock_destroy(&fmgr.np1_flows_lock);
+ nbs_unreg_notifier(fmgr.nbs, &fmgr.nb_notifier);
+ nbs_destroy(fmgr.nbs);
+ fmgr_destroy_flows();
+ connmgr_ae_destroy(fmgr.ae);
+ return -1;
+ }
+
+ fmgr.routing[i] = routing_create(fmgr.pff[i], fmgr.nbs);
+ if (fmgr.routing[i] == NULL) {
+ for (j = 0; j < i; ++j)
+ routing_destroy(fmgr.routing[j]);
+ fmgr_destroy_pff();
+ pthread_rwlock_destroy(&fmgr.np1_flows_lock);
+ nbs_unreg_notifier(fmgr.nbs, &fmgr.nb_notifier);
+ nbs_destroy(fmgr.nbs);
+ fmgr_destroy_flows();
+ connmgr_ae_destroy(fmgr.ae);
+ return -1;
+ }
+ }
+
+ fmgr.gam = gam_create(pg, fmgr.nbs, fmgr.ae);
+ if (fmgr.gam == NULL) {
+ log_err("Failed to init dt graph adjacency manager.");
+ fmgr_destroy_routing();
+ fmgr_destroy_pff();
+ pthread_rwlock_destroy(&fmgr.np1_flows_lock);
+ nbs_unreg_notifier(fmgr.nbs, &fmgr.nb_notifier);
+ nbs_destroy(fmgr.nbs);
+ fmgr_destroy_flows();
+ connmgr_ae_destroy(fmgr.ae);
+ return -1;
+ }
pthread_create(&fmgr.np1_sdu_reader, NULL, fmgr_np1_sdu_reader, NULL);
pthread_create(&fmgr.nm1_sdu_reader, NULL, fmgr_nm1_sdu_reader, NULL);
- pthread_create(&fmgr.nm1_flow_wait, NULL, fmgr_nm1_flow_wait, NULL);
return 0;
}
void fmgr_fini()
{
- struct list_head * pos = NULL;
- struct list_head * n = NULL;
- qoscube_t cube;
-
pthread_cancel(fmgr.np1_sdu_reader);
pthread_cancel(fmgr.nm1_sdu_reader);
- pthread_cancel(fmgr.nm1_flow_wait);
pthread_join(fmgr.np1_sdu_reader, NULL);
pthread_join(fmgr.nm1_sdu_reader, NULL);
- pthread_join(fmgr.nm1_flow_wait, NULL);
- gam_destroy(fmgr.gam);
+ nbs_unreg_notifier(fmgr.nbs, &fmgr.nb_notifier);
- pthread_rwlock_wrlock(&fmgr.nm1_flows_lock);
-
- list_for_each_safe(pos, n, &fmgr.nm1_flows) {
- struct nm1_flow * flow =
- list_entry(pos, struct nm1_flow, next);
- list_del(&flow->next);
- flow_dealloc(flow->fd);
- ipcp_flow_get_qoscube(flow->fd, &cube);
- flow_set_del(fmgr.nm1_set[cube], flow->fd);
- free(flow->info);
- free(flow);
- }
+ gam_destroy(fmgr.gam);
- pthread_rwlock_unlock(&fmgr.nm1_flows_lock);
+ fmgr_destroy_routing();
- pthread_rwlock_destroy(&fmgr.nm1_flows_lock);
- pthread_rwlock_destroy(&fmgr.np1_flows_lock);
+ fmgr_destroy_pff();
fmgr_destroy_flows();
+
+ connmgr_ae_destroy(fmgr.ae);
+
+ nbs_destroy(fmgr.nbs);
}
int fmgr_np1_alloc(int fd,
@@ -601,24 +656,20 @@ int fmgr_np1_post_sdu(cep_id_t cep_id,
return 0;
}
-int fmgr_nm1_flow_arr(int fd,
- qosspec_t qs)
-{
- assert(fmgr.gam);
-
- if (gam_flow_arr(fmgr.gam, fd, qs)) {
- log_err("Failed to hand to graph adjacency manager.");
- return -1;
- }
-
- return 0;
-}
-
int fmgr_nm1_write_sdu(struct pci * pci,
struct shm_du_buff * sdb)
{
+ int fd;
+
if (pci == NULL || sdb == NULL)
+ return -EINVAL;
+
+ fd = pff_nhop(fmgr.pff[pci->qos_id], pci->dst_addr);
+ if (fd < 0) {
+ log_err("Could not get nhop for address %lu", pci->dst_addr);
+ ipcp_flow_del(sdb);
return -1;
+ }
if (shm_pci_ser(sdb, pci)) {
log_err("Failed to serialize PDU.");
@@ -626,8 +677,8 @@ int fmgr_nm1_write_sdu(struct pci * pci,
return -1;
}
- if (ipcp_flow_write(fmgr.fd, sdb)) {
- log_err("Failed to write SDU to fd %d.", fmgr.fd);
+ if (ipcp_flow_write(fd, sdb)) {
+ log_err("Failed to write SDU to fd %d.", fd);
ipcp_flow_del(sdb);
return -1;
}
@@ -639,9 +690,17 @@ int fmgr_nm1_write_buf(struct pci * pci,
buffer_t * buf)
{
buffer_t * buffer;
+ int fd;
if (pci == NULL || buf == NULL || buf->data == NULL)
+ return -EINVAL;
+
+ fd = pff_nhop(fmgr.pff[pci->qos_id], pci->dst_addr);
+ if (fd < 0) {
+ log_err("Could not get nhop for address %lu", pci->dst_addr);
+ free(buf->data);
return -1;
+ }
buffer = shm_pci_ser_buf(buf, pci);
if (buffer == NULL) {
@@ -650,7 +709,7 @@ int fmgr_nm1_write_buf(struct pci * pci,
return -1;
}
- if (flow_write(fmgr.fd, buffer->data, buffer->len) == -1) {
+ if (flow_write(fd, buffer->data, buffer->len) == -1) {
log_err("Failed to write buffer to fd.");
free(buffer);
return -1;