From a409fd81dfc6d22f9a287f15394b86490dea5273 Mon Sep 17 00:00:00 2001
From: Sander Vrijders
Date: Thu, 23 Feb 2017 14:31:31 +0100
Subject: ipcpd: normal: Refactor application entities and add neighbors struct

This refactors the different Application Entities of the normal IPCP.
They all listen to and use the connection manager to establish new
application connections.

This commit also adds a neighbors struct to the normal IPCP. It
contains neighbor structs that each hold an application connection to
a neighbor. Notifiers can be registered in case a neighbor changes
(added, removed, QoS changed). The flow manager has an instance of
this neighbors struct and listens to these events to update its flow
set. The routing component also listens to these events so that it
can update the FSDB if needed. The flow manager now also creates the
PFF instances and the routing instances per QoS cube. The RIB manager
also uses an instance of the neighbors struct and listens to neighbor
events as well.
---
 src/ipcpd/normal/fmgr.c | 283 +++++++++++++++++++++++++++++-------------------
 1 file changed, 171 insertions(+), 112 deletions(-)

diff --git a/src/ipcpd/normal/fmgr.c b/src/ipcpd/normal/fmgr.c
index 34724ddd..b7a99f6c 100644
--- a/src/ipcpd/normal/fmgr.c
+++ b/src/ipcpd/normal/fmgr.c
@@ -31,12 +31,16 @@
 #include
 #include
 
+#include "connmgr.h"
 #include "fmgr.h"
 #include "frct.h"
 #include "ipcp.h"
 #include "shm_pci.h"
-#include "gam.h"
 #include "ribconfig.h"
+#include "pff.h"
+#include "neighbors.h"
+#include "gam.h"
+#include "routing.h"
 
 #include
 #include
@@ -48,19 +52,7 @@ typedef FlowAllocMsg flow_alloc_msg_t;
 
 #define FD_UPDATE_TIMEOUT 100000 /* nanoseconds */
 
-struct nm1_flow {
-        struct list_head   next;
-        int                fd;
-        qosspec_t          qs;
-        struct conn_info * info;
-};
-
 struct {
-        flow_set_t *     nm1_set[QOS_CUBE_MAX];
-        fqueue_t *       nm1_fqs[QOS_CUBE_MAX];
-        struct list_head nm1_flows;
-        pthread_rwlock_t nm1_flows_lock;
-
         flow_set_t *     np1_set[QOS_CUBE_MAX];
         fqueue_t *       np1_fqs[QOS_CUBE_MAX];
         pthread_rwlock_t np1_flows_lock;
@@ -69,15 +61,43 @@ struct {
         int              np1_cep_id_to_fd[IPCPD_MAX_CONNS];
 
         pthread_t        np1_sdu_reader;
+
+        flow_set_t *     nm1_set[QOS_CUBE_MAX];
+        fqueue_t *       nm1_fqs[QOS_CUBE_MAX];
         pthread_t        nm1_sdu_reader;
-        pthread_t        nm1_flow_wait;
 
-        /* FIXME: Replace with PFF */
-        int fd;
+        struct pff *     pff[QOS_CUBE_MAX];
+        struct routing * routing[QOS_CUBE_MAX];
 
         struct gam *     gam;
+
+        struct nbs *     nbs;
+        struct ae *      ae;
+
+        struct nb_notifier nb_notifier;
 } fmgr;
 
+static int fmgr_neighbor_event(enum nb_event event,
+                               struct conn   conn)
+{
+        qoscube_t cube;
+
+        /* We are only interested in neighbors being added and removed. */
+        switch (event) {
+        case NEIGHBOR_ADDED:
+                ipcp_flow_get_qoscube(conn.flow_info.fd, &cube);
+                flow_set_add(fmgr.nm1_set[cube], conn.flow_info.fd);
+                break;
+        case NEIGHBOR_REMOVED:
+                ipcp_flow_get_qoscube(conn.flow_info.fd, &cube);
+                flow_set_del(fmgr.nm1_set[cube], conn.flow_info.fd);
+                break;
+        default:
+                break;
+        }
+
+        return 0;
+}
+
 static void * fmgr_np1_sdu_reader(void * o)
 {
         struct shm_du_buff * sdb;
@@ -171,12 +191,20 @@
                                 continue;
                         }
 
-                        /*
-                         * FIXME: Dropping for now, since
-                         * we don't have a PFF yet
-                         */
-                        ipcp_flow_del(sdb);
-                        continue;
+                        fd = pff_nhop(fmgr.pff[i], pci.dst_addr);
+                        if (fd < 0) {
+                                log_err("No next hop for %lu",
+                                        pci.dst_addr);
+                                ipcp_flow_del(sdb);
+                                continue;
+                        }
+
+                        if (ipcp_flow_write(fd, sdb)) {
+                                log_err("Failed to write SDU to fd %d.",
+                                        fd);
+                                ipcp_flow_del(sdb);
+                                continue;
+                        }
                 }
 
                 shm_pci_shrink(sdb);
@@ -192,49 +220,6 @@
         return (void *) 0;
 }
 
-static void * fmgr_nm1_flow_wait(void * o)
-{
-        qoscube_t          cube;
-        struct conn_info * info;
-        int                fd;
-        qosspec_t          qs;
-        struct nm1_flow *  flow;
-
-        (void) o;
-
-        while (true) {
-                if (gam_flow_wait(fmgr.gam, &fd, &info, &qs)) {
-                        log_err("Failed to get next flow descriptor.");
-                        continue;
-                }
-
-                ipcp_flow_get_qoscube(fd, &cube);
-                flow_set_add(fmgr.nm1_set[cube], fd);
-
-                /* FIXME: Temporary, until we have a PFF */
-                fmgr.fd = fd;
-
-                pthread_rwlock_wrlock(&fmgr.nm1_flows_lock);
-                flow = malloc(sizeof(*flow));
-                if (flow == NULL) {
-                        free(info);
-                        pthread_rwlock_unlock(&fmgr.nm1_flows_lock);
-                        continue;
-                }
-
-                flow->info = info;
-                flow->fd = fd;
-                flow->qs = qs;
-
-                list_head_init(&flow->next);
-                list_add(&flow->next, &fmgr.nm1_flows);
-
-                pthread_rwlock_unlock(&fmgr.nm1_flows_lock);
-        }
-
-        return (void *) 0;
-}
-
 static void fmgr_destroy_flows(void)
 {
         int i;
@@ -247,12 +232,29 @@
         }
 }
 
-int fmgr_init(void)
+static void fmgr_destroy_routing(void)
 {
-        enum pol_gam pg;
+        int i;
+
+        for (i = 0; i < QOS_CUBE_MAX; ++i)
+                routing_destroy(fmgr.routing[i]);
+}
 
+static void fmgr_destroy_pff(void)
+{
         int i;
 
+        for (i = 0; i < QOS_CUBE_MAX; ++i)
+                pff_destroy(fmgr.pff[i]);
+}
+
+int fmgr_init(void)
+{
+        enum pol_gam     pg;
+        int              i;
+        int              j;
+        struct conn_info info;
+
         for (i = 0; i < AP_MAX_FLOWS; ++i)
                 fmgr.np1_fd_to_cep_id[i] = INVALID_CEP_ID;
@@ -288,63 +290,116 @@
         if (rib_read(BOOT_PATH "/dt/gam/type", &pg, sizeof(pg)) != sizeof(pg)) {
                 log_err("Failed to read policy for ribmgr gam.");
+                fmgr_destroy_flows();
                 return -1;
         }
 
-        fmgr.gam = gam_create(pg);
-        if (fmgr.gam == NULL) {
-                log_err("Failed to create graph adjacency manager.");
+        strcpy(info.ae_name, DT_AE);
+        strcpy(info.protocol, FRCT_PROTO);
+        info.pref_version = 1;
+        info.pref_syntax = PROTO_FIXED;
+        info.addr = ipcpi.dt_addr;
+
+        fmgr.ae = connmgr_ae_create(info);
+        if (fmgr.ae == NULL) {
+                log_err("Failed to create AE struct.");
                 fmgr_destroy_flows();
                 return -1;
         }
 
-        list_head_init(&fmgr.nm1_flows);
+        fmgr.nbs = nbs_create();
+        if (fmgr.nbs == NULL) {
+                log_err("Failed to create neighbors struct.");
+                fmgr_destroy_flows();
+                connmgr_ae_destroy(fmgr.ae);
+                return -1;
+        }
 
-        pthread_rwlock_init(&fmgr.nm1_flows_lock, NULL);
-        pthread_rwlock_init(&fmgr.np1_flows_lock, NULL);
+        fmgr.nb_notifier.notify_call = fmgr_neighbor_event;
+        if (nbs_reg_notifier(fmgr.nbs, &fmgr.nb_notifier)) {
+                log_err("Failed to register notifier.");
+                nbs_destroy(fmgr.nbs);
+                fmgr_destroy_flows();
+                connmgr_ae_destroy(fmgr.ae);
+                return -1;
+        }
+
+        if (pthread_rwlock_init(&fmgr.np1_flows_lock, NULL)) {
+                gam_destroy(fmgr.gam);
+                nbs_unreg_notifier(fmgr.nbs, &fmgr.nb_notifier);
+                nbs_destroy(fmgr.nbs);
+                fmgr_destroy_flows();
+                connmgr_ae_destroy(fmgr.ae);
+                return -1;
+        }
+
+        for (i = 0; i < QOS_CUBE_MAX; ++i) {
+                fmgr.pff[i] = pff_create();
+                if (fmgr.pff[i] == NULL) {
+                        for (j = 0; j < i; ++j)
+                                pff_destroy(fmgr.pff[j]);
+                        pthread_rwlock_destroy(&fmgr.np1_flows_lock);
+                        nbs_unreg_notifier(fmgr.nbs, &fmgr.nb_notifier);
+                        nbs_destroy(fmgr.nbs);
+                        fmgr_destroy_flows();
+                        connmgr_ae_destroy(fmgr.ae);
+                        return -1;
+                }
+
+                fmgr.routing[i] = routing_create(fmgr.pff[i], fmgr.nbs);
+                if (fmgr.routing[i] == NULL) {
+                        for (j = 0; j < i; ++j)
+                                routing_destroy(fmgr.routing[j]);
+                        fmgr_destroy_pff();
+                        pthread_rwlock_destroy(&fmgr.np1_flows_lock);
+                        nbs_unreg_notifier(fmgr.nbs, &fmgr.nb_notifier);
+                        nbs_destroy(fmgr.nbs);
+                        fmgr_destroy_flows();
+                        connmgr_ae_destroy(fmgr.ae);
+                        return -1;
+                }
+        }
+
+        fmgr.gam = gam_create(pg, fmgr.nbs, fmgr.ae);
+        if (fmgr.gam == NULL) {
+                log_err("Failed to init dt graph adjacency manager.");
+                fmgr_destroy_routing();
+                fmgr_destroy_pff();
+                pthread_rwlock_destroy(&fmgr.np1_flows_lock);
+                nbs_unreg_notifier(fmgr.nbs, &fmgr.nb_notifier);
+                nbs_destroy(fmgr.nbs);
+                fmgr_destroy_flows();
+                connmgr_ae_destroy(fmgr.ae);
+                return -1;
+        }
 
         pthread_create(&fmgr.np1_sdu_reader, NULL, fmgr_np1_sdu_reader, NULL);
         pthread_create(&fmgr.nm1_sdu_reader, NULL, fmgr_nm1_sdu_reader, NULL);
-        pthread_create(&fmgr.nm1_flow_wait, NULL, fmgr_nm1_flow_wait, NULL);
 
         return 0;
 }
 
 void fmgr_fini()
 {
-        struct list_head * pos = NULL;
-        struct list_head * n = NULL;
-        qoscube_t cube;
-
         pthread_cancel(fmgr.np1_sdu_reader);
         pthread_cancel(fmgr.nm1_sdu_reader);
-        pthread_cancel(fmgr.nm1_flow_wait);
 
         pthread_join(fmgr.np1_sdu_reader, NULL);
         pthread_join(fmgr.nm1_sdu_reader, NULL);
-        pthread_join(fmgr.nm1_flow_wait, NULL);
 
-        gam_destroy(fmgr.gam);
+        nbs_unreg_notifier(fmgr.nbs, &fmgr.nb_notifier);
 
-        pthread_rwlock_wrlock(&fmgr.nm1_flows_lock);
-
-        list_for_each_safe(pos, n, &fmgr.nm1_flows) {
-                struct nm1_flow * flow =
-                        list_entry(pos, struct nm1_flow, next);
-                list_del(&flow->next);
-                flow_dealloc(flow->fd);
-                ipcp_flow_get_qoscube(flow->fd, &cube);
-                flow_set_del(fmgr.nm1_set[cube], flow->fd);
-                free(flow->info);
-                free(flow);
-        }
+        gam_destroy(fmgr.gam);
 
-        pthread_rwlock_unlock(&fmgr.nm1_flows_lock);
+        fmgr_destroy_routing();
 
-        pthread_rwlock_destroy(&fmgr.nm1_flows_lock);
-        pthread_rwlock_destroy(&fmgr.np1_flows_lock);
+        fmgr_destroy_pff();
 
         fmgr_destroy_flows();
+
+        connmgr_ae_destroy(fmgr.ae);
+
+        nbs_destroy(fmgr.nbs);
 }
 
 int fmgr_np1_alloc(int fd,
@@ -601,24 +656,20 @@
         return 0;
 }
 
-int fmgr_nm1_flow_arr(int fd,
-                      qosspec_t qs)
-{
-        assert(fmgr.gam);
-
-        if (gam_flow_arr(fmgr.gam, fd, qs)) {
-                log_err("Failed to hand to graph adjacency manager.");
-                return -1;
-        }
-
-        return 0;
-}
-
 int fmgr_nm1_write_sdu(struct pci *         pci,
                        struct shm_du_buff * sdb)
 {
+        int fd;
+
         if (pci == NULL || sdb == NULL)
+                return -EINVAL;
+
+        fd = pff_nhop(fmgr.pff[pci->qos_id], pci->dst_addr);
+        if (fd < 0) {
+                log_err("Could not get nhop for address %lu", pci->dst_addr);
+                ipcp_flow_del(sdb);
                 return -1;
+        }
 
         if (shm_pci_ser(sdb, pci)) {
                 log_err("Failed to serialize PDU.");
@@ -626,8 +677,8 @@ int fmgr_nm1_write_sdu(struct pci * pci,
                 return -1;
         }
 
-        if (ipcp_flow_write(fmgr.fd, sdb)) {
-                log_err("Failed to write SDU to fd %d.", fmgr.fd);
+        if (ipcp_flow_write(fd, sdb)) {
+                log_err("Failed to write SDU to fd %d.", fd);
                 ipcp_flow_del(sdb);
                 return -1;
         }
@@ -639,9 +690,17 @@ int fmgr_nm1_write_buf(struct pci * pci,
                        buffer_t *   buf)
 {
         buffer_t * buffer;
+        int        fd;
 
         if (pci == NULL || buf == NULL || buf->data == NULL)
+                return -EINVAL;
+
+        fd = pff_nhop(fmgr.pff[pci->qos_id], pci->dst_addr);
+        if (fd < 0) {
+                log_err("Could not get nhop for address %lu", pci->dst_addr);
+                free(buf->data);
                 return -1;
+        }
 
         buffer = shm_pci_ser_buf(buf, pci);
         if (buffer == NULL) {
@@ -650,7 +709,7 @@ int fmgr_nm1_write_buf(struct pci * pci,
                 return -1;
         }
 
-        if (flow_write(fmgr.fd, buffer->data, buffer->len) == -1) {
+        if (flow_write(fd, buffer->data, buffer->len) == -1) {
                 log_err("Failed to write buffer to fd.");
                 free(buffer);
                 return -1;
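
Note on the neighbor-notifier pattern introduced by this patch: the flow manager
subscribes to neighbor events by filling in a struct nb_notifier with a
notify_call and handing it to nbs_reg_notifier(); per the commit message, the
routing component and the RIB manager hook in the same way. Below is a minimal
sketch of such a subscriber, using only names that appear in this patch
(nbs_create(), nbs_reg_notifier(), nbs_unreg_notifier(), nbs_destroy(),
NEIGHBOR_ADDED/NEIGHBOR_REMOVED, conn.flow_info.fd); the comp_* identifiers are
hypothetical and the real declarations live in neighbors.h:

#include "neighbors.h"   /* struct nbs, struct nb_notifier, enum nb_event */

static struct nbs *       comp_nbs;      /* this component's neighbors struct */
static struct nb_notifier comp_notifier; /* holds the registered callback     */

/* Called by the neighbors struct whenever a neighbor changes. */
static int comp_neighbor_event(enum nb_event event,
                               struct conn   conn)
{
        switch (event) {
        case NEIGHBOR_ADDED:
                /* start using the flow behind conn.flow_info.fd */
                break;
        case NEIGHBOR_REMOVED:
                /* stop using conn.flow_info.fd */
                break;
        default:
                /* other changes (e.g. a QoS update) ignored here */
                break;
        }

        return 0;
}

static int comp_init(void)
{
        comp_nbs = nbs_create();
        if (comp_nbs == NULL)
                return -1;

        comp_notifier.notify_call = comp_neighbor_event;
        if (nbs_reg_notifier(comp_nbs, &comp_notifier)) {
                nbs_destroy(comp_nbs);
                return -1;
        }

        return 0;
}

static void comp_fini(void)
{
        /* Mirror fmgr_fini(): unregister before destroying the struct. */
        nbs_unreg_notifier(comp_nbs, &comp_notifier);
        nbs_destroy(comp_nbs);
}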
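
On the data path, each QoS cube now gets its own PFF (kept up to date by the
matching routing instance), and the flow manager resolves the next hop per SDU
instead of writing everything to the old single fmgr.fd. A condensed sketch of
that lookup, using only calls already used in the patch (pff_nhop(),
shm_pci_ser(), ipcp_flow_write(), ipcp_flow_del()); the helper name send_nm1()
is hypothetical:

/* Hypothetical helper condensing the forwarding step of fmgr_nm1_write_sdu():
 * pick the PFF that matches the SDU's QoS cube, resolve the next hop by
 * destination address, prepend the PCI and write to the resulting fd. */
static int send_nm1(struct pci *         pci,
                    struct shm_du_buff * sdb)
{
        int fd = pff_nhop(fmgr.pff[pci->qos_id], pci->dst_addr);

        if (fd < 0) {
                ipcp_flow_del(sdb);  /* no next hop known: drop the SDU */
                return -1;
        }

        if (shm_pci_ser(sdb, pci)) { /* serialize the PCI into the buffer */
                ipcp_flow_del(sdb);
                return -1;
        }

        return ipcp_flow_write(fd, sdb);
}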