author      Sander Vrijders <[email protected]>    2017-04-05 14:28:24 +0200
committer   Sander Vrijders <[email protected]>    2017-04-05 14:28:24 +0200
commit      c5b092665c219c679ede91b3dc816c61f2f9dabe
tree        e60083545da6ce0dc5fdacd06610d907b90c5aee
parent      fa1590539e66c17902bb4f09221e6447b3233bfb
ipcpd: Add lock for allocation requests
This adds a lock to prevent a race condition between flow_req_arr and
flow_alloc_resp.
-rw-r--r--   src/ipcpd/ipcp.c               | 148
-rw-r--r--   src/ipcpd/ipcp.h               |   2
-rw-r--r--   src/ipcpd/local/main.c         |  19
-rw-r--r--   src/ipcpd/normal/fmgr.c        |   3
-rw-r--r--   src/ipcpd/shim-eth-llc/main.c  |   6
-rw-r--r--   src/ipcpd/shim-udp/main.c      |   4
6 files changed, 79 insertions, 103 deletions
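
The race closed here sits between the request path, where a shim or the normal IPCP calls ipcp_flow_req_arr() and then records its per-flow state, and the response path, where ipcp_main_loop() calls ipcp_flow_alloc_resp(); without serialisation the response can be handled before the request side has finished publishing its state. The following is a minimal, self-contained sketch of that locking pattern only; the state variable and stand-in functions are hypothetical, not the ouroboros API.

/*
 * Sketch only: hypothetical stand-ins for the request and response paths.
 * The single allocation mutex is the idea taken from this patch.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;
static int flow_state_ready;    /* per-flow state both paths race on */

/* Request side: stands in for the ipcp_flow_req_arr() callers in the shims. */
static int handle_flow_req_arr(void)
{
        pthread_mutex_lock(&alloc_lock);
        /* ... forward the request and record the new flow ... */
        flow_state_ready = 1;
        pthread_mutex_unlock(&alloc_lock);

        return 0;
}

/* Response side: stands in for ipcp_flow_alloc_resp() in ipcp_main_loop(). */
static int handle_flow_alloc_resp(int response)
{
        int ret;

        pthread_mutex_lock(&alloc_lock);
        /* Holding the same mutex guarantees the request side has finished
         * publishing its state before the response is processed. */
        ret = flow_state_ready ? response : -1;
        pthread_mutex_unlock(&alloc_lock);

        return ret;
}

int main(void)
{
        handle_flow_req_arr();
        printf("resp: %d\n", handle_flow_alloc_resp(0));

        return 0;
}

In the patch itself the shared mutex is ipcpi.alloc_lock: each shim takes it before ipcp_flow_req_arr() and releases it only after the per-flow bookkeeping is in place, while ipcp_main_loop() takes it around the ipcp_flow_alloc_resp() call.
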
diff --git a/src/ipcpd/ipcp.c b/src/ipcpd/ipcp.c
index fc0e3587..4b7da030 100644
--- a/src/ipcpd/ipcp.c
+++ b/src/ipcpd/ipcp.c
@@ -261,9 +261,12 @@ static void * ipcp_main_loop(void * o)
                         }
                 }
                 ret_msg.has_result = true;
+
+                pthread_mutex_lock(&ipcpi.alloc_lock);
                 ret_msg.result =
                         ipcpi.ops->ipcp_flow_alloc_resp(fd,
                                                         msg->response);
+                pthread_mutex_unlock(&ipcpi.alloc_lock);
                 break;
         case IPCP_MSG_CODE__IPCP_FLOW_DEALLOC:
                 if (ipcpi.ops->ipcp_flow_dealloc == NULL) {
@@ -354,11 +357,11 @@ int ipcp_init(int argc,
               enum ipcp_type type,
               struct ipcp_ops * ops)
 {
-        bool log;
+        bool               log;
         pthread_condattr_t cattr;
-
-        struct timeval tv = {(IPCP_ACCEPT_TIMEOUT / 1000),
-                             (IPCP_ACCEPT_TIMEOUT % 1000) * 1000};
+        struct timeval     tv = {(IPCP_ACCEPT_TIMEOUT / 1000),
+                                 (IPCP_ACCEPT_TIMEOUT % 1000) * 1000};
+        int                ret = -1;
 
         if (parse_args(argc, argv, &log))
                 return -1;
@@ -383,27 +386,21 @@ int ipcp_init(int argc,
 
         ipcpi.threadpool = malloc(sizeof(pthread_t) * IPCP_MAX_THREADS);
         if (ipcpi.threadpool == NULL) {
-                ap_fini();
-                return -ENOMEM;
+                ret = -ENOMEM;
+                goto fail_thr;
         }
 
         ipcpi.threads = 0;
         ipcpi.max_threads = IPCP_MIN_AV_THREADS;
 
         ipcpi.sock_path = ipcp_sock_path(getpid());
-        if (ipcpi.sock_path == NULL) {
-                free(ipcpi.threadpool);
-                ap_fini();
-                return -1;
-        }
+        if (ipcpi.sock_path == NULL)
+                goto fail_sock_path;
 
         ipcpi.sockfd = server_socket_open(ipcpi.sock_path);
         if (ipcpi.sockfd < 0) {
                 log_err("Could not open server socket.");
-                free(ipcpi.threadpool);
-                free(ipcpi.sock_path);
-                ap_fini();
-                return -1;
+                goto fail_serv_sock;
         }
 
         if (setsockopt(ipcpi.sockfd, SOL_SOCKET, SO_RCVTIMEO,
@@ -414,112 +411,89 @@ int ipcp_init(int argc,
 
         if (pthread_rwlock_init(&ipcpi.state_lock, NULL)) {
                 log_err("Could not create rwlock.");
-                close(ipcpi.sockfd);
-                free(ipcpi.threadpool);
-                free(ipcpi.sock_path);
-                ap_fini();
-                return -1;
+                goto fail_state_rwlock;
         }
 
         if (pthread_mutex_init(&ipcpi.state_mtx, NULL)) {
                 log_err("Could not create mutex.");
-                pthread_rwlock_destroy(&ipcpi.state_lock);
-                close(ipcpi.sockfd);
-                free(ipcpi.threadpool);
-                free(ipcpi.sock_path);
-                ap_fini();
-                return -1;
+                goto fail_state_mtx;
         }
 
         if (pthread_mutex_init(&ipcpi.threads_lock, NULL)) {
                 log_err("Could not create mutex.");
-                pthread_mutex_destroy(&ipcpi.state_mtx);
-                pthread_rwlock_destroy(&ipcpi.state_lock);
-                close(ipcpi.sockfd);
-                free(ipcpi.threadpool);
-                free(ipcpi.sock_path);
-                ap_fini();
-                return -1;
+                goto fail_thread_lock;
         }
 
         if (pthread_condattr_init(&cattr)) {
                 log_err("Could not create condattr.");
-                pthread_mutex_destroy(&ipcpi.threads_lock);
-                pthread_mutex_destroy(&ipcpi.state_mtx);
-                pthread_rwlock_destroy(&ipcpi.state_lock);
-                close(ipcpi.sockfd);
-                free(ipcpi.threadpool);
-                free(ipcpi.sock_path);
-                ap_fini();
-                return -1;
+                goto fail_cond_attr;
         }
-        ;
+
 #ifndef __APPLE__
         pthread_condattr_setclock(&cattr, PTHREAD_COND_CLOCK);
 #endif
 
         if (pthread_cond_init(&ipcpi.state_cond, &cattr)) {
                 log_err("Could not init condvar.");
-                pthread_condattr_destroy(&cattr);
-                pthread_mutex_destroy(&ipcpi.threads_lock);
-                pthread_mutex_destroy(&ipcpi.state_mtx);
-                pthread_rwlock_destroy(&ipcpi.state_lock);
-                close(ipcpi.sockfd);
-                free(ipcpi.threadpool);
-                free(ipcpi.sock_path);
-                ap_fini();
-                return -1;
+                goto fail_state_cond;
         }
 
         if (pthread_cond_init(&ipcpi.threads_cond, &cattr)) {
                 log_err("Could not init condvar.");
-                pthread_cond_destroy(&ipcpi.state_cond);
-                pthread_condattr_destroy(&cattr);
-                pthread_mutex_destroy(&ipcpi.threads_lock);
-                pthread_mutex_destroy(&ipcpi.state_mtx);
-                pthread_rwlock_destroy(&ipcpi.state_lock);
-                close(ipcpi.sockfd);
-                free(ipcpi.threadpool);
-                free(ipcpi.sock_path);
-                ap_fini();
-                return -1;
-        };
-
-
-        pthread_condattr_destroy(&cattr);
+                goto fail_thread_cond;
+        }
 
         ipcpi.thread_ids = bmp_create(IPCP_MAX_THREADS, 0);
         if (ipcpi.thread_ids == NULL) {
                 log_err("Could not init condvar.");
-                pthread_cond_destroy(&ipcpi.threads_cond);
-                pthread_cond_destroy(&ipcpi.state_cond);
-                pthread_mutex_destroy(&ipcpi.threads_lock);
-                pthread_mutex_destroy(&ipcpi.state_mtx);
-                pthread_rwlock_destroy(&ipcpi.state_lock);
-                close(ipcpi.sockfd);
-                free(ipcpi.threadpool);
-                free(ipcpi.sock_path);
-                ap_fini();
-                return -1;
-        };
+                goto fail_bmp;
+        }
 
-        if (type == IPCP_NORMAL)
+        if (pthread_mutex_init(&ipcpi.alloc_lock, NULL)) {
+                log_err("Failed to init mutex.");
+                goto fail_alloc_lock;
+        }
+
+        if (type == IPCP_NORMAL) {
+                pthread_condattr_destroy(&cattr);
                 return 0;
+        }
 
         ipcpi.shim_data = shim_data_create();
         if (ipcpi.shim_data == NULL) {
-                bmp_destroy(ipcpi.thread_ids);
-                pthread_cond_destroy(&ipcpi.threads_cond);
-                pthread_cond_destroy(&ipcpi.state_cond);
-                pthread_mutex_destroy(&ipcpi.state_mtx);
-                pthread_rwlock_destroy(&ipcpi.state_lock);
-                close(ipcpi.sockfd);
-                free(ipcpi.threadpool);
-                free(ipcpi.sock_path);
-                ap_fini();
-                return -ENOMEM;
+                ret = -ENOMEM;
+                goto fail_shim_data;
         }
 
+        pthread_condattr_destroy(&cattr);
+        return 0;
+
+ fail_shim_data:
+        pthread_mutex_destroy(&ipcpi.alloc_lock);
+ fail_alloc_lock:
+        bmp_destroy(ipcpi.thread_ids);
+ fail_bmp:
+        pthread_cond_destroy(&ipcpi.threads_cond);
+ fail_thread_cond:
+        pthread_cond_destroy(&ipcpi.state_cond);
+ fail_state_cond:
+        pthread_condattr_destroy(&cattr);
+ fail_cond_attr:
+        pthread_mutex_destroy(&ipcpi.threads_lock);
+ fail_thread_lock:
+        pthread_mutex_destroy(&ipcpi.state_mtx);
+ fail_state_mtx:
+        pthread_rwlock_destroy(&ipcpi.state_lock);
+ fail_state_rwlock:
+        close(ipcpi.sockfd);
+ fail_serv_sock:
+        free(ipcpi.sock_path);
+ fail_sock_path:
+        free(ipcpi.threadpool);
+ fail_thr:
+        ap_fini();
+
+        return ret;
 }
 
 void * threadpoolmgr(void * o)
diff --git a/src/ipcpd/ipcp.h b/src/ipcpd/ipcp.h
index 581ca5e3..145e91f5 100644
--- a/src/ipcpd/ipcp.h
+++ b/src/ipcpd/ipcp.h
@@ -81,6 +81,8 @@ struct ipcp {
         int               sockfd;
         char *            sock_path;
 
+        pthread_mutex_t   alloc_lock;
+
         pthread_t *       threadpool;
         struct bmp *      thread_ids;
 
diff --git a/src/ipcpd/local/main.c b/src/ipcpd/local/main.c
index 3e5c4803..88d0912e 100644
--- a/src/ipcpd/local/main.c
+++ b/src/ipcpd/local/main.c
@@ -223,33 +223,22 @@ static int ipcp_local_flow_alloc(int fd,
                 return -1; /* -ENOTENROLLED */
         }
 
-        /*
-         * This function needs to return completely before
-         * flow_resp. Taking the wrlock on the data is the simplest
-         * way to achieve this.
-         */
-
-        pthread_rwlock_wrlock(&local_data.lock);
+        pthread_mutex_lock(&ipcpi.alloc_lock);
 
         out_fd = ipcp_flow_req_arr(getpid(), dst_name, cube);
         if (out_fd < 0) {
-                pthread_rwlock_unlock(&local_data.lock);
+                pthread_mutex_unlock(&ipcpi.alloc_lock);
                 log_dbg("Flow allocation failed: %d", out_fd);
                 return -1;
         }
 
-        /*
-         * The idea of the port_wait_assign in dev.c was to do the
-         * above synchronisation. But if the lock is not taken, the
-         * resp() function may be called before a lock would be taken
-         * here. This shim will be deprecated, but ideally the sync is
-         * fixed in ipcp.c.
-         */
+        pthread_rwlock_wrlock(&local_data.lock);
 
         local_data.in_out[fd] = out_fd;
         local_data.in_out[out_fd] = fd;
 
         pthread_rwlock_unlock(&local_data.lock);
+        pthread_mutex_unlock(&ipcpi.alloc_lock);
 
         flow_set_add(local_data.flows, fd);
diff --git a/src/ipcpd/normal/fmgr.c b/src/ipcpd/normal/fmgr.c
index 19653430..56f1e099 100644
--- a/src/ipcpd/normal/fmgr.c
+++ b/src/ipcpd/normal/fmgr.c
@@ -619,10 +619,12 @@ int fmgr_np1_post_buf(cep_id_t cep_id,
 
         switch (msg->code) {
         case FLOW_ALLOC_CODE__FLOW_REQ:
+                pthread_mutex_lock(&ipcpi.alloc_lock);
                 fd = ipcp_flow_req_arr(getpid(),
                                        msg->dst_name,
                                        msg->qoscube);
                 if (fd < 0) {
+                        pthread_mutex_unlock(&ipcpi.alloc_lock);
                         flow_alloc_msg__free_unpacked(msg, NULL);
                         log_err("Failed to get fd for flow.");
                         return -1;
@@ -634,6 +636,7 @@ int fmgr_np1_post_buf(cep_id_t cep_id,
                 fmgr.np1_cep_id_to_fd[cep_id] = fd;
                 pthread_rwlock_unlock(&fmgr.np1_flows_lock);
 
+                pthread_mutex_unlock(&ipcpi.alloc_lock);
                 break;
         case FLOW_ALLOC_CODE__FLOW_REPLY:
diff --git a/src/ipcpd/shim-eth-llc/main.c b/src/ipcpd/shim-eth-llc/main.c
index d42e6fff..142ca680 100644
--- a/src/ipcpd/shim-eth-llc/main.c
+++ b/src/ipcpd/shim-eth-llc/main.c
@@ -347,19 +347,23 @@ static int eth_llc_ipcp_sap_req(uint8_t r_sap,
 {
         int fd;
 
-        pthread_rwlock_wrlock(&eth_llc_data.flows_lock);
+        pthread_mutex_lock(&ipcpi.alloc_lock);
 
         /* reply to IRM, called under lock to prevent race */
         fd = ipcp_flow_req_arr(getpid(), dst_name, cube);
         if (fd < 0) {
+                pthread_mutex_unlock(&ipcpi.alloc_lock);
                 log_err("Could not get new flow from IRMd.");
                 return -1;
         }
 
+        pthread_rwlock_wrlock(&eth_llc_data.flows_lock);
+
         eth_llc_data.fd_to_ef[fd].r_sap = r_sap;
         memcpy(eth_llc_data.fd_to_ef[fd].r_addr, r_addr, MAC_SIZE);
 
         pthread_rwlock_unlock(&eth_llc_data.flows_lock);
+        pthread_mutex_unlock(&ipcpi.alloc_lock);
 
         log_dbg("New flow request, fd %d, remote SAP %d.", fd, r_sap);
diff --git a/src/ipcpd/shim-udp/main.c b/src/ipcpd/shim-udp/main.c
index 71f1270b..ea3d1f88 100644
--- a/src/ipcpd/shim-udp/main.c
+++ b/src/ipcpd/shim-udp/main.c
@@ -269,9 +269,12 @@ static int ipcp_udp_port_req(struct sockaddr_in * c_saddr,
                 return -1;
         }
 
+        pthread_mutex_lock(&ipcpi.alloc_lock);
+
         /* reply to IRM */
         fd = ipcp_flow_req_arr(getpid(), dst_name, cube);
         if (fd < 0) {
+                pthread_mutex_unlock(&ipcpi.alloc_lock);
                 log_err("Could not get new flow from IRMd.");
                 close(skfd);
                 return -1;
@@ -286,6 +289,7 @@ static int ipcp_udp_port_req(struct sockaddr_in * c_saddr,
 
         pthread_rwlock_unlock(&udp_data.flows_lock);
         pthread_rwlock_unlock(&ipcpi.state_lock);
+        pthread_mutex_unlock(&ipcpi.alloc_lock);
 
         log_dbg("Pending allocation request, fd %d, UDP port (%d, %d).",
                 fd, ntohs(f_saddr.sin_port), ntohs(c_saddr->sin_port));
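
Aside from the new lock, most of the churn in ipcp.c comes from replacing the copy-pasted inline cleanup in ipcp_init() with a single goto unwind ladder that tears resources down in reverse order of acquisition. A minimal sketch of that pattern, using hypothetical resources rather than the ipcpd ones:

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

/* Sketch of the goto-unwind style adopted in ipcp_init(); the resources
 * here are hypothetical examples, not the ipcpd ones. */
int init_example(void)
{
        int              ret = -1;
        char *           buf;
        pthread_mutex_t  mtx;
        pthread_rwlock_t lock;

        buf = malloc(64);
        if (buf == NULL) {
                ret = -ENOMEM;
                goto fail_buf;
        }

        if (pthread_mutex_init(&mtx, NULL))
                goto fail_mtx;

        if (pthread_rwlock_init(&lock, NULL))
                goto fail_lock;

        return 0;               /* success: everything stays initialised */

 fail_lock:                     /* unwind in reverse order of setup */
        pthread_mutex_destroy(&mtx);
 fail_mtx:
        free(buf);
 fail_buf:
        return ret;
}

Each label undoes only what was set up before the failing step, so every error path releases exactly the resources it owns; that is what lets the repeated destroy/free/close sequences in the old ipcp_init() collapse into one ladder.
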