author     Dimitri Staessens <[email protected]>  2023-08-28 12:29:00 +0200
committer  Sander Vrijders <[email protected]>    2023-08-30 10:33:20 +0200
commit     08332eefba9aa4b08d00e190720de4771081e855 (patch)
tree       9e6b4dbf920b3813c696f79c6f2b926f976402b8 /src/ipcpd/eth/eth.c
parent     870e3fdfaee4991592cc29e90767abd0e9fba43b (diff)
download   ouroboros-08332eefba9aa4b08d00e190720de4771081e855.tar.gz
           ouroboros-08332eefba9aa4b08d00e190720de4771081e855.zip
ipcpd: Move alloc race mitigation to common source
All flow allocator code duplicated the mitigation for a race where
the IRMd response to the flow allocation, carrying the new flow fd,
could arrive before the response to the flow_req_arr. This mitigation
is now moved to the common ipcp source.
Signed-off-by: Dimitri Staessens <[email protected]>
Signed-off-by: Sander Vrijders <[email protected]>
Diffstat (limited to 'src/ipcpd/eth/eth.c')
-rw-r--r--   src/ipcpd/eth/eth.c   54
1 file changed, 3 insertions, 51 deletions
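
Note that the diff below only shows the eth.c call sites; the consolidated helpers land in the common ipcpd sources, which are outside this diffstat. As a rough sketch of what the request-side helper could look like, the logic removed from eth_ipcp_req() below can be reassembled as follows. The signature is inferred from the new call ipcp_wait_flow_req_arr(dst, qs, IPCP_ETH_MPL, data, len); the parameter types, the placement of ALLOC_TIMEO and the internal ipcp_dir_hash_len() call are assumptions, not part of this commit.

/*
 * Sketch only: reassembled from the code removed from eth.c below.
 * The actual common implementation is not shown in this diff.
 * Assumes the usual common-IPCP internals (ipcpi, ipcp_get_state(),
 * ts_add(), MILLION, PTHREAD_COND_CLOCK) from the shared ipcpd
 * headers, and that ALLOC_TIMEO moves here along with the code.
 */

#define ALLOC_TIMEO 10 /* ms */

int ipcp_wait_flow_req_arr(const uint8_t * dst,
                           qosspec_t       qs,
                           time_t          mpl,
                           const void *    data,
                           size_t          len)
{
        struct timespec ts = {0, ALLOC_TIMEO * MILLION};
        struct timespec abstime;
        int             fd;

        clock_gettime(PTHREAD_COND_CLOCK, &abstime);

        pthread_mutex_lock(&ipcpi.alloc_lock);

        /* Wait until any previous flow_req_arr has been matched up. */
        while (ipcpi.alloc_id != -1 && ipcp_get_state() == IPCP_OPERATIONAL) {
                ts_add(&abstime, &ts, &abstime);
                pthread_cond_timedwait(&ipcpi.alloc_cond,
                                       &ipcpi.alloc_lock,
                                       &abstime);
        }

        if (ipcp_get_state() != IPCP_OPERATIONAL) {
                pthread_mutex_unlock(&ipcpi.alloc_lock);
                return -1;
        }

        /* Reply to the IRMd under the lock to prevent the race. */
        fd = ipcp_flow_req_arr(dst, ipcp_dir_hash_len(), qs, mpl, data, len);
        if (fd < 0) {
                pthread_mutex_unlock(&ipcpi.alloc_lock);
                return fd;
        }

        /* Publish the pending fd for the matching flow_alloc_resp. */
        ipcpi.alloc_id = fd;
        pthread_cond_broadcast(&ipcpi.alloc_cond);

        pthread_mutex_unlock(&ipcpi.alloc_lock);

        return fd;
}
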
diff --git a/src/ipcpd/eth/eth.c b/src/ipcpd/eth/eth.c
index 12bd294e..ef64c07e 100644
--- a/src/ipcpd/eth/eth.c
+++ b/src/ipcpd/eth/eth.c
@@ -136,7 +136,6 @@
 #define ETH_FRAME_SIZE (ETH_HEADER_SIZE + ETH_MTU_MAX)
 #endif
 
-#define ALLOC_TIMEO 10 /* ms */
 #define NAME_QUERY_TIMEO 2000 /* ms */
 #define MGMT_TIMEO 100 /* ms */
 #define MGMT_FRAME_SIZE 2048
@@ -575,32 +574,10 @@ static int eth_ipcp_req(uint8_t * r_addr,
                         const void * data,
                         size_t len)
 {
-        struct timespec ts = {0, ALLOC_TIMEO * MILLION};
-        struct timespec abstime;
-        int fd;
-        time_t mpl = IPCP_ETH_MPL;
+        int fd;
 
-        clock_gettime(PTHREAD_COND_CLOCK, &abstime);
-
-        pthread_mutex_lock(&ipcpi.alloc_lock);
-
-        while (ipcpi.alloc_id != -1 && ipcp_get_state() == IPCP_OPERATIONAL) {
-                ts_add(&abstime, &ts, &abstime);
-                pthread_cond_timedwait(&ipcpi.alloc_cond,
-                                       &ipcpi.alloc_lock,
-                                       &abstime);
-        }
-
-        if (ipcp_get_state() != IPCP_OPERATIONAL) {
-                log_dbg("Won't allocate over non-operational IPCP.");
-                pthread_mutex_unlock(&ipcpi.alloc_lock);
-                return -1;
-        }
-
-        /* reply to IRM, called under lock to prevent race */
-        fd = ipcp_flow_req_arr(dst, ipcp_dir_hash_len(), qs, mpl, data, len);
+        fd = ipcp_wait_flow_req_arr(dst, qs, IPCP_ETH_MPL, data, len);
         if (fd < 0) {
-                pthread_mutex_unlock(&ipcpi.alloc_lock);
                 log_err("Could not get new flow from IRMd.");
                 return -1;
         }
@@ -615,11 +592,6 @@ static int eth_ipcp_req(uint8_t * r_addr,
 
         pthread_rwlock_unlock(&eth_data.flows_lock);
 
-        ipcpi.alloc_id = fd;
-        pthread_cond_broadcast(&ipcpi.alloc_cond);
-
-        pthread_mutex_unlock(&ipcpi.alloc_lock);
-
 #if defined(BUILD_ETH_DIX)
         log_dbg("New flow request, fd %d, remote endpoint %d.", fd, r_eid);
 #elif defined(BUILD_ETH_LLC)
@@ -1704,8 +1676,6 @@ static int eth_ipcp_flow_alloc_resp(int fd,
                                     const void * data,
                                     size_t len)
 {
-        struct timespec ts = {0, ALLOC_TIMEO * MILLION};
-        struct timespec abstime;
 #if defined(BUILD_ETH_DIX)
         uint16_t r_eid;
 #elif defined(BUILD_ETH_LLC)
@@ -1714,26 +1684,8 @@ static int eth_ipcp_flow_alloc_resp(int fd,
 #endif
         uint8_t r_addr[MAC_SIZE];
 
-        clock_gettime(PTHREAD_COND_CLOCK, &abstime);
-
-        pthread_mutex_lock(&ipcpi.alloc_lock);
-
-        while (ipcpi.alloc_id != fd && ipcp_get_state() == IPCP_OPERATIONAL) {
-                ts_add(&abstime, &ts, &abstime);
-                pthread_cond_timedwait(&ipcpi.alloc_cond,
-                                       &ipcpi.alloc_lock,
-                                       &abstime);
-        }
-
-        if (ipcp_get_state() != IPCP_OPERATIONAL) {
-                pthread_mutex_unlock(&ipcpi.alloc_lock);
+        if (ipcp_wait_flow_resp(fd) < 0)
                 return -1;
-        }
-
-        ipcpi.alloc_id = -1;
-        pthread_cond_broadcast(&ipcpi.alloc_cond);
-
-        pthread_mutex_unlock(&ipcpi.alloc_lock);
 
         pthread_rwlock_wrlock(&eth_data.flows_lock);
 #if defined(BUILD_ETH_DIX)
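
The response side can be sketched the same way from the code removed from eth_ipcp_flow_alloc_resp() above: ipcp_wait_flow_resp(fd) presumably blocks until the request side has published fd as the pending alloc_id and then clears it, so the next flow_req_arr can proceed. Again a reconstruction from the removed lines, not the actual common source.

/*
 * Sketch only: response-side counterpart, reassembled from the code
 * removed from eth_ipcp_flow_alloc_resp() above.  Returns 0 once the
 * pending fd has been consumed, -1 if the IPCP is shutting down.
 */
int ipcp_wait_flow_resp(int fd)
{
        struct timespec ts = {0, ALLOC_TIMEO * MILLION};
        struct timespec abstime;

        clock_gettime(PTHREAD_COND_CLOCK, &abstime);

        pthread_mutex_lock(&ipcpi.alloc_lock);

        /* Wait for the request side to publish this fd. */
        while (ipcpi.alloc_id != fd && ipcp_get_state() == IPCP_OPERATIONAL) {
                ts_add(&abstime, &ts, &abstime);
                pthread_cond_timedwait(&ipcpi.alloc_cond,
                                       &ipcpi.alloc_lock,
                                       &abstime);
        }

        if (ipcp_get_state() != IPCP_OPERATIONAL) {
                pthread_mutex_unlock(&ipcpi.alloc_lock);
                return -1;
        }

        /* Clear the pending id so the next flow_req_arr can proceed. */
        ipcpi.alloc_id = -1;
        pthread_cond_broadcast(&ipcpi.alloc_cond);

        pthread_mutex_unlock(&ipcpi.alloc_lock);

        return 0;
}

Serializing both paths on ipcpi.alloc_id under ipcpi.alloc_lock is what keeps the IRMd's allocation response for the new fd from being handled before the flow_req_arr response has been recorded, which is the race each IPCP previously mitigated with its own copy of this code.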