lpm: report error when defer queue overflows
Coverity complains about the unchecked return value of rte_rcu_qsbr_dq_enqueue.
By default, the defer queue size is big enough to hold all tbl8 groups. When
enqueue fails, return an error to the user to indicate a system issue.
Coverity issue: 360832
Fixes: 8a9f8564e9 ("lpm: implement RCU rule reclamation")
Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
Acked-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
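
For context, a minimal caller-side sketch (not part of the patch; the helper name and logtype are illustrative) of how an application can now observe the failure: once RCU QSBR is attached in RTE_LPM_QSBR_MODE_DQ mode, a defer queue overflow inside tbl8_free() propagates out of rte_lpm_delete() as a negative return value.

#include <rte_lpm.h>
#include <rte_log.h>

/* Hypothetical helper: delete a route and surface a defer queue
 * overflow. rte_lpm_delete() now returns the negative status that
 * tbl8_free() produces when rte_rcu_qsbr_dq_enqueue() fails. */
static int
remove_route(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
{
	int ret = rte_lpm_delete(lpm, ip, depth);

	if (ret < 0)
		RTE_LOG(ERR, USER1,
			"LPM delete failed (%d), defer queue may be full\n",
			ret);
	return ret;
}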
@@ -532,11 +532,12 @@ tbl8_alloc(struct rte_lpm *lpm)
 	return group_idx;
 }
 
-static void
+static int32_t
 tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
 {
 	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
 	struct __rte_lpm *internal_lpm;
+	int status;
 
 	internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
 	if (internal_lpm->v == NULL) {
@@ -552,9 +553,15 @@ tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
 				__ATOMIC_RELAXED);
 	} else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
 		/* Push into QSBR defer queue. */
-		rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
+		status = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
 				(void *)&tbl8_group_start);
+		if (status == 1) {
+			RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
+			return -rte_errno;
+		}
 	}
+
+	return 0;
 }
 
 static __rte_noinline int32_t
@@ -1040,7 +1047,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 #define group_idx next_hop
 	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
 			tbl8_range, i;
-	int32_t tbl8_recycle_index;
+	int32_t tbl8_recycle_index, status = 0;
 
 	/*
 	 * Calculate the index into tbl24 and range. Note: All depths larger
@@ -1097,7 +1104,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 		 */
 		lpm->tbl24[tbl24_index].valid = 0;
 		__atomic_thread_fence(__ATOMIC_RELEASE);
-		tbl8_free(lpm, tbl8_group_start);
+		status = tbl8_free(lpm, tbl8_group_start);
 	} else if (tbl8_recycle_index > -1) {
 		/* Update tbl24 entry. */
 		struct rte_lpm_tbl_entry new_tbl24_entry = {
@@ -1113,10 +1120,10 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
 				__ATOMIC_RELAXED);
 		__atomic_thread_fence(__ATOMIC_RELEASE);
-		tbl8_free(lpm, tbl8_group_start);
+		status = tbl8_free(lpm, tbl8_group_start);
 	}
 #undef group_idx
-	return 0;
+	return status;
 }
 
 /*
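
For reference, the defer-queue path patched above is only taken when the application has attached an RCU QSBR variable in DQ mode. A sketch of that setup, assuming DPDK's experimental rte_lpm_rcu_qsbr_add() API introduced by the fixed commit (the helper name is illustrative):

#include <rte_lpm.h>
#include <rte_rcu_qsbr.h>
#include <rte_malloc.h>

/* Hypothetical setup helper: attach RCU QSBR to an LPM table in
 * defer-queue mode, so tbl8_free() reclaims groups through
 * rte_rcu_qsbr_dq_enqueue() as patched above. */
static struct rte_rcu_qsbr *
lpm_attach_rcu_dq(struct rte_lpm *lpm, uint32_t max_threads)
{
	struct rte_lpm_rcu_config cfg = {0};
	struct rte_rcu_qsbr *v;
	size_t sz = rte_rcu_qsbr_get_memsize(max_threads);

	v = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
	if (v == NULL || rte_rcu_qsbr_init(v, max_threads) != 0) {
		rte_free(v);
		return NULL;
	}

	cfg.v = v;
	cfg.mode = RTE_LPM_QSBR_MODE_DQ;
	/* dq_size of 0 uses the default: room for all tbl8 groups,
	 * the "big enough" case from the commit message. */
	cfg.dq_size = 0;
	if (rte_lpm_rcu_qsbr_add(lpm, &cfg) != 0) {
		rte_free(v);
		return NULL;
	}
	return v;
}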