lpm: implement RCU rule reclamation

Currently, the tbl8 group is freed even though readers might still be
using the tbl8 group entries. The freed tbl8 group can be reallocated
quickly, which results in incorrect lookup results.

The RCU QSBR process is integrated for safe tbl8 group reclamation.
Refer to the RCU documentation to understand the various aspects of
integrating the RCU library into other libraries.

To avoid ABI breakage, a struct __rte_lpm is created for LPM library
internal use. This struct wraps the exposed rte_lpm and also holds
members that do not need to be exposed, such as the RCU-related
configuration.

Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Acked-by: Ray Kinsella <mdr@ashroe.eu>
Acked-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
Author:    Ruifeng Wang <ruifeng.wang@arm.com>
Date:      2020-07-10 10:22:25 +08:00
Committer: David Marchand
Parent:    e0a439466b
Commit:    8a9f8564e9

6 changed files with 239 additions and 24 deletions
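The ABI note in the message above boils down to a standard wrapper pattern:
the exposed struct is embedded in a private wrapper, and container_of()
recovers the wrapper from the public pointer handed to applications. A
minimal, self-contained sketch (the names pub_handle and priv_wrapper are
illustrative, not from this patch):

    #include <stddef.h>

    struct pub_handle {            /* stands in for the exposed rte_lpm */
        int exposed_field;
    };

    struct priv_wrapper {          /* stands in for struct __rte_lpm */
        struct pub_handle pub;     /* part returned to the application */
        int hidden_field;          /* e.g. the RCU configuration */
    };

    /* Same idea as DPDK's container_of(): walk back from the member. */
    #define wrapper_of(ptr) \
        ((struct priv_wrapper *)((char *)(ptr) - \
        offsetof(struct priv_wrapper, pub)))

    static inline int
    get_hidden(struct pub_handle *h)
    {
        return wrapper_of(h)->hidden_field;
    }

Because the layout of the public struct is unchanged and applications never
see the wrapper, existing binaries keep working against the new library.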

doc/guides/prog_guide/lpm_lib.rst

@@ -145,6 +145,38 @@ depending on whether we need to move to the next table or not.
Prefix expansion is one of the keys of this algorithm,
since it improves the speed dramatically by adding redundancy.
Deletion
~~~~~~~~
When deleting a rule, a replacement rule is searched for. The replacement rule is an existing rule that has
the longest prefix match with the rule to be deleted, but has a shorter prefix.
If a replacement rule is found, the target tbl24 and tbl8 entries are updated to have the same depth and next hop
value as the replacement rule.
If no replacement rule can be found, the target tbl24 and tbl8 entries will be cleared.
Prefix expansion is performed if the rule's depth is not exactly 24 bits or 32 bits.
After deleting a rule, a group of tbl8s that belongs to the same tbl24 entry is freed in the following cases:
* All tbl8s in the group are empty.
* All tbl8s in the group have the same values and a depth no greater than 24.
Freeing of tbl8s has different behaviors:
* If RCU is not used, tbl8s are cleared and reclaimed immediately.
* If RCU is used, tbl8s are reclaimed only when readers are in a quiescent state.
When the LPM is not using RCU, a tbl8 group can be freed immediately even though readers might still be using
its entries. This can result in incorrect lookup results.
The RCU QSBR process is integrated for safe tbl8 group reclamation. The application has certain responsibilities
when using this feature; please refer to the resource reclamation framework of the :ref:`RCU library <RCU_Library>`
for more details. A usage sketch follows.
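A minimal usage sketch (not part of this patch; error handling is elided,
names such as ``v`` and ``rcu_cfg`` are local to the example, and a single
reader thread is assumed)::

    struct rte_lpm_rcu_config rcu_cfg = {0};
    struct rte_rcu_qsbr *v;
    size_t sz;

    /* Create and initialize a QSBR variable sized for one reader thread. */
    sz = rte_rcu_qsbr_get_memsize(1);
    v = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
    rte_rcu_qsbr_init(v, 1);

    /* Attach it to the LPM object. DQ mode (the default) creates a defer
     * queue; SYNC mode makes the writer block until readers are quiescent.
     */
    rcu_cfg.v = v;
    rcu_cfg.mode = RTE_LPM_QSBR_MODE_DQ;
    if (rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg, NULL) != 0)
        return -1; /* rte_errno is one of EINVAL, EEXIST or ENOMEM */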
Lookup
~~~~~~

lib/librte_lpm/Makefile

@@ -8,7 +8,7 @@ LIB = librte_lpm.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
LDLIBS += -lrte_eal -lrte_hash
LDLIBS += -lrte_eal -lrte_hash -lrte_rcu
EXPORT_MAP := rte_lpm_version.map

lib/librte_lpm/meson.build

@@ -7,3 +7,4 @@ headers = files('rte_lpm.h', 'rte_lpm6.h')
# without worrying about which architecture we actually need
headers += files('rte_lpm_altivec.h', 'rte_lpm_neon.h', 'rte_lpm_sse.h')
deps += ['hash']
deps += ['rcu']

lib/librte_lpm/rte_lpm.c

@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2014 Intel Corporation
* Copyright(c) 2020 Arm Limited
*/
#include <string.h>
@@ -39,6 +40,17 @@ enum valid_flag {
VALID
};
/** @internal LPM structure. */
struct __rte_lpm {
/* LPM metadata. */
struct rte_lpm lpm;
/* RCU config. */
struct rte_rcu_qsbr *v; /* RCU QSBR variable. */
enum rte_lpm_qsbr_mode rcu_mode; /* Blocking, defer queue. */
struct rte_rcu_qsbr_dq *dq; /* RCU QSBR defer queue. */
};
/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
@@ -122,6 +134,7 @@ rte_lpm_create(const char *name, int socket_id,
const struct rte_lpm_config *config)
{
char mem_name[RTE_LPM_NAMESIZE];
struct __rte_lpm *internal_lpm;
struct rte_lpm *lpm = NULL;
struct rte_tailq_entry *te;
uint32_t mem_size, rules_size, tbl8s_size;
@@ -140,12 +153,6 @@ rte_lpm_create(const char *name, int socket_id,
snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
/* Determine the amount of memory to allocate. */
mem_size = sizeof(*lpm);
rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
rte_mcfg_tailq_write_lock();
/* guarantee there's no existing */
@@ -161,6 +168,12 @@ rte_lpm_create(const char *name, int socket_id,
goto exit;
}
/* Determine the amount of memory to allocate. */
mem_size = sizeof(*internal_lpm);
rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
tbl8s_size = sizeof(struct rte_lpm_tbl_entry) *
RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s;
/* allocate tailq entry */
te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
if (te == NULL) {
@@ -170,21 +183,23 @@ rte_lpm_create(const char *name, int socket_id,
}
/* Allocate memory to store the LPM data structures. */
lpm = rte_zmalloc_socket(mem_name, mem_size,
internal_lpm = rte_zmalloc_socket(mem_name, mem_size,
RTE_CACHE_LINE_SIZE, socket_id);
if (lpm == NULL) {
if (internal_lpm == NULL) {
RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
rte_free(te);
rte_errno = ENOMEM;
goto exit;
}
lpm = &internal_lpm->lpm;
lpm->rules_tbl = rte_zmalloc_socket(NULL,
(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
if (lpm->rules_tbl == NULL) {
RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
rte_free(lpm);
rte_free(internal_lpm);
internal_lpm = NULL;
lpm = NULL;
rte_free(te);
rte_errno = ENOMEM;
@@ -197,7 +212,8 @@ rte_lpm_create(const char *name, int socket_id,
if (lpm->tbl8 == NULL) {
RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
rte_free(lpm->rules_tbl);
rte_free(lpm);
rte_free(internal_lpm);
internal_lpm = NULL;
lpm = NULL;
rte_free(te);
rte_errno = ENOMEM;
@@ -225,6 +241,7 @@ rte_lpm_create(const char *name, int socket_id,
void
rte_lpm_free(struct rte_lpm *lpm)
{
struct __rte_lpm *internal_lpm;
struct rte_lpm_list *lpm_list;
struct rte_tailq_entry *te;
@@ -246,12 +263,84 @@ rte_lpm_free(struct rte_lpm *lpm)
rte_mcfg_tailq_write_unlock();
internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
if (internal_lpm->dq != NULL)
rte_rcu_qsbr_dq_delete(internal_lpm->dq);
rte_free(lpm->tbl8);
rte_free(lpm->rules_tbl);
rte_free(lpm);
rte_free(te);
}
static void
__lpm_rcu_qsbr_free_resource(void *p, void *data, unsigned int n)
{
struct rte_lpm_tbl_entry *tbl8 = ((struct rte_lpm *)p)->tbl8;
struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
uint32_t tbl8_group_index = *(uint32_t *)data;
RTE_SET_USED(n);
/* Set tbl8 group invalid */
__atomic_store(&tbl8[tbl8_group_index], &zero_tbl8_entry,
__ATOMIC_RELAXED);
}
/* Associate QSBR variable with an LPM object.
*/
int
rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg,
struct rte_rcu_qsbr_dq **dq)
{
struct rte_rcu_qsbr_dq_parameters params = {0};
char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
struct __rte_lpm *internal_lpm;
if (lpm == NULL || cfg == NULL) {
rte_errno = EINVAL;
return 1;
}
internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
if (internal_lpm->v != NULL) {
rte_errno = EEXIST;
return 1;
}
if (cfg->mode == RTE_LPM_QSBR_MODE_SYNC) {
/* No other things to do. */
} else if (cfg->mode == RTE_LPM_QSBR_MODE_DQ) {
/* Init QSBR defer queue. */
snprintf(rcu_dq_name, sizeof(rcu_dq_name),
"LPM_RCU_%s", lpm->name);
params.name = rcu_dq_name;
params.size = cfg->dq_size;
if (params.size == 0)
params.size = lpm->number_tbl8s;
params.trigger_reclaim_limit = cfg->reclaim_thd;
params.max_reclaim_size = cfg->reclaim_max;
if (params.max_reclaim_size == 0)
params.max_reclaim_size = RTE_LPM_RCU_DQ_RECLAIM_MAX;
params.esize = sizeof(uint32_t); /* tbl8 group index */
params.free_fn = __lpm_rcu_qsbr_free_resource;
params.p = lpm;
params.v = cfg->v;
internal_lpm->dq = rte_rcu_qsbr_dq_create(&params);
if (internal_lpm->dq == NULL) {
RTE_LOG(ERR, LPM, "LPM defer queue creation failed\n");
return 1;
}
if (dq != NULL)
*dq = internal_lpm->dq;
} else {
rte_errno = EINVAL;
return 1;
}
internal_lpm->rcu_mode = cfg->mode;
internal_lpm->v = cfg->v;
return 0;
}
/*
* Adds a rule to the rule table.
*
@@ -394,14 +483,15 @@ rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
* Find, clean and allocate a tbl8.
*/
static int32_t
tbl8_alloc(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
_tbl8_alloc(struct rte_lpm *lpm)
{
uint32_t group_idx; /* tbl8 group index. */
struct rte_lpm_tbl_entry *tbl8_entry;
/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
for (group_idx = 0; group_idx < lpm->number_tbl8s; group_idx++) {
tbl8_entry = &lpm->tbl8[group_idx *
RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
/* If a free tbl8 group is found clean it and set as VALID. */
if (!tbl8_entry->valid_group) {
struct rte_lpm_tbl_entry new_tbl8_entry = {
@@ -427,14 +517,47 @@ tbl8_alloc(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
return -ENOSPC;
}
static void
tbl8_free(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
static int32_t
tbl8_alloc(struct rte_lpm *lpm)
{
/* Set tbl8 group invalid */
struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
int32_t group_idx; /* tbl8 group index. */
struct __rte_lpm *internal_lpm;
__atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
__ATOMIC_RELAXED);
internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
group_idx = _tbl8_alloc(lpm);
if (group_idx == -ENOSPC && internal_lpm->dq != NULL) {
/* If there are no tbl8 groups try to reclaim one. */
if (rte_rcu_qsbr_dq_reclaim(internal_lpm->dq, 1,
NULL, NULL, NULL) == 0)
group_idx = _tbl8_alloc(lpm);
}
return group_idx;
}
static void
tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
{
struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
struct __rte_lpm *internal_lpm;
internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
if (internal_lpm->v == NULL) {
/* Set tbl8 group invalid */
__atomic_store(&lpm->tbl8[tbl8_group_start], &zero_tbl8_entry,
__ATOMIC_RELAXED);
} else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_SYNC) {
/* Wait for quiescent state change. */
rte_rcu_qsbr_synchronize(internal_lpm->v,
RTE_QSBR_THRID_INVALID);
/* Set tbl8 group invalid */
__atomic_store(&lpm->tbl8[tbl8_group_start], &zero_tbl8_entry,
__ATOMIC_RELAXED);
} else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
/* Push into QSBR defer queue. */
rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
(void *)&tbl8_group_start);
}
}
static __rte_noinline int32_t
@@ -523,7 +646,7 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
if (!lpm->tbl24[tbl24_index].valid) {
/* Search for a free tbl8 group. */
tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);
tbl8_group_index = tbl8_alloc(lpm);
/* Check tbl8 allocation was successful. */
if (tbl8_group_index < 0) {
@@ -569,7 +692,7 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
} /* If valid entry but not extended calculate the index into Table8. */
else if (lpm->tbl24[tbl24_index].valid_group == 0) {
/* Search for free tbl8 group. */
tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);
tbl8_group_index = tbl8_alloc(lpm);
if (tbl8_group_index < 0) {
return tbl8_group_index;
@@ -977,7 +1100,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
*/
lpm->tbl24[tbl24_index].valid = 0;
__atomic_thread_fence(__ATOMIC_RELEASE);
tbl8_free(lpm->tbl8, tbl8_group_start);
tbl8_free(lpm, tbl8_group_start);
} else if (tbl8_recycle_index > -1) {
/* Update tbl24 entry. */
struct rte_lpm_tbl_entry new_tbl24_entry = {
@@ -993,7 +1116,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
__ATOMIC_RELAXED);
__atomic_thread_fence(__ATOMIC_RELEASE);
tbl8_free(lpm->tbl8, tbl8_group_start);
tbl8_free(lpm, tbl8_group_start);
}
#undef group_idx
return 0;
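Both reclamation paths in tbl8_free() above make progress only if readers
report quiescence on the QSBR variable passed to rte_lpm_rcu_qsbr_add(). A
sketch of the reader-side obligations (reader_loop() is hypothetical, not
part of the patch; a single reader with thread id 0 is assumed):

    static void
    reader_loop(struct rte_lpm *lpm, struct rte_rcu_qsbr *v,
            volatile const int *running, uint32_t ip)
    {
        uint32_t next_hop;

        rte_rcu_qsbr_thread_register(v, 0);
        rte_rcu_qsbr_thread_online(v, 0);

        while (*running) {
            (void)rte_lpm_lookup(lpm, ip, &next_hop);
            /* No tbl8 references are held past this point; report it. */
            rte_rcu_qsbr_quiescent(v, 0);
        }

        rte_rcu_qsbr_thread_offline(v, 0);
        rte_rcu_qsbr_thread_unregister(v, 0);
    }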

lib/librte_lpm/rte_lpm.h

@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2014 Intel Corporation
* Copyright(c) 2020 Arm Limited
*/
#ifndef _RTE_LPM_H_
@@ -20,6 +21,7 @@
#include <rte_memory.h>
#include <rte_common.h>
#include <rte_vect.h>
#include <rte_rcu_qsbr.h>
#ifdef __cplusplus
extern "C" {
@@ -62,6 +64,17 @@ extern "C" {
/** Bitmask used to indicate successful lookup */
#define RTE_LPM_LOOKUP_SUCCESS 0x01000000
/** @internal Default RCU defer queue entries to reclaim in one go. */
#define RTE_LPM_RCU_DQ_RECLAIM_MAX 16
/** RCU reclamation modes */
enum rte_lpm_qsbr_mode {
/** Create defer queue for reclaim. */
RTE_LPM_QSBR_MODE_DQ = 0,
/** Use blocking mode reclaim. No defer queue created. */
RTE_LPM_QSBR_MODE_SYNC
};
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/** @internal Tbl24 entry structure. */
__extension__
@@ -132,6 +145,22 @@ struct rte_lpm {
struct rte_lpm_rule *rules_tbl; /**< LPM rules. */
};
/** LPM RCU QSBR configuration structure. */
struct rte_lpm_rcu_config {
struct rte_rcu_qsbr *v; /* RCU QSBR variable. */
/* Mode of RCU QSBR. RTE_LPM_QSBR_MODE_xxx
* '0' for default: create defer queue for reclaim.
*/
enum rte_lpm_qsbr_mode mode;
uint32_t dq_size; /* RCU defer queue size.
* default: lpm->number_tbl8s.
*/
uint32_t reclaim_thd; /* Threshold to trigger auto reclaim. */
uint32_t reclaim_max; /* Max entries to reclaim in one go.
* default: RTE_LPM_RCU_DQ_RECLAIM_MAX.
*/
};
/**
* Create an LPM object.
*
@@ -179,6 +208,30 @@ rte_lpm_find_existing(const char *name);
void
rte_lpm_free(struct rte_lpm *lpm);
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
* Associate RCU QSBR variable with an LPM object.
*
* @param lpm
the LPM object to add the RCU QSBR variable to
* @param cfg
* RCU QSBR configuration
* @param dq
handle of the created RCU QSBR defer queue
* @return
* On success - 0
* On error - 1 with error code set in rte_errno.
* Possible rte_errno codes are:
* - EINVAL - invalid pointer
* - EEXIST - already added QSBR
* - ENOMEM - memory allocation failure
*/
__rte_experimental
int rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg,
struct rte_rcu_qsbr_dq **dq);
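/*
 * Editor's sketch, not part of the patch: a zero-initialized config picks
 * the documented defaults (DQ mode, dq_size = lpm->number_tbl8s,
 * reclaim_max = RTE_LPM_RCU_DQ_RECLAIM_MAX), so only the QSBR variable
 * must be supplied. 'qsbr_var' is assumed to be initialized already:
 *
 *	struct rte_lpm_rcu_config cfg = { .v = qsbr_var };
 *
 *	if (rte_lpm_rcu_qsbr_add(lpm, &cfg, NULL) != 0)
 *		rte_panic("LPM RCU attach failed: %d\n", rte_errno);
 */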
/**
* Add a rule to the LPM table.
*

lib/librte_lpm/rte_lpm_version.map

@@ -21,3 +21,9 @@ DPDK_20.0 {
local: *;
};
EXPERIMENTAL {
global:
rte_lpm_rcu_qsbr_add;
};