examples/qos_sched: add subport config flexibility

Modify the QoS sample app to allow different subports of the same
port to have different configurations in terms of number of pipes,
pipe queue sizes, etc.

Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Signed-off-by: Lukasz Krakowiak <lukaszx.krakowiak@intel.com>
Author: Jasvinder Singh
Date: 2019-10-25 11:51:23 +01:00
Committed by: Thomas Monjalon
Parent: 29169a4184
Commit: b0c1628b15

7 changed files with 203 additions and 155 deletions
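
A rough sketch of what the change enables (not part of the patch): with the
pipe count, queue sizes and pipe profiles now carried per subport in
struct rte_sched_subport_params, two subports of the same port can be sized
differently. The array name and all numeric values below are illustrative
only, and the sketch assumes the sample app's pipe_profiles array and
MAX_SCHED_PIPE_PROFILES are in scope.

#include <rte_common.h>		/* RTE_DIM */
#include <rte_sched.h>		/* struct rte_sched_subport_params */

/* Illustrative only: two subports of one port with different shapes. */
static struct rte_sched_subport_params example_subport_params[] = {
	{	/* subport 0: many pipes, shallow (64-packet) queues */
		.tb_rate = 1250000000,
		.tb_size = 1000000,
		.tc_rate = {1250000000, 1250000000, 1250000000, 1250000000,
			1250000000, 1250000000, 1250000000, 1250000000,
			1250000000, 1250000000, 1250000000, 1250000000,
			1250000000},
		.tc_period = 10,
		.n_pipes_per_subport_enabled = 4096,
		.qsize = {64, 64, 64, 64, 64, 64, 64,
			64, 64, 64, 64, 64, 64},
		.pipe_profiles = pipe_profiles,
		.n_pipe_profiles = RTE_DIM(pipe_profiles),
		.n_max_pipe_profiles = MAX_SCHED_PIPE_PROFILES,
	},
	{	/* subport 1: few pipes, deeper (128-packet) queues */
		.tb_rate = 1250000000,
		.tb_size = 1000000,
		.tc_rate = {1250000000, 1250000000, 1250000000, 1250000000,
			1250000000, 1250000000, 1250000000, 1250000000,
			1250000000, 1250000000, 1250000000, 1250000000,
			1250000000},
		.tc_period = 10,
		.n_pipes_per_subport_enabled = 32,
		.qsize = {128, 128, 128, 128, 128, 128, 128,
			128, 128, 128, 128, 128, 128},
		.pipe_profiles = pipe_profiles,
		.n_pipe_profiles = RTE_DIM(pipe_profiles),
		.n_max_pipe_profiles = MAX_SCHED_PIPE_PROFILES,
	},
};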

examples/qos_sched/app_thread.c

@@ -35,15 +35,25 @@ get_pkt_sched(struct rte_mbuf *m, uint32_t *subport, uint32_t *pipe,
uint16_t *pdata = rte_pktmbuf_mtod(m, uint16_t *);
uint16_t pipe_queue;
/* Outer VLAN ID*/
*subport = (rte_be_to_cpu_16(pdata[SUBPORT_OFFSET]) & 0x0FFF) &
(port_params.n_subports_per_port - 1); /* Outer VLAN ID*/
(port_params.n_subports_per_port - 1);
/* Inner VLAN ID */
*pipe = (rte_be_to_cpu_16(pdata[PIPE_OFFSET]) & 0x0FFF) &
(port_params.n_pipes_per_subport - 1); /* Inner VLAN ID */
(subport_params[*subport].n_pipes_per_subport_enabled - 1);
pipe_queue = active_queues[(pdata[QUEUE_OFFSET] >> 8) % n_active_queues];
/* Traffic class (Destination IP) */
*traffic_class = pipe_queue > RTE_SCHED_TRAFFIC_CLASS_BE ?
RTE_SCHED_TRAFFIC_CLASS_BE : pipe_queue; /* Destination IP */
*queue = pipe_queue - *traffic_class; /* Destination IP */
*color = pdata[COLOR_OFFSET] & 0x03; /* Destination IP */
RTE_SCHED_TRAFFIC_CLASS_BE : pipe_queue;
/* Traffic class queue (Destination IP) */
*queue = pipe_queue - *traffic_class;
/* Color (Destination IP) */
*color = pdata[COLOR_OFFSET] & 0x03;
return 0;
}

examples/qos_sched/cfg_file.c

@@ -24,14 +24,10 @@ int
cfg_load_port(struct rte_cfgfile *cfg, struct rte_sched_port_params *port_params)
{
const char *entry;
int j;
if (!cfg || !port_params)
return -1;
memset(active_queues, 0, sizeof(active_queues));
n_active_queues = 0;
entry = rte_cfgfile_get_entry(cfg, "port", "frame overhead");
if (entry)
port_params->frame_overhead = (uint32_t)atoi(entry);
@@ -40,106 +36,6 @@ cfg_load_port(struct rte_cfgfile *cfg, struct rte_sched_port_params *port_params
if (entry)
port_params->n_subports_per_port = (uint32_t)atoi(entry);
entry = rte_cfgfile_get_entry(cfg, "port", "number of pipes per subport");
if (entry)
port_params->n_pipes_per_subport = (uint32_t)atoi(entry);
entry = rte_cfgfile_get_entry(cfg, "port", "queue sizes");
if (entry) {
char *next;
for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
port_params->qsize[j] = (uint16_t)strtol(entry, &next, 10);
if (next == NULL)
break;
entry = next;
}
for (j = 0; j < RTE_SCHED_TRAFFIC_CLASS_BE; j++)
if (port_params->qsize[j]) {
active_queues[n_active_queues] = j;
n_active_queues++;
}
if (port_params->qsize[RTE_SCHED_TRAFFIC_CLASS_BE])
for (j = 0; j < RTE_SCHED_BE_QUEUES_PER_PIPE; j++) {
active_queues[n_active_queues] =
RTE_SCHED_TRAFFIC_CLASS_BE + j;
n_active_queues++;
}
}
#ifdef RTE_SCHED_RED
for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
char str[32];
/* Parse WRED min thresholds */
snprintf(str, sizeof(str), "tc %d wred min", j);
entry = rte_cfgfile_get_entry(cfg, "red", str);
if (entry) {
char *next;
int k;
/* for each packet colour (green, yellow, red) */
for (k = 0; k < RTE_COLORS; k++) {
port_params->red_params[j][k].min_th
= (uint16_t)strtol(entry, &next, 10);
if (next == NULL)
break;
entry = next;
}
}
/* Parse WRED max thresholds */
snprintf(str, sizeof(str), "tc %d wred max", j);
entry = rte_cfgfile_get_entry(cfg, "red", str);
if (entry) {
char *next;
int k;
/* for each packet colour (green, yellow, red) */
for (k = 0; k < RTE_COLORS; k++) {
port_params->red_params[j][k].max_th
= (uint16_t)strtol(entry, &next, 10);
if (next == NULL)
break;
entry = next;
}
}
/* Parse WRED inverse mark probabilities */
snprintf(str, sizeof(str), "tc %d wred inv prob", j);
entry = rte_cfgfile_get_entry(cfg, "red", str);
if (entry) {
char *next;
int k;
/* for each packet colour (green, yellow, red) */
for (k = 0; k < RTE_COLORS; k++) {
port_params->red_params[j][k].maxp_inv
= (uint8_t)strtol(entry, &next, 10);
if (next == NULL)
break;
entry = next;
}
}
/* Parse WRED EWMA filter weights */
snprintf(str, sizeof(str), "tc %d wred weight", j);
entry = rte_cfgfile_get_entry(cfg, "red", str);
if (entry) {
char *next;
int k;
/* for each packet colour (green, yellow, red) */
for (k = 0; k < RTE_COLORS; k++) {
port_params->red_params[j][k].wq_log2
= (uint8_t)strtol(entry, &next, 10);
if (next == NULL)
break;
entry = next;
}
}
}
#endif /* RTE_SCHED_RED */
return 0;
}
@@ -155,7 +51,7 @@ cfg_load_pipe(struct rte_cfgfile *cfg, struct rte_sched_pipe_params *pipe_params
return -1;
profiles = rte_cfgfile_num_sections(cfg, "pipe profile", sizeof("pipe profile") - 1);
port_params.n_pipe_profiles = profiles;
subport_params[0].n_pipe_profiles = profiles;
for (j = 0; j < profiles; j++) {
char pipe_name[32];
@@ -253,12 +149,121 @@ cfg_load_subport(struct rte_cfgfile *cfg, struct rte_sched_subport_params *subpo
return -1;
memset(app_pipe_to_profile, -1, sizeof(app_pipe_to_profile));
memset(active_queues, 0, sizeof(active_queues));
n_active_queues = 0;
#ifdef RTE_SCHED_RED
char sec_name[CFG_NAME_LEN];
struct rte_red_params red_params[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
snprintf(sec_name, sizeof(sec_name), "red");
if (rte_cfgfile_has_section(cfg, sec_name)) {
for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
char str[32];
/* Parse WRED min thresholds */
snprintf(str, sizeof(str), "tc %d wred min", i);
entry = rte_cfgfile_get_entry(cfg, sec_name, str);
if (entry) {
char *next;
/* for each packet colour (green, yellow, red) */
for (j = 0; j < RTE_COLORS; j++) {
red_params[i][j].min_th
= (uint16_t)strtol(entry, &next, 10);
if (next == NULL)
break;
entry = next;
}
}
/* Parse WRED max thresholds */
snprintf(str, sizeof(str), "tc %d wred max", i);
entry = rte_cfgfile_get_entry(cfg, "red", str);
if (entry) {
char *next;
/* for each packet colour (green, yellow, red) */
for (j = 0; j < RTE_COLORS; j++) {
red_params[i][j].max_th
= (uint16_t)strtol(entry, &next, 10);
if (next == NULL)
break;
entry = next;
}
}
/* Parse WRED inverse mark probabilities */
snprintf(str, sizeof(str), "tc %d wred inv prob", i);
entry = rte_cfgfile_get_entry(cfg, "red", str);
if (entry) {
char *next;
/* for each packet colour (green, yellow, red) */
for (j = 0; j < RTE_COLORS; j++) {
red_params[i][j].maxp_inv
= (uint8_t)strtol(entry, &next, 10);
if (next == NULL)
break;
entry = next;
}
}
/* Parse WRED EWMA filter weights */
snprintf(str, sizeof(str), "tc %d wred weight", i);
entry = rte_cfgfile_get_entry(cfg, "red", str);
if (entry) {
char *next;
/* for each packet colour (green, yellow, red) */
for (j = 0; j < RTE_COLORS; j++) {
red_params[i][j].wq_log2
= (uint8_t)strtol(entry, &next, 10);
if (next == NULL)
break;
entry = next;
}
}
}
}
#endif /* RTE_SCHED_RED */
for (i = 0; i < MAX_SCHED_SUBPORTS; i++) {
char sec_name[CFG_NAME_LEN];
snprintf(sec_name, sizeof(sec_name), "subport %d", i);
if (rte_cfgfile_has_section(cfg, sec_name)) {
entry = rte_cfgfile_get_entry(cfg, sec_name,
"number of pipes per subport");
if (entry)
subport_params[i].n_pipes_per_subport_enabled =
(uint32_t)atoi(entry);
entry = rte_cfgfile_get_entry(cfg, sec_name, "queue sizes");
if (entry) {
char *next;
for (j = 0; j < RTE_SCHED_TRAFFIC_CLASS_BE; j++) {
subport_params[i].qsize[j] =
(uint16_t)strtol(entry, &next, 10);
if (subport_params[i].qsize[j] != 0) {
active_queues[n_active_queues] = j;
n_active_queues++;
}
if (next == NULL)
break;
entry = next;
}
subport_params[i].qsize[RTE_SCHED_TRAFFIC_CLASS_BE] =
(uint16_t)strtol(entry, &next, 10);
for (j = 0; j < RTE_SCHED_BE_QUEUES_PER_PIPE; j++) {
active_queues[n_active_queues] =
RTE_SCHED_TRAFFIC_CLASS_BE + j;
n_active_queues++;
}
}
entry = rte_cfgfile_get_entry(cfg, sec_name, "tb rate");
if (entry)
subport_params[i].tb_rate = (uint32_t)atoi(entry);
@@ -362,6 +367,20 @@ cfg_load_subport(struct rte_cfgfile *cfg, struct rte_sched_subport_params *subpo
}
}
}
#ifdef RTE_SCHED_RED
for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
for (k = 0; k < RTE_COLORS; k++) {
subport_params[i].red_params[j][k].min_th =
red_params[j][k].min_th;
subport_params[i].red_params[j][k].max_th =
red_params[j][k].max_th;
subport_params[i].red_params[j][k].maxp_inv =
red_params[j][k].maxp_inv;
subport_params[i].red_params[j][k].wq_log2 =
red_params[j][k].wq_log2;
}
}
#endif
}
}

examples/qos_sched/init.c

@@ -180,18 +180,6 @@ app_init_port(uint16_t portid, struct rte_mempool *mp)
return 0;
}
static struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = {
{
.tb_rate = 1250000000,
.tb_size = 1000000,
.tc_rate = {1250000000, 1250000000, 1250000000, 1250000000,
1250000000, 1250000000, 1250000000, 1250000000, 1250000000,
1250000000, 1250000000, 1250000000, 1250000000},
.tc_period = 10,
},
};
static struct rte_sched_pipe_params pipe_profiles[MAX_SCHED_PIPE_PROFILES] = {
{ /* Profile #0 */
.tb_rate = 305175,
@@ -208,19 +196,21 @@ static struct rte_sched_pipe_params pipe_profiles[MAX_SCHED_PIPE_PROFILES] = {
},
};
struct rte_sched_port_params port_params = {
.name = "port_scheduler_0",
.socket = 0, /* computed */
.rate = 0, /* computed */
.mtu = 6 + 6 + 4 + 4 + 2 + 1500,
.frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
.n_subports_per_port = 1,
.n_pipes_per_subport = 4096,
.qsize = {64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64},
.pipe_profiles = pipe_profiles,
.n_pipe_profiles = sizeof(pipe_profiles) / sizeof(struct rte_sched_pipe_params),
.n_max_pipe_profiles = MAX_SCHED_PIPE_PROFILES,
struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = {
{
.tb_rate = 1250000000,
.tb_size = 1000000,
.tc_rate = {1250000000, 1250000000, 1250000000, 1250000000,
1250000000, 1250000000, 1250000000, 1250000000, 1250000000,
1250000000, 1250000000, 1250000000, 1250000000},
.tc_period = 10,
.n_pipes_per_subport_enabled = 4096,
.qsize = {64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64},
.pipe_profiles = pipe_profiles,
.n_pipe_profiles = sizeof(pipe_profiles) /
sizeof(struct rte_sched_pipe_params),
.n_max_pipe_profiles = MAX_SCHED_PIPE_PROFILES,
#ifdef RTE_SCHED_RED
.red_params = {
/* Traffic Class 0 Colors Green / Yellow / Red */
@@ -289,6 +279,17 @@ struct rte_sched_port_params port_params = {
[12][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
},
#endif /* RTE_SCHED_RED */
},
};
struct rte_sched_port_params port_params = {
.name = "port_scheduler_0",
.socket = 0, /* computed */
.rate = 0, /* computed */
.mtu = 6 + 6 + 4 + 4 + 2 + 1500,
.frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
.n_subports_per_port = 1,
.n_pipes_per_subport = MAX_SCHED_PIPES,
};
static struct rte_sched_port *
@@ -323,7 +324,10 @@ app_init_sched_port(uint32_t portid, uint32_t socketid)
subport, err);
}
for (pipe = 0; pipe < port_params.n_pipes_per_subport; pipe++) {
uint32_t n_pipes_per_subport =
subport_params[subport].n_pipes_per_subport_enabled;
for (pipe = 0; pipe < n_pipes_per_subport; pipe++) {
if (app_pipe_to_profile[subport][pipe] != -1) {
err = rte_sched_pipe_config(port, subport, pipe,
app_pipe_to_profile[subport][pipe]);
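
For orientation, the init path after this change reduces to: configure the
port once, configure each subport with its own parameters, then bound the
pipe-configuration loop by that subport's n_pipes_per_subport_enabled
instead of a port-wide count. A minimal sketch under those assumptions
(hypothetical helper name, error handling and profile selection trimmed,
API signatures as used by the sample app at the time of this patch):

#include <stdint.h>
#include <rte_sched.h>

/* Hypothetical helper, not part of the patch. */
static struct rte_sched_port *
example_configure(struct rte_sched_port_params *pp,
	struct rte_sched_subport_params *sp, uint32_t n_subports)
{
	struct rte_sched_port *port = rte_sched_port_config(pp);
	uint32_t subport, pipe;

	for (subport = 0; subport < n_subports; subport++) {
		rte_sched_subport_config(port, subport, &sp[subport]);

		/* The pipe loop is now bounded per subport. */
		for (pipe = 0;
		     pipe < sp[subport].n_pipes_per_subport_enabled;
		     pipe++)
			rte_sched_pipe_config(port, subport, pipe,
				0 /* pipe profile id, illustrative */);
	}

	return port;
}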

examples/qos_sched/main.h

@@ -152,6 +152,7 @@ uint32_t active_queues[RTE_SCHED_QUEUES_PER_PIPE];
uint32_t n_active_queues;
extern struct rte_sched_port_params port_params;
extern struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS];
int app_parse_args(int argc, char **argv);
int app_init(void);

examples/qos_sched/profile.cfg

@@ -20,11 +20,12 @@
[port]
frame overhead = 24
number of subports per port = 1
number of pipes per subport = 4096
queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64
; Subport configuration
[subport 0]
number of pipes per subport = 4096
queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64
tb rate = 1250000000 ; Bytes per second
tb size = 1000000 ; Bytes

examples/qos_sched/profile_ov.cfg

@@ -5,11 +5,12 @@
[port]
frame overhead = 24
number of subports per port = 1
number of pipes per subport = 32
queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64
; Subport configuration
[subport 0]
number of pipes per subport = 32
queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64
tb rate = 8400000 ; Bytes per second
tb size = 100000 ; Bytes

examples/qos_sched/stats.c

@@ -24,7 +24,7 @@ qavg_q(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc,
if (i == nb_pfc ||
subport_id >= port_params.n_subports_per_port ||
pipe_id >= port_params.n_pipes_per_subport ||
pipe_id >= subport_params[subport_id].n_pipes_per_subport_enabled ||
tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE ||
q >= RTE_SCHED_BE_QUEUES_PER_PIPE ||
(tc < RTE_SCHED_TRAFFIC_CLASS_BE && q > 0))
@@ -32,7 +32,7 @@ qavg_q(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc,
port = qos_conf[i].sched_port;
for (i = 0; i < subport_id; i++)
queue_id += port_params.n_pipes_per_subport *
queue_id += subport_params[i].n_pipes_per_subport_enabled *
RTE_SCHED_QUEUES_PER_PIPE;
if (tc < RTE_SCHED_TRAFFIC_CLASS_BE)
queue_id += pipe_id * RTE_SCHED_QUEUES_PER_PIPE + tc;
@@ -69,14 +69,16 @@ qavg_tcpipe(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id,
}
if (i == nb_pfc || subport_id >= port_params.n_subports_per_port ||
pipe_id >= port_params.n_pipes_per_subport ||
pipe_id >= subport_params[subport_id].n_pipes_per_subport_enabled ||
tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
return -1;
port = qos_conf[i].sched_port;
for (i = 0; i < subport_id; i++)
queue_id += port_params.n_pipes_per_subport * RTE_SCHED_QUEUES_PER_PIPE;
queue_id +=
subport_params[i].n_pipes_per_subport_enabled *
RTE_SCHED_QUEUES_PER_PIPE;
queue_id += pipe_id * RTE_SCHED_QUEUES_PER_PIPE + tc;
@@ -123,13 +125,13 @@ qavg_pipe(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id)
if (i == nb_pfc ||
subport_id >= port_params.n_subports_per_port ||
pipe_id >= port_params.n_pipes_per_subport)
pipe_id >= subport_params[subport_id].n_pipes_per_subport_enabled)
return -1;
port = qos_conf[i].sched_port;
for (i = 0; i < subport_id; i++)
queue_id += port_params.n_pipes_per_subport *
queue_id += subport_params[i].n_pipes_per_subport_enabled *
RTE_SCHED_QUEUES_PER_PIPE;
queue_id += pipe_id * RTE_SCHED_QUEUES_PER_PIPE;
@@ -177,13 +179,17 @@ qavg_tcsubport(uint16_t port_id, uint32_t subport_id, uint8_t tc)
for (i = 0; i < subport_id; i++)
subport_queue_id +=
port_params.n_pipes_per_subport * RTE_SCHED_QUEUES_PER_PIPE;
subport_params[i].n_pipes_per_subport_enabled *
RTE_SCHED_QUEUES_PER_PIPE;
average = 0;
for (count = 0; count < qavg_ntimes; count++) {
uint32_t n_pipes_per_subport =
subport_params[subport_id].n_pipes_per_subport_enabled;
part_average = 0;
for (i = 0; i < port_params.n_pipes_per_subport; i++) {
for (i = 0; i < n_pipes_per_subport; i++) {
if (tc < RTE_SCHED_TRAFFIC_CLASS_BE) {
queue_id = subport_queue_id +
i * RTE_SCHED_QUEUES_PER_PIPE + tc;
@@ -203,10 +209,11 @@ qavg_tcsubport(uint16_t port_id, uint32_t subport_id, uint8_t tc)
}
if (tc < RTE_SCHED_TRAFFIC_CLASS_BE)
average += part_average / (port_params.n_pipes_per_subport);
average += part_average /
(subport_params[subport_id].n_pipes_per_subport_enabled);
else
average +=
part_average / (port_params.n_pipes_per_subport) *
average += part_average /
(subport_params[subport_id].n_pipes_per_subport_enabled) *
RTE_SCHED_BE_QUEUES_PER_PIPE;
usleep(qavg_period);
@@ -240,14 +247,17 @@ qavg_subport(uint16_t port_id, uint32_t subport_id)
port = qos_conf[i].sched_port;
for (i = 0; i < subport_id; i++)
subport_queue_id += port_params.n_pipes_per_subport *
subport_queue_id += subport_params[i].n_pipes_per_subport_enabled *
RTE_SCHED_QUEUES_PER_PIPE;
average = 0;
for (count = 0; count < qavg_ntimes; count++) {
uint32_t n_pipes_per_subport =
subport_params[subport_id].n_pipes_per_subport_enabled;
part_average = 0;
for (i = 0; i < port_params.n_pipes_per_subport; i++) {
for (i = 0; i < n_pipes_per_subport; i++) {
queue_id = subport_queue_id + i * RTE_SCHED_QUEUES_PER_PIPE;
for (j = 0; j < RTE_SCHED_QUEUES_PER_PIPE; j++) {
@@ -258,7 +268,8 @@ qavg_subport(uint16_t port_id, uint32_t subport_id)
}
average += part_average /
(port_params.n_pipes_per_subport * RTE_SCHED_QUEUES_PER_PIPE);
(subport_params[subport_id].n_pipes_per_subport_enabled *
RTE_SCHED_QUEUES_PER_PIPE);
usleep(qavg_period);
}
@@ -322,12 +333,13 @@ pipe_stat(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id)
if (i == nb_pfc ||
subport_id >= port_params.n_subports_per_port ||
pipe_id >= port_params.n_pipes_per_subport)
pipe_id >= subport_params[subport_id].n_pipes_per_subport_enabled)
return -1;
port = qos_conf[i].sched_port;
for (i = 0; i < subport_id; i++)
queue_id += port_params.n_pipes_per_subport * RTE_SCHED_QUEUES_PER_PIPE;
queue_id += subport_params[i].n_pipes_per_subport_enabled *
RTE_SCHED_QUEUES_PER_PIPE;
queue_id += pipe_id * RTE_SCHED_QUEUES_PER_PIPE;
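
The queue-id arithmetic repeated throughout these stats hunks follows one
rule: a subport's first queue id is the sum of the enabled pipe counts of
all preceding subports times RTE_SCHED_QUEUES_PER_PIPE, and a pipe's queues
then start at pipe_id * RTE_SCHED_QUEUES_PER_PIPE within that block. A
hypothetical helper (not in the patch) capturing it, using the
subport_params array declared in main.h:

/* Hypothetical helper: base queue id of a subport after this change. */
static uint32_t
subport_base_queue_id(uint32_t subport_id)
{
	uint32_t i, queue_id = 0;

	for (i = 0; i < subport_id; i++)
		queue_id += subport_params[i].n_pipes_per_subport_enabled *
			RTE_SCHED_QUEUES_PER_PIPE;

	return queue_id;
}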