diff --git a/home/nipa/nipa_out/955487/ynl/old-code/ethtool-user.c b/home/nipa/nipa_out/955487/ynl/new-code/ethtool-user.c
index 72ccc16eae19..5bfe61087b05 100644
--- a/home/nipa/nipa_out/955487/ynl/old-code/ethtool-user.c
+++ b/home/nipa/nipa_out/955487/ynl/new-code/ethtool-user.c
@@ -571,6 +571,7 @@ const struct ynl_policy_attr ethtool_rings_policy[ETHTOOL_A_RINGS_MAX + 1] = {
 	[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX] = { .name = "tx-push-buf-len-max", .type = YNL_PT_U32, },
 	[ETHTOOL_A_RINGS_HDS_THRESH] = { .name = "hds-thresh", .type = YNL_PT_U32, },
 	[ETHTOOL_A_RINGS_HDS_THRESH_MAX] = { .name = "hds-thresh-max", .type = YNL_PT_U32, },
+	[ETHTOOL_A_RINGS_RX_BUF_LEN_MAX] = { .name = "rx-buf-len-max", .type = YNL_PT_U32, },
 };
 
 const struct ynl_policy_nest ethtool_rings_nest = {
@@ -3735,6 +3736,11 @@ int ethtool_rings_get_rsp_parse(const struct nlmsghdr *nlh,
 				return YNL_PARSE_CB_ERROR;
 			dst->_present.rx_buf_len = 1;
 			dst->rx_buf_len = ynl_attr_get_u32(attr);
+		} else if (type == ETHTOOL_A_RINGS_RX_BUF_LEN_MAX) {
+			if (ynl_attr_validate(yarg, attr))
+				return YNL_PARSE_CB_ERROR;
+			dst->_present.rx_buf_len_max = 1;
+			dst->rx_buf_len_max = ynl_attr_get_u32(attr);
 		} else if (type == ETHTOOL_A_RINGS_TCP_DATA_SPLIT) {
 			if (ynl_attr_validate(yarg, attr))
 				return YNL_PARSE_CB_ERROR;
@@ -3908,6 +3914,8 @@ int ethtool_rings_set(struct ynl_sock *ys, struct ethtool_rings_set_req *req)
 		ynl_attr_put_u32(nlh, ETHTOOL_A_RINGS_TX, req->tx);
 	if (req->_present.rx_buf_len)
 		ynl_attr_put_u32(nlh, ETHTOOL_A_RINGS_RX_BUF_LEN, req->rx_buf_len);
+	if (req->_present.rx_buf_len_max)
+		ynl_attr_put_u32(nlh, ETHTOOL_A_RINGS_RX_BUF_LEN_MAX, req->rx_buf_len_max);
 	if (req->_present.tcp_data_split)
 		ynl_attr_put_u8(nlh, ETHTOOL_A_RINGS_TCP_DATA_SPLIT, req->tcp_data_split);
 	if (req->_present.cqe_size)
diff --git a/home/nipa/nipa_out/955487/ynl/old-code/ethtool-user.h b/home/nipa/nipa_out/955487/ynl/new-code/ethtool-user.h
index dae56d1efe3a..10520faa928a 100644
--- a/home/nipa/nipa_out/955487/ynl/old-code/ethtool-user.h
+++ b/home/nipa/nipa_out/955487/ynl/new-code/ethtool-user.h
@@ -2523,6 +2523,7 @@ struct ethtool_rings_get_rsp {
 		__u32 rx_jumbo:1;
 		__u32 tx:1;
 		__u32 rx_buf_len:1;
+		__u32 rx_buf_len_max:1;
 		__u32 tcp_data_split:1;
 		__u32 cqe_size:1;
 		__u32 tx_push:1;
@@ -2543,6 +2544,7 @@ struct ethtool_rings_get_rsp {
 	__u32 rx_jumbo;
 	__u32 tx;
 	__u32 rx_buf_len;
+	__u32 rx_buf_len_max;
 	enum ethtool_tcp_data_split tcp_data_split;
 	__u32 cqe_size;
 	__u8 tx_push;
@@ -2649,6 +2651,7 @@ struct ethtool_rings_set_req {
 		__u32 rx_jumbo:1;
 		__u32 tx:1;
 		__u32 rx_buf_len:1;
+		__u32 rx_buf_len_max:1;
 		__u32 tcp_data_split:1;
 		__u32 cqe_size:1;
 		__u32 tx_push:1;
@@ -2669,6 +2672,7 @@ struct ethtool_rings_set_req {
 	__u32 rx_jumbo;
 	__u32 tx;
 	__u32 rx_buf_len;
+	__u32 rx_buf_len_max;
 	enum ethtool_tcp_data_split tcp_data_split;
 	__u32 cqe_size;
 	__u8 tx_push;
@@ -2782,6 +2786,13 @@ ethtool_rings_set_req_set_rx_buf_len(struct ethtool_rings_set_req *req,
 	req->rx_buf_len = rx_buf_len;
 }
 static inline void
+ethtool_rings_set_req_set_rx_buf_len_max(struct ethtool_rings_set_req *req,
+					 __u32 rx_buf_len_max)
+{
+	req->_present.rx_buf_len_max = 1;
+	req->rx_buf_len_max = rx_buf_len_max;
+}
+static inline void
 ethtool_rings_set_req_set_tcp_data_split(struct ethtool_rings_set_req *req,
 					 enum ethtool_tcp_data_split tcp_data_split)
 {
diff --git a/home/nipa/nipa_out/955487/ynl/old-code/netdev-user.c b/home/nipa/nipa_out/955487/ynl/new-code/netdev-user.c
index 878960df097c..b04d28bcf3b9 100644
--- a/home/nipa/nipa_out/955487/ynl/old-code/netdev-user.c
+++ b/home/nipa/nipa_out/955487/ynl/new-code/netdev-user.c
@@ -27,6 +27,7 @@ static const char * const netdev_op_strmap[] = {
 	[NETDEV_CMD_QSTATS_GET] = "qstats-get",
 	[NETDEV_CMD_BIND_RX] = "bind-rx",
 	[NETDEV_CMD_NAPI_SET] = "napi-set",
+	[NETDEV_CMD_QUEUE_SET] = "queue-set",
 };
 
 const char *netdev_op_str(int op)
@@ -201,6 +202,7 @@ const struct ynl_policy_attr netdev_queue_policy[NETDEV_A_QUEUE_MAX + 1] = {
 	[NETDEV_A_QUEUE_DMABUF] = { .name = "dmabuf", .type = YNL_PT_U32, },
 	[NETDEV_A_QUEUE_IO_URING] = { .name = "io-uring", .type = YNL_PT_NEST, .nest = &netdev_io_uring_provider_info_nest, },
 	[NETDEV_A_QUEUE_XSK] = { .name = "xsk", .type = YNL_PT_NEST, .nest = &netdev_xsk_info_nest, },
+	[NETDEV_A_QUEUE_RX_BUF_LEN] = { .name = "rx-buf-len", .type = YNL_PT_U32, },
 };
 
 const struct ynl_policy_nest netdev_queue_nest = {
@@ -1335,6 +1337,38 @@ int netdev_napi_set(struct ynl_sock *ys, struct netdev_napi_set_req *req)
 	return 0;
 }
 
+/* ============== NETDEV_CMD_QUEUE_SET ============== */
+/* NETDEV_CMD_QUEUE_SET - do */
+void netdev_queue_set_req_free(struct netdev_queue_set_req *req)
+{
+	free(req);
+}
+
+int netdev_queue_set(struct ynl_sock *ys, struct netdev_queue_set_req *req)
+{
+	struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+	struct nlmsghdr *nlh;
+	int err;
+
+	nlh = ynl_gemsg_start_req(ys, ys->family_id, NETDEV_CMD_QUEUE_SET, 1);
+	ys->req_policy = &netdev_queue_nest;
+
+	if (req->_present.ifindex)
+		ynl_attr_put_u32(nlh, NETDEV_A_QUEUE_IFINDEX, req->ifindex);
+	if (req->_present.type)
+		ynl_attr_put_u32(nlh, NETDEV_A_QUEUE_TYPE, req->type);
+	if (req->_present.id)
+		ynl_attr_put_u32(nlh, NETDEV_A_QUEUE_ID, req->id);
+	if (req->_present.rx_buf_len)
+		ynl_attr_put_u32(nlh, NETDEV_A_QUEUE_RX_BUF_LEN, req->rx_buf_len);
+
+	err = ynl_exec(ys, nlh, &yrs);
+	if (err < 0)
+		return -1;
+
+	return 0;
+}
+
 static const struct ynl_ntf_info netdev_ntf_info[] = {
 	[NETDEV_CMD_DEV_ADD_NTF] =	{
 		.alloc_sz	= sizeof(struct netdev_dev_get_ntf),
diff --git a/home/nipa/nipa_out/955487/ynl/old-code/netdev-user.h b/home/nipa/nipa_out/955487/ynl/new-code/netdev-user.h
index 33285edfc651..dc378f1af237 100644
--- a/home/nipa/nipa_out/955487/ynl/old-code/netdev-user.h
+++ b/home/nipa/nipa_out/955487/ynl/new-code/netdev-user.h
@@ -653,4 +653,59 @@ netdev_napi_set_req_set_irq_suspend_timeout(struct netdev_napi_set_req *req,
  */
 int netdev_napi_set(struct ynl_sock *ys, struct netdev_napi_set_req *req);
 
+/* ============== NETDEV_CMD_QUEUE_SET ============== */
+/* NETDEV_CMD_QUEUE_SET - do */
+struct netdev_queue_set_req {
+	struct {
+		__u32 ifindex:1;
+		__u32 type:1;
+		__u32 id:1;
+		__u32 rx_buf_len:1;
+	} _present;
+
+	__u32 ifindex;
+	enum netdev_queue_type type;
+	__u32 id;
+	__u32 rx_buf_len;
+};
+
+static inline struct netdev_queue_set_req *netdev_queue_set_req_alloc(void)
+{
+	return calloc(1, sizeof(struct netdev_queue_set_req));
+}
+void netdev_queue_set_req_free(struct netdev_queue_set_req *req);
+
+static inline void
+netdev_queue_set_req_set_ifindex(struct netdev_queue_set_req *req,
+				 __u32 ifindex)
+{
+	req->_present.ifindex = 1;
+	req->ifindex = ifindex;
+}
+static inline void
+netdev_queue_set_req_set_type(struct netdev_queue_set_req *req,
+			      enum netdev_queue_type type)
+{
+	req->_present.type = 1;
+	req->type = type;
+}
+static inline void
+netdev_queue_set_req_set_id(struct netdev_queue_set_req *req, __u32 id)
+{
+	req->_present.id = 1;
+	req->id = id;
+}
+static inline void
+netdev_queue_set_req_set_rx_buf_len(struct netdev_queue_set_req *req,
+				    __u32 rx_buf_len)
+{
+	req->_present.rx_buf_len = 1;
+	req->rx_buf_len = rx_buf_len;
+}
+
+/*
+ * Set per-queue configurable options.
+ */
+int netdev_queue_set(struct ynl_sock *ys, struct netdev_queue_set_req *req);
+
 #endif /* _LINUX_NETDEV_GEN_H */
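
For reviewers who want to exercise the newly generated request path, a minimal usage sketch follows. It assumes the standard YNL C library entry points (ynl_sock_create(), ynl_sock_destroy(), the ynl_netdev_family descriptor, and the ys->err error buffer) plus the NETDEV_QUEUE_TYPE_RX enum value; the wrapper name set_queue_rx_buf_len() is hypothetical and not part of the generated code.

#include <stdio.h>
#include <ynl.h>
#include "netdev-user.h"

/* Hypothetical helper: set rx-buf-len on one RX queue via NETDEV_CMD_QUEUE_SET. */
static int set_queue_rx_buf_len(__u32 ifindex, __u32 queue_id, __u32 buf_len)
{
	struct netdev_queue_set_req *req;
	struct ynl_error yerr;
	struct ynl_sock *ys;
	int ret;

	ys = ynl_sock_create(&ynl_netdev_family, &yerr);
	if (!ys) {
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return -1;
	}

	/* Fill the request; each setter also flips the matching _present bit. */
	req = netdev_queue_set_req_alloc();
	netdev_queue_set_req_set_ifindex(req, ifindex);
	netdev_queue_set_req_set_type(req, NETDEV_QUEUE_TYPE_RX);
	netdev_queue_set_req_set_id(req, queue_id);
	netdev_queue_set_req_set_rx_buf_len(req, buf_len);

	ret = netdev_queue_set(ys, req);
	if (ret < 0)
		fprintf(stderr, "queue-set failed: %s\n", ys->err.msg);

	netdev_queue_set_req_free(req);
	ynl_sock_destroy(ys);
	return ret;
}

The ethtool side is symmetric: ETHTOOL_A_RINGS_RX_BUF_LEN_MAX is parsed into the rx_buf_len_max field of struct ethtool_rings_get_rsp, so a caller can read the device limit from a rings-get response before issuing the per-queue set shown above.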