SPDK NVMe-over-RDMA Transport Framework

This article walks through the SPDK code with nvmf_tgt -m 0x0F (3 I/O reactors) as the running environment.
It covers the following topics:

  1. RDMA transport registration
  2. NVMe-over-RDMA transport initialization
  3. NVMe-over-RDMA transport listen flow
  4. NVMe-over-RDMA transport accept flow
  5. NVMe-over-RDMA transport request handling

RDMA transport registration

Registration follows the classic module pattern: the transport declares an ops table that abstracts the transport layer and registers it onto a global list (an example of how the RDMA transport uses this is shown after the macro):

void
spdk_nvmf_transport_register(const struct spdk_nvmf_transport_ops *ops)
{
    struct nvmf_transport_ops_list_element *new_ops;

    /* Copy the caller's ops into a new list element and append it to the global list
     * (duplicate-name and allocation-failure checks elided). */
    new_ops = calloc(1, sizeof(*new_ops));
    new_ops->ops = *ops;
    TAILQ_INSERT_TAIL(&g_spdk_nvmf_transport_ops, new_ops, link);
}

#define SPDK_NVMF_TRANSPORT_REGISTER(name, transport_ops) \
static void __attribute__((constructor)) _spdk_nvmf_transport_register_##name(void) \
{ \
	spdk_nvmf_transport_register(transport_ops); \
}
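
The RDMA transport in rdma.c provides its function table and registers it through this constructor macro at load time. An abridged excerpt (only the callbacks that appear in this article's backtraces are shown; the full table defines many more):

const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = {
	.name = "RDMA",
	.type = SPDK_NVME_TRANSPORT_RDMA,
	.opts_init = nvmf_rdma_opts_init,
	.create = nvmf_rdma_create,
	.listen = nvmf_rdma_listen,
	.poll_group_create = nvmf_rdma_poll_group_create,
	.poll_group_add = nvmf_rdma_poll_group_add,
	.poll_group_poll = nvmf_rdma_poll_group_poll,
	/* ... further callbacks elided ... */
};

SPDK_NVMF_TRANSPORT_REGISTER(rdma, &spdk_nvmf_transport_rdma);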

NVMe-over-RDMA transport initialization

The transport is created via RPC, for example: rpc.py nvmf_create_transport -t RDMA -u 8192 -i 131072 -c 8192
1. The transport ops are looked up by name (-t RDMA) and opts_init() is called to fill in the default opts; the remaining RPC parameters are then parsed to override them (the ops lookup is sketched after the backtrace below).

  #0  nvmf_rdma_opts_init (opts=0x7fffffffdc50) at rdma.c:2521
  #1  0x0000555555681dd0 in spdk_nvmf_transport_opts_init (transport_name=0x555555dcf4d0 "RDMA", opts=0x555555dd0360, opts_size=64) at transport.c:804
  #2  0x000055555567d195 in rpc_nvmf_create_transport (request=0x555555dce0a0, params=0x555555dab850) at nvmf_rpc.c:2261
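
The lookup by name is a straightforward walk of the global list populated at registration time. A condensed sketch based on nvmf_get_transport_ops() in lib/nvmf/transport.c (error logging omitted):

static const struct spdk_nvmf_transport_ops *
nvmf_get_transport_ops(const char *transport_name)
{
	struct nvmf_transport_ops_list_element *ops;

	/* Walk the list filled in by spdk_nvmf_transport_register() */
	TAILQ_FOREACH(ops, &g_spdk_nvmf_transport_ops, link) {
		if (strcasecmp(transport_name, ops->ops.name) == 0) {
			return &ops->ops;
		}
	}
	return NULL;
}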

2. The rtransport is created asynchronously. A struct nvmf_transport_create_ctx is built first, then spdk_thread_send_msg(spdk_get_thread(), _nvmf_transport_create_done, ctx) places the creation task on the current spdk_thread's message queue for later execution (a condensed sketch of this deferral follows the context struct below). nvmf_rdma_create() calls rdma_get_devices() and create_ib_device() to populate the rtransport->devices list, builds a poll_fds array covering the transport's event channel fd and each IB device's async_fd, and finally registers the accept_poller, which polls poll_fds to handle RDMA device events.
3. Once the rtransport has been created, the completion callback nvmf_rpc_create_transport_done() adds the transport to the target, which creates a struct spdk_nvmf_rdma_poll_group for every struct spdk_nvmf_poll_group: one poller per device, each with its own CQ, SRQ, and pre-allocated buffer cache, added to the rgroup->pollers list (a sketch of the per-device poller creation follows the backtraces below).

struct nvmf_transport_create_ctx {
	const struct spdk_nvmf_transport_ops *ops;
	struct spdk_nvmf_transport_opts opts;
	void *cb_arg;
	spdk_nvmf_transport_create_done_cb cb_fn;
};
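
A condensed sketch of how nvmf_transport_create() fills this context and defers the actual creation onto the current spdk_thread (error handling omitted; based on lib/nvmf/transport.c):

ctx = calloc(1, sizeof(*ctx));
ctx->ops = ops;        /* resolved via nvmf_get_transport_ops(transport_name) */
ctx->opts = *opts;
ctx->cb_fn = cb_fn;
ctx->cb_arg = cb_arg;

/* _nvmf_transport_create_done() runs when the current thread drains its message
 * queue; it calls ops->create() -- nvmf_rdma_create() for the RDMA transport --
 * and then invokes cb_fn with the new transport. */
spdk_thread_send_msg(spdk_get_thread(), _nvmf_transport_create_done, ctx);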

#0  nvmf_transport_create (transport_name=0x555555dcf4d0 "RDMA", opts=0x555555dd0360, cb_fn=0x55555567cf86 <nvmf_rpc_create_transport_done>,
    cb_arg=0x555555dd0350, sync=false) at transport.c:226
#1  0x0000555555680c44 in spdk_nvmf_transport_create_async (transport_name=0x555555dcf4d0 "RDMA", opts=0x555555dd0360,
    cb_fn=0x55555567cf86 <nvmf_rpc_create_transport_done>, cb_arg=0x555555dd0350) at transport.c:324
#2  0x000055555567d351 in rpc_nvmf_create_transport (request=0x555555dce0a0, params=0x555555dab850) at nvmf_rpc.c:2293

#0  nvmf_rdma_create (opts=0x555555dcd9b8) at rdma.c:2690
#1  0x000055555568078f in _nvmf_transport_create_done (ctx=0x555555dcd9b0) at transport.c:218
#2  0x000055555573c205 in msg_queue_run_batch (thread=0x555555db88b0, max_msgs=8) at thread.c:848
#3  0x000055555573cbb2 in thread_poll (thread=0x555555db88b0, max_msgs=0, now=4421943135727) at thread.c:1070

/* In nvmf_rdma_create(): set up a poll descriptor array to monitor events from RDMA CM
 * and IB async event queues in a single poll syscall. */
rtransport->poll_fds[i].fd = rtransport->event_channel->fd;
rtransport->poll_fds[i++].events = POLLIN;
TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp) {
    rtransport->poll_fds[i].fd = device->context->async_fd;
    rtransport->poll_fds[i++].events = POLLIN;
}

/* In nvmf_rdma_accept(), run periodically by the accept_poller: */
i = 0;
count = nfds = poll(rtransport->poll_fds, rtransport->npoll_fds, 0);
/* The first poll descriptor is the RDMA CM event channel */
if (rtransport->poll_fds[i++].revents & POLLIN) {
    nvmf_process_cm_event(transport);
    nfds--;
}
/* Second and subsequent poll descriptors are IB async events */
TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp) {
    revents = rtransport->poll_fds[i++].revents;
    if (revents & POLLIN) {
        nvmf_process_ib_events(device, 32);
    }
}

/* Back in nvmf_rdma_create(): register the poller that runs nvmf_rdma_accept() */
rtransport->accept_poller = SPDK_POLLER_REGISTER(nvmf_rdma_accept, &rtransport->transport,
				    opts->acceptor_poll_rate);

#0  spdk_nvmf_tgt_add_transport (tgt=0x555555dd7760, transport=0x555555de9a20, cb_fn=0x55555567cee3 <nvmf_rpc_tgt_add_transport_done>, cb_arg=0x555555dd0350)
    at nvmf.c:932
#1  0x000055555567d03b in nvmf_rpc_create_transport_done (cb_arg=0x555555dd0350, transport=0x555555de9a20) at nvmf_rpc.c:2221
#2  0x00005555556806ff in nvmf_transport_create_async_done (cb_arg=0x555555dcd9b0, transport=0x555555de9a20) at transport.c:200
#3  0x00005555556807a1 in _nvmf_transport_create_done (ctx=0x555555dcd9b0) at transport.c:218
#4  0x000055555573c205 in msg_queue_run_batch (thread=0x555555db88b0, max_msgs=8) at thread.c:848
#5  0x000055555573cbb2 in thread_poll (thread=0x555555db88b0, max_msgs=0, now=51338732651757) at thread.c:1070
#6  0x000055555573ceca in spdk_thread_poll (thread=0x555555db88b0, max_msgs=0, now=51338732651757) at thread.c:1163
spdk_for_each_channel(tgt,
			      _nvmf_tgt_add_transport,
			      ctx,
			      _nvmf_tgt_add_transport_done);

#0  nvmf_rdma_poll_group_create (transport=0x555555de9a20, group=0x7ffff0000e70) at rdma.c:4124
#1  0x00005555556815fb in nvmf_transport_poll_group_create (transport=0x555555de9a20, group=0x7ffff0000e70) at transport.c:573
#2  0x0000555555672373 in nvmf_poll_group_add_transport (group=0x7ffff0000e70, transport=0x555555de9a20) at nvmf.c:243
#3  0x000055555567407c in _nvmf_tgt_add_transport (i=0x555555e36d40) at nvmf.c:918
#4  0x00005555557404ce in _call_channel (ctx=0x555555e36d40) at thread.c:2552
#5  0x000055555573c205 in msg_queue_run_batch (thread=0x555555de7a10, max_msgs=8) at thread.c:848
#6  0x000055555573cbb2 in thread_poll (thread=0x555555de7a10, max_msgs=0, now=45645559624050) at thread.c:1070
TAILQ_INSERT_TAIL(&rgroup->pollers, poller, link);
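
Step 3 condensed: nvmf_rdma_poll_group_create() walks rtransport->devices and creates one poller per device, each with its own CQ (and a shared SRQ when SRQs are enabled). A sketch with error handling, SRQ setup, and the resource pre-allocation omitted; the CQ size argument is illustrative:

TAILQ_FOREACH(device, &rtransport->devices, link) {
	poller = calloc(1, sizeof(*poller));
	poller->device = device;
	poller->group = rgroup;
	RB_INIT(&poller->qpairs);

	/* One CQ per device per poll group; every qpair later added to this poller
	 * points its send_cq/recv_cq at it. num_cqe is derived from the transport
	 * opts (illustrative here). */
	poller->cq = ibv_create_cq(device->context, num_cqe, poller, NULL, 0);

	/* With SRQ enabled, a struct spdk_rdma_srq shared by all of this poller's
	 * qpairs is created here as well. */

	TAILQ_INSERT_TAIL(&rgroup->pollers, poller, link);
}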

NVMe-over-RDMA transport listen flow

A listener is added to a subsystem via RPC, for example: rpc.py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a 192.168.2.30 -s 4420
1. A struct nvmf_rpc_listener_ctx is built, the subsystem is paused, and nvmf_rdma_listen() is invoked from the pause-completion callback.

struct nvmf_rpc_listener_ctx {
	char				*nqn;
	char				*tgt_name;
	struct spdk_nvmf_tgt		*tgt;
	struct spdk_nvmf_transport	*transport;
	struct spdk_nvmf_subsystem	*subsystem;
	struct rpc_listen_address	address;
	char				*ana_state_str;
	enum spdk_nvme_ana_state	ana_state;
	uint32_t			anagrpid;

	struct spdk_jsonrpc_request	*request;
	struct spdk_nvme_transport_id	trid;
	enum nvmf_rpc_listen_op		op;
	bool				response_sent;
	struct spdk_nvmf_listen_opts	opts;

	/* Additional options for listener creation. */
	struct spdk_nvmf_listener_opts	listener_opts;
};

#0  nvmf_subsystem_state_change (subsystem=0x555555e37d10, nsid=0, requested_state=SPDK_NVMF_SUBSYSTEM_PAUSED, cb_fn=0x555555679b9b <nvmf_rpc_listen_paused>,
    cb_arg=0x555555fd7800) at subsystem.c:689
#1  0x000055555566972d in spdk_nvmf_subsystem_pause (subsystem=0x555555e37d10, nsid=0, cb_fn=0x555555679b9b <nvmf_rpc_listen_paused>, cb_arg=0x555555fd7800)
    at subsystem.c:757
#2  0x000055555567a3da in rpc_nvmf_subsystem_add_listener (request=0x555555dce0a0, params=0x555555dd0bf0) at nvmf_rpc.c:930

#0  nvmf_rdma_listen (transport=0x555555de9a20, trid=0x555555fd7b30, listen_opts=0x7fffffffddb0) at rdma.c:2968
#1  0x0000555555680fc2 in spdk_nvmf_transport_listen (transport=0x555555de9a20, trid=0x555555fd7860, opts=0x7fffffffddb0) at transport.c:412
#2  0x0000555555673cdb in spdk_nvmf_tgt_listen_ext (tgt=0x555555dd7760, trid=0x555555fd7860, opts=0x555555fd7a99) at nvmf.c:825
#3  0x0000555555679c12 in nvmf_rpc_listen_paused (subsystem=0x555555e37d10, cb_arg=0x555555fd7800, status=0) at nvmf_rpc.c:765
#4  0x0000555555669344 in subsystem_state_change_done (i=0x555555e37f30, status=0) at subsystem.c:626
#5  0x0000555555740452 in _call_completion (ctx=0x555555e37f30) at thread.c:2531
#6  0x000055555573c205 in msg_queue_run_batch (thread=0x555555db88b0, max_msgs=8) at thread.c:848
#7  0x000055555573cbb2 in thread_poll (thread=0x555555db88b0, max_msgs=0, now=27259428778286) at thread.c:1070

2. nvmf_rdma_listen() calls rdma_create_id() (the RDMA equivalent of creating a listening socket, associated with the event channel), rdma_bind_addr(), and rdma_listen() in sequence (see the sketch below); the resulting RDMA_CM_EVENT_CONNECT_REQUEST events are eventually picked up by the accept_poller, which handles the connection requests.

rdma_create_id(rtransport->event_channel, &port->id, port, RDMA_PS_TCP):
  rdma_cm_ids are conceptually equivalent to a socket for RDMA communication. The difference is
  that RDMA communication requires explicitly binding to a specified RDMA device before
  communication can occur, and most operations are asynchronous in nature. Communication events
  on an rdma_cm_id are reported through the associated event channel.
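
A condensed sketch of the sequence inside nvmf_rdma_listen() (the trid address is resolved into a sockaddr first; error handling and the port bookkeeping are omitted, and the backlog value is illustrative):

rc = rdma_create_id(rtransport->event_channel, &port->id, port, RDMA_PS_TCP);

rc = rdma_bind_addr(port->id, (struct sockaddr *)&saddr);

rc = rdma_listen(port->id, 10 /* backlog, illustrative */);

/* From this point on, RDMA_CM_EVENT_CONNECT_REQUEST events for this port are
 * delivered on rtransport->event_channel and polled by the accept_poller. */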

NVMe-over-RDMA transport accept flow

The initiator opens a connection, for example: nvme discover -t rdma -a 192.168.2.30 -s 4420
1. The accept poller picks up the connect event and calls nvmf_rdma_connect(), which creates a struct spdk_nvmf_qpair (negotiating queue depth, etc.), selects the optimal spdk_nvmf_poll_group, and posts a qpair-add message to that group's thread (a sketch of spdk_nvmf_tgt_new_qpair() follows the code below).

struct nvmf_new_qpair_ctx {
	struct spdk_nvmf_qpair *qpair;
	struct spdk_nvmf_poll_group *group;
};
#0  spdk_nvmf_tgt_new_qpair (tgt=0x555555dd7760, qpair=0x555555fd8230) at nvmf.c:1150
#1  0x0000555555697512 in nvmf_rdma_connect (transport=0x555555de9a20, event=0x555555dd8b30) at rdma.c:1441
#2  0x000055555569e25a in nvmf_process_cm_event (transport=0x555555de9a20) at rdma.c:3684
#3  0x000055555569edc4 in nvmf_rdma_accept (ctx=0x555555de9a20) at rdma.c:3945
#4  0x000055555573c972 in thread_execute_timed_poller (thread=0x555555db88b0, poller=0x555555e36ba0, now=53347558515989) at thread.c:1014
#5  0x000055555573cc9d in thread_poll (thread=0x555555db88b0, max_msgs=0, now=53347558515989) at thread.c:1104

group = spdk_nvmf_get_optimal_poll_group(qpair);
spdk_thread_send_msg(group->thread, _nvmf_poll_group_add, ctx);
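
A condensed sketch of spdk_nvmf_tgt_new_qpair(), which ties the pieces above together (allocation-failure handling and the round-robin fallback over tgt->poll_groups are omitted):

void
spdk_nvmf_tgt_new_qpair(struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair)
{
	struct nvmf_new_qpair_ctx *ctx;
	struct spdk_nvmf_poll_group *group;

	/* For RDMA this ends up in the transport's get_optimal_poll_group callback,
	 * which spreads qpairs across the poll groups created for this transport. */
	group = spdk_nvmf_get_optimal_poll_group(qpair);
	if (group == NULL) {
		/* fall back to round-robin over tgt->poll_groups (omitted) */
	}

	ctx = calloc(1, sizeof(*ctx));
	ctx->qpair = qpair;
	ctx->group = group;

	/* Hand the qpair over to the chosen poll group's thread; _nvmf_poll_group_add()
	 * runs there and calls spdk_nvmf_poll_group_add(). */
	spdk_thread_send_msg(group->thread, _nvmf_poll_group_add, ctx);
}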

2. nvmf_rdma_qpair_initialize() initializes the struct spdk_nvmf_rdma_qpair and calls rdma_create_qp(cm_id, qp_attr->pd, &attr) (wrapped by spdk_rdma_qp_create()) to create the struct spdk_rdma_qp, which is then added to the poller->qpairs tree; the attributes point both send_cq and recv_cq at the poller's CQ (see the sketch after the backtraces). Finally rdma_accept(spdk_rdma_qp->cm_id, conn_param) completes the handshake and establishes the connection at the network level.

#0  spdk_rdma_qp_create (cm_id=0x555555fd7fd0, qp_attr=0x7ffff6e6e650) at rdma_verbs.c:20
#1  0x00005555556961ab in nvmf_rdma_qpair_initialize (qpair=0x555555fd8230) at rdma.c:1065
#2  0x000055555569fd18 in nvmf_rdma_poll_group_add (group=0x7ffff000f0e0, qpair=0x555555fd8230) at rdma.c:4340
#3  0x0000555555681a1f in nvmf_transport_poll_group_add (group=0x7ffff000f0e0, qpair=0x555555fd8230) at transport.c:701
#4  0x0000555555674995 in spdk_nvmf_poll_group_add (group=0x7ffff0000e70, qpair=0x555555fd8230) at nvmf.c:1208
#5  0x0000555555674641 in _nvmf_poll_group_add (_ctx=0x555555db95e0) at nvmf.c:1124
#6  0x000055555573c205 in msg_queue_run_batch (thread=0x555555de7a10, max_msgs=8) at thread.c:848
#7  0x000055555573cbb2 in thread_poll (thread=0x555555de7a10, max_msgs=0, now=54264556071129) at thread.c:1070

#0  spdk_rdma_qp_accept (spdk_rdma_qp=0x7ffff01ad8c0, conn_param=0x7ffff6e6e650) at rdma_verbs.c:64
#1  0x0000555555696cfe in nvmf_rdma_event_accept (id=0x555555fd7fd0, rqpair=0x555555fd8230) at rdma.c:1305
#2  0x000055555569fdaa in nvmf_rdma_poll_group_add (group=0x7ffff000f0e0, qpair=0x555555fd8230) at rdma.c:4350
#3  0x0000555555681a1f in nvmf_transport_poll_group_add (group=0x7ffff000f0e0, qpair=0x555555fd8230) at transport.c:701
#4  0x0000555555674995 in spdk_nvmf_poll_group_add (group=0x7ffff0000e70, qpair=0x555555fd8230) at nvmf.c:1208
#5  0x0000555555674641 in _nvmf_poll_group_add (_ctx=0x555555db95e0) at nvmf.c:1124
#6  0x000055555573c205 in msg_queue_run_batch (thread=0x555555de7a10, max_msgs=8) at thread.c:848
#7  0x000055555573cbb2 in thread_poll (thread=0x555555de7a10, max_msgs=0, now=61794576997205) at thread.c:1070

RB_INSERT(qpairs_tree, &poller->qpairs, rqpair);
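
A sketch of the attribute setup in nvmf_rdma_qpair_initialize(); the field names follow struct spdk_rdma_qp_init_attr, and the capability sizing shown here is illustrative rather than SPDK's computed values:

struct spdk_rdma_qp_init_attr qp_init_attr = {};

qp_init_attr.pd      = device->pd;           /* PD of the device the cm_id is bound to */
qp_init_attr.send_cq = rqpair->poller->cq;   /* both CQs point at the owning poller's CQ */
qp_init_attr.recv_cq = rqpair->poller->cq;
/* When SRQ is enabled, qp_init_attr.srq is set to the poller's shared SRQ instead of
 * sizing per-qpair receive resources here. */
qp_init_attr.cap.max_send_wr  = max_queue_depth * 2;   /* illustrative sizing */
qp_init_attr.cap.max_send_sge = 16;                    /* illustrative */
qp_init_attr.cap.max_recv_sge = 4;                     /* illustrative */

rqpair->rdma_qp = spdk_rdma_qp_create(rqpair->cm_id, &qp_init_attr);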

int rdma_create_qp (struct rdma_cm_id *id, struct ibv_pd *pd, struct ibv_qp_init_attr *qp_init_attr):
  rdma_create_qp allocates a QP associated with the specified rdma_cm_id and transitions it for
  sending and receiving. The actual capabilities and properties of the created QP will be returned to
  the user through the qp_init_attr parameter.
  Notes:
  The rdma_cm_id must be bound to a local RDMA device before calling this function, and the
  protection domain must be for that same device. QPs allocated to an rdma_cm_id are automatically 
  transitioned by the librdmacm through their states. After being allocated, the QP will be ready
  to handle posting of receives. If the QP is unconnected, it will be ready to post sends.

int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param):
  rdma_accept is called from the listening side to accept a connection or datagram service lookup
  request.
  Unlike the socket accept routine, rdma_accept is not called on a listening rdma_cm_id. Instead,
  after calling rdma_listen, the user waits for an RDMA_CM_EVENT_CONNECT_REQUEST event to
  occur. Connection request events give the user a newly created rdma_cm_id, similar to a new
  socket, but the rdma_cm_id is bound to a specific RDMA device. rdma_accept is called on the new
  rdma_cm_id.

NVMe-over-RDMA transport request handling

Overall I/O read/write flow:

1. Recv WRs (work requests) are posted to the SRQ via ibv_post_srq_recv(), waiting for the initiator to issue an I/O read/write request carried in a Send (a sketch of the batched posting follows the backtrace below).

#0  ibv_post_srq_recv (srq=0x555555e34e10, recv_wr=0x20001943f000, bad_recv_wr=0x7fffffffd7a0) at /usr/include/infiniband/verbs.h:2986
#1  0x00005555556f1005 in spdk_rdma_srq_flush_recv_wrs (rdma_srq=0x555555dcb590, bad_wr=0x7fffffffd7a0) at common.c:335
#2  0x00005555556a0356 in _poller_submit_recvs (rtransport=0x555555db8c60, rpoller=0x555555e34d20) at rdma.c:4526
#3  0x00005555556a14d1 in nvmf_rdma_poller_poll (rtransport=0x555555db8c60, rpoller=0x555555e34d20) at rdma.c:4835
#4  0x00005555556a170f in nvmf_rdma_poll_group_poll (group=0x555555e34cb0) at rdma.c:4905
#5  0x0000555555681ae4 in nvmf_transport_poll_group_poll (group=0x555555e34cb0) at transport.c:724
#6  0x0000555555671fdf in nvmf_poll_group_poll (ctx=0x555555db9cf0) at nvmf.c:160
#7  0x000055555573c65b in thread_execute_poller (thread=0x555555dd74a0, poller=0x555555dd7860) at thread.c:953
#8  0x000055555573cc12 in thread_poll (thread=0x555555dd74a0, max_msgs=0, now=7255672180596) at thread.c:1079
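
The flush in frame #1 posts previously queued Recv WRs with a single verbs call instead of one ibv_post_srq_recv() per WR. A sketch of the idea behind spdk_rdma_srq_queue_recv_wrs()/spdk_rdma_srq_flush_recv_wrs(); the recv_wrs batch field name is an assumption based on the batching pattern in SPDK's rdma common.c:

/* Queue: chain the WR onto the per-SRQ pending list instead of posting immediately. */
spdk_rdma_srq_queue_recv_wrs(rpoller->srq, &rdma_recv->wr);

/* Flush: post the whole chain with one ibv_post_srq_recv() call. */
int
spdk_rdma_srq_flush_recv_wrs(struct spdk_rdma_srq *rdma_srq, struct ibv_recv_wr **bad_wr)
{
	int rc;

	if (rdma_srq->recv_wrs.first == NULL) {
		return 0;
	}

	rc = ibv_post_srq_recv(rdma_srq->srq, rdma_srq->recv_wrs.first, bad_wr);
	rdma_srq->recv_wrs.first = NULL;
	return rc;
}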

2. The initiator issues the Send request; the RDMA NIC (HCA) consumes a Recv WR from the head of the SRQ, scatters the incoming data into that WR's SGL, and posts a CQE to the CQ associated with the receiving qpair. The NVMe-oF rpoller polls the CQ, picks up the CQE, and inserts the struct spdk_nvmf_rdma_recv into the incoming_queue, where it waits for a request object.

reaped = ibv_poll_cq(rpoller->cq, 32, wc);

/* Receives that are waiting for a request object */
STAILQ_HEAD(, spdk_nvmf_rdma_recv)	incoming_queue;
STAILQ_INSERT_HEAD(&rqpair->resources->incoming_queue, rdma_recv, link);

#0  nvmf_rdma_poller_poll (rtransport=0x555555db8c60, rpoller=0x555555e34d20) at rdma.c:4757
#1  0x00005555556a170f in nvmf_rdma_poll_group_poll (group=0x555555e34cb0) at rdma.c:4905
#2  0x0000555555681ae4 in nvmf_transport_poll_group_poll (group=0x555555e34cb0) at transport.c:724
#3  0x0000555555671fdf in nvmf_poll_group_poll (ctx=0x555555db9cf0) at nvmf.c:160
#4  0x000055555573c65b in thread_execute_poller (thread=0x555555dd74a0, poller=0x555555dd7860) at thread.c:953
#5  0x000055555573cc12 in thread_poll (thread=0x555555dd74a0, max_msgs=0, now=32984254910247) at thread.c:1079

3. In the same context, nvmf_rdma_qpair_process_pending() takes an available request object from the free_queue, pairs it with the recv at the head of the incoming_queue, and then runs the request through the nvmf_rdma_request_process() state machine.

rdma_req = STAILQ_FIRST(&resources->free_queue);
STAILQ_REMOVE_HEAD(&resources->free_queue, state_link);
rdma_req->recv = STAILQ_FIRST(&resources->incoming_queue);
STAILQ_REMOVE_HEAD(&resources->incoming_queue, link);

#0  nvmf_rdma_request_process (rtransport=0x555555db8c60, rdma_req=0x2000194a8000) at rdma.c:2108
#1  0x000055555569d98b in nvmf_rdma_qpair_process_pending (rtransport=0x555555db8c60, rqpair=0x555555de70f0, drain=false) at rdma.c:3434
#2  0x00005555556a146f in nvmf_rdma_poller_poll (rtransport=0x555555db8c60, rpoller=0x555555e34d20) at rdma.c:4823
#3  0x00005555556a170f in nvmf_rdma_poll_group_poll (group=0x555555e34cb0) at rdma.c:4905
#4  0x0000555555681ae4 in nvmf_transport_poll_group_poll (group=0x555555e34cb0) at transport.c:724
#5  0x0000555555671fdf in nvmf_poll_group_poll (ctx=0x555555db9cf0) at nvmf.c:160
#6  0x000055555573c65b in thread_execute_poller (thread=0x555555dd74a0, poller=0x555555dd7860) at thread.c:953
#7  0x000055555573cc12 in thread_poll (thread=0x555555dd74a0, max_msgs=0, now=32984254910247) at thread.c:1079

4. The nvmf_rdma_request_process() state machine does not necessarily run from start to finish in one pass. For example, under high I/O concurrency the attempt to obtain SGL buffers for a READ/WRITE request may fail and the state machine exits; it resumes the next time a CQE is polled and nvmf_rdma_qpair_process_pending() runs again.

/* When a request in RDMA_REQUEST_STATE_NEED_BUFFER fails to get a buffer, the state machine exits */
if (rdma_req->req.iovcnt == 0) {
    /* No buffers available. */
    rgroup->stat.pending_data_buffer++;
    break;
}

/* When nvmf_rdma_qpair_process_pending() runs, it re-processes requests that were left unfinished */
/* First process requests which are waiting for response to be sent */
STAILQ_FOREACH_SAFE(rdma_req, &rqpair->pending_rdma_send_queue, state_link, req_tmp) {
    if (nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
        break;
    }
}

/* We process I/O in the data transfer pending queue at the highest priority. */
STAILQ_FOREACH_SAFE(rdma_req, &rqpair->pending_rdma_read_queue, state_link, req_tmp) {
	if (nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
		break;
	}
}

/* Then RDMA writes since reads have stronger restrictions than writes */
STAILQ_FOREACH_SAFE(rdma_req, &rqpair->pending_rdma_write_queue, state_link, req_tmp) {
	if (nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
		break;
	}
}

/* Then we handle request waiting on memory buffers. */
STAILQ_FOREACH_SAFE(req, &rqpair->poller->group->group.pending_buf_queue, buf_link, tmp) {
	rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req);
	if (nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
		break;
	}
}

5. The request-handling path is long; the nvmf_rdma_request_process() state machine covers most of it, and the state-machine function is driven by the rpoller each time it polls the CQ.
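
The overall shape of nvmf_rdma_request_process() is a loop around a switch on rdma_req->state: it keeps advancing until a pass makes no further state change (for example because the request is now waiting on a buffer, on RDMA queue depth, or on bdev completion) and returns whether any progress was made. A heavily condensed sketch, with most state handlers elided:

bool
nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
			  struct spdk_nvmf_rdma_request *rdma_req)
{
	enum spdk_nvmf_rdma_request_state prev_state;
	bool progress = false;

	do {
		prev_state = rdma_req->state;

		switch (rdma_req->state) {
		case RDMA_REQUEST_STATE_NEW:
			/* Parse the command in rdma_req->recv, then go look for data buffers. */
			rdma_req->state = RDMA_REQUEST_STATE_NEED_BUFFER;
			break;
		case RDMA_REQUEST_STATE_NEED_BUFFER:
			/* Try to fill req.iov from the transport buffer pool; on failure the
			 * state stays the same and the loop below terminates. */
			break;
		/* ... the remaining states are elided ... */
		case RDMA_REQUEST_STATE_COMPLETED:
			rdma_req->state = RDMA_REQUEST_STATE_FREE;
			break;
		default:
			break;
		}

		if (rdma_req->state != prev_state) {
			progress = true;
		}
	} while (rdma_req->state != prev_state);

	return progress;
}

The full set of request states: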

enum spdk_nvmf_rdma_request_state {
	/* The request is not currently in use */
	RDMA_REQUEST_STATE_FREE = 0,

	/* Initial state when request first received */
	RDMA_REQUEST_STATE_NEW,

	/* The request is queued until a data buffer is available. */
	RDMA_REQUEST_STATE_NEED_BUFFER,

	/* The request is waiting on RDMA queue depth availability
	 * to transfer data from the host to the controller.
	 */
	RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING,

	/* The request is currently transferring data from the host to the controller. */
	RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER,

	/* The request is ready to execute at the block device */
	RDMA_REQUEST_STATE_READY_TO_EXECUTE,

	/* The request is currently executing at the block device */
	RDMA_REQUEST_STATE_EXECUTING,

	/* The request finished executing at the block device */
	RDMA_REQUEST_STATE_EXECUTED,

	/* The request is waiting on RDMA queue depth availability
	 * to transfer data from the controller to the host.
	 */
	RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING,

	/* The request is waiting on RDMA queue depth availability
	 * to send response to the host.
	 */
	RDMA_REQUEST_STATE_READY_TO_COMPLETE_PENDING,

	/* The request is ready to send a completion */
	RDMA_REQUEST_STATE_READY_TO_COMPLETE,

	/* The request is currently transferring data from the controller to the host. */
	RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST,

	/* The request currently has an outstanding completion without an
	 * associated data transfer.
	 */
	RDMA_REQUEST_STATE_COMPLETING,

	/* The request completed and can be marked free. */
	RDMA_REQUEST_STATE_COMPLETED,

	/* Terminator */
	RDMA_REQUEST_NUM_STATES,
};