* [PATCH v7 5/8] RDMA/rxe: Make requester support atomic write on RC service
@ 2022-12-01 14:39 Xiao Yang
From: Xiao Yang @ 2022-12-01 14:39 UTC (permalink / raw)
To: linux-rdma, jgg, rpearsonhpe
Cc: leon, lizhijian, y-goto, zyjzyj2000, Xiao Yang

Make requester process and send an atomic write request on RC service.

Signed-off-by: Xiao Yang <yangx.jy@fujitsu.com>
---
 drivers/infiniband/sw/rxe/rxe_req.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 4d45f508392f..2713e9058922 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -258,6 +258,10 @@ static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
 		else
 			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
 				IB_OPCODE_RC_SEND_FIRST;
+
+	case IB_WR_ATOMIC_WRITE:
+		return IB_OPCODE_RC_ATOMIC_WRITE;
+
 	case IB_WR_REG_MR:
 	case IB_WR_LOCAL_INV:
 		return opcode;
@@ -486,6 +490,11 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_av *av,
 		}
 	}
 
+	if (pkt->mask & RXE_ATOMIC_WRITE_MASK) {
+		memcpy(payload_addr(pkt), wqe->dma.atomic_wr, payload);
+		wqe->dma.resid -= payload;
+	}
+
 	return 0;
 }
 
@@ -709,13 +718,15 @@ int rxe_requester(void *arg)
 	}
 
 	mask = rxe_opcode[opcode].mask;
-	if (unlikely(mask & RXE_READ_OR_ATOMIC_MASK)) {
+	if (unlikely(mask & (RXE_READ_OR_ATOMIC_MASK |
+			RXE_ATOMIC_WRITE_MASK))) {
 		if (check_init_depth(qp, wqe))
 			goto exit;
 	}
 
 	mtu = get_mtu(qp);
-	payload = (mask & RXE_WRITE_OR_SEND_MASK) ? wqe->dma.resid : 0;
+	payload = (mask & (RXE_WRITE_OR_SEND_MASK | RXE_ATOMIC_WRITE_MASK)) ?
+			wqe->dma.resid : 0;
 	if (payload > mtu) {
 		if (qp_type(qp) == IB_QPT_UD) {
 			/* C10-93.1.1: If the total sum of all the buffer lengths specified for a
--
2.34.1
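
The requester-side change is small because an atomic write carries a fixed
8-byte payload inline in the WQE (the atomic_wr field added to the DMA info
earlier in this series) rather than through a scatter/gather list. A reduced
sketch of the packet-fill step above with explanatory comments added; this is
an illustration of the merged hunk, not a replacement for it:

	/* Fill the ATOMIC WRITE packet from the WQE. The payload is the
	 * 8 bytes stashed in wqe->dma.atomic_wr, so payload is always 8
	 * and can never exceed the MTU; after the copy, dma.resid drops
	 * to zero and the request completes in a single packet. */
	if (pkt->mask & RXE_ATOMIC_WRITE_MASK) {
		memcpy(payload_addr(pkt), wqe->dma.atomic_wr, payload);
		wqe->dma.resid -= payload;	/* 8 - 8 == 0 */
	}

The check_init_depth() hunk matters because an atomic write, like a read or
atomic, occupies a responder resource and must therefore be bounded by the
initiator depth (max_rd_atomic).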

* [PATCH v7 6/8] RDMA/rxe: Make responder support atomic write on RC service
From: Xiao Yang @ 2022-12-01 14:39 UTC (permalink / raw)
To: linux-rdma, jgg, rpearsonhpe
Cc: leon, lizhijian, y-goto, zyjzyj2000, Xiao Yang

Make responder process an atomic write request and send a read response
on RC service.

Signed-off-by: Xiao Yang <yangx.jy@fujitsu.com>
---
 drivers/infiniband/sw/rxe/rxe_resp.c | 84 ++++++++++++++++++++++++++--
 1 file changed, 79 insertions(+), 5 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 6761bcd1d4d8..6ac544477f3f 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -22,6 +22,7 @@ enum resp_states {
 	RESPST_EXECUTE,
 	RESPST_READ_REPLY,
 	RESPST_ATOMIC_REPLY,
+	RESPST_ATOMIC_WRITE_REPLY,
 	RESPST_COMPLETE,
 	RESPST_ACKNOWLEDGE,
 	RESPST_CLEANUP,
@@ -57,6 +58,7 @@ static char *resp_state_name[] = {
 	[RESPST_EXECUTE]		= "EXECUTE",
 	[RESPST_READ_REPLY]		= "READ_REPLY",
 	[RESPST_ATOMIC_REPLY]		= "ATOMIC_REPLY",
+	[RESPST_ATOMIC_WRITE_REPLY]	= "ATOMIC_WRITE_REPLY",
 	[RESPST_COMPLETE]		= "COMPLETE",
 	[RESPST_ACKNOWLEDGE]		= "ACKNOWLEDGE",
 	[RESPST_CLEANUP]		= "CLEANUP",
@@ -263,7 +265,7 @@ static enum resp_states check_op_valid(struct rxe_qp *qp,
 	case IB_QPT_RC:
 		if (((pkt->mask & RXE_READ_MASK) &&
 		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
-		    ((pkt->mask & RXE_WRITE_MASK) &&
+		    ((pkt->mask & (RXE_WRITE_MASK | RXE_ATOMIC_WRITE_MASK)) &&
 		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
 		    ((pkt->mask & RXE_ATOMIC_MASK) &&
 		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
@@ -367,7 +369,7 @@ static enum resp_states check_resource(struct rxe_qp *qp,
 		}
 	}
 
-	if (pkt->mask & RXE_READ_OR_ATOMIC_MASK) {
+	if (pkt->mask & (RXE_READ_OR_ATOMIC_MASK | RXE_ATOMIC_WRITE_MASK)) {
 		/* it is the requesters job to not send
 		 * too many read/atomic ops, we just
 		 * recycle the responder resource queue
@@ -438,7 +440,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
 	enum resp_states state;
 	int access;
 
-	if (pkt->mask & RXE_READ_OR_WRITE_MASK) {
+	if (pkt->mask & (RXE_READ_OR_WRITE_MASK | RXE_ATOMIC_WRITE_MASK)) {
 		if (pkt->mask & RXE_RETH_MASK) {
 			qp->resp.va = reth_va(pkt);
 			qp->resp.offset = 0;
@@ -504,7 +506,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
 		goto err;
 	}
 
-	if (pkt->mask & RXE_WRITE_MASK) {
+	if (pkt->mask & (RXE_WRITE_MASK | RXE_ATOMIC_WRITE_MASK)) {
 		if (resid > mtu) {
 			if (pktlen != mtu || bth_pad(pkt)) {
 				state = RESPST_ERR_LENGTH;
@@ -604,6 +606,7 @@ static struct resp_res *rxe_prepare_res(struct rxe_qp *qp,
 		res->state = rdatm_res_state_new;
 		break;
 	case RXE_ATOMIC_MASK:
+	case RXE_ATOMIC_WRITE_MASK:
 		res->first_psn = pkt->psn;
 		res->last_psn = pkt->psn;
 		res->cur_psn = pkt->psn;
@@ -673,6 +676,55 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
 	return ret;
 }
 
+static enum resp_states atomic_write_reply(struct rxe_qp *qp,
+					   struct rxe_pkt_info *pkt)
+{
+	u64 src, *dst;
+	struct resp_res *res = qp->resp.res;
+	struct rxe_mr *mr = qp->resp.mr;
+	int payload = payload_size(pkt);
+
+	if (!res) {
+		res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK);
+		qp->resp.res = res;
+	}
+
+	if (!res->replay) {
+#ifdef CONFIG_64BIT
+		if (mr->state != RXE_MR_STATE_VALID)
+			return RESPST_ERR_RKEY_VIOLATION;
+
+		memcpy(&src, payload_addr(pkt), payload);
+
+		dst = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, payload);
+		/* check vaddr is 8 bytes aligned. */
+		if (!dst || (uintptr_t)dst & 7)
+			return RESPST_ERR_MISALIGNED_ATOMIC;
+
+		/* Do atomic write after all prior operations have completed */
+		smp_store_release(dst, src);
+
+		/* decrease resp.resid to zero */
+		qp->resp.resid -= sizeof(payload);
+
+		qp->resp.msn++;
+
+		/* next expected psn, read handles this separately */
+		qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+		qp->resp.ack_psn = qp->resp.psn;
+
+		qp->resp.opcode = pkt->opcode;
+		qp->resp.status = IB_WC_SUCCESS;
+
+		return RESPST_ACKNOWLEDGE;
+#else
+		return RESPST_ERR_UNSUPPORTED_OPCODE;
+#endif /* CONFIG_64BIT */
+	}
+
+	return RESPST_ACKNOWLEDGE;
+}
+
 static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
 					  struct rxe_pkt_info *ack,
 					  int opcode,
@@ -912,6 +964,8 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
 		return RESPST_READ_REPLY;
 	} else if (pkt->mask & RXE_ATOMIC_MASK) {
 		return RESPST_ATOMIC_REPLY;
+	} else if (pkt->mask & RXE_ATOMIC_WRITE_MASK) {
+		return RESPST_ATOMIC_WRITE_REPLY;
 	} else {
 		/* Unreachable */
 		WARN_ON_ONCE(1);
@@ -1085,6 +1139,19 @@ static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
 	return ret;
 }
 
+static int send_read_response_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
+{
+	int ret = send_common_ack(qp, syndrome, psn,
+			IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY,
+			"RDMA READ response of length zero ACK");
+
+	/* have to clear this since it is used to trigger
+	 * long read replies
+	 */
+	qp->resp.res = NULL;
+	return ret;
+}
+
 static enum resp_states acknowledge(struct rxe_qp *qp,
 				    struct rxe_pkt_info *pkt)
 {
@@ -1095,6 +1162,8 @@ static enum resp_states acknowledge(struct rxe_qp *qp,
 		send_ack(qp, qp->resp.aeth_syndrome, pkt->psn);
 	else if (pkt->mask & RXE_ATOMIC_MASK)
 		send_atomic_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
+	else if (pkt->mask & RXE_ATOMIC_WRITE_MASK)
+		send_read_response_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
 	else if (bth_ack(pkt))
 		send_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
@@ -1206,7 +1275,9 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
 		res->replay = 1;
 		res->cur_psn = pkt->psn;
 		qp->resp.res = res;
-		rc = RESPST_ATOMIC_REPLY;
+		rc = pkt->mask & RXE_ATOMIC_MASK ?
+				RESPST_ATOMIC_REPLY :
+				RESPST_ATOMIC_WRITE_REPLY;
 		goto out;
 	}
@@ -1343,6 +1414,9 @@ int rxe_responder(void *arg)
 	case RESPST_ATOMIC_REPLY:
 		state = atomic_reply(qp, pkt);
 		break;
+	case RESPST_ATOMIC_WRITE_REPLY:
+		state = atomic_write_reply(qp, pkt);
+		break;
 	case RESPST_ACKNOWLEDGE:
 		state = acknowledge(qp, pkt);
 		break;
-- 
2.34.1
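
The core of the responder path is a single release store. A standalone sketch
of why that suffices on 64-bit, simplified from the hunk above with error
handling trimmed (here iova stands for qp->resp.va + qp->resp.offset, and mr
and payload_addr() are as in the driver):

	u64 src;
	u64 *dst;

	/* The wire payload of an ATOMIC WRITE is exactly 8 bytes. */
	memcpy(&src, payload_addr(pkt), sizeof(src));

	/* Resolve the target to a kernel vaddr backed by the MR; the
	 * spec mandates 8-byte alignment of the target address. */
	dst = iova_to_vaddr(mr, iova, sizeof(src));
	if (!dst || (uintptr_t)dst & 7)
		return RESPST_ERR_MISALIGNED_ATOMIC;

	/* An aligned 8-byte store is single-copy atomic on 64-bit
	 * targets, and the release semantics order it after all writes
	 * from earlier requests on this connection. */
	smp_store_release(dst, src);

This is also why the body sits under CONFIG_64BIT: a 32-bit kernel has no
native 8-byte store to build it from, so the responder instead rejects the
opcode as unsupported.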

* Re: [PATCH v7 6/8] RDMA/rxe: Make responder support atomic write on RC service
From: Guenter Roeck @ 2022-12-15 15:19 UTC (permalink / raw)
To: Xiao Yang
Cc: linux-rdma, jgg, rpearsonhpe, leon, lizhijian, y-goto, zyjzyj2000

On Thu, Dec 01, 2022 at 02:39:26PM +0000, Xiao Yang wrote:
> Make responder process an atomic write request and send a read response
> on RC service.
>
> Signed-off-by: Xiao Yang <yangx.jy@fujitsu.com>
> ---

On all 32-bit builds with CONFIG_WERROR enabled:

Error log:
drivers/infiniband/sw/rxe/rxe_resp.c: In function 'atomic_write_reply':
drivers/infiniband/sw/rxe/rxe_resp.c:794:13: error: unused variable 'payload' [-Werror=unused-variable]
  794 |         int payload = payload_size(pkt);
      |             ^~~~~~~
drivers/infiniband/sw/rxe/rxe_resp.c:793:24: error: unused variable 'mr' [-Werror=unused-variable]
  793 |         struct rxe_mr *mr = qp->resp.mr;
      |                        ^~
drivers/infiniband/sw/rxe/rxe_resp.c:791:19: error: unused variable 'dst' [-Werror=unused-variable]
  791 |         u64 src, *dst;
      |                   ^~~
drivers/infiniband/sw/rxe/rxe_resp.c:791:13: error: unused variable 'src' [-Werror=unused-variable]
  791 |         u64 src, *dst;

Guenter
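
The report is the classic shape of conditionally compiled code: the four
locals are declared at the top of atomic_write_reply() while every use sits
inside the #ifdef CONFIG_64BIT branch, so 32-bit builds see only dead
declarations. A reduced, hypothetical reproducer of the pattern (not the rxe
code itself):

	static int demo(void)
	{
		int n = 42;		/* declared unconditionally... */
	#ifdef CONFIG_64BIT
		return n;		/* ...but only used here */
	#else
		return -EOPNOTSUPP;	/* 32-bit: 'n' is unused, so
					 * -Wunused-variable fires and
					 * CONFIG_WERROR makes it fatal */
	#endif
	}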

* Re: [PATCH v7 6/8] RDMA/rxe: Make responder support atomic write on RC service
From: Jason Gunthorpe @ 2022-12-15 15:32 UTC (permalink / raw)
To: Guenter Roeck
Cc: Xiao Yang, linux-rdma, rpearsonhpe, leon, lizhijian, y-goto, zyjzyj2000

On Thu, Dec 15, 2022 at 07:19:24AM -0800, Guenter Roeck wrote:
> On Thu, Dec 01, 2022 at 02:39:26PM +0000, Xiao Yang wrote:
> > Make responder process an atomic write request and send a read response
> > on RC service.
> >
> > Signed-off-by: Xiao Yang <yangx.jy@fujitsu.com>
> > ---
>
> On all 32-bit builds with CONFIG_WERROR enabled:

Why are we only just seeing this now? It has been in linux-next for
long enough?

From 7e708569f7bb5dba6df8342bc2402deda4e2414e Mon Sep 17 00:00:00 2001
From: Jason Gunthorpe <jgg@nvidia.com>
Date: Thu, 15 Dec 2022 11:29:25 -0400
Subject: [PATCH] RDMA/rxe: Fix compile warnings on 32-bit

Move the conditional code into a function, with two variants, so it is
harder to make these kinds of mistakes.

drivers/infiniband/sw/rxe/rxe_resp.c: In function 'atomic_write_reply':
drivers/infiniband/sw/rxe/rxe_resp.c:794:13: error: unused variable 'payload' [-Werror=unused-variable]
  794 |         int payload = payload_size(pkt);
      |             ^~~~~~~
drivers/infiniband/sw/rxe/rxe_resp.c:793:24: error: unused variable 'mr' [-Werror=unused-variable]
  793 |         struct rxe_mr *mr = qp->resp.mr;
      |                        ^~
drivers/infiniband/sw/rxe/rxe_resp.c:791:19: error: unused variable 'dst' [-Werror=unused-variable]
  791 |         u64 src, *dst;
      |                   ^~~
drivers/infiniband/sw/rxe/rxe_resp.c:791:13: error: unused variable 'src' [-Werror=unused-variable]
  791 |         u64 src, *dst;

Fixes: 034e285f8b99 ("RDMA/rxe: Make responder support atomic write on RC service")
Reported-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/infiniband/sw/rxe/rxe_resp.c | 72 +++++++++++++++------------
 1 file changed, 40 insertions(+), 32 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 7a60c7709da045..c74972244f08f5 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -785,53 +785,61 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
 	return ret;
 }
 
-static enum resp_states atomic_write_reply(struct rxe_qp *qp,
-					   struct rxe_pkt_info *pkt)
+#ifdef CONFIG_64BIT
+static enum resp_states do_atomic_write(struct rxe_qp *qp,
+					struct rxe_pkt_info *pkt)
 {
-	u64 src, *dst;
-	struct resp_res *res = qp->resp.res;
 	struct rxe_mr *mr = qp->resp.mr;
 	int payload = payload_size(pkt);
+	u64 src, *dst;
 
-	if (!res) {
-		res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK);
-		qp->resp.res = res;
-	}
-
-	if (!res->replay) {
-#ifdef CONFIG_64BIT
-		if (mr->state != RXE_MR_STATE_VALID)
-			return RESPST_ERR_RKEY_VIOLATION;
-
-		memcpy(&src, payload_addr(pkt), payload);
+	if (mr->state != RXE_MR_STATE_VALID)
+		return RESPST_ERR_RKEY_VIOLATION;
 
-		dst = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, payload);
-		/* check vaddr is 8 bytes aligned. */
-		if (!dst || (uintptr_t)dst & 7)
-			return RESPST_ERR_MISALIGNED_ATOMIC;
+	memcpy(&src, payload_addr(pkt), payload);
 
-		/* Do atomic write after all prior operations have completed */
-		smp_store_release(dst, src);
+	dst = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, payload);
+	/* check vaddr is 8 bytes aligned. */
+	if (!dst || (uintptr_t)dst & 7)
+		return RESPST_ERR_MISALIGNED_ATOMIC;
 
-		/* decrease resp.resid to zero */
-		qp->resp.resid -= sizeof(payload);
+	/* Do atomic write after all prior operations have completed */
+	smp_store_release(dst, src);
 
-		qp->resp.msn++;
+	/* decrease resp.resid to zero */
+	qp->resp.resid -= sizeof(payload);
 
-		/* next expected psn, read handles this separately */
-		qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
-		qp->resp.ack_psn = qp->resp.psn;
+	qp->resp.msn++;
 
-		qp->resp.opcode = pkt->opcode;
-		qp->resp.status = IB_WC_SUCCESS;
+	/* next expected psn, read handles this separately */
+	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+	qp->resp.ack_psn = qp->resp.psn;
 
-		return RESPST_ACKNOWLEDGE;
+	qp->resp.opcode = pkt->opcode;
+	qp->resp.status = IB_WC_SUCCESS;
+	return RESPST_ACKNOWLEDGE;
+}
 #else
-		return RESPST_ERR_UNSUPPORTED_OPCODE;
+static enum resp_states do_atomic_write(struct rxe_qp *qp,
+					struct rxe_pkt_info *pkt)
+{
+	return RESPST_ERR_UNSUPPORTED_OPCODE;
+}
 #endif /* CONFIG_64BIT */
+
+static enum resp_states atomic_write_reply(struct rxe_qp *qp,
+					   struct rxe_pkt_info *pkt)
+{
+	struct resp_res *res = qp->resp.res;
+
+	if (!res) {
+		res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK);
+		qp->resp.res = res;
 	}
 
-	return RESPST_ACKNOWLEDGE;
+	if (res->replay)
+		return RESPST_ACKNOWLEDGE;
+	return do_atomic_write(qp, pkt);
 }
 
 static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
-- 
2.38.2
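
The fix uses a common kernel idiom: rather than an #ifdef in the middle of a
function body, provide two complete variants of a helper behind one prototype,
so each configuration compiles a self-contained function and no locals are
left stranded. The pattern, reduced to a hypothetical sketch:

	#ifdef CONFIG_64BIT
	static int do_op(void)
	{
		/* real implementation, built on 64-bit-only primitives */
		return 0;
	}
	#else
	static int do_op(void)
	{
		return -EOPNOTSUPP;	/* stub keeps every caller identical */
	}
	#endif

A runtime IS_ENABLED(CONFIG_64BIT) check would not be a drop-in replacement
here, since the compiler would still have to build the 64-bit body on 32-bit,
where an 8-byte smp_store_release() is at best non-atomic and, with the
generic barrier implementation, may not build at all.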

* [PATCH v7 7/8] RDMA/rxe: Implement atomic write completion
From: Xiao Yang @ 2022-12-01 14:39 UTC (permalink / raw)
To: linux-rdma, jgg, rpearsonhpe
Cc: leon, lizhijian, y-goto, zyjzyj2000, Xiao Yang

Generate an atomic write completion when the atomic write request
has finished.

Signed-off-by: Xiao Yang <yangx.jy@fujitsu.com>
---
 drivers/infiniband/sw/rxe/rxe_comp.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 4dca4f8bbb5a..1c525325e271 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -104,6 +104,7 @@ static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
 	case IB_WR_LOCAL_INV:			return IB_WC_LOCAL_INV;
 	case IB_WR_REG_MR:			return IB_WC_REG_MR;
 	case IB_WR_BIND_MW:			return IB_WC_BIND_MW;
+	case IB_WR_ATOMIC_WRITE:		return IB_WC_ATOMIC_WRITE;
 
 	default:
 		return 0xff;
@@ -269,6 +270,9 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
 		if ((syn & AETH_TYPE_MASK) != AETH_ACK)
 			return COMPST_ERROR;
 
+		if (wqe->wr.opcode == IB_WR_ATOMIC_WRITE)
+			return COMPST_WRITE_SEND;
+
 		fallthrough;
 		/* (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE doesn't have an
 		 * AETH)
 		 */
-- 
2.34.1
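
On the requester, the new mapping means an atomic write completes like a
write/send: check_ack() routes the responder's zero-length READ response into
COMPST_WRITE_SEND, and the user sees IB_WC_ATOMIC_WRITE in the work
completion. A hedged sketch of a kernel consumer polling for it (standard
ib_poll_cq(); cq and all surrounding setup are assumed to exist elsewhere):

	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status != IB_WC_SUCCESS)
			break;	/* real code would report the failure */
		if (wc.opcode == IB_WC_ATOMIC_WRITE)
			pr_debug("remote 8-byte write acknowledged\n");
	}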

* [PATCH v7 8/8] RDMA/rxe: Enable atomic write capability for rxe device
From: Xiao Yang @ 2022-12-01 14:39 UTC (permalink / raw)
To: linux-rdma, jgg, rpearsonhpe
Cc: leon, lizhijian, y-goto, zyjzyj2000, Xiao Yang

This capability shows that the rxe device supports the atomic write
operation.

Signed-off-by: Xiao Yang <yangx.jy@fujitsu.com>
---
 drivers/infiniband/sw/rxe/rxe_param.h | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 86c7a8bf3cbb..bbc88cd71d95 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -51,7 +51,12 @@ enum rxe_device_param {
 					| IB_DEVICE_SRQ_RESIZE
 					| IB_DEVICE_MEM_MGT_EXTENSIONS
 					| IB_DEVICE_MEM_WINDOW
+#ifdef CONFIG_64BIT
+					| IB_DEVICE_MEM_WINDOW_TYPE_2B
+					| IB_DEVICE_ATOMIC_WRITE,
+#else
 					| IB_DEVICE_MEM_WINDOW_TYPE_2B,
+#endif /* CONFIG_64BIT */
 	RXE_MAX_SGE			= 32,
 	RXE_MAX_WQE_SIZE		= sizeof(struct rxe_send_wqe) +
 					  sizeof(struct ib_sge) * RXE_MAX_SGE,
-- 
2.34.1
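
With the capability bit advertised, userspace can drive the opcode through the
extended QP API. A minimal sketch, assuming an rdma-core recent enough to ship
ibv_wr_atomic_write() and the IBV_QP_EX_WITH_ATOMIC_WRITE send-ops flag (the
QP must be created with that flag; qpx, rkey, and remote_addr come from the
usual QP/MR setup and exchange, which is omitted here):

	#include <stdint.h>
	#include <infiniband/verbs.h>

	static int post_atomic_write(struct ibv_qp_ex *qpx, uint32_t rkey,
				     uint64_t remote_addr)
	{
		uint64_t val = 0x1122334455667788ULL;	/* the fixed 8-byte payload */

		ibv_wr_start(qpx);
		qpx->wr_id = 1;
		qpx->wr_flags = IBV_SEND_SIGNALED;
		ibv_wr_atomic_write(qpx, rkey, remote_addr, &val);
		return ibv_wr_complete(qpx);		/* 0 on success */
	}

remote_addr must be 8-byte aligned, or the responder above rejects the request
as a misaligned atomic.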