From: "Philippe Mathieu-Daudé" <philmd@redhat.com>
To: qemu-devel@nongnu.org
Cc: "Fam Zheng" <fam@euphon.net>,
qemu-ppc@nongnu.org, "Paolo Bonzini" <pbonzini@redhat.com>,
"Philippe Mathieu-Daudé" <philmd@redhat.com>,
"David Gibson" <david@gibson.dropbear.id.au>
Subject: [PATCH 4/5] hw/scsi/spapr_vscsi: Introduce req_iu() helper
Date: Wed, 4 Mar 2020 16:33:10 +0100
Message-ID: <20200304153311.22959-5-philmd@redhat.com>
In-Reply-To: <20200304153311.22959-1-philmd@redhat.com>
Introduce the req_iu() helper, which returns a pointer to
the viosrp_iu union held in the vscsi_req structure.
This simplifies the next patch.
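
As a minimal sketch (illustration only, not part of the patch), the
conversion this enables is visible in the hunks below: direct accesses
to the embedded union are funneled through the helper:

    union viosrp_iu *iu;

    iu = &req->iu;     /* before this patch: direct access */
    iu = req_iu(req);  /* after this patch: through req_iu() */

With every access going through req_iu(), the next patch only has to
change how the IU is reached in one place.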
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
hw/scsi/spapr_vscsi.c | 40 ++++++++++++++++++++++++----------------
1 file changed, 24 insertions(+), 16 deletions(-)
diff --git a/hw/scsi/spapr_vscsi.c b/hw/scsi/spapr_vscsi.c
index 3cb5a38181..f1a0bbdc31 100644
--- a/hw/scsi/spapr_vscsi.c
+++ b/hw/scsi/spapr_vscsi.c
@@ -97,6 +97,12 @@ typedef struct {
vscsi_req reqs[VSCSI_REQ_LIMIT];
} VSCSIState;
+static union viosrp_iu *req_iu(vscsi_req *req)
+{
+ return (union viosrp_iu *)req->iu.srp.reserved;
+}
+
+
static struct vscsi_req *vscsi_get_req(VSCSIState *s)
{
vscsi_req *req;
@@ -121,7 +127,7 @@ static struct vscsi_req *vscsi_find_req(VSCSIState *s, uint64_t srp_tag)
for (i = 0; i < VSCSI_REQ_LIMIT; i++) {
req = &s->reqs[i];
- if (req->iu.srp.cmd.tag == srp_tag) {
+ if (req_iu(req)->srp.cmd.tag == srp_tag) {
return req;
}
}
@@ -188,7 +194,7 @@ static int vscsi_send_iu(VSCSIState *s, vscsi_req *req,
req->crq.s.reserved = 0x00;
req->crq.s.timeout = cpu_to_be16(0x0000);
req->crq.s.IU_length = cpu_to_be16(length);
- req->crq.s.IU_data_ptr = req->iu.srp.rsp.tag; /* right byte order */
+ req->crq.s.IU_data_ptr = req_iu(req)->srp.rsp.tag; /* right byte order */
if (rc == 0) {
req->crq.s.status = VIOSRP_OK;
@@ -224,7 +230,7 @@ static void vscsi_makeup_sense(VSCSIState *s, vscsi_req *req,
static int vscsi_send_rsp(VSCSIState *s, vscsi_req *req,
uint8_t status, int32_t res_in, int32_t res_out)
{
- union viosrp_iu *iu = &req->iu;
+ union viosrp_iu *iu = req_iu(req);
uint64_t tag = iu->srp.rsp.tag;
int total_len = sizeof(iu->srp.rsp);
uint8_t sol_not = iu->srp.cmd.sol_not;
@@ -285,7 +291,7 @@ static int vscsi_fetch_desc(VSCSIState *s, struct vscsi_req *req,
unsigned n, unsigned buf_offset,
struct srp_direct_buf *ret)
{
- struct srp_cmd *cmd = &req->iu.srp.cmd;
+ struct srp_cmd *cmd = &req_iu(req)->srp.cmd;
switch (req->dma_fmt) {
case SRP_NO_DATA_DESC: {
@@ -473,7 +479,7 @@ static int data_out_desc_size(struct srp_cmd *cmd)
static int vscsi_preprocess_desc(vscsi_req *req)
{
- struct srp_cmd *cmd = &req->iu.srp.cmd;
+ struct srp_cmd *cmd = &req_iu(req)->srp.cmd;
req->cdb_offset = cmd->add_cdb_len & ~3;
@@ -655,7 +661,7 @@ static void *vscsi_load_request(QEMUFile *f, SCSIRequest *sreq)
static void vscsi_process_login(VSCSIState *s, vscsi_req *req)
{
- union viosrp_iu *iu = &req->iu;
+ union viosrp_iu *iu = req_iu(req);
struct srp_login_rsp *rsp = &iu->srp.login_rsp;
uint64_t tag = iu->srp.rsp.tag;
@@ -681,7 +687,7 @@ static void vscsi_process_login(VSCSIState *s, vscsi_req *req)
static void vscsi_inquiry_no_target(VSCSIState *s, vscsi_req *req)
{
- uint8_t *cdb = req->iu.srp.cmd.cdb;
+ uint8_t *cdb = req_iu(req)->srp.cmd.cdb;
uint8_t resp_data[36];
int rc, len, alen;
@@ -770,7 +776,7 @@ static void vscsi_report_luns(VSCSIState *s, vscsi_req *req)
static int vscsi_queue_cmd(VSCSIState *s, vscsi_req *req)
{
- union srp_iu *srp = &req->iu.srp;
+ union srp_iu *srp = &req_iu(req)->srp;
SCSIDevice *sdev;
int n, lun;
@@ -821,7 +827,7 @@ static int vscsi_queue_cmd(VSCSIState *s, vscsi_req *req)
static int vscsi_process_tsk_mgmt(VSCSIState *s, vscsi_req *req)
{
- union viosrp_iu *iu = &req->iu;
+ union viosrp_iu *iu = req_iu(req);
vscsi_req *tmpreq;
int i, lun = 0, resp = SRP_TSK_MGMT_COMPLETE;
SCSIDevice *d;
@@ -831,7 +837,8 @@ static int vscsi_process_tsk_mgmt(VSCSIState *s, vscsi_req *req)
fprintf(stderr, "vscsi_process_tsk_mgmt %02x\n",
iu->srp.tsk_mgmt.tsk_mgmt_func);
- d = vscsi_device_find(&s->bus, be64_to_cpu(req->iu.srp.tsk_mgmt.lun), &lun);
+ d = vscsi_device_find(&s->bus,
+ be64_to_cpu(req_iu(req)->srp.tsk_mgmt.lun), &lun);
if (!d) {
resp = SRP_TSK_MGMT_FIELDS_INVALID;
} else {
@@ -842,7 +849,7 @@ static int vscsi_process_tsk_mgmt(VSCSIState *s, vscsi_req *req)
break;
}
- tmpreq = vscsi_find_req(s, req->iu.srp.tsk_mgmt.task_tag);
+ tmpreq = vscsi_find_req(s, req_iu(req)->srp.tsk_mgmt.task_tag);
if (tmpreq && tmpreq->sreq) {
assert(tmpreq->sreq->hba_private);
scsi_req_cancel(tmpreq->sreq);
@@ -867,7 +874,8 @@ static int vscsi_process_tsk_mgmt(VSCSIState *s, vscsi_req *req)
for (i = 0; i < VSCSI_REQ_LIMIT; i++) {
tmpreq = &s->reqs[i];
- if (tmpreq->iu.srp.cmd.lun != req->iu.srp.tsk_mgmt.lun) {
+ if (req_iu(tmpreq)->srp.cmd.lun
+ != req_iu(req)->srp.tsk_mgmt.lun) {
continue;
}
if (!tmpreq->active || !tmpreq->sreq) {
@@ -911,7 +919,7 @@ static int vscsi_process_tsk_mgmt(VSCSIState *s, vscsi_req *req)
static int vscsi_handle_srp_req(VSCSIState *s, vscsi_req *req)
{
- union srp_iu *srp = &req->iu.srp;
+ union srp_iu *srp = &req_iu(req)->srp;
int done = 1;
uint8_t opcode = srp->rsp.opcode;
@@ -948,7 +956,7 @@ static int vscsi_send_adapter_info(VSCSIState *s, vscsi_req *req)
struct mad_adapter_info_data info;
int rc;
- sinfo = &req->iu.mad.adapter_info;
+ sinfo = &req_iu(req)->mad.adapter_info;
#if 0 /* What for ? */
rc = spapr_vio_dma_read(&s->vdev, be64_to_cpu(sinfo->buffer),
@@ -984,7 +992,7 @@ static int vscsi_send_capabilities(VSCSIState *s, vscsi_req *req)
uint64_t buffer;
int rc;
- vcap = &req->iu.mad.capabilities;
+ vcap = &req_iu(req)->mad.capabilities;
req_len = len = be16_to_cpu(vcap->common.length);
buffer = be64_to_cpu(vcap->buffer);
if (len > sizeof(cap)) {
@@ -1029,7 +1037,7 @@ static int vscsi_send_capabilities(VSCSIState *s, vscsi_req *req)
static int vscsi_handle_mad_req(VSCSIState *s, vscsi_req *req)
{
- union mad_iu *mad = &req->iu.mad;
+ union mad_iu *mad = &req_iu(req)->mad;
bool request_handled = false;
uint64_t retlen = 0;
--
2.21.1
Thread overview: 14+ messages
2020-03-04 15:33 [PATCH 0/5] hw/scsi/spapr_vscsi: Fix time bomb zero-length array use Philippe Mathieu-Daudé
2020-03-04 15:33 ` [PATCH 1/5] hw/scsi/viosrp: Add missing 'hw/scsi/srp.h' include Philippe Mathieu-Daudé
2020-03-05  0:39   ` David Gibson
2020-03-04 15:33 ` [PATCH 2/5] hw/scsi/spapr_vscsi: Use SRP_MAX_IU_LEN instead of sizeof flexible array Philippe Mathieu-Daudé
2020-03-05  0:40   ` David Gibson
2020-03-04 15:33 ` [PATCH 3/5] hw/scsi/spapr_vscsi: Simplify a bit Philippe Mathieu-Daudé
2020-03-05  0:40   ` David Gibson
2020-03-05 10:21     ` Greg Kurz
2020-03-04 15:33 ` Philippe Mathieu-Daudé [this message]
2020-03-05  0:41   ` [PATCH 4/5] hw/scsi/spapr_vscsi: Introduce req_iu() helper David Gibson
2020-03-05  0:42     ` David Gibson
2020-03-04 15:33 ` [PATCH 5/5] hw/scsi/spapr_vscsi: Do not mix SRP IU size with DMA buffer size Philippe Mathieu-Daudé
2020-03-05  0:43   ` David Gibson
2020-03-05  6:47     ` Philippe Mathieu-Daudé