From: Dongsheng Yang <dongsheng.yang@linux.dev>
To: axboe@kernel.dk, dan.j.williams@intel.com,
gregory.price@memverge.com, John@groves.net,
Jonathan.Cameron@Huawei.com, bbhushan2@marvell.com,
chaitanyak@nvidia.com, rdunlap@infradead.org
Cc: linux-block@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-cxl@vger.kernel.org,
Dongsheng Yang <dongsheng.yang@linux.dev>
Subject: [PATCH v1 3/7] cbd: introduce cbd_segment
Date: Tue, 9 Jul 2024 13:03:39 +0000 [thread overview]
Message-ID: <20240709130343.858363-4-dongsheng.yang@linux.dev> (raw)
In-Reply-To: <20240709130343.858363-1-dongsheng.yang@linux.dev>
`cbd_segment` is an abstraction of the data area in the transport. The
data area in the transport is divided into segments.
The specific use of this area is determined by `cbd_seg_type`. For example,
`cbd_blkdev` and `cbd_backend` data transfers
need to access a segment of the type `cbds_type_channel`.
The segment abstraction also allows expansion to more scenarios by adding new segment types.
Signed-off-by: Dongsheng Yang <dongsheng.yang@linux.dev>
---
drivers/block/cbd/cbd_segment.c | 108 ++++++++++++++++++++++++++++++++
1 file changed, 108 insertions(+)
create mode 100644 drivers/block/cbd/cbd_segment.c
diff --git a/drivers/block/cbd/cbd_segment.c b/drivers/block/cbd/cbd_segment.c
new file mode 100644
index 000000000000..855bfa473b4c
--- /dev/null
+++ b/drivers/block/cbd/cbd_segment.c
@@ -0,0 +1,108 @@
+#include "cbd_internal.h"
+
+/*
+ * "detail" sysfs attribute: emit type-specific detail for this segment.
+ *
+ * An unused segment (state none) produces empty output.  For a
+ * channel-type segment the formatting is delegated to the channel
+ * code; the cast assumes the segment info is embedded at the start of
+ * struct cbd_channel_info (layout defined in cbd_internal.h).
+ */
+static ssize_t cbd_seg_detail_show(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct cbd_segment_device *seg_dev =
+		container_of(dev, struct cbd_segment_device, dev);
+	struct cbd_segment_info *info = seg_dev->segment_info;
+
+	if (info->state == cbd_segment_state_none)
+		return 0;
+
+	if (info->type != cbds_type_channel)
+		return 0;
+
+	return cbd_channel_seg_detail_show((struct cbd_channel_info *)info, buf);
+}
+
+/*
+ * "type" sysfs attribute: emit the human-readable segment type name
+ * (via cbds_type_str()), followed by a newline.
+ *
+ * An unused segment (state none) produces empty output.
+ */
+static ssize_t cbd_seg_type_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct cbd_segment_device *segment;
+	struct cbd_segment_info *segment_info;
+
+	segment = container_of(dev, struct cbd_segment_device, dev);
+	segment_info = segment->segment_info;
+
+	if (segment_info->state == cbd_segment_state_none)
+		return 0;
+
+	/*
+	 * sysfs_emit() is required for sysfs show() callbacks instead of
+	 * sprintf(): it bounds the write to the PAGE_SIZE sysfs buffer.
+	 */
+	return sysfs_emit(buf, "%s\n", cbds_type_str(segment_info->type));
+}
+
+/* Read-only for root (0400): segment details may expose internal state. */
+static DEVICE_ATTR(detail, 0400, cbd_seg_detail_show, NULL);
+static DEVICE_ATTR(type, 0400, cbd_seg_type_show, NULL);
+
+/*
+ * Expands to the heartbeat machinery for segments: presumably this
+ * generates segment_hb_workfn(), cbd_segment_info_is_alive() and the
+ * dev_attr_alive attribute used below -- see CBD_OBJ_HEARTBEAT in
+ * cbd_internal.h to confirm.
+ */
+CBD_OBJ_HEARTBEAT(segment);
+
+static struct attribute *cbd_segment_attrs[] = {
+	&dev_attr_detail.attr,
+	&dev_attr_type.attr,
+	&dev_attr_alive.attr,	/* provided by CBD_OBJ_HEARTBEAT() above */
+	NULL
+};
+
+static struct attribute_group cbd_segment_attr_group = {
+	.attrs = cbd_segment_attrs,
+};
+
+static const struct attribute_group *cbd_segment_attr_groups[] = {
+	&cbd_segment_attr_group,
+	NULL
+};
+
+/*
+ * NOTE(review): intentionally empty release -- the segment device appears
+ * to be embedded in a longer-lived structure (struct cbd_segment_device)
+ * whose memory is owned elsewhere, so there is nothing to free here.
+ * Confirm against the allocation site in cbd_transport; an empty release
+ * is usually a red flag for the device core.
+ */
+static void cbd_segment_release(struct device *dev)
+{
+}
+
+/* device_type for an individual segment device, with its sysfs attrs */
+const struct device_type cbd_segment_type = {
+	.name = "cbd_segment",
+	.groups = cbd_segment_attr_groups,
+	.release = cbd_segment_release,
+};
+
+/* device_type for the parent "cbd_segments" container device (no attrs) */
+const struct device_type cbd_segments_type = {
+	.name = "cbd_segments",
+	.release = cbd_segment_release,
+};
+
+/*
+ * cbd_segment_init() - take ownership of segment @seg_id on @cbdt.
+ *
+ * Binds @segment to the on-transport info for @seg_id, marks the segment
+ * running, and starts the periodic heartbeat (first run immediately,
+ * delay 0).  The state is written before the heartbeat work is queued
+ * so the work never observes a non-running segment it owns.
+ *
+ * NOTE(review): cbdt_get_segment_info() is assumed to never fail for a
+ * valid seg_id -- confirm; there is no NULL check here.
+ */
+void cbd_segment_init(struct cbd_segment *segment, struct cbd_transport *cbdt, u32 seg_id)
+{
+	struct cbd_segment_info *segment_info = cbdt_get_segment_info(cbdt, seg_id);
+
+	segment->cbdt = cbdt;
+	segment->segment_info = segment_info;
+	segment->seg_id = seg_id;
+
+	segment_info->state = cbd_segment_state_running;
+
+	INIT_DELAYED_WORK(&segment->hb_work, segment_hb_workfn);
+	queue_delayed_work(cbd_wq, &segment->hb_work, 0);
+}
+
+/*
+ * cbd_segment_exit() - release a segment initialized by cbd_segment_init().
+ *
+ * The heartbeat work is cancelled synchronously *before* the state is
+ * cleared, so no in-flight heartbeat can resurrect the segment after it
+ * has been marked unused.
+ */
+void cbd_segment_exit(struct cbd_segment *segment)
+{
+	cancel_delayed_work_sync(&segment->hb_work);
+
+	segment->segment_info->state = cbd_segment_state_none;
+}
+
+/*
+ * cbd_segment_clear() - wipe the on-transport data of segment @seg_id.
+ *
+ * Refuses with -EBUSY while the segment's heartbeat still reports it
+ * alive (it may be in use by another host); otherwise zeroes the whole
+ * CBDT_SEG_SIZE area and returns 0.
+ */
+int cbd_segment_clear(struct cbd_transport *cbdt, u32 seg_id)
+{
+	struct cbd_segment_info *info = cbdt_get_segment_info(cbdt, seg_id);
+
+	if (cbd_segment_info_is_alive(info)) {
+		cbdt_err(cbdt, "segment %u is still alive\n", seg_id);
+		return -EBUSY;
+	}
+
+	cbdt_zero_range(cbdt, info, CBDT_SEG_SIZE);
+	return 0;
+}
--
2.34.1
next prev parent reply other threads:[~2024-07-09 13:04 UTC|newest]
Thread overview: 8+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-07-09 13:03 [PATCH v1 0/7] Introduce CBD (CXL Block Device) Dongsheng Yang
2024-07-09 13:03 ` [PATCH v1 1/7] cbd: introduce cbd_transport Dongsheng Yang
2024-07-09 13:03 ` [PATCH v1 2/7] cbd: introduce cbd_host Dongsheng Yang
2024-07-09 13:03 ` Dongsheng Yang [this message]
2024-07-09 13:03 ` [PATCH v1 4/7] cbd: introduce cbd_channel Dongsheng Yang
2024-07-09 13:03 ` [PATCH v1 5/7] cbd: introduce cbd_blkdev Dongsheng Yang
2024-07-09 13:03 ` [PATCH v1 6/7] cbd: introduce cbd_backend Dongsheng Yang
2024-07-09 13:03 ` [PATCH v1 7/7] block: Init for CBD(CXL Block Device) module Dongsheng Yang
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240709130343.858363-4-dongsheng.yang@linux.dev \
--to=dongsheng.yang@linux.dev \
--cc=John@groves.net \
--cc=Jonathan.Cameron@Huawei.com \
--cc=axboe@kernel.dk \
--cc=bbhushan2@marvell.com \
--cc=chaitanyak@nvidia.com \
--cc=dan.j.williams@intel.com \
--cc=gregory.price@memverge.com \
--cc=linux-block@vger.kernel.org \
--cc=linux-cxl@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=rdunlap@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox