From: Jens Axboe <axboe@kernel.dk>
To: Coly Li <colyli@suse.de>
Cc: linux-bcache@vger.kernel.org, linux-block@vger.kernel.org,
Jianpeng Ma <jianpeng.ma@intel.com>,
kernel test robot <lkp@intel.com>,
Dan Carpenter <dan.carpenter@oracle.com>,
Qiaowei Ren <qiaowei.ren@intel.com>,
Christoph Hellwig <hch@lst.de>,
Dan Williams <dan.j.williams@intel.com>,
Hannes Reinecke <hare@suse.de>
Subject: Re: [PATCH v13 03/12] bcache: initialization of the buddy
Date: Sun, 12 Dec 2021 13:10:27 -0700 [thread overview]
Message-ID: <9e578d54-296d-813a-876f-45881ce5a1ba@kernel.dk> (raw)
In-Reply-To: <20211212170552.2812-4-colyli@suse.de>
> diff --git a/drivers/md/bcache/nvmpg.c b/drivers/md/bcache/nvmpg.c
> index b654bbbda03e..2b70ee4a6028 100644
> --- a/drivers/md/bcache/nvmpg.c
> +++ b/drivers/md/bcache/nvmpg.c
> @@ -50,6 +50,36 @@ unsigned long bch_nvmpg_ptr_to_offset(struct bch_nvmpg_ns *ns, void *ptr)
> return BCH_NVMPG_OFFSET(ns_id, offset);
> }
>
> +static struct page *bch_nvmpg_va_to_pg(void *addr)
> +{
> + return virt_to_page(addr);
> +}
What's the purpose of this helper?
> +static inline void reserve_nvmpg_pages(struct bch_nvmpg_ns *ns,
> + pgoff_t pgoff, u64 nr)
> +{
> + while (nr > 0) {
> + unsigned int num = nr > UINT_MAX ? UINT_MAX : nr;
Surely UINT_MAX isn't anywhere near a valid limit?
> @@ -76,10 +110,73 @@ static void release_nvmpg_set(struct bch_nvmpg_set *set)
> kfree(set);
> }
>
> +static int validate_recs(int ns_id,
> + struct bch_nvmpg_head *head,
> + struct bch_nvmpg_recs *recs)
> +{
> + if (memcmp(recs->magic, bch_nvmpg_recs_magic, 16)) {
> + pr_err("Invalid bch_nvmpg_recs magic\n");
> + return -EINVAL;
> + }
> +
> + if (memcmp(recs->uuid, head->uuid, 16)) {
> + pr_err("Invalid bch_nvmpg_recs uuid\n");
> + return -EINVAL;
> + }
> +
> + if (recs->head_offset !=
> + bch_nvmpg_ptr_to_offset(global_nvmpg_set->ns_tbl[ns_id], head)) {
> + pr_err("Invalid recs head_offset\n");
> + return -EINVAL;
> + }
Same comments here on the frivolous error messaging, other places in
this file too. Check all the other patches as well, please.
> /* Namespace 0 contains all meta data of the nvmpg allocation set */
> static int init_nvmpg_set_header(struct bch_nvmpg_ns *ns)
> {
> struct bch_nvmpg_set_header *set_header;
> + struct bch_nvmpg_recs *sys_recs;
> + int i, j, used = 0, rc = 0;
>
> if (ns->ns_id != 0) {
> pr_err("unexpected ns_id %u for first nvmpg namespace.\n",
> @@ -93,9 +190,83 @@ static int init_nvmpg_set_header(struct bch_nvmpg_ns *ns)
> global_nvmpg_set->set_header = set_header;
> global_nvmpg_set->heads_size = set_header->size;
> global_nvmpg_set->heads_used = set_header->used;
> +
> + /* Reserve the used space from buddy allocator */
> + reserve_nvmpg_pages(ns, 0, div_u64(ns->pages_offset, ns->page_size));
> +
> + sys_recs = ns->base_addr + BCH_NVMPG_SYSRECS_OFFSET;
> + for (i = 0; i < set_header->size; i++) {
> + struct bch_nvmpg_head *head;
> +
> + head = &set_header->heads[i];
> + if (head->state == BCH_NVMPG_HD_STAT_FREE)
> + continue;
> +
> + used++;
> + if (used > global_nvmpg_set->heads_size) {
> + pr_err("used heads %d > heads size %d.\n",
> + used, global_nvmpg_set->heads_size);
> + goto unlock;
> + }
> +
> + for (j = 0; j < BCH_NVMPG_NS_MAX; j++) {
> + struct bch_nvmpg_recs *recs;
> +
> + recs = bch_nvmpg_offset_to_ptr(head->recs_offset[j]);
> +
> + /* Iterate the recs list */
> + while (recs) {
> + rc = validate_recs(j, head, recs);
> + if (rc < 0)
> + goto unlock;
> +
> + rc = reserve_nvmpg_recs(recs);
> + if (rc < 0)
> + goto unlock;
> +
> + bitmap_set(ns->recs_bitmap, recs - sys_recs, 1);
> + recs = bch_nvmpg_offset_to_ptr(recs->next_offset);
> + }
> + }
> + }
> +unlock:
> mutex_unlock(&global_nvmpg_set->lock);
> + return rc;
> +}
>
> - return 0;
> +static void bch_nvmpg_init_free_space(struct bch_nvmpg_ns *ns)
> +{
> + unsigned int start, end, pages;
> + int i;
> + struct page *page;
> + pgoff_t pgoff_start;
> +
> + bitmap_for_each_clear_region(ns->pages_bitmap,
> + start, end, 0, ns->pages_total) {
> + pgoff_start = start;
> + pages = end - start;
> +
> + while (pages) {
> + void *addr;
> +
> + for (i = BCH_MAX_ORDER - 1; i >= 0; i--) {
> + if ((pgoff_start % (1L << i) == 0) &&
> + (pages >= (1L << i)))
> + break;
> + }
> +
> + addr = bch_nvmpg_pgoff_to_ptr(ns, pgoff_start);
> + page = bch_nvmpg_va_to_pg(addr);
> + set_page_private(page, i);
> + page->index = pgoff_start;
> + __SetPageBuddy(page);
> + list_add((struct list_head *)&page->zone_device_data,
> + &ns->free_area[i]);
> +
> + pgoff_start += 1L << i;
> + pages -= 1L << i;
> + }
> + }
> }
>
> static int attach_nvmpg_set(struct bch_nvmpg_ns *ns)
> @@ -200,7 +371,7 @@ struct bch_nvmpg_ns *bch_register_namespace(const char *dev_path)
> char buf[BDEVNAME_SIZE];
> struct block_device *bdev;
> pgoff_t pgoff;
> - int id, err;
> + int id, i, err;
> char *path;
> long dax_ret = 0;
>
> @@ -304,13 +475,48 @@ struct bch_nvmpg_ns *bch_register_namespace(const char *dev_path)
>
> mutex_init(&ns->lock);
>
> + /*
> + * parameters of bitmap_set/clear are unsigned int.
> + * Given currently size of nvm is far from exceeding this limit,
> + * so only add a WARN_ON message.
> + */
> + WARN_ON(BITS_TO_LONGS(ns->pages_total) > UINT_MAX);
Does this really need to be a WARN_ON()? Looks more like an -EINVAL
condition.
--
Jens Axboe
next prev parent reply other threads:[~2021-12-12 20:11 UTC|newest]
Thread overview: 28+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-12-12 17:05 [PATCH v13 00/12] bcache for 5.17: enable NVDIMM for bcache journal Coly Li
2021-12-12 17:05 ` [PATCH v13 01/12] bcache: add initial data structures for nvm pages Coly Li
2021-12-12 17:05 ` [PATCH v13 02/12] bcache: initialize the nvm pages allocator Coly Li
2021-12-12 19:34 ` Jens Axboe
2021-12-28 5:29 ` Coly Li
2021-12-12 17:05 ` [PATCH v13 03/12] bcache: initialization of the buddy Coly Li
2021-12-12 20:10 ` Jens Axboe [this message]
2021-12-28 5:29 ` Coly Li
2021-12-15 16:20 ` Dan Carpenter
2021-12-28 5:12 ` Coly Li
2021-12-12 17:05 ` [PATCH v13 04/12] bcache: bch_nvmpg_alloc_pages() " Coly Li
2021-12-12 20:14 ` Jens Axboe
2021-12-28 5:29 ` Coly Li
2021-12-12 17:05 ` [PATCH v13 05/12] bcache: bch_nvmpg_free_pages() of the buddy allocator Coly Li
2021-12-12 20:16 ` Jens Axboe
2021-12-28 5:29 ` Coly Li
2022-02-21 12:36 ` yukuai (C)
2022-02-22 5:03 ` Ma, Jianpeng
2021-12-12 17:05 ` [PATCH v13 06/12] bcache: get recs list head for allocated pages by specific uuid Coly Li
2021-12-12 20:18 ` Jens Axboe
2021-12-12 17:05 ` [PATCH v13 07/12] bcache: use bucket index to set GC_MARK_METADATA for journal buckets in bch_btree_gc_finish() Coly Li
2021-12-12 17:05 ` [PATCH v13 08/12] bcache: add BCH_FEATURE_INCOMPAT_NVDIMM_META into incompat feature set Coly Li
2021-12-12 17:05 ` [PATCH v13 09/12] bcache: initialize bcache journal for NVDIMM meta device Coly Li
2021-12-12 17:05 ` [PATCH v13 10/12] bcache: support storing bcache journal into " Coly Li
2021-12-12 17:05 ` [PATCH v13 11/12] bcache: read jset from NVDIMM pages for journal replay Coly Li
2021-12-12 17:05 ` [PATCH v13 12/12] bcache: add sysfs interface register_nvdimm_meta to register NVDIMM meta device Coly Li
2021-12-12 20:20 ` [PATCH v13 00/12] bcache for 5.17: enable NVDIMM for bcache journal Jens Axboe
2021-12-28 5:29 ` Coly Li
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=9e578d54-296d-813a-876f-45881ce5a1ba@kernel.dk \
--to=axboe@kernel.dk \
--cc=colyli@suse.de \
--cc=dan.carpenter@oracle.com \
--cc=dan.j.williams@intel.com \
--cc=hare@suse.de \
--cc=hch@lst.de \
--cc=jianpeng.ma@intel.com \
--cc=linux-bcache@vger.kernel.org \
--cc=linux-block@vger.kernel.org \
--cc=lkp@intel.com \
--cc=qiaowei.ren@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox