public inbox for linux-bcache@vger.kernel.org
 help / color / mirror / Atom feed
From: Coly Li <colyli@suse.de>
To: Qiaowei Ren <qiaowei.ren@intel.com>
Cc: linux-bcache@vger.kernel.org, Jianpeng Ma <jianpeng.ma@intel.com>
Subject: Re: [RFC PATCH 8/8] bcache: testing module for nvm pages allocator
Date: Wed, 16 Dec 2020 18:53:22 +0800	[thread overview]
Message-ID: <2a0ef4fc-73b5-b2bf-53d2-b15479806020@suse.de> (raw)
In-Reply-To: <20201203105337.4592-9-qiaowei.ren@intel.com>

On 12/3/20 6:53 PM, Qiaowei Ren wrote:
> This patch creates the testing module for nvm pages allocator.
> Before this module is loaded, the super block needs to be written
> into nvdimm device (like /dev/pmemX).
> 
> Signed-off-by: Jianpeng Ma <jianpeng.ma@intel.com>
> Signed-off-by: Qiaowei Ren <qiaowei.ren@intel.com>


The testing patch is OK for me. Although I will not submit it to the
mainline kernel, we do need it during development.

Thanks for this.

Coly Li


> ---
>  drivers/md/bcache/Kconfig    |   6 ++
>  drivers/md/bcache/Makefile   |   2 +
>  drivers/md/bcache/test-nvm.c | 117 +++++++++++++++++++++++++++++++++++
>  3 files changed, 125 insertions(+)
>  create mode 100644 drivers/md/bcache/test-nvm.c
> 
> diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
> index 448a99ce13b2..1e4f4ea2f1a0 100644
> --- a/drivers/md/bcache/Kconfig
> +++ b/drivers/md/bcache/Kconfig
> @@ -41,3 +41,9 @@ config BCACHE_NVM_PAGES
>  	depends on BCACHE
>  	help
>  	nvm pages allocator for bcache.
> +
> +config BCACHE_NVM_PAGES_TEST
> +       tristate "Testing for NVM pages"
> +       depends on BCACHE_NVM_PAGES
> +       help
> +       Testing module for NVM pages allocator.
> diff --git a/drivers/md/bcache/Makefile b/drivers/md/bcache/Makefile
> index 948e5ed2ca66..7b7d3535f4ef 100644
> --- a/drivers/md/bcache/Makefile
> +++ b/drivers/md/bcache/Makefile
> @@ -5,3 +5,5 @@ obj-$(CONFIG_BCACHE)	+= bcache.o
>  bcache-y		:= alloc.o bset.o btree.o closure.o debug.o extents.o\
>  	io.o journal.o movinggc.o request.o stats.o super.o sysfs.o trace.o\
>  	util.o writeback.o features.o nvm-pages.o
> +
> +obj-$(CONFIG_BCACHE_NVM_PAGES_TEST) += test-nvm.o
> diff --git a/drivers/md/bcache/test-nvm.c b/drivers/md/bcache/test-nvm.c
> new file mode 100644
> index 000000000000..28133ceaa8fd
> --- /dev/null
> +++ b/drivers/md/bcache/test-nvm.c
> @@ -0,0 +1,117 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +#include <linux/init.h>
> +#include <linux/kernel.h>
> +#include <linux/module.h>
> +#include <linux/slab.h>
> +#include <linux/crc32.h>
> +#include <linux/uuid.h>
> +#include <linux/prandom.h>
> +#include <linux/pagemap.h>
> +#include <linux/pfn_t.h>
> +#include "nvm-pages.h"
> +
> +static char *host = "NVDIMM device name";
> +module_param(host, charp, 0444);
> +
> +#define MAX_OWNER 10
> +
> +static pgoff_t vaddr_to_nvm_pgoff(struct nvm_namespace *ns, void *kaddr)
> +{
> +	return (kaddr - ns->kaddr - ns->pages_offset) / PAGE_SIZE;
> +}
> +
> +static void print_nvm_extent(struct nvm_alloced_recs *extents)
> +{
> +	struct list_head *list = extents->extent_head.next;
> +	struct nvm_namespace *ns = extents->ns;
> +	struct extent *e;
> +	pgoff_t pgoff;
> +
> +	while (list != &extents->extent_head) {
> +		e = container_of(list, struct extent, list);
> +		pgoff = vaddr_to_nvm_pgoff(ns, e->kaddr);
> +		pr_info(" [%ld ~ %u)", pgoff, e->nr);
> +		list = list->next;
> +	}
> +	pr_info("\n");
> +}
> +
> +static void print_owner_list_info(struct nvm_set *nvm_set, bool print_extent)
> +{
> +	struct owner_list *owner_list;
> +	struct nvm_alloced_recs *extents;
> +	int i, j;
> +
> +	for (i = 0; i < nvm_set->owner_list_size; i++) {
> +		owner_list = nvm_set->owner_lists[i];
> +		pr_info("owner uuid=%pU\n", owner_list->owner_uuid);
> +		for (j = 0; j < nvm_set->total_namespaces_nr; j++) {
> +			if (owner_list->alloced_recs[j]) {
> +				extents = owner_list->alloced_recs[j];
> +				pr_info("\t nvm uuid=%pU, allocated extents=%u\n",
> +					extents->ns->uuid, extents->size);
> +				if (print_extent)
> +					print_nvm_extent(extents);
> +			}
> +		}
> +	}
> +}
> +
> +static void test_case(struct nvm_set *nvm_set, char **owner_uuids)
> +{
> +	int i, order;
> +	void *addr[MAX_OWNER];
> +
> +	for (i = 0; i < MAX_OWNER; i++) {
> +		order = prandom_u32() % MAX_ORDER;
> +		addr[i] = nvm_alloc_pages(order, owner_uuids[i]);
> +	}
> +	print_owner_list_info(nvm_set, true);
> +	for (i = 0; i < MAX_OWNER; i++) {
> +		struct page *page = virt_to_page(addr[i]);
> +
> +		nvm_free_pages(addr[i], page->private, owner_uuids[i]);
> +	}
> +	print_owner_list_info(nvm_set, true);
> +}
> +
> +static int __init test_nvm_init(void)
> +{
> +	char **owner_uuids;
> +	struct nvm_set *nvm_set;
> +	int i, r = 0;
> +	struct nvm_namespace *ns = register_namespace(host);
> +
> +	pr_info("nvm pages test enter: %s\n", host);
> +	if (IS_ERR(ns)) {
> +		pr_info("failed to register namespace: %s\n", host);
> +		r = -EINVAL;
> +		goto err;
> +	}
> +
> +	owner_uuids = kcalloc(MAX_OWNER, sizeof(char *), GFP_KERNEL);
> +	for (i = 0; i < MAX_OWNER; i++) {
> +		owner_uuids[i] = kmalloc(16, GFP_KERNEL);
> +		generate_random_uuid(owner_uuids[i]);
> +	}
> +
> +	nvm_set = ns->nvm_set;
> +	test_case(nvm_set, owner_uuids);
> +
> +	for (i = 0; i < MAX_OWNER; i++)
> +		kfree(owner_uuids[i]);
> +	kfree(owner_uuids);
> +
> +err:
> +	return r;
> +}
> +module_init(test_nvm_init);
> +
> +static void __exit test_nvm_exit(void)
> +{
> +	pr_info("nvm pages test exit\n");
> +}
> +module_exit(test_nvm_exit);
> +
> +MODULE_LICENSE("GPL v2");
> 


      reply	other threads:[~2020-12-16 10:54 UTC|newest]

Thread overview: 21+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-12-03 10:53 [RFC PATCH 0/8] nvm page allocator for bcache Qiaowei Ren
2020-12-03 10:53 ` [RFC PATCH 1/8] bcache: add initial data structures for nvm pages Qiaowei Ren
2020-12-03 10:53 ` [RFC PATCH 2/8] bcache: initialize the nvm pages allocator Qiaowei Ren
2020-12-07 14:17   ` Zhiqiang Liu
2020-12-08  1:50     ` Ma, Jianpeng
2020-12-16 10:17   ` Coly Li
2020-12-17  3:35     ` Ma, Jianpeng
2020-12-03 10:53 ` [RFC PATCH 3/8] bcache: initialization of the buddy Qiaowei Ren
2020-12-16 10:30   ` Coly Li
2020-12-17  3:36     ` Ma, Jianpeng
2020-12-03 10:53 ` [RFC PATCH 4/8] bcache: nvm_alloc_pages() " Qiaowei Ren
2020-12-16 10:44   ` Coly Li
2020-12-03 10:53 ` [RFC PATCH 5/8] bcache: nvm_free_pages() " Qiaowei Ren
2020-12-16 10:46   ` Coly Li
2020-12-03 10:53 ` [RFC PATCH 6/8] bcache: get allocated pages from specific owner Qiaowei Ren
2020-12-16 10:48   ` Coly Li
2020-12-03 10:53 ` [RFC PATCH 7/8] bcache: persist owner info when alloc/free pages Qiaowei Ren
2020-12-16 10:49   ` Coly Li
2020-12-17  3:38     ` Ma, Jianpeng
2020-12-03 10:53 ` [RFC PATCH 8/8] bcache: testing module for nvm pages allocator Qiaowei Ren
2020-12-16 10:53   ` Coly Li [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=2a0ef4fc-73b5-b2bf-53d2-b15479806020@suse.de \
    --to=colyli@suse.de \
    --cc=jianpeng.ma@intel.com \
    --cc=linux-bcache@vger.kernel.org \
    --cc=qiaowei.ren@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox