public inbox for linux-bcache@vger.kernel.org
From: Qiaowei Ren <qiaowei.ren@intel.com>
To: Coly Li <colyli@suse.de>
Cc: Qiaowei Ren <qiaowei.ren@intel.com>,
	Jianpeng Ma <jianpeng.ma@intel.com>,
	linux-bcache@vger.kernel.org
Subject: [PATCH 7/8] bcache: persist owner info when allocating/freeing pages
Date: Wed, 23 Dec 2020 09:41:35 -0500	[thread overview]
Message-ID: <20201223144136.24966-8-qiaowei.ren@intel.com> (raw)
In-Reply-To: <20201223144136.24966-1-qiaowei.ren@intel.com>

This patch implements persisting the owner info to the NVDIMM device
whenever pages are allocated or freed: after a successful
nvm_alloc_pages() or nvm_free_pages(), write_owner_info() rebuilds the
on-media allocation records from the in-memory owner lists and flushes
them to the first namespace with memcpy_flushcache().
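
For reference, a minimal caller-side sketch (the caller and the 16-byte
owner uuid value are hypothetical, shown only to illustrate how the
allocator entry points from the earlier patches in this series are used
and when the owner info below gets persisted):

	/* Hypothetical caller, e.g. somewhere in bcache set-up code. */
	static const char owner_uuid[16] = "example-owner-0";	/* made-up id */
	void *buf;

	buf = nvm_alloc_pages(2, owner_uuid);	/* 2^2 = 4 nvm pages */
	if (buf) {
		/* ... use the persistent-memory buffer ... */
		nvm_free_pages(buf, 2, owner_uuid);
	}

Both paths call write_owner_info() while holding only_set->lock, so the
on-media records stay consistent with the in-memory owner lists.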

Signed-off-by: Jianpeng Ma <jianpeng.ma@intel.com>
Signed-off-by: Qiaowei Ren <qiaowei.ren@intel.com>
---
 drivers/md/bcache/nvm-pages.c | 86 +++++++++++++++++++++++++++++++++++
 1 file changed, 86 insertions(+)

diff --git a/drivers/md/bcache/nvm-pages.c b/drivers/md/bcache/nvm-pages.c
index e8765b0b3398..ba1ff0582b20 100644
--- a/drivers/md/bcache/nvm-pages.c
+++ b/drivers/md/bcache/nvm-pages.c
@@ -197,6 +197,17 @@ static struct nvm_namespace *find_nvm_by_addr(void *addr, int order)
 	return NULL;
 }
 
+/* Reset an on-media record set and tag it with the 16-byte owner uuid. */
+static void init_pgalloc_recs(struct nvm_pgalloc_recs *recs, const char *owner_uuid)
+{
+	memset(recs, 0, sizeof(struct nvm_pgalloc_recs));
+	memcpy(recs->owner_uuid, owner_uuid, 16);
+}
+
+/* Convert a kernel virtual address inside the namespace into a page offset. */
+static pgoff_t vaddr_to_nvm_pgoff(struct nvm_namespace *ns, void *kaddr)
+{
+	return (kaddr - ns->kaddr - ns->pages_offset) / PAGE_SIZE;
+}
+
 static int remove_extent(struct nvm_alloced_recs *alloced_recs, void *addr, int order)
 {
 	struct list_head *list = alloced_recs->extent_head.next;
@@ -275,6 +286,77 @@ static void __free_space(struct nvm_namespace *ns, void *addr, int order)
 	ns->free += add_pages;
 }
 
+#define RECS_LEN (sizeof(struct nvm_pgalloc_recs))
+
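+/*
+ * Rebuild the on-media owner information from the in-memory state:
+ * for every owner, the allocated extents of each namespace are packed
+ * into nvm_pgalloc_recs sets (chained when a set fills up) starting at
+ * NVM_PAGES_SYS_RECS_HEAD_OFFSET, and the owner list head is then
+ * written back, all via memcpy_flushcache() on the first namespace.
+ */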
+static void write_owner_info(void)
+{
+	struct owner_list *owner_list;
+	struct nvm_pages_owner_head *owner_head;
+	struct nvm_pgalloc_recs *recs;
+	struct extent *extent;
+	struct nvm_namespace *ns = only_set->nss[0];
+	struct owner_list_head *owner_list_head;
+	bool update_owner = false;
+	u64 recs_pos = NVM_PAGES_SYS_RECS_HEAD_OFFSET;
+	struct list_head *list;
+	int i, j;
+
+	owner_list_head = kzalloc(sizeof(struct owner_list_head), GFP_KERNEL);
+	recs = kmalloc(RECS_LEN, GFP_KERNEL);
+	if (!owner_list_head || !recs) {
+		/* Without the buffers we cannot rewrite the owner info. */
+		kfree(owner_list_head);
+		kfree(recs);
+		return;
+	}
+
+	/* An in-memory owner list may contain no allocated pages at all. */
+	for (i = 0; i < only_set->owner_list_size; i++) {
+		owner_head = &owner_list_head->heads[owner_list_head->size];
+		owner_list = only_set->owner_lists[i];
+
+		for (j = 0; j < only_set->total_namespaces_nr; j++) {
+			struct nvm_alloced_recs *extents = owner_list->alloced_recs[j];
+
+			if (!extents || !extents->size)
+				continue;
+
+			init_pgalloc_recs(recs, owner_list->owner_uuid);
+
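+			/*
+			 * Remember where this owner/namespace record chain
+			 * starts on media; the offset is stored cast to a
+			 * pointer-sized value.
+			 */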
+			BUG_ON(recs_pos >= NVM_PAGES_OFFSET);
+			owner_head->recs[j] = (struct nvm_pgalloc_recs *)recs_pos;
+
+			for (list = extents->extent_head.next;
+				list != &extents->extent_head;
+				list = list->next) {
+				extent = container_of(list, struct extent, list);
+
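+				/*
+				 * The current record set is full: chain it to
+				 * the next on-media set, flush it, and start a
+				 * fresh set for the same owner.
+				 */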
+				if (recs->size == MAX_RECORD) {
+					BUG_ON(recs_pos >= NVM_PAGES_OFFSET);
+					recs->next =
+						(struct nvm_pgalloc_recs *)(recs_pos + RECS_LEN);
+					memcpy_flushcache(ns->kaddr + recs_pos, recs, RECS_LEN);
+					init_pgalloc_recs(recs, owner_list->owner_uuid);
+					recs_pos += RECS_LEN;
+				}
+
+				recs->recs[recs->size].pgoff =
+					vaddr_to_nvm_pgoff(only_set->nss[j], extent->kaddr);
+				recs->recs[recs->size].nr = extent->nr;
+				recs->size++;
+			}
+
+			update_owner = true;
+			memcpy_flushcache(ns->kaddr + recs_pos, recs, RECS_LEN);
+			recs_pos += RECS_LEN;
+		}
+
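+		/*
+		 * At least one namespace had records for this owner, so
+		 * commit the owner uuid and count it in the list head.
+		 */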
+		if (update_owner) {
+			memcpy(owner_head->uuid, owner_list->owner_uuid, 16);
+			owner_list_head->size++;
+			update_owner = false;
+		}
+	}
+
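+	/* Publish the rebuilt owner list head to persistent memory. */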
+	memcpy_flushcache(ns->kaddr + NVM_PAGES_OWNER_LIST_HEAD_OFFSET,
+				(void *)owner_list_head, sizeof(struct owner_list_head));
+	kfree(recs);
+	kfree(owner_list_head);
+}
+
 void nvm_free_pages(void *addr, int order, const char *owner_uuid)
 {
 	struct nvm_namespace *ns;
@@ -309,6 +391,7 @@ void nvm_free_pages(void *addr, int order, const char *owner_uuid)
 	}
 
 	__free_space(ns, addr, order);
+	write_owner_info();
 
 unlock:
 	mutex_unlock(&only_set->lock);
@@ -368,7 +451,10 @@ void *nvm_alloc_pages(int order, const char *owner_uuid)
 		}
 	}
 
+	if (kaddr)
+		write_owner_info();
 	mutex_unlock(&only_set->lock);
+
 	return kaddr;
 }
 EXPORT_SYMBOL_GPL(nvm_alloc_pages);
-- 
2.17.1



Thread overview: 9+ messages
2020-12-23 14:41 [RFC PATCH 0/8] nvm page allocator for bcache Qiaowei Ren
2020-12-23 14:41 ` [PATCH 1/8] bcache: add initial data structures for nvm pages Qiaowei Ren
2020-12-23 14:41 ` [PATCH 2/8] bcache: initialize the nvm pages allocator Qiaowei Ren
2020-12-23 14:41 ` [PATCH 3/8] bcache: initialization of the buddy Qiaowei Ren
2020-12-23 14:41 ` [PATCH 4/8] bcache: nvm_alloc_pages() " Qiaowei Ren
2020-12-23 14:41 ` [PATCH 5/8] bcache: nvm_free_pages() " Qiaowei Ren
2020-12-23 14:41 ` [PATCH 6/8] bcache: get allocated pages from specific owner Qiaowei Ren
2020-12-23 14:41 ` Qiaowei Ren [this message]
2020-12-23 14:41 ` [PATCH 8/8] bcache: testing module for nvm pages allocator Qiaowei Ren
