From: Qiaowei Ren <qiaowei.ren@intel.com>
To: Coly Li <colyli@suse.de>
Cc: linux-bcache@vger.kernel.org, Qiaowei Ren <qiaowei.ren@intel.com>,
Jianpeng Ma <jianpeng.ma@intel.com>
Subject: [RFC PATCH 5/8] bcache: nvm_free_pages() of the buddy
Date: Thu, 3 Dec 2020 05:53:34 -0500 [thread overview]
Message-ID: <20201203105337.4592-6-qiaowei.ren@intel.com> (raw)
In-Reply-To: <20201203105337.4592-1-qiaowei.ren@intel.com>
This patch implements nvm_free_pages() for the buddy allocator.
Signed-off-by: Jianpeng Ma <jianpeng.ma@intel.com>
Signed-off-by: Qiaowei Ren <qiaowei.ren@intel.com>
---
drivers/md/bcache/nvm-pages.c | 142 ++++++++++++++++++++++++++++++++++
drivers/md/bcache/nvm-pages.h | 2 +
2 files changed, 144 insertions(+)
diff --git a/drivers/md/bcache/nvm-pages.c b/drivers/md/bcache/nvm-pages.c
index 2cde62081c4f..16b65a866041 100644
--- a/drivers/md/bcache/nvm-pages.c
+++ b/drivers/md/bcache/nvm-pages.c
@@ -173,6 +173,148 @@ static void add_extent(struct nvm_alloced_recs *alloced_recs, void *addr, int or
}
}
+/* One past the last byte of the namespace's page area. */
+static inline void *nvm_end_addr(struct nvm_namespace *ns)
+{
+	void *data_start = ns->kaddr + ns->pages_offset;
+
+	return data_start + (ns->pages_total << PAGE_SHIFT);
+}
+
+/* True when [start_addr, end_addr] lies inside the namespace mapping. */
+static inline bool in_nvm_range(struct nvm_namespace *ns,
+			void *start_addr, void *end_addr)
+{
+	if (start_addr < ns->kaddr)
+		return false;
+
+	return end_addr <= nvm_end_addr(ns);
+}
+
+/*
+ * Find the namespace whose mapped range fully contains the allocation
+ * [addr, addr + (1 << order) pages).  Returns NULL when no namespace
+ * covers the range.
+ */
+static struct nvm_namespace *find_nvm_by_addr(void *addr, int order)
+{
+	int i;
+	struct nvm_namespace *ns;
+	/*
+	 * "order" is a page order, so the allocation spans (1 << order)
+	 * pages.  The size in bytes therefore needs the PAGE_SHIFT, as
+	 * in remove_extent(); without it the range check was too short
+	 * and could match a namespace that only holds part of the block.
+	 */
+	size_t bytes = (size_t)(1UL << order) << PAGE_SHIFT;
+
+	for (i = 0; i < only_set->total_namespaces_nr; i++) {
+		ns = only_set->nss[i];
+		if ((ns != NULL) && in_nvm_range(ns, addr, addr + bytes))
+			return ns;
+	}
+	return NULL;
+}
+
+/*
+ * Remove the byte range [addr, addr + (1 << order) pages) from the
+ * owner's allocation records.  The range may match a recorded extent
+ * exactly, trim its head or tail, or split it in two.
+ *
+ * Returns 0 on success, -ENOENT when no recorded extent contains the
+ * range, and -ENOMEM when splitting an extent fails to allocate (the
+ * records are left unmodified in that case).
+ */
+static int remove_extent(struct nvm_alloced_recs *alloced_recs, void *addr, int order)
+{
+	struct list_head *list = alloced_recs->extent_head.next;
+	struct extent *extent;
+	void *end_addr = addr + ((1 << order) << PAGE_SHIFT);
+
+	while (list != &alloced_recs->extent_head) {
+		extent = container_of(list, struct extent, list);
+
+		/* Skip extents that don't fully contain the range. */
+		if (addr < extent->kaddr || end_addr > extent_end_addr(extent)) {
+			list = list->next;
+			continue;
+		}
+
+		if (addr == extent->kaddr) {
+			if (extent->nr == (1 << order)) {
+				/* Exact match: drop the whole record. */
+				list_del(list);
+				kfree(extent);
+				alloced_recs->size--;
+			} else {
+				/* Range is the head of the extent: trim it. */
+				extent->kaddr = end_addr;
+				extent->nr -= 1 << order;
+			}
+		} else {
+			if (extent_end_addr(extent) > end_addr) {
+				/*
+				 * Range is in the middle: split off a new
+				 * record for the tail part.  Bail out before
+				 * touching the original record so a failed
+				 * allocation leaves the records consistent.
+				 */
+				struct extent *e = kzalloc(sizeof(struct extent), GFP_KERNEL);
+
+				if (!e)
+					return -ENOMEM;
+
+				e->kaddr = end_addr;
+				e->nr = (pgoff_t)(extent_end_addr(extent) - end_addr) >> PAGE_SHIFT;
+				list_add(&e->list, list);
+				alloced_recs->size++;
+			}
+			/* Keep only the head part in the original record. */
+			extent->nr = (addr - extent->kaddr) >> PAGE_SHIFT;
+		}
+		break;
+	}
+	return (list == &alloced_recs->extent_head) ? -ENOENT : 0;
+}
+
+/*
+ * Return a (1 << order)-page block starting at @addr to the buddy free
+ * lists of @ns, merging with free buddy blocks into larger blocks where
+ * possible.  Caller holds only_set->lock (see nvm_free_pages()).
+ */
+static void __free_space(struct nvm_namespace *ns, void *addr, int order)
+{
+	unsigned int add_pages = (1 << order);
+	pgoff_t pgoff;
+	struct page *page;
+
+	page = nvm_vaddr_to_page(ns, addr);
+	/* page->private records the order used at allocation time. */
+	WARN_ON(page->private != order);
+	pgoff = page->index;
+
+	/* Coalesce with free buddies while the block can still grow. */
+	while (order < MAX_ORDER - 1) {
+		struct page *buddy_page;
+
+		/* Buddy of this block, and the merged (parent) block start. */
+		pgoff_t buddy_pgoff = pgoff ^ (1 << order);
+		pgoff_t parent_pgoff = pgoff & ~(1 << order);
+
+		/* Stop if the merged block would run past the namespace. */
+		if ((parent_pgoff + (1 << (order + 1)) > ns->pages_total))
+			break;
+
+		buddy_page = nvm_vaddr_to_page(ns, nvm_pgoff_to_vaddr(ns, buddy_pgoff));
+
+		/* Merge only when the buddy is free and of the same order. */
+		if (PageBuddy(buddy_page) && (buddy_page->private == order)) {
+			/* zone_device_data doubles as the free-list linkage. */
+			list_del((struct list_head *)&buddy_page->zone_device_data);
+			__ClearPageBuddy(buddy_page);
+			pgoff = parent_pgoff;
+			order++;
+			continue;
+		}
+		break;
+	}
+
+	/* Insert the (possibly merged) block into the matching free list. */
+	page = nvm_vaddr_to_page(ns, nvm_pgoff_to_vaddr(ns, pgoff));
+	list_add((struct list_head *)&page->zone_device_data, &ns->free_area[order]);
+	page->index = pgoff;
+	page->private = order;
+	__SetPageBuddy(page);
+	/* Account the originally freed pages; merging doesn't change the count. */
+	ns->free += add_pages;
+}
+
+/**
+ * nvm_free_pages - free pages previously allocated by nvm_alloc_pages()
+ * @addr: kernel virtual address of the first page, as returned by
+ *        nvm_alloc_pages()
+ * @order: allocation order used at allocation time
+ * @owner_uuid: uuid of the owner the pages were allocated for
+ *
+ * Removes the range from the owner's allocation records and returns it
+ * to the namespace's buddy free lists.  Errors (unknown address, unknown
+ * owner, unrecorded extent) are logged and the request is ignored.
+ */
+void nvm_free_pages(void *addr, int order, const char *owner_uuid)
+{
+	struct nvm_namespace *ns;
+	struct owner_list *owner_list;
+	struct nvm_alloced_recs *alloced_recs;
+	int r;
+
+	mutex_lock(&only_set->lock);
+
+	ns = find_nvm_by_addr(addr, order);
+	if (ns == NULL) {
+		pr_info("can't find nvm_dev by kaddr %p\n", addr);
+		goto unlock;
+	}
+
+	owner_list = find_owner_list(owner_uuid, false);
+	if (owner_list == NULL) {
+		/* Fix grammar and match the "can't find ..." wording of the
+		 * sibling error paths. */
+		pr_info("can't find owner(uuid=%s)\n", owner_uuid);
+		goto unlock;
+	}
+
+	alloced_recs = find_nvm_alloced_recs(owner_list, ns, false);
+	if (alloced_recs == NULL) {
+		pr_info("can't find alloced_recs(uuid=%s)\n", ns->uuid);
+		goto unlock;
+	}
+
+	r = remove_extent(alloced_recs, addr, order);
+	if (r < 0) {
+		pr_info("can't find extent\n");
+		goto unlock;
+	}
+
+	__free_space(ns, addr, order);
+
+unlock:
+	mutex_unlock(&only_set->lock);
+}
+EXPORT_SYMBOL_GPL(nvm_free_pages);
+
void *nvm_alloc_pages(int order, const char *owner_uuid)
{
void *kaddr = NULL;
diff --git a/drivers/md/bcache/nvm-pages.h b/drivers/md/bcache/nvm-pages.h
index 95b7fa4b7dd0..1e435ce0ddf4 100644
--- a/drivers/md/bcache/nvm-pages.h
+++ b/drivers/md/bcache/nvm-pages.h
@@ -78,6 +78,7 @@ extern int bch_nvm_init(void);
extern void bch_nvm_exit(void);
extern void *nvm_alloc_pages(int order, const char *owner_uuid);
+extern void nvm_free_pages(void *addr, int order, const char *owner_uuid);
#else
@@ -92,6 +93,7 @@ static inline int bch_nvm_init(void)
static inline void bch_nvm_exit(void) { }
static inline void *nvm_alloc_pages(int order, const char *owner_uuid) { }
+static inline void nvm_free_pages(void *addr, int order, const char *owner_uuid) { }
#endif /* CONFIG_BCACHE_NVM_PAGES */
--
2.17.1
next prev parent reply other threads:[~2020-12-03 3:11 UTC|newest]
Thread overview: 21+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-12-03 10:53 [RFC PATCH 0/8] nvm page allocator for bcache Qiaowei Ren
2020-12-03 10:53 ` [RFC PATCH 1/8] bcache: add initial data structures for nvm pages Qiaowei Ren
2020-12-03 10:53 ` [RFC PATCH 2/8] bcache: initialize the nvm pages allocator Qiaowei Ren
2020-12-07 14:17 ` Zhiqiang Liu
2020-12-08 1:50 ` Ma, Jianpeng
2020-12-16 10:17 ` Coly Li
2020-12-17 3:35 ` Ma, Jianpeng
2020-12-03 10:53 ` [RFC PATCH 3/8] bcache: initialization of the buddy Qiaowei Ren
2020-12-16 10:30 ` Coly Li
2020-12-17 3:36 ` Ma, Jianpeng
2020-12-03 10:53 ` [RFC PATCH 4/8] bcache: nvm_alloc_pages() " Qiaowei Ren
2020-12-16 10:44 ` Coly Li
2020-12-03 10:53 ` Qiaowei Ren [this message]
2020-12-16 10:46 ` [RFC PATCH 5/8] bcache: nvm_free_pages() " Coly Li
2020-12-03 10:53 ` [RFC PATCH 6/8] bcache: get allocated pages from specific owner Qiaowei Ren
2020-12-16 10:48 ` Coly Li
2020-12-03 10:53 ` [RFC PATCH 7/8] bcache: persist owner info when alloc/free pages Qiaowei Ren
2020-12-16 10:49 ` Coly Li
2020-12-17 3:38 ` Ma, Jianpeng
2020-12-03 10:53 ` [RFC PATCH 8/8] bcache: testing module for nvm pages allocator Qiaowei Ren
2020-12-16 10:53 ` Coly Li
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20201203105337.4592-6-qiaowei.ren@intel.com \
--to=qiaowei.ren@intel.com \
--cc=colyli@suse.de \
--cc=jianpeng.ma@intel.com \
--cc=linux-bcache@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox