From: kernel test robot <lkp@intel.com>
To: "Mike Rapoport (IBM)" <rppt@kernel.org>
Cc: llvm@lists.linux.dev, oe-kbuild-all@lists.linux.dev,
Mike Rapoport <rppt@kernel.org>
Subject: [rppt:execmem/x86-rox/rfc-v2 22/26] mm/execmem.c:54:24: error: use of undeclared identifier 'PMD_SIZE'
Date: Wed, 8 May 2024 21:09:27 +0800 [thread overview]
Message-ID: <202405082149.P22IAtVv-lkp@intel.com> (raw)
tree: https://git.kernel.org/pub/scm/linux/kernel/git/rppt/linux.git execmem/x86-rox/rfc-v2
head: 4558f82d6dd794c972d26cbbd6b2f9d5a18dc472
commit: 4955fdf4d8459ab94ae656dcb23f94c375388f21 [22/26] execmem: add support for cache of large ROX pages
config: arm-randconfig-002-20240508 (https://download.01.org/0day-ci/archive/20240508/202405082149.P22IAtVv-lkp@intel.com/config)
compiler: clang version 19.0.0git (https://github.com/llvm/llvm-project 0ab4458df0688955620b72cc2c72a32dffad3615)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240508/202405082149.P22IAtVv-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202405082149.P22IAtVv-lkp@intel.com/
All errors (new ones prefixed by >>):
In file included from mm/execmem.c:9:
In file included from include/linux/mm.h:2208:
include/linux/vmstat.h:522:36: warning: arithmetic between different enumeration types ('enum node_stat_item' and 'enum lru_list') [-Wenum-enum-conversion]
522 | return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
| ~~~~~~~~~~~ ^ ~~~
>> mm/execmem.c:54:24: error: use of undeclared identifier 'PMD_SIZE'
54 | if (IS_ALIGNED(size, PMD_SIZE) && IS_ALIGNED(mas.index, PMD_SIZE)) {
| ^
mm/execmem.c:54:59: error: use of undeclared identifier 'PMD_SIZE'
54 | if (IS_ALIGNED(size, PMD_SIZE) && IS_ALIGNED(mas.index, PMD_SIZE)) {
| ^
mm/execmem.c:88:11: error: use of undeclared identifier 'PMD_SIZE'
88 | align = PMD_SIZE;
| ^
mm/execmem.c:228:30: error: use of undeclared identifier 'PMD_SIZE'
228 | alloc_size = round_up(size, PMD_SIZE);
| ^
mm/execmem.c:244:2: error: call to undeclared function 'flush_tlb_kernel_range'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
244 | flush_tlb_kernel_range(start, end);
| ^
>> mm/execmem.c:249:12: error: use of undeclared identifier 'PMD_SHIFT'
249 | PMD_SHIFT);
| ^
1 warning and 6 errors generated.
vim +/PMD_SIZE +54 mm/execmem.c
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/execmem.h>
#include <linux/pgtable.h>
#include <linux/maple_tree.h>
#include <linux/moduleloader.h>
#include <linux/text-patching.h>

#include <asm/tlbflush.h>

#include "internal.h"
20
/* Arch-supplied execmem parameters; initialized elsewhere in this file. */
static struct execmem_info *execmem_info __ro_after_init;
/* Fallback parameters, presumably used when no arch info exists — verify at init site. */
static struct execmem_info default_execmem_info __ro_after_init;

/*
 * Cache of large mappings carved up for executable memory allocations.
 *
 * @mutex:      protects both maple trees below; they share it via
 *              MT_FLAGS_LOCK_EXTERN
 * @busy_areas: ranges handed out to callers; each entry's value is the
 *              allocation size encoded with xa_mk_value()
 * @free_areas: ranges available for allocation, same size encoding
 */
struct execmem_cache {
	struct mutex mutex;
	struct maple_tree busy_areas;
	struct maple_tree free_areas;
};

static struct execmem_cache execmem_cache = {
	.mutex = __MUTEX_INITIALIZER(execmem_cache.mutex),
	.busy_areas = MTREE_INIT_EXT(busy_areas, MT_FLAGS_LOCK_EXTERN,
				     execmem_cache.mutex),
	.free_areas = MTREE_INIT_EXT(free_areas, MT_FLAGS_LOCK_EXTERN,
				     execmem_cache.mutex),
};
37
/*
 * Work callback: return cached free areas to vmalloc.
 *
 * Walks free_areas and vfree()s every area whose start address and size are
 * both PMD-aligned — presumably those that again cover an entire large
 * mapping and can therefore be unmapped wholesale; verify against the
 * PMD-sized allocation in execmem_cache_populate().
 */
static void execmem_cache_clean(struct work_struct *work)
{
	struct maple_tree *free_areas = &execmem_cache.free_areas;
	struct mutex *mutex = &execmem_cache.mutex;
	MA_STATE(mas, free_areas, 0, ULONG_MAX);
	void *area;

	mutex_lock(mutex);
	mas_for_each(&mas, area, ULONG_MAX) {
		size_t size;

		/* only value entries encode an area size; skip anything else */
		if (!xa_is_value(area))
			continue;

		size = xa_to_value(area);

		if (IS_ALIGNED(size, PMD_SIZE) && IS_ALIGNED(mas.index, PMD_SIZE)) {
			void *ptr = (void *)mas.index;

			/* drop the tree entry before freeing the mapping */
			mas_erase(&mas);
			vfree(ptr);
		}
	}
	mutex_unlock(mutex);
}
63
/* Deferred cache cleanup; scheduling site is not visible in this excerpt. */
static DECLARE_WORK(execmem_cache_clean_work, execmem_cache_clean);
65
66 static void execmem_fill_trapping_insns(void *ptr, size_t size, bool writable)
67 {
68 if (execmem_info->fill_trapping_insns)
69 execmem_info->fill_trapping_insns(ptr, size, writable);
70 else
71 memset(ptr, 0, size);
72 }
73
74 static void *execmem_vmalloc(struct execmem_range *range, size_t size,
75 pgprot_t pgprot, unsigned long vm_flags)
76 {
77 bool kasan = range->flags & EXECMEM_KASAN_SHADOW;
78 gfp_t gfp_flags = GFP_KERNEL | __GFP_NOWARN;
79 unsigned int align = range->alignment;
80 unsigned long start = range->start;
81 unsigned long end = range->end;
82 void *p;
83
84 if (kasan)
85 vm_flags |= VM_DEFER_KMEMLEAK;
86
87 if (vm_flags & VM_ALLOW_HUGE_VMAP)
> 88 align = PMD_SIZE;
89
90 p = __vmalloc_node_range(size, align, start, end, gfp_flags, pgprot,
91 vm_flags, NUMA_NO_NODE,
92 __builtin_return_address(0));
93 if (!p && range->fallback_start) {
94 start = range->fallback_start;
95 end = range->fallback_end;
96 p = __vmalloc_node_range(size, align, start, end, gfp_flags,
97 pgprot, vm_flags, NUMA_NO_NODE,
98 __builtin_return_address(0));
99 }
100
101 if (!p) {
102 pr_warn_ratelimited("execmem: unable to allocate memory\n");
103 return NULL;
104 }
105
106 if (kasan && (kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0)) {
107 vfree(p);
108 return NULL;
109 }
110
111 return p;
112 }
113
/*
 * Return the area [ptr, ptr + size) to the free_areas cache, merging it with
 * immediately adjacent free areas.
 *
 * A free area ending exactly at ptr - 1 and/or one starting exactly at
 * ptr + size is absorbed into a single tree entry whose value is the
 * combined size.
 *
 * Returns 0 on success, -ENOMEM if the tree store fails.
 */
static int execmem_cache_add(void *ptr, size_t size)
{
	struct maple_tree *free_areas = &execmem_cache.free_areas;
	struct mutex *mutex = &execmem_cache.mutex;
	unsigned long addr = (unsigned long)ptr;
	MA_STATE(mas, free_areas, addr - 1, addr + 1);
	unsigned long lower, lower_size = 0;
	unsigned long upper, upper_size = 0;
	unsigned long area_size;
	void *area = NULL;
	int err;

	lower = addr;
	upper = addr + size - 1;

	mutex_lock(mutex);
	/* is there a free area ending right below addr? extend downwards */
	area = mas_walk(&mas);
	if (area && xa_is_value(area) && mas.last == addr - 1) {
		lower = mas.index;
		lower_size = xa_to_value(area);
	}

	/* is there a free area starting right above addr + size? extend upwards */
	area = mas_next(&mas, ULONG_MAX);
	if (area && xa_is_value(area) && mas.index == addr + size) {
		upper = mas.last;
		upper_size = xa_to_value(area);
	}

	/* storing [lower, upper] overwrites the absorbed neighbour entries */
	mas_set_range(&mas, lower, upper);
	area_size = lower_size + upper_size + size;
	err = mas_store_gfp(&mas, xa_mk_value(area_size), GFP_KERNEL);
	mutex_unlock(mutex);
	if (err)
		return -ENOMEM;

	return 0;
}
151
152 static bool within_range(struct execmem_range *range, struct ma_state *mas,
153 size_t size)
154 {
155 unsigned long addr = mas->index;
156
157 if (addr >= range->start && addr + size < range->end)
158 return true;
159
160 if (range->fallback_start &&
161 addr >= range->fallback_start && addr + size < range->fallback_end)
162 return true;
163
164 return false;
165 }
166
/*
 * Carve @size bytes out of the first cached free area that is large enough
 * and lies within @range (or its fallback window).
 *
 * On success the chunk is recorded in busy_areas, any remainder of the free
 * area is re-inserted into free_areas, and the chunk's address is returned.
 * Returns NULL when nothing suitable is cached or a tree store fails.
 */
static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
{
	struct maple_tree *free_areas = &execmem_cache.free_areas;
	struct maple_tree *busy_areas = &execmem_cache.busy_areas;
	MA_STATE(mas_free, free_areas, 0, ULONG_MAX);
	MA_STATE(mas_busy, busy_areas, 0, ULONG_MAX);
	struct mutex *mutex = &execmem_cache.mutex;
	unsigned long addr, last, area_size = 0;
	void *area, *ptr = NULL;
	int err;

	mutex_lock(mutex);
	/* first-fit search over the cached free areas */
	mas_for_each(&mas_free, area, ULONG_MAX) {
		area_size = xa_to_value(area);

		if (area_size >= size && within_range(range, &mas_free, size))
			break;
	}

	/*
	 * NOTE(review): if the loop completes without breaking, area_size
	 * holds the last entry's size; when that entry was large enough but
	 * failed within_range(), this check alone may not reject it — verify
	 * the post-iteration maple state makes this impossible.
	 */
	if (area_size < size)
		goto out_unlock;

	addr = mas_free.index;
	last = mas_free.last;

	/* insert allocated size to busy_areas at range [addr, addr + size) */
	mas_set_range(&mas_busy, addr, addr + size - 1);
	err = mas_store_gfp(&mas_busy, xa_mk_value(size), GFP_KERNEL);
	if (err)
		goto out_unlock;

	mas_erase(&mas_free);
	if (area_size > size) {
		/*
		 * re-insert remaining free size to free_areas at range
		 * [addr + size, last]
		 */
		mas_set_range(&mas_free, addr + size, last);
		size = area_size - size;
		err = mas_store_gfp(&mas_free, xa_mk_value(size), GFP_KERNEL);
		if (err) {
			/* roll back the busy_areas insertion */
			mas_erase(&mas_busy);
			goto out_unlock;
		}
	}
	ptr = (void *)addr;

out_unlock:
	mutex_unlock(mutex);
	return ptr;
}
218
/*
 * Grow the cache: allocate a fresh large mapping and add it to free_areas.
 *
 * The region (rounded up to a PMD_SIZE multiple, allocated writable with
 * VM_ALLOW_HUGE_VMAP) is filled with trapping instructions, then unmapped
 * and remapped at PMD granularity with @range->pgprot — presumably the
 * final ROX permissions, per the series title; verify against the range
 * setup.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int execmem_cache_populate(struct execmem_range *range, size_t size)
{
	unsigned long vm_flags = VM_FLUSH_RESET_PERMS | VM_ALLOW_HUGE_VMAP;
	unsigned long start, end;
	struct vm_struct *vm;
	size_t alloc_size;
	int err = -ENOMEM;
	void *p;

	alloc_size = round_up(size, PMD_SIZE);
	p = execmem_vmalloc(range, alloc_size, PAGE_KERNEL, vm_flags);
	if (!p)
		return err;

	vm = find_vm_area(p);
	if (!vm)
		goto err_free_mem;

	/* fill memory with instructions that will trap */
	execmem_fill_trapping_insns(p, alloc_size, /* writable = */ true);

	start = (unsigned long)p;
	end = start + alloc_size;

	/* tear down the writable PAGE_KERNEL mapping before remapping */
	vunmap_range_noflush(start, end);
	flush_tlb_kernel_range(start, end);

	/* FIXME: handle direct map alias */

	/* remap the same pages with the range's protections, PMD granularity */
	err = vmap_pages_range_noflush(start, end, range->pgprot, vm->pages,
				       PMD_SHIFT);
	if (err)
		goto err_free_mem;

	err = execmem_cache_add(p, alloc_size);
	if (err)
		goto err_free_mem;

	return 0;

err_free_mem:
	vfree(p);
	return err;
}
263
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
reply other threads:[~2024-05-08 13:09 UTC|newest]
Thread overview: [no followups] expand[flat|nested] mbox.gz Atom feed
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=202405082149.P22IAtVv-lkp@intel.com \
--to=lkp@intel.com \
--cc=llvm@lists.linux.dev \
--cc=oe-kbuild-all@lists.linux.dev \
--cc=rppt@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox