* [PATCH 01/12] mm/shmem: add `const` to lots of pointer parameters
2025-08-29 18:31 [PATCH 00/12] mm: add `const` to lots of pointer parameters Max Kellermann
@ 2025-08-29 18:31 ` Max Kellermann
2025-08-29 20:44 ` Yuanchu Xie
2025-08-29 22:45 ` Vishal Moola (Oracle)
2025-08-29 18:31 ` [PATCH 02/12] include/pagemap.h: " Max Kellermann
` (10 subsequent siblings)
11 siblings, 2 replies; 30+ messages in thread
From: Max Kellermann @ 2025-08-29 18:31 UTC (permalink / raw)
To: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
Cc: Max Kellermann
For improved const-correctness.
Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
---
include/linux/mm.h | 8 ++++----
include/linux/shmem_fs.h | 4 ++--
mm/shmem.c | 6 +++---
3 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 00c8a54127d3..a40a3c42c904 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -979,11 +979,11 @@ static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
* The vma_is_shmem is not inline because it is used only by slow
* paths in userfault.
*/
-bool vma_is_shmem(struct vm_area_struct *vma);
-bool vma_is_anon_shmem(struct vm_area_struct *vma);
+bool vma_is_shmem(const struct vm_area_struct *vma);
+bool vma_is_anon_shmem(const struct vm_area_struct *vma);
#else
-static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
-static inline bool vma_is_anon_shmem(struct vm_area_struct *vma) { return false; }
+static inline bool vma_is_shmem(const struct vm_area_struct *vma) { return false; }
+static inline bool vma_is_anon_shmem(const struct vm_area_struct *vma) { return false; }
#endif
int vma_is_stack_for_current(struct vm_area_struct *vma);
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 6d0f9c599ff7..0e47465ef0fd 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -99,9 +99,9 @@ extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
extern int shmem_lock(struct file *file, int lock, struct ucounts *ucounts);
#ifdef CONFIG_SHMEM
-bool shmem_mapping(struct address_space *mapping);
+bool shmem_mapping(const struct address_space *mapping);
#else
-static inline bool shmem_mapping(struct address_space *mapping)
+static inline bool shmem_mapping(const struct address_space *mapping)
{
return false;
}
diff --git a/mm/shmem.c b/mm/shmem.c
index 13cc51df3893..2a4476e223a3 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -275,18 +275,18 @@ static const struct vm_operations_struct shmem_vm_ops;
static const struct vm_operations_struct shmem_anon_vm_ops;
static struct file_system_type shmem_fs_type;
-bool shmem_mapping(struct address_space *mapping)
+bool shmem_mapping(const struct address_space *mapping)
{
return mapping->a_ops == &shmem_aops;
}
EXPORT_SYMBOL_GPL(shmem_mapping);
-bool vma_is_anon_shmem(struct vm_area_struct *vma)
+bool vma_is_anon_shmem(const struct vm_area_struct *vma)
{
return vma->vm_ops == &shmem_anon_vm_ops;
}
-bool vma_is_shmem(struct vm_area_struct *vma)
+bool vma_is_shmem(const struct vm_area_struct *vma)
{
return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
}
--
2.47.2
^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [PATCH 01/12] mm/shmem: add `const` to lots of pointer parameters
2025-08-29 18:31 ` [PATCH 01/12] mm/shmem: " Max Kellermann
@ 2025-08-29 20:44 ` Yuanchu Xie
2025-08-29 22:52 ` Vishal Moola (Oracle)
2025-08-29 22:45 ` Vishal Moola (Oracle)
1 sibling, 1 reply; 30+ messages in thread
From: Yuanchu Xie @ 2025-08-29 20:44 UTC (permalink / raw)
To: Max Kellermann
Cc: akpm, david, axelrasmussen, willy, hughd, mhocko, linux-kernel,
linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka, rppt, surenb
On Fri, Aug 29, 2025 at 1:32 PM Max Kellermann <max.kellermann@ionos.com> wrote:
>
> For improved const-correctness.
>
> Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
Reviewed-by: Yuanchu Xie <yuanchu@google.com>
> ...
>-bool shmem_mapping(struct address_space *mapping)
>+bool shmem_mapping(const struct address_space *mapping)
>{
> return mapping->a_ops == &shmem_aops;
>}
>EXPORT_SYMBOL_GPL(shmem_mapping);
The exported symbol is being changed, but this doesn't seem like it
would break anything.
Appreciate the work. On a side note, Andrew previously mentioned[1]
making the actual parameter value const (which is different from
adding the const qualifier to the pointer). The readability of longer
functions would benefit from that, but it's IMO infeasible to do so
everywhere.
[1] https://lore.kernel.org/lkml/20250827144832.87d2f1692fe61325628710f4@linux-foundation.org/#r
Yuanchu
^ permalink raw reply [flat|nested] 30+ messages in thread
* Re: [PATCH 01/12] mm/shmem: add `const` to lots of pointer parameters
2025-08-29 20:44 ` Yuanchu Xie
@ 2025-08-29 22:52 ` Vishal Moola (Oracle)
0 siblings, 0 replies; 30+ messages in thread
From: Vishal Moola (Oracle) @ 2025-08-29 22:52 UTC (permalink / raw)
To: Yuanchu Xie
Cc: Max Kellermann, akpm, david, axelrasmussen, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
On Fri, Aug 29, 2025 at 03:44:12PM -0500, Yuanchu Xie wrote:
> On Fri, Aug 29, 2025 at 1:32 PM Max Kellermann <max.kellermann@ionos.com> wrote:
> >
> > For improved const-correctness.
> >
> > Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
> Reviewed-by: Yuanchu Xie <yuanchu@google.com>
>
> Appreciate the work. On a side note, Andrew previously mentioned[1]
> making the actual parameter value const (which is different from
> adding the const qualifier to the pointer). Longer function
> readability would benefit from that, but it's IMO infeasible to do so
> everywhere.
+1
> [1] https://lore.kernel.org/lkml/20250827144832.87d2f1692fe61325628710f4@linux-foundation.org/#r
IMO the lack of these value consts isn't a blocker, but it'd be nice to
do that "For improved const-correctness". Especially when you're changing the
line anyways (see mapping_tagged() or folio_contains()).
^ permalink raw reply [flat|nested] 30+ messages in thread
* Re: [PATCH 01/12] mm/shmem: add `const` to lots of pointer parameters
2025-08-29 18:31 ` [PATCH 01/12] mm/shmem: " Max Kellermann
2025-08-29 20:44 ` Yuanchu Xie
@ 2025-08-29 22:45 ` Vishal Moola (Oracle)
1 sibling, 0 replies; 30+ messages in thread
From: Vishal Moola (Oracle) @ 2025-08-29 22:45 UTC (permalink / raw)
To: Max Kellermann
Cc: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
On Fri, Aug 29, 2025 at 08:31:48PM +0200, Max Kellermann wrote:
> For improved const-correctness.
>
> Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
^ permalink raw reply [flat|nested] 30+ messages in thread
* [PATCH 02/12] include/pagemap.h: add `const` to lots of pointer parameters
2025-08-29 18:31 [PATCH 00/12] mm: add `const` to lots of pointer parameters Max Kellermann
2025-08-29 18:31 ` [PATCH 01/12] mm/shmem: " Max Kellermann
@ 2025-08-29 18:31 ` Max Kellermann
2025-08-29 22:55 ` Vishal Moola (Oracle)
2025-08-29 18:31 ` [PATCH 03/12] include/mmzone.h: " Max Kellermann
` (9 subsequent siblings)
11 siblings, 1 reply; 30+ messages in thread
From: Max Kellermann @ 2025-08-29 18:31 UTC (permalink / raw)
To: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
Cc: Max Kellermann
For improved const-correctness.
Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
---
include/linux/pagemap.h | 52 ++++++++++++++++++++---------------------
1 file changed, 26 insertions(+), 26 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index c9ba69e02e3e..87411e7f2dba 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -140,7 +140,7 @@ static inline int inode_drain_writes(struct inode *inode)
return filemap_write_and_wait(inode->i_mapping);
}
-static inline bool mapping_empty(struct address_space *mapping)
+static inline bool mapping_empty(const struct address_space *mapping)
{
return xa_empty(&mapping->i_pages);
}
@@ -166,7 +166,7 @@ static inline bool mapping_empty(struct address_space *mapping)
* refcount and the referenced bit, which will be elevated or set in
* the process of adding new cache pages to an inode.
*/
-static inline bool mapping_shrinkable(struct address_space *mapping)
+static inline bool mapping_shrinkable(const struct address_space *mapping)
{
void *head;
@@ -265,7 +265,7 @@ static inline void mapping_clear_unevictable(struct address_space *mapping)
clear_bit(AS_UNEVICTABLE, &mapping->flags);
}
-static inline bool mapping_unevictable(struct address_space *mapping)
+static inline bool mapping_unevictable(const struct address_space *mapping)
{
return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}
@@ -275,7 +275,7 @@ static inline void mapping_set_exiting(struct address_space *mapping)
set_bit(AS_EXITING, &mapping->flags);
}
-static inline int mapping_exiting(struct address_space *mapping)
+static inline int mapping_exiting(const struct address_space *mapping)
{
return test_bit(AS_EXITING, &mapping->flags);
}
@@ -285,7 +285,7 @@ static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
-static inline int mapping_use_writeback_tags(struct address_space *mapping)
+static inline int mapping_use_writeback_tags(const struct address_space *mapping)
{
return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
@@ -331,7 +331,7 @@ static inline void mapping_set_inaccessible(struct address_space *mapping)
set_bit(AS_INACCESSIBLE, &mapping->flags);
}
-static inline bool mapping_inaccessible(struct address_space *mapping)
+static inline bool mapping_inaccessible(const struct address_space *mapping)
{
return test_bit(AS_INACCESSIBLE, &mapping->flags);
}
@@ -341,18 +341,18 @@ static inline void mapping_set_writeback_may_deadlock_on_reclaim(struct address_
set_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
}
-static inline bool mapping_writeback_may_deadlock_on_reclaim(struct address_space *mapping)
+static inline bool mapping_writeback_may_deadlock_on_reclaim(const struct address_space *mapping)
{
return test_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
}
-static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
+static inline gfp_t mapping_gfp_mask(const struct address_space *mapping)
{
return mapping->gfp_mask;
}
/* Restricts the given gfp_mask to what the mapping allows. */
-static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
+static inline gfp_t mapping_gfp_constraint(const struct address_space *mapping,
gfp_t gfp_mask)
{
return mapping_gfp_mask(mapping) & gfp_mask;
@@ -475,7 +475,7 @@ mapping_min_folio_order(const struct address_space *mapping)
}
static inline unsigned long
-mapping_min_folio_nrpages(struct address_space *mapping)
+mapping_min_folio_nrpages(const struct address_space *mapping)
{
return 1UL << mapping_min_folio_order(mapping);
}
@@ -495,7 +495,7 @@ mapping_min_folio_nrbytes(struct address_space *mapping)
* new folio to the page cache and need to know what index to give it,
* call this function.
*/
-static inline pgoff_t mapping_align_index(struct address_space *mapping,
+static inline pgoff_t mapping_align_index(const struct address_space *mapping,
pgoff_t index)
{
return round_down(index, mapping_min_folio_nrpages(mapping));
@@ -505,7 +505,7 @@ static inline pgoff_t mapping_align_index(struct address_space *mapping,
* Large folio support currently depends on THP. These dependencies are
* being worked on but are not yet fixed.
*/
-static inline bool mapping_large_folio_support(struct address_space *mapping)
+static inline bool mapping_large_folio_support(const struct address_space *mapping)
{
/* AS_FOLIO_ORDER is only reasonable for pagecache folios */
VM_WARN_ONCE((unsigned long)mapping & FOLIO_MAPPING_ANON,
@@ -520,7 +520,7 @@ static inline size_t mapping_max_folio_size(const struct address_space *mapping)
return PAGE_SIZE << mapping_max_folio_order(mapping);
}
-static inline int filemap_nr_thps(struct address_space *mapping)
+static inline int filemap_nr_thps(const struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
return atomic_read(&mapping->nr_thps);
@@ -934,7 +934,7 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
*
* Return: The index of the folio which follows this folio in the file.
*/
-static inline pgoff_t folio_next_index(struct folio *folio)
+static inline pgoff_t folio_next_index(const struct folio *folio)
{
return folio->index + folio_nr_pages(folio);
}
@@ -963,7 +963,7 @@ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
* e.g., shmem did not move this folio to the swap cache.
* Return: true or false.
*/
-static inline bool folio_contains(struct folio *folio, pgoff_t index)
+static inline bool folio_contains(const struct folio *folio, pgoff_t index)
{
VM_WARN_ON_ONCE_FOLIO(folio_test_swapcache(folio), folio);
return index - folio->index < folio_nr_pages(folio);
@@ -1040,12 +1040,12 @@ static inline loff_t page_offset(struct page *page)
/*
* Get the offset in PAGE_SIZE (even for hugetlb folios).
*/
-static inline pgoff_t folio_pgoff(struct folio *folio)
+static inline pgoff_t folio_pgoff(const struct folio *folio)
{
return folio->index;
}
-static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
+static inline pgoff_t linear_page_index(const struct vm_area_struct *vma,
unsigned long address)
{
pgoff_t pgoff;
@@ -1466,7 +1466,7 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
* readahead_pos - The byte offset into the file of this readahead request.
* @rac: The readahead request.
*/
-static inline loff_t readahead_pos(struct readahead_control *rac)
+static inline loff_t readahead_pos(const struct readahead_control *rac)
{
return (loff_t)rac->_index * PAGE_SIZE;
}
@@ -1475,7 +1475,7 @@ static inline loff_t readahead_pos(struct readahead_control *rac)
* readahead_length - The number of bytes in this readahead request.
* @rac: The readahead request.
*/
-static inline size_t readahead_length(struct readahead_control *rac)
+static inline size_t readahead_length(const struct readahead_control *rac)
{
return rac->_nr_pages * PAGE_SIZE;
}
@@ -1484,7 +1484,7 @@ static inline size_t readahead_length(struct readahead_control *rac)
* readahead_index - The index of the first page in this readahead request.
* @rac: The readahead request.
*/
-static inline pgoff_t readahead_index(struct readahead_control *rac)
+static inline pgoff_t readahead_index(const struct readahead_control *rac)
{
return rac->_index;
}
@@ -1493,7 +1493,7 @@ static inline pgoff_t readahead_index(struct readahead_control *rac)
* readahead_count - The number of pages in this readahead request.
* @rac: The readahead request.
*/
-static inline unsigned int readahead_count(struct readahead_control *rac)
+static inline unsigned int readahead_count(const struct readahead_control *rac)
{
return rac->_nr_pages;
}
@@ -1502,12 +1502,12 @@ static inline unsigned int readahead_count(struct readahead_control *rac)
* readahead_batch_length - The number of bytes in the current batch.
* @rac: The readahead request.
*/
-static inline size_t readahead_batch_length(struct readahead_control *rac)
+static inline size_t readahead_batch_length(const struct readahead_control *rac)
{
return rac->_batch_count * PAGE_SIZE;
}
-static inline unsigned long dir_pages(struct inode *inode)
+static inline unsigned long dir_pages(const struct inode *inode)
{
return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
PAGE_SHIFT;
@@ -1521,8 +1521,8 @@ static inline unsigned long dir_pages(struct inode *inode)
* Return: the number of bytes in the folio up to EOF,
* or -EFAULT if the folio was truncated.
*/
-static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
- struct inode *inode)
+static inline ssize_t folio_mkwrite_check_truncate(const struct folio *folio,
+ const struct inode *inode)
{
loff_t size = i_size_read(inode);
pgoff_t index = size >> PAGE_SHIFT;
@@ -1553,7 +1553,7 @@ static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
* Return: The number of filesystem blocks covered by this folio.
*/
static inline
-unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
+unsigned int i_blocks_per_folio(const struct inode *inode, const struct folio *folio)
{
return folio_size(folio) >> inode->i_blkbits;
}
--
2.47.2
^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [PATCH 02/12] include/pagemap.h: add `const` to lots of pointer parameters
2025-08-29 18:31 ` [PATCH 02/12] include/pagemap.h: " Max Kellermann
@ 2025-08-29 22:55 ` Vishal Moola (Oracle)
0 siblings, 0 replies; 30+ messages in thread
From: Vishal Moola (Oracle) @ 2025-08-29 22:55 UTC (permalink / raw)
To: Max Kellermann
Cc: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
On Fri, Aug 29, 2025 at 08:31:49PM +0200, Max Kellermann wrote:
> For improved const-correctness.
>
> Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
> ---
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
^ permalink raw reply [flat|nested] 30+ messages in thread
* [PATCH 03/12] include/mmzone.h: add `const` to lots of pointer parameters
2025-08-29 18:31 [PATCH 00/12] mm: add `const` to lots of pointer parameters Max Kellermann
2025-08-29 18:31 ` [PATCH 01/12] mm/shmem: " Max Kellermann
2025-08-29 18:31 ` [PATCH 02/12] include/pagemap.h: " Max Kellermann
@ 2025-08-29 18:31 ` Max Kellermann
2025-08-29 22:56 ` Vishal Moola (Oracle)
2025-08-29 18:31 ` [PATCH 04/12] include/fs.h: add `const` to several " Max Kellermann
` (8 subsequent siblings)
11 siblings, 1 reply; 30+ messages in thread
From: Max Kellermann @ 2025-08-29 18:31 UTC (permalink / raw)
To: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
Cc: Max Kellermann
For improved const-correctness.
Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
---
include/linux/mmzone.h | 42 +++++++++++++++++++++---------------------
1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index fe13ad175fed..3e16578767b0 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1103,7 +1103,7 @@ static inline unsigned long promo_wmark_pages(const struct zone *z)
return wmark_pages(z, WMARK_PROMO);
}
-static inline unsigned long zone_managed_pages(struct zone *zone)
+static inline unsigned long zone_managed_pages(const struct zone *zone)
{
return (unsigned long)atomic_long_read(&zone->managed_pages);
}
@@ -1127,12 +1127,12 @@ static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}
-static inline bool zone_is_initialized(struct zone *zone)
+static inline bool zone_is_initialized(const struct zone *zone)
{
return zone->initialized;
}
-static inline bool zone_is_empty(struct zone *zone)
+static inline bool zone_is_empty(const struct zone *zone)
{
return zone->spanned_pages == 0;
}
@@ -1272,7 +1272,7 @@ static inline bool folio_is_zone_movable(const struct folio *folio)
* Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
* intersection with the given zone
*/
-static inline bool zone_intersects(struct zone *zone,
+static inline bool zone_intersects(const struct zone *zone,
unsigned long start_pfn, unsigned long nr_pages)
{
if (zone_is_empty(zone))
@@ -1580,12 +1580,12 @@ static inline int local_memory_node(int node_id) { return node_id; };
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
#ifdef CONFIG_ZONE_DEVICE
-static inline bool zone_is_zone_device(struct zone *zone)
+static inline bool zone_is_zone_device(const struct zone *zone)
{
return zone_idx(zone) == ZONE_DEVICE;
}
#else
-static inline bool zone_is_zone_device(struct zone *zone)
+static inline bool zone_is_zone_device(const struct zone *zone)
{
return false;
}
@@ -1597,19 +1597,19 @@ static inline bool zone_is_zone_device(struct zone *zone)
* populated_zone(). If the whole zone is reserved then we can easily
* end up with populated_zone() && !managed_zone().
*/
-static inline bool managed_zone(struct zone *zone)
+static inline bool managed_zone(const struct zone *zone)
{
return zone_managed_pages(zone);
}
/* Returns true if a zone has memory */
-static inline bool populated_zone(struct zone *zone)
+static inline bool populated_zone(const struct zone *zone)
{
return zone->present_pages;
}
#ifdef CONFIG_NUMA
-static inline int zone_to_nid(struct zone *zone)
+static inline int zone_to_nid(const struct zone *zone)
{
return zone->node;
}
@@ -1619,7 +1619,7 @@ static inline void zone_set_nid(struct zone *zone, int nid)
zone->node = nid;
}
#else
-static inline int zone_to_nid(struct zone *zone)
+static inline int zone_to_nid(const struct zone *zone)
{
return 0;
}
@@ -1646,7 +1646,7 @@ static inline int is_highmem_idx(enum zone_type idx)
* @zone: pointer to struct zone variable
* Return: 1 for a highmem zone, 0 otherwise
*/
-static inline int is_highmem(struct zone *zone)
+static inline int is_highmem(const struct zone *zone)
{
return is_highmem_idx(zone_idx(zone));
}
@@ -1712,12 +1712,12 @@ static inline struct zone *zonelist_zone(struct zoneref *zoneref)
return zoneref->zone;
}
-static inline int zonelist_zone_idx(struct zoneref *zoneref)
+static inline int zonelist_zone_idx(const struct zoneref *zoneref)
{
return zoneref->zone_idx;
}
-static inline int zonelist_node_idx(struct zoneref *zoneref)
+static inline int zonelist_node_idx(const struct zoneref *zoneref)
{
return zone_to_nid(zoneref->zone);
}
@@ -2020,7 +2020,7 @@ static inline struct page *__section_mem_map_addr(struct mem_section *section)
return (struct page *)map;
}
-static inline int present_section(struct mem_section *section)
+static inline int present_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}
@@ -2030,12 +2030,12 @@ static inline int present_section_nr(unsigned long nr)
return present_section(__nr_to_section(nr));
}
-static inline int valid_section(struct mem_section *section)
+static inline int valid_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}
-static inline int early_section(struct mem_section *section)
+static inline int early_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_IS_EARLY));
}
@@ -2045,27 +2045,27 @@ static inline int valid_section_nr(unsigned long nr)
return valid_section(__nr_to_section(nr));
}
-static inline int online_section(struct mem_section *section)
+static inline int online_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}
#ifdef CONFIG_ZONE_DEVICE
-static inline int online_device_section(struct mem_section *section)
+static inline int online_device_section(const struct mem_section *section)
{
unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;
return section && ((section->section_mem_map & flags) == flags);
}
#else
-static inline int online_device_section(struct mem_section *section)
+static inline int online_device_section(const struct mem_section *section)
{
return 0;
}
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
-static inline int preinited_vmemmap_section(struct mem_section *section)
+static inline int preinited_vmemmap_section(const struct mem_section *section)
{
return (section &&
(section->section_mem_map & SECTION_IS_VMEMMAP_PREINIT));
@@ -2075,7 +2075,7 @@ void sparse_vmemmap_init_nid_early(int nid);
void sparse_vmemmap_init_nid_late(int nid);
#else
-static inline int preinited_vmemmap_section(struct mem_section *section)
+static inline int preinited_vmemmap_section(const struct mem_section *section)
{
return 0;
}
--
2.47.2
^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [PATCH 03/12] include/mmzone.h: add `const` to lots of pointer parameters
2025-08-29 18:31 ` [PATCH 03/12] include/mmzone.h: " Max Kellermann
@ 2025-08-29 22:56 ` Vishal Moola (Oracle)
0 siblings, 0 replies; 30+ messages in thread
From: Vishal Moola (Oracle) @ 2025-08-29 22:56 UTC (permalink / raw)
To: Max Kellermann
Cc: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
On Fri, Aug 29, 2025 at 08:31:50PM +0200, Max Kellermann wrote:
> For improved const-correctness.
>
> Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
^ permalink raw reply [flat|nested] 30+ messages in thread
* [PATCH 04/12] include/fs.h: add `const` to several pointer parameters
2025-08-29 18:31 [PATCH 00/12] mm: add `const` to lots of pointer parameters Max Kellermann
` (2 preceding siblings ...)
2025-08-29 18:31 ` [PATCH 03/12] include/mmzone.h: " Max Kellermann
@ 2025-08-29 18:31 ` Max Kellermann
2025-08-29 23:11 ` Vishal Moola (Oracle)
2025-08-29 18:31 ` [PATCH 05/12] mm/oom_kill: add `const` to pointer parameter Max Kellermann
` (7 subsequent siblings)
11 siblings, 1 reply; 30+ messages in thread
From: Max Kellermann @ 2025-08-29 18:31 UTC (permalink / raw)
To: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
Cc: Max Kellermann
For improved const-correctness.
Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
---
include/linux/fs.h | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3b9f54446db0..0b43edb33be2 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -537,7 +537,7 @@ struct address_space {
/*
* Returns true if any of the pages in the mapping are marked with the tag.
*/
-static inline bool mapping_tagged(struct address_space *mapping, xa_mark_t tag)
+static inline bool mapping_tagged(const struct address_space *mapping, xa_mark_t tag)
{
return xa_marked(&mapping->i_pages, tag);
}
@@ -585,7 +585,7 @@ static inline void i_mmap_assert_write_locked(struct address_space *mapping)
/*
* Might pages of this file be mapped into userspace?
*/
-static inline int mapping_mapped(struct address_space *mapping)
+static inline int mapping_mapped(const struct address_space *mapping)
{
return !RB_EMPTY_ROOT(&mapping->i_mmap.rb_root);
}
@@ -599,7 +599,7 @@ static inline int mapping_mapped(struct address_space *mapping)
* If i_mmap_writable is negative, no new writable mappings are allowed. You
* can only deny writable mappings, if none exists right now.
*/
-static inline int mapping_writably_mapped(struct address_space *mapping)
+static inline int mapping_writably_mapped(const struct address_space *mapping)
{
return atomic_read(&mapping->i_mmap_writable) > 0;
}
--
2.47.2
^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [PATCH 04/12] include/fs.h: add `const` to several pointer parameters
2025-08-29 18:31 ` [PATCH 04/12] include/fs.h: add `const` to several " Max Kellermann
@ 2025-08-29 23:11 ` Vishal Moola (Oracle)
0 siblings, 0 replies; 30+ messages in thread
From: Vishal Moola (Oracle) @ 2025-08-29 23:11 UTC (permalink / raw)
To: Max Kellermann
Cc: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
On Fri, Aug 29, 2025 at 08:31:51PM +0200, Max Kellermann wrote:
> For improved const-correctness.
>
> Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
^ permalink raw reply [flat|nested] 30+ messages in thread
* [PATCH 05/12] mm/oom_kill: add `const` to pointer parameter
2025-08-29 18:31 [PATCH 00/12] mm: add `const` to lots of pointer parameters Max Kellermann
` (3 preceding siblings ...)
2025-08-29 18:31 ` [PATCH 04/12] include/fs.h: add `const` to several " Max Kellermann
@ 2025-08-29 18:31 ` Max Kellermann
2025-08-29 23:03 ` Vishal Moola (Oracle)
2025-08-29 18:31 ` [PATCH 06/12] mm/util: add `const` to several pointer parameters Max Kellermann
` (6 subsequent siblings)
11 siblings, 1 reply; 30+ messages in thread
From: Max Kellermann @ 2025-08-29 18:31 UTC (permalink / raw)
To: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
Cc: Max Kellermann
For improved const-correctness.
Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
---
include/linux/mm.h | 2 +-
mm/oom_kill.c | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a40a3c42c904..a795deef93eb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3840,7 +3840,7 @@ static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
}
#endif /* __HAVE_ARCH_GATE_AREA */
-extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
+extern bool process_shares_mm(struct task_struct *p, const struct mm_struct *mm);
void drop_slab(void);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 17650f0b516e..69c4fc9d90e6 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -490,7 +490,7 @@ static bool oom_killer_disabled __read_mostly;
* task's threads: if one of those is using this mm then this task was also
* using it.
*/
-bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
+bool process_shares_mm(struct task_struct *p, const struct mm_struct *mm)
{
struct task_struct *t;
--
2.47.2
^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [PATCH 05/12] mm/oom_kill: add `const` to pointer parameter
2025-08-29 18:31 ` [PATCH 05/12] mm/oom_kill: add `const` to pointer parameter Max Kellermann
@ 2025-08-29 23:03 ` Vishal Moola (Oracle)
2025-08-30 6:26 ` Max Kellermann
0 siblings, 1 reply; 30+ messages in thread
From: Vishal Moola (Oracle) @ 2025-08-29 23:03 UTC (permalink / raw)
To: Max Kellermann
Cc: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
On Fri, Aug 29, 2025 at 08:31:52PM +0200, Max Kellermann wrote:
> For improved const-correctness.
>
> Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
> ---
> include/linux/mm.h | 2 +-
> mm/oom_kill.c | 2 +-
> 2 files changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index a40a3c42c904..a795deef93eb 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -3840,7 +3840,7 @@ static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
> }
> #endif /* __HAVE_ARCH_GATE_AREA */
>
> -extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
> +extern bool process_shares_mm(struct task_struct *p, const struct mm_struct *mm);
Nowadays we're dropping the extern keyword.
Also, Is there any reason you didn't also make the task_struct pointer const?
^ permalink raw reply [flat|nested] 30+ messages in thread
* Re: [PATCH 05/12] mm/oom_kill: add `const` to pointer parameter
2025-08-29 23:03 ` Vishal Moola (Oracle)
@ 2025-08-30 6:26 ` Max Kellermann
2025-08-31 1:49 ` Andrew Morton
0 siblings, 1 reply; 30+ messages in thread
From: Max Kellermann @ 2025-08-30 6:26 UTC (permalink / raw)
To: Vishal Moola (Oracle)
Cc: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
On Sat, Aug 30, 2025 at 1:03 AM Vishal Moola (Oracle)
<vishal.moola@gmail.com> wrote:
> > -extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
> > +extern bool process_shares_mm(struct task_struct *p, const struct mm_struct *mm);
>
> Nowadays we're dropping the extern keyword.
I can do that - is it acceptable to do that in the same patch?
> Also, Is there any reason you didn't also make the task_struct pointer const?
I wasn't sure whether for_each_thread() is const-safe. I think I
looked at the wrong definition; for_each_thread() looks safe, only
for_other_threads() is not because it uses next_thread() which takes a
non-const pointer. I'll amend the patch.
^ permalink raw reply [flat|nested] 30+ messages in thread
* Re: [PATCH 05/12] mm/oom_kill: add `const` to pointer parameter
2025-08-30 6:26 ` Max Kellermann
@ 2025-08-31 1:49 ` Andrew Morton
0 siblings, 0 replies; 30+ messages in thread
From: Andrew Morton @ 2025-08-31 1:49 UTC (permalink / raw)
To: Max Kellermann
Cc: Vishal Moola (Oracle), david, axelrasmussen, yuanchu, willy,
hughd, mhocko, linux-kernel, linux-mm, lorenzo.stoakes,
Liam.Howlett, vbabka, rppt, surenb
On Sat, 30 Aug 2025 08:26:07 +0200 Max Kellermann <max.kellermann@ionos.com> wrote:
> On Sat, Aug 30, 2025 at 1:03 AM Vishal Moola (Oracle)
> <vishal.moola@gmail.com> wrote:
> > > -extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
> > > +extern bool process_shares_mm(struct task_struct *p, const struct mm_struct *mm);
> >
> > Nowadays we're dropping the extern keyword.
>
> I can do that - is it acceptable to do that in the same patch?
The culture is "ooh ooh ooh, you can't do two things in the same
patch". My culture is well, gee, it's simple and obvious and makes the
kernel better, so I think we can handle it.
^ permalink raw reply [flat|nested] 30+ messages in thread
* [PATCH 06/12] mm/util: add `const` to several pointer parameters
2025-08-29 18:31 [PATCH 00/12] mm: add `const` to lots of pointer parameters Max Kellermann
` (4 preceding siblings ...)
2025-08-29 18:31 ` [PATCH 05/12] mm/oom_kill: add `const` to pointer parameter Max Kellermann
@ 2025-08-29 18:31 ` Max Kellermann
2025-08-29 23:10 ` Vishal Moola (Oracle)
2025-08-29 18:31 ` [PATCH 07/12] parisc/sys_parisc.c: add `const` to mmap_upper_limit() parameter Max Kellermann
` (5 subsequent siblings)
11 siblings, 1 reply; 30+ messages in thread
From: Max Kellermann @ 2025-08-29 18:31 UTC (permalink / raw)
To: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
Cc: Max Kellermann
For improved const-correctness.
Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
---
include/linux/mm.h | 6 +++---
include/linux/pagemap.h | 2 +-
mm/util.c | 10 +++++-----
3 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a795deef93eb..48fe838723ed 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -986,7 +986,7 @@ static inline bool vma_is_shmem(const struct vm_area_struct *vma) { return false
static inline bool vma_is_anon_shmem(const struct vm_area_struct *vma) { return false; }
#endif
-int vma_is_stack_for_current(struct vm_area_struct *vma);
+int vma_is_stack_for_current(const struct vm_area_struct *vma);
/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
@@ -2585,7 +2585,7 @@ void folio_add_pin(struct folio *folio);
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
- struct task_struct *task, bool bypass_rlim);
+ const struct task_struct *task, bool bypass_rlim);
struct kvec;
struct page *get_dump_page(unsigned long addr, int *locked);
@@ -3348,7 +3348,7 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
/* mmap.c */
-extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
+extern int __vm_enough_memory(const struct mm_struct *mm, long pages, int cap_sys_admin);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void exit_mmap(struct mm_struct *);
bool mmap_read_lock_maybe_expand(struct mm_struct *mm, struct vm_area_struct *vma,
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 87411e7f2dba..eb50e0124cd0 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -549,7 +549,7 @@ static inline void filemap_nr_thps_dec(struct address_space *mapping)
#endif
}
-struct address_space *folio_mapping(struct folio *);
+struct address_space *folio_mapping(const struct folio *);
/**
* folio_flush_mapping - Find the file mapping this folio belongs to.
diff --git a/mm/util.c b/mm/util.c
index d235b74f7aff..241d2eaf26ca 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -315,7 +315,7 @@ void *memdup_user_nul(const void __user *src, size_t len)
EXPORT_SYMBOL(memdup_user_nul);
/* Check if the vma is being used as a stack by this task */
-int vma_is_stack_for_current(struct vm_area_struct *vma)
+int vma_is_stack_for_current(const struct vm_area_struct *vma)
{
struct task_struct * __maybe_unused t = current;
@@ -410,7 +410,7 @@ unsigned long arch_mmap_rnd(void)
return rnd << PAGE_SHIFT;
}
-static int mmap_is_legacy(struct rlimit *rlim_stack)
+static int mmap_is_legacy(const struct rlimit *rlim_stack)
{
if (current->personality & ADDR_COMPAT_LAYOUT)
return 1;
@@ -504,7 +504,7 @@ EXPORT_SYMBOL_IF_KUNIT(arch_pick_mmap_layout);
* * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
*/
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
- struct task_struct *task, bool bypass_rlim)
+ const struct task_struct *task, bool bypass_rlim)
{
unsigned long locked_vm, limit;
int ret = 0;
@@ -688,7 +688,7 @@ struct anon_vma *folio_anon_vma(const struct folio *folio)
* You can call this for folios which aren't in the swap cache or page
* cache and it will return NULL.
*/
-struct address_space *folio_mapping(struct folio *folio)
+struct address_space *folio_mapping(const struct folio *folio)
{
struct address_space *mapping;
@@ -926,7 +926,7 @@ EXPORT_SYMBOL_GPL(vm_memory_committed);
* Note this is a helper function intended to be used by LSMs which
* wish to use this logic.
*/
-int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
+int __vm_enough_memory(const struct mm_struct *mm, long pages, int cap_sys_admin)
{
long allowed;
unsigned long bytes_failed;
--
2.47.2
^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [PATCH 06/12] mm/util: add `const` to several pointer parameters
2025-08-29 18:31 ` [PATCH 06/12] mm/util: add `const` to several pointer parameters Max Kellermann
@ 2025-08-29 23:10 ` Vishal Moola (Oracle)
0 siblings, 0 replies; 30+ messages in thread
From: Vishal Moola (Oracle) @ 2025-08-29 23:10 UTC (permalink / raw)
To: Max Kellermann
Cc: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
On Fri, Aug 29, 2025 at 08:31:53PM +0200, Max Kellermann wrote:
> For improved const-correctness.
>
> Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
> --- a/include/linux/pagemap.h
> +++ b/include/linux/pagemap.h
> @@ -549,7 +549,7 @@ static inline void filemap_nr_thps_dec(struct address_space *mapping)
> #endif
> }
>
> -struct address_space *folio_mapping(struct folio *);
> +struct address_space *folio_mapping(const struct folio *);
Can we make this (const struct folio *folio) like below. Its more
readable imo.
With or without that change, feel free to add:
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
> +struct address_space *folio_mapping(const struct folio *folio)
> {
> struct address_space *mapping;
>
^ permalink raw reply [flat|nested] 30+ messages in thread
* [PATCH 07/12] parisc/sys_parisc.c: add `const` to mmap_upper_limit() parameter
2025-08-29 18:31 [PATCH 00/12] mm: add `const` to lots of pointer parameters Max Kellermann
` (5 preceding siblings ...)
2025-08-29 18:31 ` [PATCH 06/12] mm/util: add `const` to several pointer parameters Max Kellermann
@ 2025-08-29 18:31 ` Max Kellermann
2025-08-29 23:13 ` Vishal Moola (Oracle)
2025-08-29 18:31 ` [PATCH 08/12] arch, mm/util: add const to arch_pick_mmap_layout() parameter Max Kellermann
` (4 subsequent siblings)
11 siblings, 1 reply; 30+ messages in thread
From: Max Kellermann @ 2025-08-29 18:31 UTC (permalink / raw)
To: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
Cc: Max Kellermann
For improved const-correctness. This piece is necessary to make the
`rlim_stack` parameter to mmap_base() const.
Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
---
arch/parisc/include/asm/processor.h | 2 +-
arch/parisc/kernel/sys_parisc.c | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 4c14bde39aac..dd0b5e199559 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -48,7 +48,7 @@
#ifndef __ASSEMBLER__
struct rlimit;
-unsigned long mmap_upper_limit(struct rlimit *rlim_stack);
+unsigned long mmap_upper_limit(const struct rlimit *rlim_stack);
unsigned long calc_max_stack_size(unsigned long stack_max);
/*
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index f852fe274abe..b2cdbb8a12b1 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -77,7 +77,7 @@ unsigned long calc_max_stack_size(unsigned long stack_max)
* indicating that "current" should be used instead of a passed-in
* value from the exec bprm as done with arch_pick_mmap_layout().
*/
-unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
+unsigned long mmap_upper_limit(const struct rlimit *rlim_stack)
{
unsigned long stack_base;
--
2.47.2
^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [PATCH 07/12] parisc/sys_parisc.c: add `const` to mmap_upper_limit() parameter
2025-08-29 18:31 ` [PATCH 07/12] parisc/sys_parisc.c: add `const` to mmap_upper_limit() parameter Max Kellermann
@ 2025-08-29 23:13 ` Vishal Moola (Oracle)
0 siblings, 0 replies; 30+ messages in thread
From: Vishal Moola (Oracle) @ 2025-08-29 23:13 UTC (permalink / raw)
To: Max Kellermann
Cc: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
On Fri, Aug 29, 2025 at 08:31:54PM +0200, Max Kellermann wrote:
> For improved const-correctness. This piece is necessary to make the
> `rlim_stack` parameter to mmap_base() const.
>
> Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
^ permalink raw reply [flat|nested] 30+ messages in thread
* [PATCH 08/12] arch, mm/util: add const to arch_pick_mmap_layout() parameter
2025-08-29 18:31 [PATCH 00/12] mm: add `const` to lots of pointer parameters Max Kellermann
` (6 preceding siblings ...)
2025-08-29 18:31 ` [PATCH 07/12] parisc/sys_parisc.c: add `const` to mmap_upper_limit() parameter Max Kellermann
@ 2025-08-29 18:31 ` Max Kellermann
2025-08-29 23:36 ` Vishal Moola (Oracle)
` (2 more replies)
2025-08-29 18:31 ` [PATCH 09/12] include/mm_types.h: add `const` to several pointer parameters Max Kellermann
` (3 subsequent siblings)
11 siblings, 3 replies; 30+ messages in thread
From: Max Kellermann @ 2025-08-29 18:31 UTC (permalink / raw)
To: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
Cc: Max Kellermann
For improved const-correctness.
Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
---
arch/s390/mm/mmap.c | 4 ++--
arch/sparc/kernel/sys_sparc_64.c | 2 +-
arch/x86/mm/mmap.c | 6 +++---
include/linux/sched/mm.h | 4 ++--
mm/util.c | 6 +++---
5 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 547104ccc22a..767c5c26f9a3 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -27,7 +27,7 @@ static unsigned long stack_maxrandom_size(void)
return STACK_RND_MASK << PAGE_SHIFT;
}
-static inline int mmap_is_legacy(struct rlimit *rlim_stack)
+static inline int mmap_is_legacy(const struct rlimit *rlim_stack)
{
if (current->personality & ADDR_COMPAT_LAYOUT)
return 1;
@@ -169,7 +169,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long ad
* This function, called very early during the creation of a new
* process VM image, sets up which VM layout function to use:
*/
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
+void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack)
{
unsigned long random_factor = 0UL;
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 785e9909340f..55faf2effa46 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -294,7 +294,7 @@ static unsigned long mmap_rnd(void)
return rnd << PAGE_SHIFT;
}
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
+void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack)
{
unsigned long random_factor = mmap_rnd();
unsigned long gap;
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 708f85dc9380..82f3a987f7cf 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -80,7 +80,7 @@ unsigned long arch_mmap_rnd(void)
}
static unsigned long mmap_base(unsigned long rnd, unsigned long task_size,
- struct rlimit *rlim_stack)
+ const struct rlimit *rlim_stack)
{
unsigned long gap = rlim_stack->rlim_cur;
unsigned long pad = stack_maxrandom_size(task_size) + stack_guard_gap;
@@ -110,7 +110,7 @@ static unsigned long mmap_legacy_base(unsigned long rnd,
*/
static void arch_pick_mmap_base(unsigned long *base, unsigned long *legacy_base,
unsigned long random_factor, unsigned long task_size,
- struct rlimit *rlim_stack)
+ const struct rlimit *rlim_stack)
{
*legacy_base = mmap_legacy_base(random_factor, task_size);
if (mmap_is_legacy())
@@ -119,7 +119,7 @@ static void arch_pick_mmap_base(unsigned long *base, unsigned long *legacy_base,
*base = mmap_base(random_factor, task_size, rlim_stack);
}
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
+void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack)
{
if (mmap_is_legacy())
mm_flags_clear(MMF_TOPDOWN, mm);
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 2201da0afecc..0232d983b715 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -178,7 +178,7 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
#endif
extern void arch_pick_mmap_layout(struct mm_struct *mm,
- struct rlimit *rlim_stack);
+ const struct rlimit *rlim_stack);
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
@@ -211,7 +211,7 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long flags, vm_flags_t vm_flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
- struct rlimit *rlim_stack) {}
+ const struct rlimit *rlim_stack) {}
#endif
static inline bool in_vfork(struct task_struct *tsk)
diff --git a/mm/util.c b/mm/util.c
index 241d2eaf26ca..38f8b9fa297c 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -431,7 +431,7 @@ static int mmap_is_legacy(const struct rlimit *rlim_stack)
#define MIN_GAP (SZ_128M)
#define MAX_GAP (STACK_TOP / 6 * 5)
-static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
+static unsigned long mmap_base(unsigned long rnd, const struct rlimit *rlim_stack)
{
#ifdef CONFIG_STACK_GROWSUP
/*
@@ -462,7 +462,7 @@ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
#endif
}
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
+void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack)
{
unsigned long random_factor = 0UL;
@@ -478,7 +478,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
+void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack)
{
mm->mmap_base = TASK_UNMAPPED_BASE;
mm_flags_clear(MMF_TOPDOWN, mm);
--
2.47.2
^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [PATCH 08/12] arch, mm/util: add const to arch_pick_mmap_layout() parameter
2025-08-29 18:31 ` [PATCH 08/12] arch, mm/util: add const to arch_pick_mmap_layout() parameter Max Kellermann
@ 2025-08-29 23:36 ` Vishal Moola (Oracle)
2025-08-31 9:07 ` Max Kellermann
2025-08-30 16:03 ` kernel test robot
2025-08-30 17:26 ` kernel test robot
2 siblings, 1 reply; 30+ messages in thread
From: Vishal Moola (Oracle) @ 2025-08-29 23:36 UTC (permalink / raw)
To: Max Kellermann
Cc: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
On Fri, Aug 29, 2025 at 08:31:55PM +0200, Max Kellermann wrote:
> For improved const-correctness.
>
> Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
> ---
> arch/s390/mm/mmap.c | 4 ++--
> arch/sparc/kernel/sys_sparc_64.c | 2 +-
> arch/x86/mm/mmap.c | 6 +++---
> include/linux/sched/mm.h | 4 ++--
> mm/util.c | 6 +++---
> 5 files changed, 11 insertions(+), 11 deletions(-)
>
> diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
> index 547104ccc22a..767c5c26f9a3 100644
> --- a/arch/s390/mm/mmap.c
> +++ b/arch/s390/mm/mmap.c
> @@ -27,7 +27,7 @@ static unsigned long stack_maxrandom_size(void)
> return STACK_RND_MASK << PAGE_SHIFT;
> }
>
> -static inline int mmap_is_legacy(struct rlimit *rlim_stack)
> +static inline int mmap_is_legacy(const struct rlimit *rlim_stack)
Thanks for splitting the patch into all these smaller snippets, it makes
review a lot easier. But this function should be part of the 6th patch
since we'd want function signatures to change together :).
Once moved, feel free to add to both patches:
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
^ permalink raw reply [flat|nested] 30+ messages in thread
* Re: [PATCH 08/12] arch, mm/util: add const to arch_pick_mmap_layout() parameter
2025-08-29 23:36 ` Vishal Moola (Oracle)
@ 2025-08-31 9:07 ` Max Kellermann
0 siblings, 0 replies; 30+ messages in thread
From: Max Kellermann @ 2025-08-31 9:07 UTC (permalink / raw)
To: Vishal Moola (Oracle)
Cc: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
On Sat, Aug 30, 2025 at 1:36 AM Vishal Moola (Oracle)
<vishal.moola@gmail.com> wrote:
> > -static inline int mmap_is_legacy(struct rlimit *rlim_stack)
> > +static inline int mmap_is_legacy(const struct rlimit *rlim_stack)
>
> Thanks for splitting the patch into all these smaller snippets, it makes
> review a lot easier. But this function should be part of the 6th patch
> since we'd want function signatures to change together :).
Will do.
There are 3 copies of this function which are slightly different:
- arch/s390/mm/mmap.c (checks rlim_stack)
- arch/x86/mm/mmap.c (does not check rlim_stack)
- mm/util.c (checks rlim_stack if CONFIG_STACK_GROWSUP)
I wonder if it would be best to merge all 3 into one? The one in
mm/util.c seems to be generic enough. Export it and have its prototype
in linux/mm.h?
^ permalink raw reply [flat|nested] 30+ messages in thread
* Re: [PATCH 08/12] arch, mm/util: add const to arch_pick_mmap_layout() parameter
2025-08-29 18:31 ` [PATCH 08/12] arch, mm/util: add const to arch_pick_mmap_layout() parameter Max Kellermann
2025-08-29 23:36 ` Vishal Moola (Oracle)
@ 2025-08-30 16:03 ` kernel test robot
2025-08-30 17:26 ` kernel test robot
2 siblings, 0 replies; 30+ messages in thread
From: kernel test robot @ 2025-08-30 16:03 UTC (permalink / raw)
To: Max Kellermann, akpm, david, axelrasmussen, yuanchu, willy, hughd,
mhocko, linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett,
vbabka, rppt, surenb
Cc: llvm, oe-kbuild-all, Max Kellermann
Hi Max,
kernel test robot noticed the following build errors:
[auto build test ERROR on akpm-mm/mm-everything]
url: https://github.com/intel-lab-lkp/linux/commits/Max-Kellermann/mm-shmem-add-const-to-lots-of-pointer-parameters/20250830-023442
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/20250829183159.2223948-9-max.kellermann%40ionos.com
patch subject: [PATCH 08/12] arch, mm/util: add const to arch_pick_mmap_layout() parameter
config: s390-allnoconfig (https://download.01.org/0day-ci/archive/20250830/202508302325.b9umktOw-lkp@intel.com/config)
compiler: clang version 22.0.0git (https://github.com/llvm/llvm-project ac23f7465eedd0dd565ffb201f573e7a69695fa3)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250830/202508302325.b9umktOw-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202508302325.b9umktOw-lkp@intel.com/
All errors (new ones prefixed by >>):
>> arch/s390/mm/mmap.c:187:44: error: passing 'const struct rlimit *' to parameter of type 'struct rlimit *' discards qualifiers [-Werror,-Wincompatible-pointer-types-discards-qualifiers]
187 | mm->mmap_base = mmap_base(random_factor, rlim_stack);
| ^~~~~~~~~~
arch/s390/mm/mmap.c:50:26: note: passing argument to parameter 'rlim_stack' here
50 | struct rlimit *rlim_stack)
| ^
1 error generated.
vim +187 arch/s390/mm/mmap.c
9b11c7912d00d0 Martin Schwidefsky 2017-04-24 167
6252d702c5311c Martin Schwidefsky 2008-02-09 168 /*
6252d702c5311c Martin Schwidefsky 2008-02-09 169 * This function, called very early during the creation of a new
6252d702c5311c Martin Schwidefsky 2008-02-09 170 * process VM image, sets up which VM layout function to use:
6252d702c5311c Martin Schwidefsky 2008-02-09 171 */
f547a726bf8dd8 Max Kellermann 2025-08-29 172 void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack)
6252d702c5311c Martin Schwidefsky 2008-02-09 173 {
8e89a356feb6f1 Kees Cook 2015-04-14 174 unsigned long random_factor = 0UL;
8e89a356feb6f1 Kees Cook 2015-04-14 175
8e89a356feb6f1 Kees Cook 2015-04-14 176 if (current->flags & PF_RANDOMIZE)
2b68f6caeac271 Kees Cook 2015-04-14 177 random_factor = arch_mmap_rnd();
8e89a356feb6f1 Kees Cook 2015-04-14 178
6252d702c5311c Martin Schwidefsky 2008-02-09 179 /*
6252d702c5311c Martin Schwidefsky 2008-02-09 180 * Fall back to the standard layout if the personality
6252d702c5311c Martin Schwidefsky 2008-02-09 181 * bit is set, or if the expected stack growth is unlimited:
6252d702c5311c Martin Schwidefsky 2008-02-09 182 */
8f2af155b51358 Kees Cook 2018-04-10 183 if (mmap_is_legacy(rlim_stack)) {
8e89a356feb6f1 Kees Cook 2015-04-14 184 mm->mmap_base = mmap_base_legacy(random_factor);
40ae96eabea408 Lorenzo Stoakes 2025-08-12 185 mm_flags_clear(MMF_TOPDOWN, mm);
6252d702c5311c Martin Schwidefsky 2008-02-09 186 } else {
8f2af155b51358 Kees Cook 2018-04-10 @187 mm->mmap_base = mmap_base(random_factor, rlim_stack);
c52d638d6b5da2 Lorenzo Stoakes 2025-08-13 188 mm_flags_set(MMF_TOPDOWN, mm);
6252d702c5311c Martin Schwidefsky 2008-02-09 189 }
6252d702c5311c Martin Schwidefsky 2008-02-09 190 }
fd5d210fa66bee Anshuman Khandual 2022-07-11 191
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
^ permalink raw reply [flat|nested] 30+ messages in thread
* Re: [PATCH 08/12] arch, mm/util: add const to arch_pick_mmap_layout() parameter
2025-08-29 18:31 ` [PATCH 08/12] arch, mm/util: add const to arch_pick_mmap_layout() parameter Max Kellermann
2025-08-29 23:36 ` Vishal Moola (Oracle)
2025-08-30 16:03 ` kernel test robot
@ 2025-08-30 17:26 ` kernel test robot
2 siblings, 0 replies; 30+ messages in thread
From: kernel test robot @ 2025-08-30 17:26 UTC (permalink / raw)
To: Max Kellermann, akpm, david, axelrasmussen, yuanchu, willy, hughd,
mhocko, linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett,
vbabka, rppt, surenb
Cc: oe-kbuild-all, Max Kellermann
Hi Max,
kernel test robot noticed the following build warnings:
[auto build test WARNING on akpm-mm/mm-everything]
url: https://github.com/intel-lab-lkp/linux/commits/Max-Kellermann/mm-shmem-add-const-to-lots-of-pointer-parameters/20250830-023442
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/20250829183159.2223948-9-max.kellermann%40ionos.com
patch subject: [PATCH 08/12] arch, mm/util: add const to arch_pick_mmap_layout() parameter
config: s390-randconfig-002-20250830 (https://download.01.org/0day-ci/archive/20250831/202508310153.i186f6o5-lkp@intel.com/config)
compiler: s390-linux-gcc (GCC) 8.5.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250831/202508310153.i186f6o5-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202508310153.i186f6o5-lkp@intel.com/
All warnings (new ones prefixed by >>):
arch/s390/mm/mmap.c: In function 'arch_pick_mmap_layout':
>> arch/s390/mm/mmap.c:187:44: warning: passing argument 2 of 'mmap_base' discards 'const' qualifier from pointer target type [-Wdiscarded-qualifiers]
mm->mmap_base = mmap_base(random_factor, rlim_stack);
^~~~~~~~~~
arch/s390/mm/mmap.c:50:26: note: expected 'struct rlimit *' but argument is of type 'const struct rlimit *'
struct rlimit *rlim_stack)
~~~~~~~~~~~~~~~^~~~~~~~~~
vim +187 arch/s390/mm/mmap.c
9b11c7912d00d0 Martin Schwidefsky 2017-04-24 167
6252d702c5311c Martin Schwidefsky 2008-02-09 168 /*
6252d702c5311c Martin Schwidefsky 2008-02-09 169 * This function, called very early during the creation of a new
6252d702c5311c Martin Schwidefsky 2008-02-09 170 * process VM image, sets up which VM layout function to use:
6252d702c5311c Martin Schwidefsky 2008-02-09 171 */
f547a726bf8dd8 Max Kellermann 2025-08-29 172 void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack)
6252d702c5311c Martin Schwidefsky 2008-02-09 173 {
8e89a356feb6f1 Kees Cook 2015-04-14 174 unsigned long random_factor = 0UL;
8e89a356feb6f1 Kees Cook 2015-04-14 175
8e89a356feb6f1 Kees Cook 2015-04-14 176 if (current->flags & PF_RANDOMIZE)
2b68f6caeac271 Kees Cook 2015-04-14 177 random_factor = arch_mmap_rnd();
8e89a356feb6f1 Kees Cook 2015-04-14 178
6252d702c5311c Martin Schwidefsky 2008-02-09 179 /*
6252d702c5311c Martin Schwidefsky 2008-02-09 180 * Fall back to the standard layout if the personality
6252d702c5311c Martin Schwidefsky 2008-02-09 181 * bit is set, or if the expected stack growth is unlimited:
6252d702c5311c Martin Schwidefsky 2008-02-09 182 */
8f2af155b51358 Kees Cook 2018-04-10 183 if (mmap_is_legacy(rlim_stack)) {
8e89a356feb6f1 Kees Cook 2015-04-14 184 mm->mmap_base = mmap_base_legacy(random_factor);
40ae96eabea408 Lorenzo Stoakes 2025-08-12 185 mm_flags_clear(MMF_TOPDOWN, mm);
6252d702c5311c Martin Schwidefsky 2008-02-09 186 } else {
8f2af155b51358 Kees Cook 2018-04-10 @187 mm->mmap_base = mmap_base(random_factor, rlim_stack);
c52d638d6b5da2 Lorenzo Stoakes 2025-08-13 188 mm_flags_set(MMF_TOPDOWN, mm);
6252d702c5311c Martin Schwidefsky 2008-02-09 189 }
6252d702c5311c Martin Schwidefsky 2008-02-09 190 }
fd5d210fa66bee Anshuman Khandual 2022-07-11 191
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
^ permalink raw reply [flat|nested] 30+ messages in thread
* [PATCH 09/12] include/mm_types.h: add `const` to several pointer parameters
2025-08-29 18:31 [PATCH 00/12] mm: add `const` to lots of pointer parameters Max Kellermann
` (7 preceding siblings ...)
2025-08-29 18:31 ` [PATCH 08/12] arch, mm/util: add const to arch_pick_mmap_layout() parameter Max Kellermann
@ 2025-08-29 18:31 ` Max Kellermann
2025-08-29 23:37 ` Vishal Moola (Oracle)
2025-08-29 18:31 ` [PATCH 10/12] include/mm_inline.h: add `const` to lots of " Max Kellermann
` (2 subsequent siblings)
11 siblings, 1 reply; 30+ messages in thread
From: Max Kellermann @ 2025-08-29 18:31 UTC (permalink / raw)
To: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
Cc: Max Kellermann
For improved const-correctness.
Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
---
include/linux/mm_types.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d934a3a5b443..275e8060d918 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -632,7 +632,7 @@ static inline void ptdesc_pmd_pts_dec(struct ptdesc *ptdesc)
atomic_dec(&ptdesc->pt_share_count);
}
-static inline int ptdesc_pmd_pts_count(struct ptdesc *ptdesc)
+static inline int ptdesc_pmd_pts_count(const struct ptdesc *ptdesc)
{
return atomic_read(&ptdesc->pt_share_count);
}
@@ -660,7 +660,7 @@ static inline void set_page_private(struct page *page, unsigned long private)
page->private = private;
}
-static inline void *folio_get_private(struct folio *folio)
+static inline void *folio_get_private(const struct folio *folio)
{
return folio->private;
}
--
2.47.2
^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [PATCH 09/12] include/mm_types.h: add `const` to several pointer parameters
2025-08-29 18:31 ` [PATCH 09/12] include/mm_types.h: add `const` to several pointer parameters Max Kellermann
@ 2025-08-29 23:37 ` Vishal Moola (Oracle)
0 siblings, 0 replies; 30+ messages in thread
From: Vishal Moola (Oracle) @ 2025-08-29 23:37 UTC (permalink / raw)
To: Max Kellermann
Cc: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
On Fri, Aug 29, 2025 at 08:31:56PM +0200, Max Kellermann wrote:
> For improved const-correctness.
>
> Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
^ permalink raw reply [flat|nested] 30+ messages in thread
* [PATCH 10/12] include/mm_inline.h: add `const` to lots of pointer parameters
2025-08-29 18:31 [PATCH 00/12] mm: add `const` to lots of pointer parameters Max Kellermann
` (8 preceding siblings ...)
2025-08-29 18:31 ` [PATCH 09/12] include/mm_types.h: add `const` to several pointer parameters Max Kellermann
@ 2025-08-29 18:31 ` Max Kellermann
2025-08-29 23:37 ` Vishal Moola (Oracle)
2025-08-29 18:31 ` [PATCH 11/12] include/mm.h: " Max Kellermann
2025-08-29 18:31 ` [PATCH 12/12] mm/highmem: " Max Kellermann
11 siblings, 1 reply; 30+ messages in thread
From: Max Kellermann @ 2025-08-29 18:31 UTC (permalink / raw)
To: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
Cc: Max Kellermann
For improved const-correctness.
Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
---
include/linux/mm_inline.h | 25 +++++++++++++------------
1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 150302b4a905..d6c1011b38f2 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -25,7 +25,7 @@
* 0 if @folio is a normal anonymous folio, a tmpfs folio or otherwise
* ram or swap backed folio.
*/
-static inline int folio_is_file_lru(struct folio *folio)
+static inline int folio_is_file_lru(const struct folio *folio)
{
return !folio_test_swapbacked(folio);
}
@@ -84,7 +84,7 @@ static __always_inline void __folio_clear_lru_flags(struct folio *folio)
* Return: The LRU list a folio should be on, as an index
* into the array of LRU lists.
*/
-static __always_inline enum lru_list folio_lru_list(struct folio *folio)
+static __always_inline enum lru_list folio_lru_list(const struct folio *folio)
{
enum lru_list lru;
@@ -141,7 +141,7 @@ static inline int lru_tier_from_refs(int refs, bool workingset)
return workingset ? MAX_NR_TIERS - 1 : order_base_2(refs);
}
-static inline int folio_lru_refs(struct folio *folio)
+static inline int folio_lru_refs(const struct folio *folio)
{
unsigned long flags = READ_ONCE(folio->flags.f);
@@ -154,14 +154,14 @@ static inline int folio_lru_refs(struct folio *folio)
return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + 1;
}
-static inline int folio_lru_gen(struct folio *folio)
+static inline int folio_lru_gen(const struct folio *folio)
{
unsigned long flags = READ_ONCE(folio->flags.f);
return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}
-static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
+static inline bool lru_gen_is_active(const struct lruvec *lruvec, int gen)
{
unsigned long max_seq = lruvec->lrugen.max_seq;
@@ -217,12 +217,13 @@ static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *foli
VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}
-static inline unsigned long lru_gen_folio_seq(struct lruvec *lruvec, struct folio *folio,
+static inline unsigned long lru_gen_folio_seq(const struct lruvec *lruvec,
+ const struct folio *folio,
bool reclaiming)
{
int gen;
int type = folio_is_file_lru(folio);
- struct lru_gen_folio *lrugen = &lruvec->lrugen;
+ const struct lru_gen_folio *lrugen = &lruvec->lrugen;
/*
* +-----------------------------------+-----------------------------------+
@@ -302,7 +303,7 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
return true;
}
-static inline void folio_migrate_refs(struct folio *new, struct folio *old)
+static inline void folio_migrate_refs(struct folio *new, const struct folio *old)
{
unsigned long refs = READ_ONCE(old->flags.f) & LRU_REFS_MASK;
@@ -330,7 +331,7 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
return false;
}
-static inline void folio_migrate_refs(struct folio *new, struct folio *old)
+static inline void folio_migrate_refs(struct folio *new, const struct folio *old)
{
}
@@ -508,7 +509,7 @@ static inline void dec_tlb_flush_pending(struct mm_struct *mm)
atomic_dec(&mm->tlb_flush_pending);
}
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+static inline bool mm_tlb_flush_pending(const struct mm_struct *mm)
{
/*
* Must be called after having acquired the PTL; orders against that
@@ -521,7 +522,7 @@ static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
return atomic_read(&mm->tlb_flush_pending);
}
-static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
+static inline bool mm_tlb_flush_nested(const struct mm_struct *mm)
{
/*
* Similar to mm_tlb_flush_pending(), we must have acquired the PTL
@@ -605,7 +606,7 @@ pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
return false;
}
-static inline bool vma_has_recency(struct vm_area_struct *vma)
+static inline bool vma_has_recency(const struct vm_area_struct *vma)
{
if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
return false;
--
2.47.2
^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [PATCH 10/12] include/mm_inline.h: add `const` to lots of pointer parameters
2025-08-29 18:31 ` [PATCH 10/12] include/mm_inline.h: add `const` to lots of " Max Kellermann
@ 2025-08-29 23:37 ` Vishal Moola (Oracle)
0 siblings, 0 replies; 30+ messages in thread
From: Vishal Moola (Oracle) @ 2025-08-29 23:37 UTC (permalink / raw)
To: Max Kellermann
Cc: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
On Fri, Aug 29, 2025 at 08:31:57PM +0200, Max Kellermann wrote:
> For improved const-correctness.
>
> Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
^ permalink raw reply [flat|nested] 30+ messages in thread
* [PATCH 11/12] include/mm.h: add `const` to lots of pointer parameters
2025-08-29 18:31 [PATCH 00/12] mm: add `const` to lots of pointer parameters Max Kellermann
` (9 preceding siblings ...)
2025-08-29 18:31 ` [PATCH 10/12] include/mm_inline.h: add `const` to lots of " Max Kellermann
@ 2025-08-29 18:31 ` Max Kellermann
2025-08-29 18:31 ` [PATCH 12/12] mm/highmem: " Max Kellermann
11 siblings, 0 replies; 30+ messages in thread
From: Max Kellermann @ 2025-08-29 18:31 UTC (permalink / raw)
To: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
Cc: Max Kellermann
For improved const-correctness.
Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
---
include/linux/mm.h | 30 +++++++++++++++---------------
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 48fe838723ed..953368e578f7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -716,7 +716,7 @@ static inline void release_fault_lock(struct vm_fault *vmf)
mmap_read_unlock(vmf->vma->vm_mm);
}
-static inline void assert_fault_locked(struct vm_fault *vmf)
+static inline void assert_fault_locked(const struct vm_fault *vmf)
{
mmap_assert_locked(vmf->vma->vm_mm);
}
@@ -859,7 +859,7 @@ static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
vma->vm_end >= vma->vm_mm->start_stack;
}
-static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
+static inline bool vma_is_temporary_stack(const struct vm_area_struct *vma)
{
int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
@@ -873,7 +873,7 @@ static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
return false;
}
-static inline bool vma_is_foreign(struct vm_area_struct *vma)
+static inline bool vma_is_foreign(const struct vm_area_struct *vma)
{
if (!current->mm)
return true;
@@ -884,7 +884,7 @@ static inline bool vma_is_foreign(struct vm_area_struct *vma)
return false;
}
-static inline bool vma_is_accessible(struct vm_area_struct *vma)
+static inline bool vma_is_accessible(const struct vm_area_struct *vma)
{
return vma->vm_flags & VM_ACCESS_FLAGS;
}
@@ -895,7 +895,7 @@ static inline bool is_shared_maywrite(vm_flags_t vm_flags)
(VM_SHARED | VM_MAYWRITE);
}
-static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
+static inline bool vma_is_shared_maywrite(const struct vm_area_struct *vma)
{
return is_shared_maywrite(vma->vm_flags);
}
@@ -3488,7 +3488,7 @@ struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
return mtree_load(&mm->mm_mt, addr);
}
-static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
+static inline unsigned long stack_guard_start_gap(const struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_GROWSDOWN)
return stack_guard_gap;
@@ -3500,7 +3500,7 @@ static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
return 0;
}
-static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
+static inline unsigned long vm_start_gap(const struct vm_area_struct *vma)
{
unsigned long gap = stack_guard_start_gap(vma);
unsigned long vm_start = vma->vm_start;
@@ -3511,7 +3511,7 @@ static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
return vm_start;
}
-static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
+static inline unsigned long vm_end_gap(const struct vm_area_struct *vma)
{
unsigned long vm_end = vma->vm_end;
@@ -3523,7 +3523,7 @@ static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
return vm_end;
}
-static inline unsigned long vma_pages(struct vm_area_struct *vma)
+static inline unsigned long vma_pages(const struct vm_area_struct *vma)
{
return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}
@@ -3540,7 +3540,7 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
return vma;
}
-static inline bool range_in_vma(struct vm_area_struct *vma,
+static inline bool range_in_vma(const struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
return (vma && vma->vm_start <= start && end <= vma->vm_end);
@@ -3656,7 +3656,7 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
* Indicates whether GUP can follow a PROT_NONE mapped page, or whether
* a (NUMA hinting) fault is required.
*/
-static inline bool gup_can_follow_protnone(struct vm_area_struct *vma,
+static inline bool gup_can_follow_protnone(const struct vm_area_struct *vma,
unsigned int flags)
{
/*
@@ -3786,7 +3786,7 @@ static inline bool debug_guardpage_enabled(void)
return static_branch_unlikely(&_debug_guardpage_enabled);
}
-static inline bool page_is_guard(struct page *page)
+static inline bool page_is_guard(const struct page *page)
{
if (!debug_guardpage_enabled())
return false;
@@ -3817,7 +3817,7 @@ static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
-static inline bool page_is_guard(struct page *page) { return false; }
+static inline bool page_is_guard(const struct page *page) { return false; }
static inline bool set_page_guard(struct zone *zone, struct page *page,
unsigned int order) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
@@ -3899,7 +3899,7 @@ void vmemmap_free(unsigned long start, unsigned long end,
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP
-static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
{
/* number of pfns from base where pfn_to_page() is valid */
if (altmap)
@@ -3913,7 +3913,7 @@ static inline void vmem_altmap_free(struct vmem_altmap *altmap,
altmap->alloc -= nr_pfns;
}
#else
-static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
{
return 0;
}
--
2.47.2
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH 12/12] mm/highmem: add `const` to lots of pointer parameters
2025-08-29 18:31 [PATCH 00/12] mm: add `const` to lots of pointer parameters Max Kellermann
` (10 preceding siblings ...)
2025-08-29 18:31 ` [PATCH 11/12] include/mm.h: " Max Kellermann
@ 2025-08-29 18:31 ` Max Kellermann
11 siblings, 0 replies; 30+ messages in thread
From: Max Kellermann @ 2025-08-29 18:31 UTC (permalink / raw)
To: akpm, david, axelrasmussen, yuanchu, willy, hughd, mhocko,
linux-kernel, linux-mm, lorenzo.stoakes, Liam.Howlett, vbabka,
rppt, surenb
Cc: Max Kellermann
For improved const-correctness.
Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
---
arch/arm/include/asm/highmem.h | 6 +++---
arch/xtensa/include/asm/highmem.h | 2 +-
include/linux/highmem-internal.h | 34 +++++++++++++++----------------
include/linux/highmem.h | 8 ++++----
mm/highmem.c | 10 ++++-----
5 files changed, 30 insertions(+), 30 deletions(-)
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index b4b66220952d..bdb209e002a4 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -46,9 +46,9 @@ extern pte_t *pkmap_page_table;
#endif
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
-extern void *kmap_high_get(struct page *page);
+extern void *kmap_high_get(const struct page *page);
-static inline void *arch_kmap_local_high_get(struct page *page)
+static inline void *arch_kmap_local_high_get(const struct page *page)
{
if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !cache_is_vivt())
return NULL;
@@ -57,7 +57,7 @@ static inline void *arch_kmap_local_high_get(struct page *page)
#define arch_kmap_local_high_get arch_kmap_local_high_get
#else /* ARCH_NEEDS_KMAP_HIGH_GET */
-static inline void *kmap_high_get(struct page *page)
+static inline void *kmap_high_get(const struct page *page)
{
return NULL;
}
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
index 34b8b620e7f1..b55235f4adac 100644
--- a/arch/xtensa/include/asm/highmem.h
+++ b/arch/xtensa/include/asm/highmem.h
@@ -29,7 +29,7 @@
#if DCACHE_WAY_SIZE > PAGE_SIZE
#define get_pkmap_color get_pkmap_color
-static inline int get_pkmap_color(struct page *page)
+static inline int get_pkmap_color(const struct page *page)
{
return DCACHE_ALIAS(page_to_phys(page));
}
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index 36053c3d6d64..f5fb9dc7963c 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -7,7 +7,7 @@
*/
#ifdef CONFIG_KMAP_LOCAL
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
-void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
+void *__kmap_local_page_prot(const struct page *page, pgprot_t prot);
void kunmap_local_indexed(const void *vaddr);
void kmap_local_fork(struct task_struct *tsk);
void __kmap_local_sched_out(void);
@@ -33,7 +33,7 @@ static inline void kmap_flush_tlb(unsigned long addr) { }
#endif
void *kmap_high(struct page *page);
-void kunmap_high(struct page *page);
+void kunmap_high(const struct page *page);
void __kmap_flush_unused(void);
struct page *__kmap_to_page(void *addr);
@@ -50,7 +50,7 @@ static inline void *kmap(struct page *page)
return addr;
}
-static inline void kunmap(struct page *page)
+static inline void kunmap(const struct page *page)
{
might_sleep();
if (!PageHighMem(page))
@@ -68,12 +68,12 @@ static inline void kmap_flush_unused(void)
__kmap_flush_unused();
}
-static inline void *kmap_local_page(struct page *page)
+static inline void *kmap_local_page(const struct page *page)
{
return __kmap_local_page_prot(page, kmap_prot);
}
-static inline void *kmap_local_page_try_from_panic(struct page *page)
+static inline void *kmap_local_page_try_from_panic(const struct page *page)
{
if (!PageHighMem(page))
return page_address(page);
@@ -81,13 +81,13 @@ static inline void *kmap_local_page_try_from_panic(struct page *page)
return NULL;
}
-static inline void *kmap_local_folio(struct folio *folio, size_t offset)
+static inline void *kmap_local_folio(const struct folio *folio, size_t offset)
{
- struct page *page = folio_page(folio, offset / PAGE_SIZE);
+ const struct page *page = folio_page(folio, offset / PAGE_SIZE);
return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
}
-static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_local_page_prot(const struct page *page, pgprot_t prot)
{
return __kmap_local_page_prot(page, prot);
}
@@ -102,7 +102,7 @@ static inline void __kunmap_local(const void *vaddr)
kunmap_local_indexed(vaddr);
}
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_atomic_prot(const struct page *page, pgprot_t prot)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT))
migrate_disable();
@@ -113,7 +113,7 @@ static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
return __kmap_local_page_prot(page, prot);
}
-static inline void *kmap_atomic(struct page *page)
+static inline void *kmap_atomic(const struct page *page)
{
return kmap_atomic_prot(page, kmap_prot);
}
@@ -173,17 +173,17 @@ static inline void *kmap(struct page *page)
return page_address(page);
}
-static inline void kunmap_high(struct page *page) { }
+static inline void kunmap_high(const struct page *page) { }
static inline void kmap_flush_unused(void) { }
-static inline void kunmap(struct page *page)
+static inline void kunmap(const struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
kunmap_flush_on_unmap(page_address(page));
#endif
}
-static inline void *kmap_local_page(struct page *page)
+static inline void *kmap_local_page(const struct page *page)
{
return page_address(page);
}
@@ -193,12 +193,12 @@ static inline void *kmap_local_page_try_from_panic(struct page *page)
return page_address(page);
}
-static inline void *kmap_local_folio(struct folio *folio, size_t offset)
+static inline void *kmap_local_folio(const struct folio *folio, size_t offset)
{
return folio_address(folio) + offset;
}
-static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_local_page_prot(const struct page *page, pgprot_t prot)
{
return kmap_local_page(page);
}
@@ -215,7 +215,7 @@ static inline void __kunmap_local(const void *addr)
#endif
}
-static inline void *kmap_atomic(struct page *page)
+static inline void *kmap_atomic(const struct page *page)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT))
migrate_disable();
@@ -225,7 +225,7 @@ static inline void *kmap_atomic(struct page *page)
return page_address(page);
}
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_atomic_prot(const struct page *page, pgprot_t prot)
{
return kmap_atomic(page);
}
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 6234f316468c..105cc4c00cc3 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -43,7 +43,7 @@ static inline void *kmap(struct page *page);
* Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
* pages in the low memory area.
*/
-static inline void kunmap(struct page *page);
+static inline void kunmap(const struct page *page);
/**
* kmap_to_page - Get the page for a kmap'ed address
@@ -93,7 +93,7 @@ static inline void kmap_flush_unused(void);
* disabling migration in order to keep the virtual address stable across
* preemption. No caller of kmap_local_page() can rely on this side effect.
*/
-static inline void *kmap_local_page(struct page *page);
+static inline void *kmap_local_page(const struct page *page);
/**
* kmap_local_folio - Map a page in this folio for temporary usage
@@ -129,7 +129,7 @@ static inline void *kmap_local_page(struct page *page);
* Context: Can be invoked from any context.
* Return: The virtual address of @offset.
*/
-static inline void *kmap_local_folio(struct folio *folio, size_t offset);
+static inline void *kmap_local_folio(const struct folio *folio, size_t offset);
/**
* kmap_atomic - Atomically map a page for temporary usage - Deprecated!
@@ -176,7 +176,7 @@ static inline void *kmap_local_folio(struct folio *folio, size_t offset);
* kunmap_atomic(vaddr2);
* kunmap_atomic(vaddr1);
*/
-static inline void *kmap_atomic(struct page *page);
+static inline void *kmap_atomic(const struct page *page);
/* Highmem related interfaces for management code */
static inline unsigned long nr_free_highpages(void);
diff --git a/mm/highmem.c b/mm/highmem.c
index ef3189b36cad..b5c8e4c2d5d4 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -61,7 +61,7 @@ static inline int kmap_local_calc_idx(int idx)
/*
* Determine color of virtual address where the page should be mapped.
*/
-static inline unsigned int get_pkmap_color(struct page *page)
+static inline unsigned int get_pkmap_color(const struct page *page)
{
return 0;
}
@@ -334,7 +334,7 @@ EXPORT_SYMBOL(kmap_high);
*
* This can be called from any context.
*/
-void *kmap_high_get(struct page *page)
+void *kmap_high_get(const struct page *page)
{
unsigned long vaddr, flags;
@@ -356,7 +356,7 @@ void *kmap_high_get(struct page *page)
* If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
* only from user context.
*/
-void kunmap_high(struct page *page)
+void kunmap_high(const struct page *page)
{
unsigned long vaddr;
unsigned long nr;
@@ -508,7 +508,7 @@ static inline void kmap_local_idx_pop(void)
#endif
#ifndef arch_kmap_local_high_get
-static inline void *arch_kmap_local_high_get(struct page *page)
+static inline void *arch_kmap_local_high_get(const struct page *page)
{
return NULL;
}
@@ -572,7 +572,7 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
}
EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);
-void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
+void *__kmap_local_page_prot(const struct page *page, pgprot_t prot)
{
void *kmap;
--
2.47.2
^ permalink raw reply related [flat|nested] 30+ messages in thread