* [PATCH] mm: add `const` to lots of pointer parameters
@ 2025-08-27 19:22 Max Kellermann
2025-08-27 21:48 ` Andrew Morton
` (2 more replies)
0 siblings, 3 replies; 16+ messages in thread
From: Max Kellermann @ 2025-08-27 19:22 UTC (permalink / raw)
To: akpm, david, lorenzo.stoakes, Liam.Howlett, vbabka, rppt, surenb,
mhocko, linux-mm, linux-kernel
Cc: Max Kellermann
For improved const-correctness.
Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
---
include/linux/fs.h | 6 ++---
include/linux/mm.h | 42 +++++++++++++++----------------
include/linux/mm_inline.h | 24 +++++++++---------
include/linux/mm_types.h | 4 +--
include/linux/mmzone.h | 42 +++++++++++++++----------------
include/linux/pagemap.h | 52 +++++++++++++++++++--------------------
include/linux/pagevec.h | 4 +--
include/linux/shmem_fs.h | 4 +--
mm/oom_kill.c | 2 +-
mm/shmem.c | 6 ++---
mm/util.c | 2 +-
11 files changed, 94 insertions(+), 94 deletions(-)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index d7ab4f96d705..0783c5d05d3f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -537,7 +537,7 @@ struct address_space {
/*
* Returns true if any of the pages in the mapping are marked with the tag.
*/
-static inline bool mapping_tagged(struct address_space *mapping, xa_mark_t tag)
+static inline bool mapping_tagged(const struct address_space *mapping, xa_mark_t tag)
{
return xa_marked(&mapping->i_pages, tag);
}
@@ -585,7 +585,7 @@ static inline void i_mmap_assert_write_locked(struct address_space *mapping)
/*
* Might pages of this file be mapped into userspace?
*/
-static inline int mapping_mapped(struct address_space *mapping)
+static inline int mapping_mapped(const struct address_space *mapping)
{
return !RB_EMPTY_ROOT(&mapping->i_mmap.rb_root);
}
@@ -599,7 +599,7 @@ static inline int mapping_mapped(struct address_space *mapping)
* If i_mmap_writable is negative, no new writable mappings are allowed. You
* can only deny writable mappings, if none exists right now.
*/
-static inline int mapping_writably_mapped(struct address_space *mapping)
+static inline int mapping_writably_mapped(const struct address_space *mapping)
{
return atomic_read(&mapping->i_mmap_writable) > 0;
}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1ae97a0b8ec7..ad478a4afa6c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -697,7 +697,7 @@ static inline void release_fault_lock(struct vm_fault *vmf)
mmap_read_unlock(vmf->vma->vm_mm);
}
-static inline void assert_fault_locked(struct vm_fault *vmf)
+static inline void assert_fault_locked(const struct vm_fault *vmf)
{
mmap_assert_locked(vmf->vma->vm_mm);
}
@@ -810,7 +810,7 @@ static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
vma->vm_end >= vma->vm_mm->start_stack;
}
-static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
+static inline bool vma_is_temporary_stack(const struct vm_area_struct *vma)
{
int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
@@ -824,7 +824,7 @@ static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
return false;
}
-static inline bool vma_is_foreign(struct vm_area_struct *vma)
+static inline bool vma_is_foreign(const struct vm_area_struct *vma)
{
if (!current->mm)
return true;
@@ -835,7 +835,7 @@ static inline bool vma_is_foreign(struct vm_area_struct *vma)
return false;
}
-static inline bool vma_is_accessible(struct vm_area_struct *vma)
+static inline bool vma_is_accessible(const struct vm_area_struct *vma)
{
return vma->vm_flags & VM_ACCESS_FLAGS;
}
@@ -846,7 +846,7 @@ static inline bool is_shared_maywrite(vm_flags_t vm_flags)
(VM_SHARED | VM_MAYWRITE);
}
-static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
+static inline bool vma_is_shared_maywrite(const struct vm_area_struct *vma)
{
return is_shared_maywrite(vma->vm_flags);
}
@@ -930,14 +930,14 @@ static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
* The vma_is_shmem is not inline because it is used only by slow
* paths in userfault.
*/
-bool vma_is_shmem(struct vm_area_struct *vma);
-bool vma_is_anon_shmem(struct vm_area_struct *vma);
+bool vma_is_shmem(const struct vm_area_struct *vma);
+bool vma_is_anon_shmem(const struct vm_area_struct *vma);
#else
-static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
-static inline bool vma_is_anon_shmem(struct vm_area_struct *vma) { return false; }
+static inline bool vma_is_shmem(const struct vm_area_struct *vma) { return false; }
+static inline bool vma_is_anon_shmem(const struct vm_area_struct *vma) { return false; }
#endif
-int vma_is_stack_for_current(struct vm_area_struct *vma);
+int vma_is_stack_for_current(const struct vm_area_struct *vma);
/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
@@ -3432,7 +3432,7 @@ struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
return mtree_load(&mm->mm_mt, addr);
}
-static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
+static inline unsigned long stack_guard_start_gap(const struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_GROWSDOWN)
return stack_guard_gap;
@@ -3444,7 +3444,7 @@ static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
return 0;
}
-static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
+static inline unsigned long vm_start_gap(const struct vm_area_struct *vma)
{
unsigned long gap = stack_guard_start_gap(vma);
unsigned long vm_start = vma->vm_start;
@@ -3455,7 +3455,7 @@ static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
return vm_start;
}
-static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
+static inline unsigned long vm_end_gap(const struct vm_area_struct *vma)
{
unsigned long vm_end = vma->vm_end;
@@ -3467,7 +3467,7 @@ static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
return vm_end;
}
-static inline unsigned long vma_pages(struct vm_area_struct *vma)
+static inline unsigned long vma_pages(const struct vm_area_struct *vma)
{
return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}
@@ -3484,7 +3484,7 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
return vma;
}
-static inline bool range_in_vma(struct vm_area_struct *vma,
+static inline bool range_in_vma(const struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
return (vma && vma->vm_start <= start && end <= vma->vm_end);
@@ -3600,7 +3600,7 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
* Indicates whether GUP can follow a PROT_NONE mapped page, or whether
* a (NUMA hinting) fault is required.
*/
-static inline bool gup_can_follow_protnone(struct vm_area_struct *vma,
+static inline bool gup_can_follow_protnone(const struct vm_area_struct *vma,
unsigned int flags)
{
/*
@@ -3730,7 +3730,7 @@ static inline bool debug_guardpage_enabled(void)
return static_branch_unlikely(&_debug_guardpage_enabled);
}
-static inline bool page_is_guard(struct page *page)
+static inline bool page_is_guard(const struct page *page)
{
if (!debug_guardpage_enabled())
return false;
@@ -3761,7 +3761,7 @@ static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
-static inline bool page_is_guard(struct page *page) { return false; }
+static inline bool page_is_guard(const struct page *page) { return false; }
static inline bool set_page_guard(struct zone *zone, struct page *page,
unsigned int order) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
@@ -3784,7 +3784,7 @@ static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
}
#endif /* __HAVE_ARCH_GATE_AREA */
-extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
+extern bool process_shares_mm(struct task_struct *p, const struct mm_struct *mm);
void drop_slab(void);
@@ -3843,7 +3843,7 @@ void vmemmap_free(unsigned long start, unsigned long end,
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP
-static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
{
/* number of pfns from base where pfn_to_page() is valid */
if (altmap)
@@ -3857,7 +3857,7 @@ static inline void vmem_altmap_free(struct vmem_altmap *altmap,
altmap->alloc -= nr_pfns;
}
#else
-static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
{
return 0;
}
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 89b518ff097e..c91fa1de51fd 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -25,7 +25,7 @@
* 0 if @folio is a normal anonymous folio, a tmpfs folio or otherwise
* ram or swap backed folio.
*/
-static inline int folio_is_file_lru(struct folio *folio)
+static inline int folio_is_file_lru(const struct folio *folio)
{
return !folio_test_swapbacked(folio);
}
@@ -84,7 +84,7 @@ static __always_inline void __folio_clear_lru_flags(struct folio *folio)
* Return: The LRU list a folio should be on, as an index
* into the array of LRU lists.
*/
-static __always_inline enum lru_list folio_lru_list(struct folio *folio)
+static __always_inline enum lru_list folio_lru_list(const struct folio *folio)
{
enum lru_list lru;
@@ -141,7 +141,7 @@ static inline int lru_tier_from_refs(int refs, bool workingset)
return workingset ? MAX_NR_TIERS - 1 : order_base_2(refs);
}
-static inline int folio_lru_refs(struct folio *folio)
+static inline int folio_lru_refs(const struct folio *folio)
{
unsigned long flags = READ_ONCE(folio->flags);
@@ -154,14 +154,14 @@ static inline int folio_lru_refs(struct folio *folio)
return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + 1;
}
-static inline int folio_lru_gen(struct folio *folio)
+static inline int folio_lru_gen(const struct folio *folio)
{
unsigned long flags = READ_ONCE(folio->flags);
return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}
-static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
+static inline bool lru_gen_is_active(const struct lruvec *lruvec, int gen)
{
unsigned long max_seq = lruvec->lrugen.max_seq;
@@ -217,12 +217,12 @@ static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *foli
VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}
-static inline unsigned long lru_gen_folio_seq(struct lruvec *lruvec, struct folio *folio,
+static inline unsigned long lru_gen_folio_seq(const struct lruvec *lruvec, const struct folio *folio,
bool reclaiming)
{
int gen;
int type = folio_is_file_lru(folio);
- struct lru_gen_folio *lrugen = &lruvec->lrugen;
+ const struct lru_gen_folio *lrugen = &lruvec->lrugen;
/*
* +-----------------------------------+-----------------------------------+
@@ -302,7 +302,7 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
return true;
}
-static inline void folio_migrate_refs(struct folio *new, struct folio *old)
+static inline void folio_migrate_refs(struct folio *new, const struct folio *old)
{
unsigned long refs = READ_ONCE(old->flags) & LRU_REFS_MASK;
@@ -330,7 +330,7 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
return false;
}
-static inline void folio_migrate_refs(struct folio *new, struct folio *old)
+static inline void folio_migrate_refs(struct folio *new, const struct folio *old)
{
}
@@ -508,7 +508,7 @@ static inline void dec_tlb_flush_pending(struct mm_struct *mm)
atomic_dec(&mm->tlb_flush_pending);
}
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+static inline bool mm_tlb_flush_pending(const struct mm_struct *mm)
{
/*
* Must be called after having acquired the PTL; orders against that
@@ -521,7 +521,7 @@ static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
return atomic_read(&mm->tlb_flush_pending);
}
-static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
+static inline bool mm_tlb_flush_nested(const struct mm_struct *mm)
{
/*
* Similar to mm_tlb_flush_pending(), we must have acquired the PTL
@@ -605,7 +605,7 @@ pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
return false;
}
-static inline bool vma_has_recency(struct vm_area_struct *vma)
+static inline bool vma_has_recency(const struct vm_area_struct *vma)
{
if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
return false;
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 08bc2442db93..56766e498ee1 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -627,7 +627,7 @@ static inline void ptdesc_pmd_pts_dec(struct ptdesc *ptdesc)
atomic_dec(&ptdesc->pt_share_count);
}
-static inline int ptdesc_pmd_pts_count(struct ptdesc *ptdesc)
+static inline int ptdesc_pmd_pts_count(const struct ptdesc *ptdesc)
{
return atomic_read(&ptdesc->pt_share_count);
}
@@ -655,7 +655,7 @@ static inline void set_page_private(struct page *page, unsigned long private)
page->private = private;
}
-static inline void *folio_get_private(struct folio *folio)
+static inline void *folio_get_private(const struct folio *folio)
{
return folio->private;
}
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0c5da9141983..cef949d6ba82 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1089,7 +1089,7 @@ static inline unsigned long promo_wmark_pages(const struct zone *z)
return wmark_pages(z, WMARK_PROMO);
}
-static inline unsigned long zone_managed_pages(struct zone *zone)
+static inline unsigned long zone_managed_pages(const struct zone *zone)
{
return (unsigned long)atomic_long_read(&zone->managed_pages);
}
@@ -1113,12 +1113,12 @@ static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}
-static inline bool zone_is_initialized(struct zone *zone)
+static inline bool zone_is_initialized(const struct zone *zone)
{
return zone->initialized;
}
-static inline bool zone_is_empty(struct zone *zone)
+static inline bool zone_is_empty(const struct zone *zone)
{
return zone->spanned_pages == 0;
}
@@ -1248,7 +1248,7 @@ static inline bool folio_is_zone_movable(const struct folio *folio)
* Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
* intersection with the given zone
*/
-static inline bool zone_intersects(struct zone *zone,
+static inline bool zone_intersects(const struct zone *zone,
unsigned long start_pfn, unsigned long nr_pages)
{
if (zone_is_empty(zone))
@@ -1556,12 +1556,12 @@ static inline int local_memory_node(int node_id) { return node_id; };
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
#ifdef CONFIG_ZONE_DEVICE
-static inline bool zone_is_zone_device(struct zone *zone)
+static inline bool zone_is_zone_device(const struct zone *zone)
{
return zone_idx(zone) == ZONE_DEVICE;
}
#else
-static inline bool zone_is_zone_device(struct zone *zone)
+static inline bool zone_is_zone_device(const struct zone *zone)
{
return false;
}
@@ -1573,19 +1573,19 @@ static inline bool zone_is_zone_device(struct zone *zone)
* populated_zone(). If the whole zone is reserved then we can easily
* end up with populated_zone() && !managed_zone().
*/
-static inline bool managed_zone(struct zone *zone)
+static inline bool managed_zone(const struct zone *zone)
{
return zone_managed_pages(zone);
}
/* Returns true if a zone has memory */
-static inline bool populated_zone(struct zone *zone)
+static inline bool populated_zone(const struct zone *zone)
{
return zone->present_pages;
}
#ifdef CONFIG_NUMA
-static inline int zone_to_nid(struct zone *zone)
+static inline int zone_to_nid(const struct zone *zone)
{
return zone->node;
}
@@ -1595,7 +1595,7 @@ static inline void zone_set_nid(struct zone *zone, int nid)
zone->node = nid;
}
#else
-static inline int zone_to_nid(struct zone *zone)
+static inline int zone_to_nid(const struct zone *zone)
{
return 0;
}
@@ -1622,7 +1622,7 @@ static inline int is_highmem_idx(enum zone_type idx)
* @zone: pointer to struct zone variable
* Return: 1 for a highmem zone, 0 otherwise
*/
-static inline int is_highmem(struct zone *zone)
+static inline int is_highmem(const struct zone *zone)
{
return is_highmem_idx(zone_idx(zone));
}
@@ -1688,12 +1688,12 @@ static inline struct zone *zonelist_zone(struct zoneref *zoneref)
return zoneref->zone;
}
-static inline int zonelist_zone_idx(struct zoneref *zoneref)
+static inline int zonelist_zone_idx(const struct zoneref *zoneref)
{
return zoneref->zone_idx;
}
-static inline int zonelist_node_idx(struct zoneref *zoneref)
+static inline int zonelist_node_idx(const struct zoneref *zoneref)
{
return zone_to_nid(zoneref->zone);
}
@@ -1996,7 +1996,7 @@ static inline struct page *__section_mem_map_addr(struct mem_section *section)
return (struct page *)map;
}
-static inline int present_section(struct mem_section *section)
+static inline int present_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}
@@ -2006,12 +2006,12 @@ static inline int present_section_nr(unsigned long nr)
return present_section(__nr_to_section(nr));
}
-static inline int valid_section(struct mem_section *section)
+static inline int valid_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}
-static inline int early_section(struct mem_section *section)
+static inline int early_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_IS_EARLY));
}
@@ -2021,27 +2021,27 @@ static inline int valid_section_nr(unsigned long nr)
return valid_section(__nr_to_section(nr));
}
-static inline int online_section(struct mem_section *section)
+static inline int online_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}
#ifdef CONFIG_ZONE_DEVICE
-static inline int online_device_section(struct mem_section *section)
+static inline int online_device_section(const struct mem_section *section)
{
unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;
return section && ((section->section_mem_map & flags) == flags);
}
#else
-static inline int online_device_section(struct mem_section *section)
+static inline int online_device_section(const struct mem_section *section)
{
return 0;
}
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
-static inline int preinited_vmemmap_section(struct mem_section *section)
+static inline int preinited_vmemmap_section(const struct mem_section *section)
{
return (section &&
(section->section_mem_map & SECTION_IS_VMEMMAP_PREINIT));
@@ -2051,7 +2051,7 @@ void sparse_vmemmap_init_nid_early(int nid);
void sparse_vmemmap_init_nid_late(int nid);
#else
-static inline int preinited_vmemmap_section(struct mem_section *section)
+static inline int preinited_vmemmap_section(const struct mem_section *section)
{
return 0;
}
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 12a12dae727d..37fa744ab124 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -140,7 +140,7 @@ static inline int inode_drain_writes(struct inode *inode)
return filemap_write_and_wait(inode->i_mapping);
}
-static inline bool mapping_empty(struct address_space *mapping)
+static inline bool mapping_empty(const struct address_space *mapping)
{
return xa_empty(&mapping->i_pages);
}
@@ -166,7 +166,7 @@ static inline bool mapping_empty(struct address_space *mapping)
* refcount and the referenced bit, which will be elevated or set in
* the process of adding new cache pages to an inode.
*/
-static inline bool mapping_shrinkable(struct address_space *mapping)
+static inline bool mapping_shrinkable(const struct address_space *mapping)
{
void *head;
@@ -265,7 +265,7 @@ static inline void mapping_clear_unevictable(struct address_space *mapping)
clear_bit(AS_UNEVICTABLE, &mapping->flags);
}
-static inline bool mapping_unevictable(struct address_space *mapping)
+static inline bool mapping_unevictable(const struct address_space *mapping)
{
return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}
@@ -275,7 +275,7 @@ static inline void mapping_set_exiting(struct address_space *mapping)
set_bit(AS_EXITING, &mapping->flags);
}
-static inline int mapping_exiting(struct address_space *mapping)
+static inline int mapping_exiting(const struct address_space *mapping)
{
return test_bit(AS_EXITING, &mapping->flags);
}
@@ -285,7 +285,7 @@ static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
-static inline int mapping_use_writeback_tags(struct address_space *mapping)
+static inline int mapping_use_writeback_tags(const struct address_space *mapping)
{
return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
@@ -331,7 +331,7 @@ static inline void mapping_set_inaccessible(struct address_space *mapping)
set_bit(AS_INACCESSIBLE, &mapping->flags);
}
-static inline bool mapping_inaccessible(struct address_space *mapping)
+static inline bool mapping_inaccessible(const struct address_space *mapping)
{
return test_bit(AS_INACCESSIBLE, &mapping->flags);
}
@@ -341,18 +341,18 @@ static inline void mapping_set_writeback_may_deadlock_on_reclaim(struct address_
set_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
}
-static inline bool mapping_writeback_may_deadlock_on_reclaim(struct address_space *mapping)
+static inline bool mapping_writeback_may_deadlock_on_reclaim(const struct address_space *mapping)
{
return test_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
}
-static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
+static inline gfp_t mapping_gfp_mask(const struct address_space * mapping)
{
return mapping->gfp_mask;
}
/* Restricts the given gfp_mask to what the mapping allows. */
-static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
+static inline gfp_t mapping_gfp_constraint(const struct address_space *mapping,
gfp_t gfp_mask)
{
return mapping_gfp_mask(mapping) & gfp_mask;
@@ -475,7 +475,7 @@ mapping_min_folio_order(const struct address_space *mapping)
}
static inline unsigned long
-mapping_min_folio_nrpages(struct address_space *mapping)
+mapping_min_folio_nrpages(const struct address_space *mapping)
{
return 1UL << mapping_min_folio_order(mapping);
}
@@ -489,7 +489,7 @@ mapping_min_folio_nrpages(struct address_space *mapping)
* new folio to the page cache and need to know what index to give it,
* call this function.
*/
-static inline pgoff_t mapping_align_index(struct address_space *mapping,
+static inline pgoff_t mapping_align_index(const struct address_space *mapping,
pgoff_t index)
{
return round_down(index, mapping_min_folio_nrpages(mapping));
@@ -499,7 +499,7 @@ static inline pgoff_t mapping_align_index(struct address_space *mapping,
* Large folio support currently depends on THP. These dependencies are
* being worked on but are not yet fixed.
*/
-static inline bool mapping_large_folio_support(struct address_space *mapping)
+static inline bool mapping_large_folio_support(const struct address_space *mapping)
{
/* AS_FOLIO_ORDER is only reasonable for pagecache folios */
VM_WARN_ONCE((unsigned long)mapping & FOLIO_MAPPING_ANON,
@@ -514,7 +514,7 @@ static inline size_t mapping_max_folio_size(const struct address_space *mapping)
return PAGE_SIZE << mapping_max_folio_order(mapping);
}
-static inline int filemap_nr_thps(struct address_space *mapping)
+static inline int filemap_nr_thps(const struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
return atomic_read(&mapping->nr_thps);
@@ -928,7 +928,7 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
*
* Return: The index of the folio which follows this folio in the file.
*/
-static inline pgoff_t folio_next_index(struct folio *folio)
+static inline pgoff_t folio_next_index(const struct folio *folio)
{
return folio->index + folio_nr_pages(folio);
}
@@ -957,7 +957,7 @@ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
* e.g., shmem did not move this folio to the swap cache.
* Return: true or false.
*/
-static inline bool folio_contains(struct folio *folio, pgoff_t index)
+static inline bool folio_contains(const struct folio *folio, pgoff_t index)
{
VM_WARN_ON_ONCE_FOLIO(folio_test_swapcache(folio), folio);
return index - folio->index < folio_nr_pages(folio);
@@ -1034,12 +1034,12 @@ static inline loff_t page_offset(struct page *page)
/*
* Get the offset in PAGE_SIZE (even for hugetlb folios).
*/
-static inline pgoff_t folio_pgoff(struct folio *folio)
+static inline pgoff_t folio_pgoff(const struct folio *folio)
{
return folio->index;
}
-static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
+static inline pgoff_t linear_page_index(const struct vm_area_struct *vma,
unsigned long address)
{
pgoff_t pgoff;
@@ -1460,7 +1460,7 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
* readahead_pos - The byte offset into the file of this readahead request.
* @rac: The readahead request.
*/
-static inline loff_t readahead_pos(struct readahead_control *rac)
+static inline loff_t readahead_pos(const struct readahead_control *rac)
{
return (loff_t)rac->_index * PAGE_SIZE;
}
@@ -1469,7 +1469,7 @@ static inline loff_t readahead_pos(struct readahead_control *rac)
* readahead_length - The number of bytes in this readahead request.
* @rac: The readahead request.
*/
-static inline size_t readahead_length(struct readahead_control *rac)
+static inline size_t readahead_length(const struct readahead_control *rac)
{
return rac->_nr_pages * PAGE_SIZE;
}
@@ -1478,7 +1478,7 @@ static inline size_t readahead_length(struct readahead_control *rac)
* readahead_index - The index of the first page in this readahead request.
* @rac: The readahead request.
*/
-static inline pgoff_t readahead_index(struct readahead_control *rac)
+static inline pgoff_t readahead_index(const struct readahead_control *rac)
{
return rac->_index;
}
@@ -1487,7 +1487,7 @@ static inline pgoff_t readahead_index(struct readahead_control *rac)
* readahead_count - The number of pages in this readahead request.
* @rac: The readahead request.
*/
-static inline unsigned int readahead_count(struct readahead_control *rac)
+static inline unsigned int readahead_count(const struct readahead_control *rac)
{
return rac->_nr_pages;
}
@@ -1496,12 +1496,12 @@ static inline unsigned int readahead_count(struct readahead_control *rac)
* readahead_batch_length - The number of bytes in the current batch.
* @rac: The readahead request.
*/
-static inline size_t readahead_batch_length(struct readahead_control *rac)
+static inline size_t readahead_batch_length(const struct readahead_control *rac)
{
return rac->_batch_count * PAGE_SIZE;
}
-static inline unsigned long dir_pages(struct inode *inode)
+static inline unsigned long dir_pages(const struct inode *inode)
{
return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
PAGE_SHIFT;
@@ -1515,8 +1515,8 @@ static inline unsigned long dir_pages(struct inode *inode)
* Return: the number of bytes in the folio up to EOF,
* or -EFAULT if the folio was truncated.
*/
-static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
- struct inode *inode)
+static inline ssize_t folio_mkwrite_check_truncate(const struct folio *folio,
+ const struct inode *inode)
{
loff_t size = i_size_read(inode);
pgoff_t index = size >> PAGE_SHIFT;
@@ -1547,7 +1547,7 @@ static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
* Return: The number of filesystem blocks covered by this folio.
*/
static inline
-unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
+unsigned int i_blocks_per_folio(const struct inode *inode, const struct folio *folio)
{
return folio_size(folio) >> inode->i_blkbits;
}
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 5d3a0cccc6bf..63be5a451627 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -51,12 +51,12 @@ static inline void folio_batch_reinit(struct folio_batch *fbatch)
fbatch->i = 0;
}
-static inline unsigned int folio_batch_count(struct folio_batch *fbatch)
+static inline unsigned int folio_batch_count(const struct folio_batch *fbatch)
{
return fbatch->nr;
}
-static inline unsigned int folio_batch_space(struct folio_batch *fbatch)
+static inline unsigned int folio_batch_space(const struct folio_batch *fbatch)
{
return PAGEVEC_SIZE - fbatch->nr;
}
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 6d0f9c599ff7..0e47465ef0fd 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -99,9 +99,9 @@ extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
extern int shmem_lock(struct file *file, int lock, struct ucounts *ucounts);
#ifdef CONFIG_SHMEM
-bool shmem_mapping(struct address_space *mapping);
+bool shmem_mapping(const struct address_space *mapping);
#else
-static inline bool shmem_mapping(struct address_space *mapping)
+static inline bool shmem_mapping(const struct address_space *mapping)
{
return false;
}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 25923cfec9c6..0390e4f55ebe 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -490,7 +490,7 @@ static bool oom_killer_disabled __read_mostly;
* task's threads: if one of those is using this mm then this task was also
* using it.
*/
-bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
+bool process_shares_mm(struct task_struct *p, const struct mm_struct *mm)
{
struct task_struct *t;
diff --git a/mm/shmem.c b/mm/shmem.c
index e2c76a30802b..91abac780fe6 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -275,18 +275,18 @@ static const struct vm_operations_struct shmem_vm_ops;
static const struct vm_operations_struct shmem_anon_vm_ops;
static struct file_system_type shmem_fs_type;
-bool shmem_mapping(struct address_space *mapping)
+bool shmem_mapping(const struct address_space *mapping)
{
return mapping->a_ops == &shmem_aops;
}
EXPORT_SYMBOL_GPL(shmem_mapping);
-bool vma_is_anon_shmem(struct vm_area_struct *vma)
+bool vma_is_anon_shmem(const struct vm_area_struct *vma)
{
return vma->vm_ops == &shmem_anon_vm_ops;
}
-bool vma_is_shmem(struct vm_area_struct *vma)
+bool vma_is_shmem(const struct vm_area_struct *vma)
{
return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
}
diff --git a/mm/util.c b/mm/util.c
index f814e6a59ab1..49ba90c4f763 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -315,7 +315,7 @@ void *memdup_user_nul(const void __user *src, size_t len)
EXPORT_SYMBOL(memdup_user_nul);
/* Check if the vma is being used as a stack by this task */
-int vma_is_stack_for_current(struct vm_area_struct *vma)
+int vma_is_stack_for_current(const struct vm_area_struct *vma)
{
struct task_struct * __maybe_unused t = current;
--
2.47.2
* Re: [PATCH] mm: add `const` to lots of pointer parameters
2025-08-27 19:22 [PATCH] mm: add `const` to lots of pointer parameters Max Kellermann
@ 2025-08-27 21:48 ` Andrew Morton
2025-08-28 1:48 ` Liam R. Howlett
2025-08-28 12:09 ` Lorenzo Stoakes
2025-08-28 12:24 ` Lorenzo Stoakes
2025-08-28 12:55 ` Lorenzo Stoakes
2 siblings, 2 replies; 16+ messages in thread
From: Andrew Morton @ 2025-08-27 21:48 UTC (permalink / raw)
To: Max Kellermann
Cc: david, lorenzo.stoakes, Liam.Howlett, vbabka, rppt, surenb,
mhocko, linux-mm, linux-kernel
On Wed, 27 Aug 2025 21:22:33 +0200 Max Kellermann <max.kellermann@ionos.com> wrote:
> For improved const-correctness.
OK...
>
> --- a/include/linux/fs.h
> +++ b/include/linux/fs.h
> @@ -537,7 +537,7 @@ struct address_space {
> /*
> * Returns true if any of the pages in the mapping are marked with the tag.
> */
> -static inline bool mapping_tagged(struct address_space *mapping, xa_mark_t tag)
> +static inline bool mapping_tagged(const struct address_space *mapping, xa_mark_t tag)
> {
> return xa_marked(&mapping->i_pages, tag);
> }
I'd actually be in favor of making all incoming args const (C should
have made this the default).
Because modifying an incoming arg is just obnoxious. That value should
be viewed as part of the calling environment and should not be altered.
Try modifying a lengthy function and wanting to get at an incoming arg
only to find that something in the preceding 100 lines has gone and
messed with it. Or forget to check for this and get a nasty surprise when
testing.
Not that I'm suggesting that someone go in and make this change.
On the other hand, it would be neat if gcc had an option to warn when
someone does this. I bet it would be simple to add.
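(A minimal sketch of the failure mode described above, with made-up
functions; the second variant shows what "all incoming args const"
would enforce:)

static unsigned long sum_first(const unsigned long *p, unsigned long n)
{
	unsigned long sum = 0;

	while (n--)		/* 'n' is silently consumed here... */
		sum += *p++;	/* ...and so is 'p' */

	/* 100 lines later, 'n' and 'p' no longer mean what they did. */
	return sum;
}

static unsigned long sum_first_const(const unsigned long *const p,
				     const unsigned long n)
{
	unsigned long sum = 0;
	unsigned long i;

	for (i = 0; i < n; i++)	/* explicit scratch; the args stay intact */
		sum += p[i];

	return sum;
}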
* Re: [PATCH] mm: add `const` to lots of pointer parameters
2025-08-27 21:48 ` Andrew Morton
@ 2025-08-28 1:48 ` Liam R. Howlett
2025-08-28 2:47 ` Andrew Morton
2025-08-28 12:09 ` Lorenzo Stoakes
1 sibling, 1 reply; 16+ messages in thread
From: Liam R. Howlett @ 2025-08-28 1:48 UTC (permalink / raw)
To: Andrew Morton
Cc: Max Kellermann, david, lorenzo.stoakes, vbabka, rppt, surenb,
mhocko, linux-mm, linux-kernel
* Andrew Morton <akpm@linux-foundation.org> [250827 17:48]:
> On Wed, 27 Aug 2025 21:22:33 +0200 Max Kellermann <max.kellermann@ionos.com> wrote:
>
> > For improved const-correctness.
>
> OK...
>
> >
> > --- a/include/linux/fs.h
> > +++ b/include/linux/fs.h
> > @@ -537,7 +537,7 @@ struct address_space {
> > /*
> > * Returns true if any of the pages in the mapping are marked with the tag.
> > */
> > -static inline bool mapping_tagged(struct address_space *mapping, xa_mark_t tag)
> > +static inline bool mapping_tagged(const struct address_space *mapping, xa_mark_t tag)
> > {
> > return xa_marked(&mapping->i_pages, tag);
> > }
>
> I'd actually be in favor of making all incoming args const (C should
> have made this the default).
>
> Because modifying an incoming arg is just obnoxious. That value should
> be viewed as part of the calling environment and should not be altered.
>
> Try modifying a lengthy function and wanting to get at an incoming arg
> only to find that something in the preceding 100 lines has gone and
> messed with it. Or forget to check for this and get a nasty surprise when
> testing.
>
> Not that I'm suggesting that someone go in and make this change.
>
> On the other hand, it would be neat if gcc had an option to warn when
> someone does this. I bet it would be simple to add.
But what of the lazy programmer who just uses the variable at hand
instead of declaring another one?
I also wonder how this would affect inlining choices?
* Re: [PATCH] mm: add `const` to lots of pointer parameters
2025-08-28 1:48 ` Liam R. Howlett
@ 2025-08-28 2:47 ` Andrew Morton
0 siblings, 0 replies; 16+ messages in thread
From: Andrew Morton @ 2025-08-28 2:47 UTC (permalink / raw)
To: Liam R. Howlett
Cc: Max Kellermann, david, lorenzo.stoakes, vbabka, rppt, surenb,
mhocko, linux-mm, linux-kernel
On Wed, 27 Aug 2025 21:48:21 -0400 "Liam R. Howlett" <Liam.Howlett@oracle.com> wrote:
> > I'd actually be in favor of making all incoming args const (C should
> > have made this the default).
> >
> > Because modifying an incoming arg is just obnoxious. That value should
> > be viewed as part of the calling environment and should not be altered.
> >
> > Try modifying a lengthy function and wanting to get at an incoming arg
> > only to find that something in the preceding 100 lines has gone and
> > messed with it. Or forget to check for this and get a nasty surprise when
> > testing.
> >
> > Not that I'm suggesting that someone go in and make this change.
> >
> > On the other hand, it would be neat if gcc had an option to warn when
> > someone does this. I bet it would be simple to add.
>
> But what of the lazy programmer that just uses the variable at hand
> instead of declaring another one?
Sure, I'd say that's OK for a small function (a home-made strcpy), but for
a larger function, expecting other developers to read back through
everything to make sure that Nobody Did That is way unacceptable. It's
laying a trap.
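(For illustration, the "home-made strcpy" case conceded above, as a
hypothetical example where mutating the incoming args is visible at a
glance:)

static char *my_strcpy(char *dst, const char *src)
{
	char *ret = dst;

	while ((*dst++ = *src++) != '\0')
		;	/* both parameters are consumed, but the whole
			 * function fits on one screen, so no trap is laid */
	return ret;
}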
> I also wonder how this would affect inlining choices?
I'd be surprised if the compiler didn't trivially optimize through such
things.
* Re: [PATCH] mm: add `const` to lots of pointer parameters
2025-08-27 21:48 ` Andrew Morton
2025-08-28 1:48 ` Liam R. Howlett
@ 2025-08-28 12:09 ` Lorenzo Stoakes
2025-08-28 12:16 ` Max Kellermann
1 sibling, 1 reply; 16+ messages in thread
From: Lorenzo Stoakes @ 2025-08-28 12:09 UTC (permalink / raw)
To: Andrew Morton
Cc: Max Kellermann, david, Liam.Howlett, vbabka, rppt, surenb, mhocko,
linux-mm, linux-kernel
On Wed, Aug 27, 2025 at 02:48:32PM -0700, Andrew Morton wrote:
> On Wed, 27 Aug 2025 21:22:33 +0200 Max Kellermann <max.kellermann@ionos.com> wrote:
>
> > For improved const-correctness.
>
> OK...
>
> >
> > --- a/include/linux/fs.h
> > +++ b/include/linux/fs.h
> > @@ -537,7 +537,7 @@ struct address_space {
> > /*
> > * Returns true if any of the pages in the mapping are marked with the tag.
> > */
> > -static inline bool mapping_tagged(struct address_space *mapping, xa_mark_t tag)
> > +static inline bool mapping_tagged(const struct address_space *mapping, xa_mark_t tag)
> > {
> > return xa_marked(&mapping->i_pages, tag);
> > }
>
> I'd actually be in favor of making all incoming args const (C should
> have made this the default).
>
> Because modifying an incoming arg is just obnoxious. That value should
> be viewed as part of the calling environment and should not be altered.
>
> Try modifying a lengthy function and wanting to get at an incoming arg
> only to find that something in the preceding 100 lines has gone and
> messed with it. Or forget to check for this and get a nasty surprise when
> testing.
>
> Not that I'm suggesting that someone go in and make this change.
>
> On the other hand, it would be neat if gcc had an option to warn when
> someone does this. I bet it would be simple to add.
This would break half the world. I've tried making parameters const, only
to find that basically every helper function won't accept them, and had to
give up rather than change half the code base.
It's kind of a dominoes thing at this point.
Broadly I'm in agreement that 'const by default' is good, but there's quite
a big price to pay for it in terms of the changes needed, for really quite
dubious gains.
I think perhaps 'const where we can, moving forward' is better.
Also note that C 'const' is _extremely_ weak, so it's more of a 'kinda
readonly'.
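(A minimal sketch of that weakness, using a made-up struct: the
qualifier is shallow, and a cast silently defeats it:)

struct foo {
	int x;
	int *p;
};

static void poke(const struct foo *f)
{
	/* f->x = 1; - rejected by the compiler... */
	((struct foo *)f)->x = 1;	/* ...but a cast defeats it silently */
	*f->p = 2;			/* and the pointee was never const */
}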
* Re: [PATCH] mm: add `const` to lots of pointer parameters
2025-08-28 12:09 ` Lorenzo Stoakes
@ 2025-08-28 12:16 ` Max Kellermann
2025-08-28 12:35 ` Lorenzo Stoakes
0 siblings, 1 reply; 16+ messages in thread
From: Max Kellermann @ 2025-08-28 12:16 UTC (permalink / raw)
To: Lorenzo Stoakes
Cc: Andrew Morton, david, Liam.Howlett, vbabka, rppt, surenb, mhocko,
linux-mm, linux-kernel
On Thu, Aug 28, 2025 at 2:09 PM Lorenzo Stoakes
<lorenzo.stoakes@oracle.com> wrote:
> > I'd actually be in favor of making all incoming args const (C should
> > have made this the default).
> >
> > Because modifying an incoming arg is just obnoxious. That value should
> > be viewed as part of the calling environment and should not be altered.
> >
> > Try modifying a lengthy function and wanting to get at an incoming arg
> > only to find that something in the preceding 100 lines has gone and
> > messed with it. Or forget to check for this and get a nasty surprise when
> > testing.
> >
> > Not that I'm suggesting that someone go in and make this change.
> >
> > On the other hand, it would be neat if gcc had an option to warn when
> > someone does this. I bet it would be simple to add.
>
> This would break half the world. I've tried making parameters const, only
> to find that basically every helper function won't accept them, and had to
> give up rather than change half the code base.
Are you guys talking about making parameter values const (i.e. pointer
addresses if the parameter is a pointer) or making pointed-to values
const?
It seemed Andrew meant the former, but my patch was about the latter.
Yes, making parameter values (not pointed-to values) const by default
would be a good choice, but we're 50 years late for that, and
unfortunately, C's syntax for pointers with a constant address is
awkwardly confusing. But adding "const" to parameter values (not
pointed-to values) should not break anything (unless the function really
changes parameter values - there may be good or bad reasons to do
that).
Making more pointed-to values const will break a lot, yes, because
this needs const-correctness in the whole callee space. This patch
here tries to do this for a low-level part of the kernel, to enable it
in higher-level parts. Long way to go.
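(A sketch of the two readings; vm_start/vm_end are real VMA fields, the
helpers themselves are made up:)

/*
 * 1. const parameter *value* (the former): the pointer itself cannot
 *    be reassigned inside the function, but the VMA it points to is
 *    still writable.  Callers never notice, so nothing can break.
 */
static unsigned long vma_size_a(struct vm_area_struct *const vma)
{
	/* vma = NULL; - would not compile */
	return vma->vm_end - vma->vm_start;
}

/*
 * 2. const *pointed-to* value (the latter, what this patch adds): the
 *    callee promises not to modify the VMA, and so must every helper
 *    it passes the pointer to - hence const-correctness is needed in
 *    the whole callee space.
 */
static unsigned long vma_size_b(const struct vm_area_struct *vma)
{
	/* vma->vm_end = 0; - would not compile */
	return vma->vm_end - vma->vm_start;
}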
Max
* Re: [PATCH] mm: add `const` to lots of pointer parameters
2025-08-27 19:22 [PATCH] mm: add `const` to lots of pointer parameters Max Kellermann
2025-08-27 21:48 ` Andrew Morton
@ 2025-08-28 12:24 ` Lorenzo Stoakes
2025-08-28 12:28 ` David Hildenbrand
2025-08-28 12:55 ` Lorenzo Stoakes
2 siblings, 1 reply; 16+ messages in thread
From: Lorenzo Stoakes @ 2025-08-28 12:24 UTC (permalink / raw)
To: Max Kellermann
Cc: akpm, david, Liam.Howlett, vbabka, rppt, surenb, mhocko, linux-mm,
linux-kernel
You've not cc'd the right people here.
Please run scripts/get_maintainers.pl on your patch series and cc all
maintainers and reviewers on every series, as well as all of the mailing
lists referenced.
For this change you needed to CC:
Alexander Viro <viro@zeniv.linux.org.uk> (maintainer:FILESYSTEMS (VFS and infrastructure))
Christian Brauner <brauner@kernel.org> (maintainer:FILESYSTEMS (VFS and infrastructure))
Jan Kara <jack@suse.cz> (reviewer:FILESYSTEMS (VFS and infrastructure))
Andrew Morton <akpm@linux-foundation.org> (maintainer:MEMORY MANAGEMENT - CORE,commit_signer:1/1=100%)
David Hildenbrand <david@redhat.com> (maintainer:MEMORY MANAGEMENT - CORE)
Lorenzo Stoakes <lorenzo.stoakes@oracle.com> (reviewer:MEMORY MANAGEMENT - CORE)
"Liam R. Howlett" <Liam.Howlett@oracle.com> (reviewer:MEMORY MANAGEMENT - CORE)
Vlastimil Babka <vbabka@suse.cz> (reviewer:MEMORY MANAGEMENT - CORE)
Mike Rapoport <rppt@kernel.org> (reviewer:MEMORY MANAGEMENT - CORE)
Suren Baghdasaryan <surenb@google.com> (reviewer:MEMORY MANAGEMENT - CORE)
Michal Hocko <mhocko@suse.com> (reviewer:MEMORY MANAGEMENT - CORE)
Axel Rasmussen <axelrasmussen@google.com> (maintainer:MEMORY MANAGEMENT - MGLRU (MULTI-GEN LRU))
Yuanchu Xie <yuanchu@google.com> (maintainer:MEMORY MANAGEMENT - MGLRU (MULTI-GEN LRU))
Wei Xu <weixugc@google.com> (reviewer:MEMORY MANAGEMENT - MGLRU (MULTI-GEN LRU))
"Matthew Wilcox (Oracle)" <willy@infradead.org> (maintainer:PAGE CACHE)
Hugh Dickins <hughd@google.com> (maintainer:TMPFS (SHMEM FILESYSTEM))
Baolin Wang <baolin.wang@linux.alibaba.com> (reviewer:TMPFS (SHMEM FILESYSTEM))
David Rientjes <rientjes@google.com> (reviewer:MEMORY MANAGEMENT - OOM KILLER)
Shakeel Butt <shakeel.butt@linux.dev> (reviewer:MEMORY MANAGEMENT - OOM KILLER)
linux-fsdevel@vger.kernel.org (open list:FILESYSTEMS (VFS and infrastructure))
linux-kernel@vger.kernel.org (open list)
linux-mm@kvack.org (open list:MEMORY MANAGEMENT - CORE)
Usually I'd cc for you here but that's too many :)
On Wed, Aug 27, 2025 at 09:22:33PM +0200, Max Kellermann wrote:
> For improved const-correctness.
'const-correctness' in C is extremely weak.
>
> Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
It feels like this is a bit of an arbitrary subset, and this churn is going
to make merge conflicts much more likely.
I mean, if it's just that all these functions accept a non-const VMA and
most of the functions called _from_ them accept const - it's not really
accomplishing much other than adding a bunch of noise.
Any meaningful change would require a pretty huge amount of churn that
would be horrendous git-blame-, merge-conflict-, etc.-wise, for really not
a huge amount of benefit.
I feel like the ship has sailed on this, and I really question the value
of doing it this way.
> ---
> include/linux/fs.h | 6 ++---
> include/linux/mm.h | 42 +++++++++++++++----------------
> include/linux/mm_inline.h | 24 +++++++++---------
> include/linux/mm_types.h | 4 +--
> include/linux/mmzone.h | 42 +++++++++++++++----------------
> include/linux/pagemap.h | 52 +++++++++++++++++++--------------------
> include/linux/pagevec.h | 4 +--
> include/linux/shmem_fs.h | 4 +--
> mm/oom_kill.c | 2 +-
> mm/shmem.c | 6 ++---
> mm/util.c | 2 +-
> 11 files changed, 94 insertions(+), 94 deletions(-)
>
> diff --git a/include/linux/fs.h b/include/linux/fs.h
> index d7ab4f96d705..0783c5d05d3f 100644
> --- a/include/linux/fs.h
> +++ b/include/linux/fs.h
> @@ -537,7 +537,7 @@ struct address_space {
> /*
> * Returns true if any of the pages in the mapping are marked with the tag.
> */
> -static inline bool mapping_tagged(struct address_space *mapping, xa_mark_t tag)
> +static inline bool mapping_tagged(const struct address_space *mapping, xa_mark_t tag)
> {
> return xa_marked(&mapping->i_pages, tag);
> }
> @@ -585,7 +585,7 @@ static inline void i_mmap_assert_write_locked(struct address_space *mapping)
> /*
> * Might pages of this file be mapped into userspace?
> */
> -static inline int mapping_mapped(struct address_space *mapping)
> +static inline int mapping_mapped(const struct address_space *mapping)
> {
> return !RB_EMPTY_ROOT(&mapping->i_mmap.rb_root);
> }
> @@ -599,7 +599,7 @@ static inline int mapping_mapped(struct address_space *mapping)
> * If i_mmap_writable is negative, no new writable mappings are allowed. You
> * can only deny writable mappings, if none exists right now.
> */
> -static inline int mapping_writably_mapped(struct address_space *mapping)
> +static inline int mapping_writably_mapped(const struct address_space *mapping)
> {
> return atomic_read(&mapping->i_mmap_writable) > 0;
> }
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 1ae97a0b8ec7..ad478a4afa6c 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -697,7 +697,7 @@ static inline void release_fault_lock(struct vm_fault *vmf)
> mmap_read_unlock(vmf->vma->vm_mm);
> }
>
> -static inline void assert_fault_locked(struct vm_fault *vmf)
> +static inline void assert_fault_locked(const struct vm_fault *vmf)
> {
> mmap_assert_locked(vmf->vma->vm_mm);
> }
> @@ -810,7 +810,7 @@ static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
> vma->vm_end >= vma->vm_mm->start_stack;
> }
>
> -static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
> +static inline bool vma_is_temporary_stack(const struct vm_area_struct *vma)
> {
> int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
>
> @@ -824,7 +824,7 @@ static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
> return false;
> }
>
> -static inline bool vma_is_foreign(struct vm_area_struct *vma)
> +static inline bool vma_is_foreign(const struct vm_area_struct *vma)
> {
> if (!current->mm)
> return true;
> @@ -835,7 +835,7 @@ static inline bool vma_is_foreign(struct vm_area_struct *vma)
> return false;
> }
>
> -static inline bool vma_is_accessible(struct vm_area_struct *vma)
> +static inline bool vma_is_accessible(const struct vm_area_struct *vma)
> {
> return vma->vm_flags & VM_ACCESS_FLAGS;
> }
> @@ -846,7 +846,7 @@ static inline bool is_shared_maywrite(vm_flags_t vm_flags)
> (VM_SHARED | VM_MAYWRITE);
> }
>
> -static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
> +static inline bool vma_is_shared_maywrite(const struct vm_area_struct *vma)
> {
> return is_shared_maywrite(vma->vm_flags);
> }
> @@ -930,14 +930,14 @@ static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
> * The vma_is_shmem is not inline because it is used only by slow
> * paths in userfault.
> */
> -bool vma_is_shmem(struct vm_area_struct *vma);
> -bool vma_is_anon_shmem(struct vm_area_struct *vma);
> +bool vma_is_shmem(const struct vm_area_struct *vma);
> +bool vma_is_anon_shmem(const struct vm_area_struct *vma);
> #else
> -static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
> -static inline bool vma_is_anon_shmem(struct vm_area_struct *vma) { return false; }
> +static inline bool vma_is_shmem(const struct vm_area_struct *vma) { return false; }
> +static inline bool vma_is_anon_shmem(const struct vm_area_struct *vma) { return false; }
> #endif
>
> -int vma_is_stack_for_current(struct vm_area_struct *vma);
> +int vma_is_stack_for_current(const struct vm_area_struct *vma);
>
> /* flush_tlb_range() takes a vma, not a mm, and can care about flags */
> #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
> @@ -3432,7 +3432,7 @@ struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
> return mtree_load(&mm->mm_mt, addr);
> }
>
> -static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
> +static inline unsigned long stack_guard_start_gap(const struct vm_area_struct *vma)
> {
> if (vma->vm_flags & VM_GROWSDOWN)
> return stack_guard_gap;
> @@ -3444,7 +3444,7 @@ static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
> return 0;
> }
>
> -static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
> +static inline unsigned long vm_start_gap(const struct vm_area_struct *vma)
> {
> unsigned long gap = stack_guard_start_gap(vma);
> unsigned long vm_start = vma->vm_start;
> @@ -3455,7 +3455,7 @@ static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
> return vm_start;
> }
>
> -static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
> +static inline unsigned long vm_end_gap(const struct vm_area_struct *vma)
> {
> unsigned long vm_end = vma->vm_end;
>
> @@ -3467,7 +3467,7 @@ static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
> return vm_end;
> }
>
> -static inline unsigned long vma_pages(struct vm_area_struct *vma)
> +static inline unsigned long vma_pages(const struct vm_area_struct *vma)
> {
> return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
> }
> @@ -3484,7 +3484,7 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
> return vma;
> }
>
> -static inline bool range_in_vma(struct vm_area_struct *vma,
> +static inline bool range_in_vma(const struct vm_area_struct *vma,
> unsigned long start, unsigned long end)
> {
> return (vma && vma->vm_start <= start && end <= vma->vm_end);
> @@ -3600,7 +3600,7 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
> * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
> * a (NUMA hinting) fault is required.
> */
> -static inline bool gup_can_follow_protnone(struct vm_area_struct *vma,
> +static inline bool gup_can_follow_protnone(const struct vm_area_struct *vma,
> unsigned int flags)
> {
> /*
> @@ -3730,7 +3730,7 @@ static inline bool debug_guardpage_enabled(void)
> return static_branch_unlikely(&_debug_guardpage_enabled);
> }
>
> -static inline bool page_is_guard(struct page *page)
> +static inline bool page_is_guard(const struct page *page)
> {
> if (!debug_guardpage_enabled())
> return false;
> @@ -3761,7 +3761,7 @@ static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
> static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
> static inline unsigned int debug_guardpage_minorder(void) { return 0; }
> static inline bool debug_guardpage_enabled(void) { return false; }
> -static inline bool page_is_guard(struct page *page) { return false; }
> +static inline bool page_is_guard(const struct page *page) { return false; }
> static inline bool set_page_guard(struct zone *zone, struct page *page,
> unsigned int order) { return false; }
> static inline void clear_page_guard(struct zone *zone, struct page *page,
> @@ -3784,7 +3784,7 @@ static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
> }
> #endif /* __HAVE_ARCH_GATE_AREA */
>
> -extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
> +extern bool process_shares_mm(struct task_struct *p, const struct mm_struct *mm);
>
> void drop_slab(void);
>
> @@ -3843,7 +3843,7 @@ void vmemmap_free(unsigned long start, unsigned long end,
> #endif
>
> #ifdef CONFIG_SPARSEMEM_VMEMMAP
> -static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
> +static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
> {
> /* number of pfns from base where pfn_to_page() is valid */
> if (altmap)
> @@ -3857,7 +3857,7 @@ static inline void vmem_altmap_free(struct vmem_altmap *altmap,
> altmap->alloc -= nr_pfns;
> }
> #else
> -static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
> +static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
> {
> return 0;
> }
> diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
> index 89b518ff097e..c91fa1de51fd 100644
> --- a/include/linux/mm_inline.h
> +++ b/include/linux/mm_inline.h
> @@ -25,7 +25,7 @@
> * 0 if @folio is a normal anonymous folio, a tmpfs folio or otherwise
> * ram or swap backed folio.
> */
> -static inline int folio_is_file_lru(struct folio *folio)
> +static inline int folio_is_file_lru(const struct folio *folio)
> {
> return !folio_test_swapbacked(folio);
> }
> @@ -84,7 +84,7 @@ static __always_inline void __folio_clear_lru_flags(struct folio *folio)
> * Return: The LRU list a folio should be on, as an index
> * into the array of LRU lists.
> */
> -static __always_inline enum lru_list folio_lru_list(struct folio *folio)
> +static __always_inline enum lru_list folio_lru_list(const struct folio *folio)
> {
> enum lru_list lru;
>
> @@ -141,7 +141,7 @@ static inline int lru_tier_from_refs(int refs, bool workingset)
> return workingset ? MAX_NR_TIERS - 1 : order_base_2(refs);
> }
>
> -static inline int folio_lru_refs(struct folio *folio)
> +static inline int folio_lru_refs(const struct folio *folio)
> {
> unsigned long flags = READ_ONCE(folio->flags);
>
> @@ -154,14 +154,14 @@ static inline int folio_lru_refs(struct folio *folio)
> return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + 1;
> }
>
> -static inline int folio_lru_gen(struct folio *folio)
> +static inline int folio_lru_gen(const struct folio *folio)
> {
> unsigned long flags = READ_ONCE(folio->flags);
>
> return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
> }
>
> -static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
> +static inline bool lru_gen_is_active(const struct lruvec *lruvec, int gen)
> {
> unsigned long max_seq = lruvec->lrugen.max_seq;
>
> @@ -217,12 +217,12 @@ static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *foli
> VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
> }
>
> -static inline unsigned long lru_gen_folio_seq(struct lruvec *lruvec, struct folio *folio,
> +static inline unsigned long lru_gen_folio_seq(const struct lruvec *lruvec, const struct folio *folio,
> bool reclaiming)
> {
> int gen;
> int type = folio_is_file_lru(folio);
> - struct lru_gen_folio *lrugen = &lruvec->lrugen;
> + const struct lru_gen_folio *lrugen = &lruvec->lrugen;
>
> /*
> * +-----------------------------------+-----------------------------------+
> @@ -302,7 +302,7 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
> return true;
> }
>
> -static inline void folio_migrate_refs(struct folio *new, struct folio *old)
> +static inline void folio_migrate_refs(struct folio *new, const struct folio *old)
> {
> unsigned long refs = READ_ONCE(old->flags) & LRU_REFS_MASK;
>
> @@ -330,7 +330,7 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
> return false;
> }
>
> -static inline void folio_migrate_refs(struct folio *new, struct folio *old)
> +static inline void folio_migrate_refs(struct folio *new, const struct folio *old)
> {
>
> }
> @@ -508,7 +508,7 @@ static inline void dec_tlb_flush_pending(struct mm_struct *mm)
> atomic_dec(&mm->tlb_flush_pending);
> }
>
> -static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
> +static inline bool mm_tlb_flush_pending(const struct mm_struct *mm)
> {
> /*
> * Must be called after having acquired the PTL; orders against that
> @@ -521,7 +521,7 @@ static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
> return atomic_read(&mm->tlb_flush_pending);
> }
>
> -static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
> +static inline bool mm_tlb_flush_nested(const struct mm_struct *mm)
> {
> /*
> * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
> @@ -605,7 +605,7 @@ pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
> return false;
> }
>
> -static inline bool vma_has_recency(struct vm_area_struct *vma)
> +static inline bool vma_has_recency(const struct vm_area_struct *vma)
> {
> if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
> return false;
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 08bc2442db93..56766e498ee1 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -627,7 +627,7 @@ static inline void ptdesc_pmd_pts_dec(struct ptdesc *ptdesc)
> atomic_dec(&ptdesc->pt_share_count);
> }
>
> -static inline int ptdesc_pmd_pts_count(struct ptdesc *ptdesc)
> +static inline int ptdesc_pmd_pts_count(const struct ptdesc *ptdesc)
> {
> return atomic_read(&ptdesc->pt_share_count);
> }
> @@ -655,7 +655,7 @@ static inline void set_page_private(struct page *page, unsigned long private)
> page->private = private;
> }
>
> -static inline void *folio_get_private(struct folio *folio)
> +static inline void *folio_get_private(const struct folio *folio)
> {
> return folio->private;
> }
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index 0c5da9141983..cef949d6ba82 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -1089,7 +1089,7 @@ static inline unsigned long promo_wmark_pages(const struct zone *z)
> return wmark_pages(z, WMARK_PROMO);
> }
>
> -static inline unsigned long zone_managed_pages(struct zone *zone)
> +static inline unsigned long zone_managed_pages(const struct zone *zone)
> {
> return (unsigned long)atomic_long_read(&zone->managed_pages);
> }
> @@ -1113,12 +1113,12 @@ static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
> return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
> }
>
> -static inline bool zone_is_initialized(struct zone *zone)
> +static inline bool zone_is_initialized(const struct zone *zone)
> {
> return zone->initialized;
> }
>
> -static inline bool zone_is_empty(struct zone *zone)
> +static inline bool zone_is_empty(const struct zone *zone)
> {
> return zone->spanned_pages == 0;
> }
> @@ -1248,7 +1248,7 @@ static inline bool folio_is_zone_movable(const struct folio *folio)
> * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
> * intersection with the given zone
> */
> -static inline bool zone_intersects(struct zone *zone,
> +static inline bool zone_intersects(const struct zone *zone,
> unsigned long start_pfn, unsigned long nr_pages)
> {
> if (zone_is_empty(zone))
> @@ -1556,12 +1556,12 @@ static inline int local_memory_node(int node_id) { return node_id; };
> #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
>
> #ifdef CONFIG_ZONE_DEVICE
> -static inline bool zone_is_zone_device(struct zone *zone)
> +static inline bool zone_is_zone_device(const struct zone *zone)
> {
> return zone_idx(zone) == ZONE_DEVICE;
> }
> #else
> -static inline bool zone_is_zone_device(struct zone *zone)
> +static inline bool zone_is_zone_device(const struct zone *zone)
> {
> return false;
> }
> @@ -1573,19 +1573,19 @@ static inline bool zone_is_zone_device(struct zone *zone)
> * populated_zone(). If the whole zone is reserved then we can easily
> * end up with populated_zone() && !managed_zone().
> */
> -static inline bool managed_zone(struct zone *zone)
> +static inline bool managed_zone(const struct zone *zone)
> {
> return zone_managed_pages(zone);
> }
>
> /* Returns true if a zone has memory */
> -static inline bool populated_zone(struct zone *zone)
> +static inline bool populated_zone(const struct zone *zone)
> {
> return zone->present_pages;
> }
>
> #ifdef CONFIG_NUMA
> -static inline int zone_to_nid(struct zone *zone)
> +static inline int zone_to_nid(const struct zone *zone)
> {
> return zone->node;
> }
> @@ -1595,7 +1595,7 @@ static inline void zone_set_nid(struct zone *zone, int nid)
> zone->node = nid;
> }
> #else
> -static inline int zone_to_nid(struct zone *zone)
> +static inline int zone_to_nid(const struct zone *zone)
> {
> return 0;
> }
> @@ -1622,7 +1622,7 @@ static inline int is_highmem_idx(enum zone_type idx)
> * @zone: pointer to struct zone variable
> * Return: 1 for a highmem zone, 0 otherwise
> */
> -static inline int is_highmem(struct zone *zone)
> +static inline int is_highmem(const struct zone *zone)
> {
> return is_highmem_idx(zone_idx(zone));
> }
> @@ -1688,12 +1688,12 @@ static inline struct zone *zonelist_zone(struct zoneref *zoneref)
> return zoneref->zone;
> }
>
> -static inline int zonelist_zone_idx(struct zoneref *zoneref)
> +static inline int zonelist_zone_idx(const struct zoneref *zoneref)
> {
> return zoneref->zone_idx;
> }
>
> -static inline int zonelist_node_idx(struct zoneref *zoneref)
> +static inline int zonelist_node_idx(const struct zoneref *zoneref)
> {
> return zone_to_nid(zoneref->zone);
> }
> @@ -1996,7 +1996,7 @@ static inline struct page *__section_mem_map_addr(struct mem_section *section)
> return (struct page *)map;
> }
>
> -static inline int present_section(struct mem_section *section)
> +static inline int present_section(const struct mem_section *section)
> {
> return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
> }
> @@ -2006,12 +2006,12 @@ static inline int present_section_nr(unsigned long nr)
> return present_section(__nr_to_section(nr));
> }
>
> -static inline int valid_section(struct mem_section *section)
> +static inline int valid_section(const struct mem_section *section)
> {
> return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
> }
>
> -static inline int early_section(struct mem_section *section)
> +static inline int early_section(const struct mem_section *section)
> {
> return (section && (section->section_mem_map & SECTION_IS_EARLY));
> }
> @@ -2021,27 +2021,27 @@ static inline int valid_section_nr(unsigned long nr)
> return valid_section(__nr_to_section(nr));
> }
>
> -static inline int online_section(struct mem_section *section)
> +static inline int online_section(const struct mem_section *section)
> {
> return (section && (section->section_mem_map & SECTION_IS_ONLINE));
> }
>
> #ifdef CONFIG_ZONE_DEVICE
> -static inline int online_device_section(struct mem_section *section)
> +static inline int online_device_section(const struct mem_section *section)
> {
> unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;
>
> return section && ((section->section_mem_map & flags) == flags);
> }
> #else
> -static inline int online_device_section(struct mem_section *section)
> +static inline int online_device_section(const struct mem_section *section)
> {
> return 0;
> }
> #endif
>
> #ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
> -static inline int preinited_vmemmap_section(struct mem_section *section)
> +static inline int preinited_vmemmap_section(const struct mem_section *section)
> {
> return (section &&
> (section->section_mem_map & SECTION_IS_VMEMMAP_PREINIT));
> @@ -2051,7 +2051,7 @@ void sparse_vmemmap_init_nid_early(int nid);
> void sparse_vmemmap_init_nid_late(int nid);
>
> #else
> -static inline int preinited_vmemmap_section(struct mem_section *section)
> +static inline int preinited_vmemmap_section(const struct mem_section *section)
> {
> return 0;
> }
> diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
> index 12a12dae727d..37fa744ab124 100644
> --- a/include/linux/pagemap.h
> +++ b/include/linux/pagemap.h
> @@ -140,7 +140,7 @@ static inline int inode_drain_writes(struct inode *inode)
> return filemap_write_and_wait(inode->i_mapping);
> }
>
> -static inline bool mapping_empty(struct address_space *mapping)
> +static inline bool mapping_empty(const struct address_space *mapping)
> {
> return xa_empty(&mapping->i_pages);
> }
> @@ -166,7 +166,7 @@ static inline bool mapping_empty(struct address_space *mapping)
> * refcount and the referenced bit, which will be elevated or set in
> * the process of adding new cache pages to an inode.
> */
> -static inline bool mapping_shrinkable(struct address_space *mapping)
> +static inline bool mapping_shrinkable(const struct address_space *mapping)
> {
> void *head;
>
> @@ -265,7 +265,7 @@ static inline void mapping_clear_unevictable(struct address_space *mapping)
> clear_bit(AS_UNEVICTABLE, &mapping->flags);
> }
>
> -static inline bool mapping_unevictable(struct address_space *mapping)
> +static inline bool mapping_unevictable(const struct address_space *mapping)
> {
> return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
> }
> @@ -275,7 +275,7 @@ static inline void mapping_set_exiting(struct address_space *mapping)
> set_bit(AS_EXITING, &mapping->flags);
> }
>
> -static inline int mapping_exiting(struct address_space *mapping)
> +static inline int mapping_exiting(const struct address_space *mapping)
> {
> return test_bit(AS_EXITING, &mapping->flags);
> }
> @@ -285,7 +285,7 @@ static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
> set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
> }
>
> -static inline int mapping_use_writeback_tags(struct address_space *mapping)
> +static inline int mapping_use_writeback_tags(const struct address_space *mapping)
> {
> return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
> }
> @@ -331,7 +331,7 @@ static inline void mapping_set_inaccessible(struct address_space *mapping)
> set_bit(AS_INACCESSIBLE, &mapping->flags);
> }
>
> -static inline bool mapping_inaccessible(struct address_space *mapping)
> +static inline bool mapping_inaccessible(const struct address_space *mapping)
> {
> return test_bit(AS_INACCESSIBLE, &mapping->flags);
> }
> @@ -341,18 +341,18 @@ static inline void mapping_set_writeback_may_deadlock_on_reclaim(struct address_
> set_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
> }
>
> -static inline bool mapping_writeback_may_deadlock_on_reclaim(struct address_space *mapping)
> +static inline bool mapping_writeback_may_deadlock_on_reclaim(const struct address_space *mapping)
> {
> return test_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
> }
>
> -static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
> +static inline gfp_t mapping_gfp_mask(const struct address_space * mapping)
> {
> return mapping->gfp_mask;
> }
>
> /* Restricts the given gfp_mask to what the mapping allows. */
> -static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
> +static inline gfp_t mapping_gfp_constraint(const struct address_space *mapping,
> gfp_t gfp_mask)
> {
> return mapping_gfp_mask(mapping) & gfp_mask;
> @@ -475,7 +475,7 @@ mapping_min_folio_order(const struct address_space *mapping)
> }
>
> static inline unsigned long
> -mapping_min_folio_nrpages(struct address_space *mapping)
> +mapping_min_folio_nrpages(const struct address_space *mapping)
> {
> return 1UL << mapping_min_folio_order(mapping);
> }
> @@ -489,7 +489,7 @@ mapping_min_folio_nrpages(struct address_space *mapping)
> * new folio to the page cache and need to know what index to give it,
> * call this function.
> */
> -static inline pgoff_t mapping_align_index(struct address_space *mapping,
> +static inline pgoff_t mapping_align_index(const struct address_space *mapping,
> pgoff_t index)
> {
> return round_down(index, mapping_min_folio_nrpages(mapping));
> @@ -499,7 +499,7 @@ static inline pgoff_t mapping_align_index(struct address_space *mapping,
> * Large folio support currently depends on THP. These dependencies are
> * being worked on but are not yet fixed.
> */
> -static inline bool mapping_large_folio_support(struct address_space *mapping)
> +static inline bool mapping_large_folio_support(const struct address_space *mapping)
> {
> /* AS_FOLIO_ORDER is only reasonable for pagecache folios */
> VM_WARN_ONCE((unsigned long)mapping & FOLIO_MAPPING_ANON,
> @@ -514,7 +514,7 @@ static inline size_t mapping_max_folio_size(const struct address_space *mapping)
> return PAGE_SIZE << mapping_max_folio_order(mapping);
> }
>
> -static inline int filemap_nr_thps(struct address_space *mapping)
> +static inline int filemap_nr_thps(const struct address_space *mapping)
> {
> #ifdef CONFIG_READ_ONLY_THP_FOR_FS
> return atomic_read(&mapping->nr_thps);
> @@ -928,7 +928,7 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
> *
> * Return: The index of the folio which follows this folio in the file.
> */
> -static inline pgoff_t folio_next_index(struct folio *folio)
> +static inline pgoff_t folio_next_index(const struct folio *folio)
> {
> return folio->index + folio_nr_pages(folio);
> }
> @@ -957,7 +957,7 @@ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
> * e.g., shmem did not move this folio to the swap cache.
> * Return: true or false.
> */
> -static inline bool folio_contains(struct folio *folio, pgoff_t index)
> +static inline bool folio_contains(const struct folio *folio, pgoff_t index)
> {
> VM_WARN_ON_ONCE_FOLIO(folio_test_swapcache(folio), folio);
> return index - folio->index < folio_nr_pages(folio);
> @@ -1034,12 +1034,12 @@ static inline loff_t page_offset(struct page *page)
> /*
> * Get the offset in PAGE_SIZE (even for hugetlb folios).
> */
> -static inline pgoff_t folio_pgoff(struct folio *folio)
> +static inline pgoff_t folio_pgoff(const struct folio *folio)
> {
> return folio->index;
> }
>
> -static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
> +static inline pgoff_t linear_page_index(const struct vm_area_struct *vma,
> unsigned long address)
> {
> pgoff_t pgoff;
> @@ -1460,7 +1460,7 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
> * readahead_pos - The byte offset into the file of this readahead request.
> * @rac: The readahead request.
> */
> -static inline loff_t readahead_pos(struct readahead_control *rac)
> +static inline loff_t readahead_pos(const struct readahead_control *rac)
> {
> return (loff_t)rac->_index * PAGE_SIZE;
> }
> @@ -1469,7 +1469,7 @@ static inline loff_t readahead_pos(struct readahead_control *rac)
> * readahead_length - The number of bytes in this readahead request.
> * @rac: The readahead request.
> */
> -static inline size_t readahead_length(struct readahead_control *rac)
> +static inline size_t readahead_length(const struct readahead_control *rac)
> {
> return rac->_nr_pages * PAGE_SIZE;
> }
> @@ -1478,7 +1478,7 @@ static inline size_t readahead_length(struct readahead_control *rac)
> * readahead_index - The index of the first page in this readahead request.
> * @rac: The readahead request.
> */
> -static inline pgoff_t readahead_index(struct readahead_control *rac)
> +static inline pgoff_t readahead_index(const struct readahead_control *rac)
> {
> return rac->_index;
> }
> @@ -1487,7 +1487,7 @@ static inline pgoff_t readahead_index(struct readahead_control *rac)
> * readahead_count - The number of pages in this readahead request.
> * @rac: The readahead request.
> */
> -static inline unsigned int readahead_count(struct readahead_control *rac)
> +static inline unsigned int readahead_count(const struct readahead_control *rac)
> {
> return rac->_nr_pages;
> }
> @@ -1496,12 +1496,12 @@ static inline unsigned int readahead_count(struct readahead_control *rac)
> * readahead_batch_length - The number of bytes in the current batch.
> * @rac: The readahead request.
> */
> -static inline size_t readahead_batch_length(struct readahead_control *rac)
> +static inline size_t readahead_batch_length(const struct readahead_control *rac)
> {
> return rac->_batch_count * PAGE_SIZE;
> }
>
> -static inline unsigned long dir_pages(struct inode *inode)
> +static inline unsigned long dir_pages(const struct inode *inode)
> {
> return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
> PAGE_SHIFT;
> @@ -1515,8 +1515,8 @@ static inline unsigned long dir_pages(struct inode *inode)
> * Return: the number of bytes in the folio up to EOF,
> * or -EFAULT if the folio was truncated.
> */
> -static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
> - struct inode *inode)
> +static inline ssize_t folio_mkwrite_check_truncate(const struct folio *folio,
> + const struct inode *inode)
> {
> loff_t size = i_size_read(inode);
> pgoff_t index = size >> PAGE_SHIFT;
> @@ -1547,7 +1547,7 @@ static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
> * Return: The number of filesystem blocks covered by this folio.
> */
> static inline
> -unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
> +unsigned int i_blocks_per_folio(const struct inode *inode, const struct folio *folio)
> {
> return folio_size(folio) >> inode->i_blkbits;
> }
> diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
> index 5d3a0cccc6bf..63be5a451627 100644
> --- a/include/linux/pagevec.h
> +++ b/include/linux/pagevec.h
> @@ -51,12 +51,12 @@ static inline void folio_batch_reinit(struct folio_batch *fbatch)
> fbatch->i = 0;
> }
>
> -static inline unsigned int folio_batch_count(struct folio_batch *fbatch)
> +static inline unsigned int folio_batch_count(const struct folio_batch *fbatch)
> {
> return fbatch->nr;
> }
>
> -static inline unsigned int folio_batch_space(struct folio_batch *fbatch)
> +static inline unsigned int folio_batch_space(const struct folio_batch *fbatch)
> {
> return PAGEVEC_SIZE - fbatch->nr;
> }
> diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
> index 6d0f9c599ff7..0e47465ef0fd 100644
> --- a/include/linux/shmem_fs.h
> +++ b/include/linux/shmem_fs.h
> @@ -99,9 +99,9 @@ extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr,
> unsigned long len, unsigned long pgoff, unsigned long flags);
> extern int shmem_lock(struct file *file, int lock, struct ucounts *ucounts);
> #ifdef CONFIG_SHMEM
> -bool shmem_mapping(struct address_space *mapping);
> +bool shmem_mapping(const struct address_space *mapping);
> #else
> -static inline bool shmem_mapping(struct address_space *mapping)
> +static inline bool shmem_mapping(const struct address_space *mapping)
> {
> return false;
> }
> diff --git a/mm/oom_kill.c b/mm/oom_kill.c
> index 25923cfec9c6..0390e4f55ebe 100644
> --- a/mm/oom_kill.c
> +++ b/mm/oom_kill.c
> @@ -490,7 +490,7 @@ static bool oom_killer_disabled __read_mostly;
> * task's threads: if one of those is using this mm then this task was also
> * using it.
> */
> -bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
> +bool process_shares_mm(struct task_struct *p, const struct mm_struct *mm)
> {
> struct task_struct *t;
>
> diff --git a/mm/shmem.c b/mm/shmem.c
> index e2c76a30802b..91abac780fe6 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -275,18 +275,18 @@ static const struct vm_operations_struct shmem_vm_ops;
> static const struct vm_operations_struct shmem_anon_vm_ops;
> static struct file_system_type shmem_fs_type;
>
> -bool shmem_mapping(struct address_space *mapping)
> +bool shmem_mapping(const struct address_space *mapping)
> {
> return mapping->a_ops == &shmem_aops;
> }
> EXPORT_SYMBOL_GPL(shmem_mapping);
>
> -bool vma_is_anon_shmem(struct vm_area_struct *vma)
> +bool vma_is_anon_shmem(const struct vm_area_struct *vma)
> {
> return vma->vm_ops == &shmem_anon_vm_ops;
> }
>
> -bool vma_is_shmem(struct vm_area_struct *vma)
> +bool vma_is_shmem(const struct vm_area_struct *vma)
> {
> return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
> }
> diff --git a/mm/util.c b/mm/util.c
> index f814e6a59ab1..49ba90c4f763 100644
> --- a/mm/util.c
> +++ b/mm/util.c
> @@ -315,7 +315,7 @@ void *memdup_user_nul(const void __user *src, size_t len)
> EXPORT_SYMBOL(memdup_user_nul);
>
> /* Check if the vma is being used as a stack by this task */
> -int vma_is_stack_for_current(struct vm_area_struct *vma)
> +int vma_is_stack_for_current(const struct vm_area_struct *vma)
> {
> struct task_struct * __maybe_unused t = current;
>
> --
> 2.47.2
>
* Re: [PATCH] mm: add `const` to lots of pointer parameters
2025-08-28 12:24 ` Lorenzo Stoakes
@ 2025-08-28 12:28 ` David Hildenbrand
2025-08-28 12:40 ` Max Kellermann
0 siblings, 1 reply; 16+ messages in thread
From: David Hildenbrand @ 2025-08-28 12:28 UTC (permalink / raw)
To: Lorenzo Stoakes, Max Kellermann
Cc: akpm, Liam.Howlett, vbabka, rppt, surenb, mhocko, linux-mm,
linux-kernel
On 28.08.25 14:24, Lorenzo Stoakes wrote:
> You've not cc'd the right people here.
>
> Please run scripts/get_maintainer.pl on your patch series and cc all
> maintainers and reviewers on every series, as well as all of the
> mailing lists referenced.
>
> For this change you needed to CC:
>
> Alexander Viro <viro@zeniv.linux.org.uk> (maintainer:FILESYSTEMS (VFS and infrastructure))
> Christian Brauner <brauner@kernel.org> (maintainer:FILESYSTEMS (VFS and infrastructure))
> Jan Kara <jack@suse.cz> (reviewer:FILESYSTEMS (VFS and infrastructure))
> Andrew Morton <akpm@linux-foundation.org> (maintainer:MEMORY MANAGEMENT - CORE,commit_signer:1/1=100%)
> David Hildenbrand <david@redhat.com> (maintainer:MEMORY MANAGEMENT - CORE)
> Lorenzo Stoakes <lorenzo.stoakes@oracle.com> (reviewer:MEMORY MANAGEMENT - CORE)
> "Liam R. Howlett" <Liam.Howlett@oracle.com> (reviewer:MEMORY MANAGEMENT - CORE)
> Vlastimil Babka <vbabka@suse.cz> (reviewer:MEMORY MANAGEMENT - CORE)
> Mike Rapoport <rppt@kernel.org> (reviewer:MEMORY MANAGEMENT - CORE)
> Suren Baghdasaryan <surenb@google.com> (reviewer:MEMORY MANAGEMENT - CORE)
> Michal Hocko <mhocko@suse.com> (reviewer:MEMORY MANAGEMENT - CORE)
> Axel Rasmussen <axelrasmussen@google.com> (maintainer:MEMORY MANAGEMENT - MGLRU (MULTI-GEN LRU))
> Yuanchu Xie <yuanchu@google.com> (maintainer:MEMORY MANAGEMENT - MGLRU (MULTI-GEN LRU))
> Wei Xu <weixugc@google.com> (reviewer:MEMORY MANAGEMENT - MGLRU (MULTI-GEN LRU))
> "Matthew Wilcox (Oracle)" <willy@infradead.org> (maintainer:PAGE CACHE)
> Hugh Dickins <hughd@google.com> (maintainer:TMPFS (SHMEM FILESYSTEM))
> Baolin Wang <baolin.wang@linux.alibaba.com> (reviewer:TMPFS (SHMEM FILESYSTEM))
> David Rientjes <rientjes@google.com> (reviewer:MEMORY MANAGEMENT - OOM KILLER)
> Shakeel Butt <shakeel.butt@linux.dev> (reviewer:MEMORY MANAGEMENT - OOM KILLER)
> linux-fsdevel@vger.kernel.org (open list:FILESYSTEMS (VFS and infrastructure))
> linux-kernel@vger.kernel.org (open list)
> linux-mm@kvack.org (open list:MEMORY MANAGEMENT - CORE)
>
> Usually I'd cc for you here but that's too many :)
>
> On Wed, Aug 27, 2025 at 09:22:33PM +0200, Max Kellermann wrote:
>> For improved const-correctness.
>
> 'const-correctness' in C is extremely weak.
>
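> For instance, nothing stops a callee from simply casting it away - a
> toy sketch:
>
>         void f(const int *p)
>         {
>                 *(int *)p = 42; /* the cast silently discards const */
>         }
>
> so the guarantee is shallow at best.
>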
>>
>> Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
>
> It feels like this is a bit of an arbitrary subset and this churn is going
> to make merge conflicts much more likely.
>
> I mean, if it's just that all this functions accept non-const VMA and for
> all callers _from_ these functions most accept const - it's not really
> accomplishing much other than adding a bunch of noise.
>
> Any meaningful change would require a pretty huge amount of churn that
> would be horrendous git-blame- and merge-conflict-wise, for really not
> much benefit.
>
> I feel like the ship has sailed for this and I really question the value of
> doing this in this way.
I recall that Willy did some more targeted conversions of that kind in the past regarding folios.
E.g.,
commit 68158bfa3dbd4af8461ef75a91ffc03be942c8fe
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Date: Sat Oct 5 21:01:15 2024 +0100
mm: mass constification of folio/page pointers
Now that page_pgoff() takes const pointers, we can constify the pointers
to a lot of functions.
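
The pattern there is the usual bottom-up one: constify a leaf helper
first, then its callers can follow. Roughly (hypothetical helpers, not
the actual commit):

        static bool leaf(const struct folio *folio)     /* constified first */
        {
                return folio->flags & 1;
        }

        static bool caller(const struct folio *folio)   /* can now follow */
        {
                return leaf(folio);
        }
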
I would appreciate similarly doing it in logical chunks.
--
Cheers
David / dhildenb
* Re: [PATCH] mm: add `const` to lots of pointer parameters
2025-08-28 12:16 ` Max Kellermann
@ 2025-08-28 12:35 ` Lorenzo Stoakes
2025-08-28 12:42 ` Max Kellermann
0 siblings, 1 reply; 16+ messages in thread
From: Lorenzo Stoakes @ 2025-08-28 12:35 UTC (permalink / raw)
To: Max Kellermann
Cc: Andrew Morton, david, Liam.Howlett, vbabka, rppt, surenb, mhocko,
linux-mm, linux-kernel
On Thu, Aug 28, 2025 at 02:16:42PM +0200, Max Kellermann wrote:
> On Thu, Aug 28, 2025 at 2:09 PM Lorenzo Stoakes
> <lorenzo.stoakes@oracle.com> wrote:
>
> > > I'd actually be in favor of making all incoming args const (C should
> > > have made this the default).
> > >
> > > Because modifying an incoming arg is just obnoxious. That value should
> > > be viewed as part of the calling environment and should not be altered.
> > >
> > > Try modifying a lengthy function and wanting to get at an incoming arg
> > > only to find that something in the preceding 100 lines has gone and
> > > messed with it. Or forget to check for this and get a nasty surprise when
> > > testing.
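> > >
> > > Something like this (purely illustrative):
> > >
> > >         void f(unsigned long addr)
> > >         {
> > >                 /* ... 100 lines ... */
> > >                 addr &= PAGE_MASK;      /* argument silently clobbered */
> > >                 /* ... new code that still wants the original addr ... */
> > >         }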
> > >
> > > Not that I'm suggesting that someone go in and make this change.
> > >
> > > On the other hand, it would be neat if gcc had an option to warn when
> > > someone does this. I bet it would be simple to add.
> >
> > This would break half the world; I've tried making parameters const,
> > then finding basically every helper function doesn't work correctly
> > with it, and having to give up short of changing half the code base.
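> >
> > E.g. (toy sketch) the moment you write
> >
> >         void helper(struct mm_struct *mm);      /* not const-aware */
> >
> >         void f(const struct mm_struct *mm)
> >         {
> >                 helper(mm);     /* warning: discards 'const' qualifier */
> >         }
> >
> > you either drop the const again or chase it through every helper.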
>
> Are you guys talking about making parameter values const (i.e. pointer
> addresses if the parameter is a pointer) or making pointed-to values
> const?
> It seemed Andrew meant the former, but my patch was about the latter.
>
> Yes, making parameter values (not pointed-to values) const by default
> would have been a good choice, but we're 50 years late for that, and
> unfortunately, C's syntax for pointers with a constant address is
> awkwardly confusing. But adding "const" to parameter values (not
> pointed-to values) should not break anything (unless the function
> really changes parameter values - there may be good or bad reasons to
> do that).
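>
> Side by side, the two spellings (toy declarations):
>
>         void f(const struct folio *folio);      /* pointed-to object is const */
>         void g(struct folio *const folio);      /* the parameter itself is const */
>
> My patch is about the f() kind; Andrew seemed to mean the g() kind.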
>
> Making more pointed-to values const will break a lot, yes, because
> this needs const-correctness in the whole callee space. This patch
> here tries to do this for a low-level part of the kernel, to enable it
> in higher-level parts. Long way to go.
>
> Max
I'm talking about const parameter values.
I gave a more full reply in general to this at top-level.
* Re: [PATCH] mm: add `const` to lots of pointer parameters
2025-08-28 12:28 ` David Hildenbrand
@ 2025-08-28 12:40 ` Max Kellermann
2025-08-28 12:43 ` Lorenzo Stoakes
2025-08-28 13:00 ` David Hildenbrand
0 siblings, 2 replies; 16+ messages in thread
From: Max Kellermann @ 2025-08-28 12:40 UTC (permalink / raw)
To: David Hildenbrand
Cc: Lorenzo Stoakes, akpm, Liam.Howlett, vbabka, rppt, surenb, mhocko,
linux-mm, linux-kernel
> On 28.08.25 14:24, Lorenzo Stoakes wrote:
> > On Wed, Aug 27, 2025 at 09:22:33PM +0200, Max Kellermann wrote:
> >> For improved const-correctness.
> >
> > 'const-correctness' in C is extremely weak.
"const correctness" is a commonly used term for this concept, and I
find your arguments against const-correctness "extremely weak". I
don't think discussing the benefits of const-correctness is worth the
time; the concept is trivial enough, and if you don't already agree,
there is no way I can convince you. Let's agree to disagree.
On Thu, Aug 28, 2025 at 2:29 PM David Hildenbrand <david@redhat.com> wrote:
> I recall that Willy did some more targeted conversions of that kind in the past regarding folios.
And I found many more commits adding "const" to pointer parameters.
That shows this kind of patch is acceptable in the Linux kernel.
> I would appreciate similarly doing it in logical chunks.
So you suggest splitting the patch into many? I can do that, but will
it be merged then, or will Lorenzo be able to block it? Will further
const-correctness changes from others (e.g. Willy) be rejected, too?
* Re: [PATCH] mm: add `const` to lots of pointer parameters
2025-08-28 12:35 ` Lorenzo Stoakes
@ 2025-08-28 12:42 ` Max Kellermann
2025-08-28 12:49 ` Lorenzo Stoakes
0 siblings, 1 reply; 16+ messages in thread
From: Max Kellermann @ 2025-08-28 12:42 UTC (permalink / raw)
To: Lorenzo Stoakes
Cc: Andrew Morton, david, Liam.Howlett, vbabka, rppt, surenb, mhocko,
linux-mm, linux-kernel
On Thu, Aug 28, 2025 at 2:35 PM Lorenzo Stoakes
<lorenzo.stoakes@oracle.com> wrote:
> I'm talking about const parameter values.
But that's not what my patch is about.
* Re: [PATCH] mm: add `const` to lots of pointer parameters
2025-08-28 12:40 ` Max Kellermann
@ 2025-08-28 12:43 ` Lorenzo Stoakes
2025-08-28 12:51 ` Max Kellermann
2025-08-28 13:00 ` David Hildenbrand
1 sibling, 1 reply; 16+ messages in thread
From: Lorenzo Stoakes @ 2025-08-28 12:43 UTC (permalink / raw)
To: Max Kellermann
Cc: David Hildenbrand, akpm, Liam.Howlett, vbabka, rppt, surenb,
mhocko, linux-mm, linux-kernel
On Thu, Aug 28, 2025 at 02:40:29PM +0200, Max Kellermann wrote:
> > On 28.08.25 14:24, Lorenzo Stoakes wrote:
> > > On Wed, Aug 27, 2025 at 09:22:33PM +0200, Max Kellermann wrote:
> > >> For improved const-correctness.
> > >
> > > 'const-correctness' in C is extremely weak.
>
> "const correctness" is a commonly used term for this concept, and I
> find your arguments against const-correctness "extremely weak". I
Max, don't speak to me like this; it's disrespectful and unnecessary.
> So you suggest splitting the patch into many? I can do that, but will
> it be merged then, or will Lorenzo be able block it? Will further
> const-correctness changes from others (e.g. Willy) be rejected, too?
Also this is totally unacceptable. Speak to people with respect on the
mailing list. This is not how to interact here.
Thanks, Lorenzo
* Re: [PATCH] mm: add `const` to lots of pointer parameters
2025-08-28 12:42 ` Max Kellermann
@ 2025-08-28 12:49 ` Lorenzo Stoakes
0 siblings, 0 replies; 16+ messages in thread
From: Lorenzo Stoakes @ 2025-08-28 12:49 UTC (permalink / raw)
To: Max Kellermann
Cc: Andrew Morton, david, Liam.Howlett, vbabka, rppt, surenb, mhocko,
linux-mm, linux-kernel
On Thu, Aug 28, 2025 at 02:42:27PM +0200, Max Kellermann wrote:
> On Thu, Aug 28, 2025 at 2:35 PM Lorenzo Stoakes
> <lorenzo.stoakes@oracle.com> wrote:
> > I'm talking about const parameter values.
>
> But that's not what my patch is about.
Perhaps I wasn't being clear - I mean making the pointers const, as your patch does.
I made all the relevant points at the top level reply.
* Re: [PATCH] mm: add `const` to lots of pointer parameters
2025-08-28 12:43 ` Lorenzo Stoakes
@ 2025-08-28 12:51 ` Max Kellermann
0 siblings, 0 replies; 16+ messages in thread
From: Max Kellermann @ 2025-08-28 12:51 UTC (permalink / raw)
To: Lorenzo Stoakes
Cc: David Hildenbrand, akpm, Liam.Howlett, vbabka, rppt, surenb,
mhocko, linux-mm, linux-kernel
On Thu, Aug 28, 2025 at 2:43 PM Lorenzo Stoakes
<lorenzo.stoakes@oracle.com> wrote:
>
> On Thu, Aug 28, 2025 at 02:40:29PM +0200, Max Kellermann wrote:
> > > On 28.08.25 14:24, Lorenzo Stoakes wrote:
> > > > On Wed, Aug 27, 2025 at 09:22:33PM +0200, Max Kellermann wrote:
> > > >> For improved const-correctness.
> > > >
> > > > 'const-correctness' in C is extremely weak.
> >
> > "const correctness" is a commonly used term for this concept, and I
> > find your arguments against const-correctness "extremely weak". I
>
> Max, don't speak to me like this, it's disrespectful and unnecessary.
>
> > So you suggest splitting the patch into many? I can do that, but will
> > it be merged then, or will Lorenzo be able to block it? Will further
> > const-correctness changes from others (e.g. Willy) be rejected, too?
>
> Also this is totally unacceptable. Speak to people with respect on the
> mailing list. This is not how to interact here.
I don't get it. What exactly bothers you?
* Re: [PATCH] mm: add `const` to lots of pointer parameters
2025-08-27 19:22 [PATCH] mm: add `const` to lots of pointer parameters Max Kellermann
2025-08-27 21:48 ` Andrew Morton
2025-08-28 12:24 ` Lorenzo Stoakes
@ 2025-08-28 12:55 ` Lorenzo Stoakes
2 siblings, 0 replies; 16+ messages in thread
From: Lorenzo Stoakes @ 2025-08-28 12:55 UTC (permalink / raw)
To: Max Kellermann
Cc: akpm, david, Liam.Howlett, vbabka, rppt, surenb, mhocko, linux-mm,
linux-kernel
On Wed, Aug 27, 2025 at 09:22:33PM +0200, Max Kellermann wrote:
> For improved const-correctness.
>
> Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
NAK.
The patch in this form has dubious value, and the interactions on this
thread are not encouraging.
I also have pending series this will conflict with.
If a more reasonable approach can be suggested with appropriate on-list
behaviour (I think it'd really need to be an RFC also) then we can take a
look at that.
I think something more logically divided as David suggested could be
workable.
Cheers, Lorenzo
* Re: [PATCH] mm: add `const` to lots of pointer parameters
2025-08-28 12:40 ` Max Kellermann
2025-08-28 12:43 ` Lorenzo Stoakes
@ 2025-08-28 13:00 ` David Hildenbrand
1 sibling, 0 replies; 16+ messages in thread
From: David Hildenbrand @ 2025-08-28 13:00 UTC (permalink / raw)
To: Max Kellermann
Cc: Lorenzo Stoakes, akpm, Liam.Howlett, vbabka, rppt, surenb, mhocko,
linux-mm, linux-kernel
On 28.08.25 14:40, Max Kellermann wrote:
>> On 28.08.25 14:24, Lorenzo Stoakes wrote:
>>> On Wed, Aug 27, 2025 at 09:22:33PM +0200, Max Kellermann wrote:
>>>> For improved const-correctness.
>>>
>>> 'const-correctness' in C is extremely weak.
>
> "const correctness" is a commonly used term for this concept, and I
> find your arguments against const-correctness "extremely weak". I
> don't think discussing the benefits of const-correctness is worth the
> time; the concept is trivial enough, and if you don't already agree,
> there is no way I can convince you. Let's agree to disagree.
>
> On Thu, Aug 28, 2025 at 2:29 PM David Hildenbrand <david@redhat.com> wrote:
>> I recall that Willy did some more targeted conversions of that kind in the past regarding folios.
>
> And I found many more commits adding "const" to pointer parameters.
> That shows this kind of patch is acceptable in the Linux kernel.
>
>> I would appreciate similarly doing it in logical chunks.
>
> So you suggest splitting the patch into many?
Yes, for example focusing on a specific class of functions like Willy did.
That makes my life as a maintainer and reviewer easier, and allows for
holding back some of these patches if we expect conflicts in that area soon.
/me stares helplessly at his overflowing inbox where #submitters is
significantly larger than #reviewers, and the majority of patches are
not in a form that would allow for any kind of easy review.
--
Cheers
David / dhildenb
Thread overview: 16+ messages
2025-08-27 19:22 [PATCH] mm: add `const` to lots of pointer parameters Max Kellermann
2025-08-27 21:48 ` Andrew Morton
2025-08-28 1:48 ` Liam R. Howlett
2025-08-28 2:47 ` Andrew Morton
2025-08-28 12:09 ` Lorenzo Stoakes
2025-08-28 12:16 ` Max Kellermann
2025-08-28 12:35 ` Lorenzo Stoakes
2025-08-28 12:42 ` Max Kellermann
2025-08-28 12:49 ` Lorenzo Stoakes
2025-08-28 12:24 ` Lorenzo Stoakes
2025-08-28 12:28 ` David Hildenbrand
2025-08-28 12:40 ` Max Kellermann
2025-08-28 12:43 ` Lorenzo Stoakes
2025-08-28 12:51 ` Max Kellermann
2025-08-28 13:00 ` David Hildenbrand
2025-08-28 12:55 ` Lorenzo Stoakes