All of lore.kernel.org
 help / color / mirror / Atom feed
* [koverstreet-bcachefs:master 31/106] fs/bcachefs/btree/interior.c:2966:17: sparse: sparse: restricted __le16 degrades to integer
@ 2026-05-16  4:15 kernel test robot
  0 siblings, 0 replies; only message in thread
From: kernel test robot @ 2026-05-16  4:15 UTC (permalink / raw)
  To: Kent Overstreet; +Cc: oe-kbuild-all

tree:   https://github.com/koverstreet/bcachefs master
head:   ca944a61e079450f82be88c91e349638c75cf4b6
commit: 8fa192c9e9735f5801a191e8a37e5376fc279a57 [31/106] bcachefs: __bch2_foreground_maybe_merge: 3-way merge support
config: sh-randconfig-r123-20260514 (https://download.01.org/0day-ci/archive/20260516/202605161217.8LGbaYru-lkp@intel.com/config)
compiler: sh4-linux-gcc (GCC) 11.5.0
sparse: v0.6.5-rc1
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260516/202605161217.8LGbaYru-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202605161217.8LGbaYru-lkp@intel.com/

sparse warnings: (new ones prefixed by >>)
   fs/bcachefs/btree/interior.c:1270:33: sparse: sparse: mixing different enum types:
   fs/bcachefs/btree/interior.c:1270:33: sparse:    unsigned int enum bch_trans_commit_flags
   fs/bcachefs/btree/interior.c:1270:33: sparse:    unsigned int enum bch_watermark
>> fs/bcachefs/btree/interior.c:2966:17: sparse: sparse: restricted __le16 degrades to integer

vim +2966 fs/bcachefs/btree/interior.c

  2723	
  2724	int __bch2_foreground_maybe_merge(struct btree_trans *trans,
  2725					  btree_path_idx_t path,
  2726					  unsigned level,
  2727					  enum bch_trans_commit_flags flags,
  2728					  u64 *merge_count)
  2729	{
  2730		struct bch_fs *c = trans->c;
  2731		struct btree_update *as = NULL;
  2732		enum btree_id btree = trans->paths[path].btree_id;
  2733		u64 start_time = local_clock();
  2734		int ret = 0;
  2735	
  2736		CLASS(darray_merge_node, srcs)();
  2737		CLASS(darray_merge_node, dsts)();
  2738	
  2739		try(darray_make_room(&srcs, 3));
  2740		try(darray_make_room(&dsts, 2));
  2741	
  2742		bch2_trans_verify_not_unlocked_or_in_restart(trans);
  2743		BUG_ON(!trans->paths[path].should_be_locked);
  2744		BUG_ON(!btree_node_locked(&trans->paths[path], level));
  2745	
  2746		/*
  2747		 * Work around a deadlock caused by the btree write buffer not doing
  2748		 * merges and leaving tons of merges for us to do - we really don't need
  2749		 * to be doing merges at all from the interior update path, and if the
  2750		 * interior update path is generating too many new interior updates we
  2751		 * deadlock:
  2752		 */
  2753		if ((flags & BCH_WATERMARK_MASK) == BCH_WATERMARK_interior_updates)
  2754			return 0;
  2755	
  2756		if ((flags & BCH_WATERMARK_MASK) <= BCH_WATERMARK_reclaim) {
  2757			flags &= ~BCH_WATERMARK_MASK;
  2758			flags |= BCH_WATERMARK_btree;
  2759			flags |= BCH_TRANS_COMMIT_journal_reclaim;
  2760		}
  2761	
  2762		struct btree *b = trans->paths[path].l[level].b;
  2763	
  2764		if (bpos_eq(b->data->min_key, POS_MIN))
  2765			b->sib_u64s[btree_prev_sib] = U16_MAX;
  2766		if (bpos_eq(b->data->max_key, SPOS_MAX))
  2767			b->sib_u64s[btree_next_sib] = U16_MAX;
  2768	
  2769		/*
  2770		 * Push srcs in left-to-right order so srcs is naturally sorted: prev
  2771		 * sibling first (if merging left), then caller's node, then next
  2772		 * sibling (if merging right). The caller's path takes an extra ref so
  2773		 * the destructor can put it uniformly with helper-acquired paths.
  2774		 */
  2775		try(btree_merge_push_pos(trans, &srcs, btree, level, path, btree_prev_sib));
  2776	
  2777		__btree_path_get(trans, trans->paths + path, true);
  2778		BUG_ON(darray_push(&srcs, ((struct btree_merge_node) {
  2779			trans, b, path, b->nr.live_u64s,
  2780		})));
  2781	
  2782		try(btree_merge_push_pos(trans, &srcs, btree, level, path, btree_next_sib));
  2783	
  2784		if (srcs.nr == 1)
  2785			return 0;
  2786	
  2787		event_inc_trace(c, btree_node_merge_attempt, buf, ({
  2788			unsigned total_u64s = 0;
  2789			darray_for_each(srcs, s) {
  2790				if (s->b)
  2791					bch2_btree_pos_to_text(&buf, c, s->b);
  2792				else
  2793					prt_str(&buf, "(evicted node)");
  2794				prt_printf(&buf, "\nlive u64s %u (%zu%% full)\n",
  2795					   s->live_u64s,
  2796					   s->live_u64s * 100 / btree_max_u64s(c));
  2797				total_u64s += s->live_u64s;
  2798			}
  2799	
  2800			prt_printf(&buf, "Pivot sib_u64s %u %u threshold %u\n",
  2801				   b->sib_u64s[btree_prev_sib],
  2802				   b->sib_u64s[btree_next_sib],
  2803				   c->btree.foreground_merge_threshold);
  2804			bch2_btree_pos_to_text(&buf, c, b);
  2805	
  2806			prt_printf(&buf, "\ntotal_u64s %u per-node max %zu nr_dsts %un",
  2807				   total_u64s, BTREE_FOREGROUND_MERGE_HIGHER_THRESHOLD(c),
  2808				   max_t(unsigned, 1,
  2809					 DIV_ROUND_UP(total_u64s, BTREE_FOREGROUND_MERGE_HIGHER_THRESHOLD(c))));
  2810		}));
  2811	
  2812		struct bkey_format new_f;
  2813		unsigned nr_dsts = compute_merge(c, b, &srcs, NULL, &new_f);
  2814		if (nr_dsts >= srcs.nr)
  2815			goto out;
  2816	
  2817		/*
  2818		 * Estimate-based gate said the merge will fit — fill (or re-traverse)
  2819		 * any srcs whose path isn't currently should_be_locked. Deferred srcs
  2820		 * pushed without a real read fall here; so do any srcs whose locks
  2821		 * may have been dropped since push (a !should_be_locked path won't
  2822		 * get relocked by unlock/relock cycles).
  2823		 *
  2824		 * On parent-identity mismatch, poison that side's cached estimate
  2825		 * and drop the src — saves us the bch2_btree_update_start cost that
  2826		 * the post-update_start recheck would otherwise discover.
  2827		 */
  2828		for (unsigned i = 0; i < srcs.nr;) {
  2829			struct btree_merge_node *s = &srcs.data[i];
  2830	
  2831			if (trans->paths[s->path_idx].should_be_locked) {
  2832				i++;
  2833				continue;
  2834			}
  2835	
  2836			ret = bch2_btree_path_traverse(trans, s->path_idx, 0);
  2837			if (ret)
  2838				goto err;
  2839	
  2840			s->b = trans->paths[s->path_idx].l[level].b;
  2841			s->live_u64s = s->b->nr.live_u64s;
  2842	
  2843			if (btree_node_parent(trans->paths + s->path_idx, s->b) !=
  2844			    trans->paths[path].l[level + 1].b) {
  2845				enum btree_node_sibling bad =
  2846					bpos_lt(s->b->data->max_key, b->data->min_key)
  2847					? btree_prev_sib
  2848					: btree_next_sib;
  2849				b->sib_u64s[bad] = U16_MAX;
  2850				btree_merge_node_put(*s);
  2851				darray_remove_item(&srcs, s);
  2852				continue;
  2853			}
  2854	
  2855			btree_path_set_should_be_locked(trans, trans->paths + s->path_idx);
  2856			i++;
  2857		}
  2858	
  2859		try(btree_merge_topology_check(c, &srcs));
  2860	
  2861		if (srcs.nr == 1)
  2862			goto out;
  2863	
  2864		/*
  2865		 * Post deferred-fill, every surviving src must have ->b set so that
  2866		 * compute_merge() / merge_node_u64s_and_format() takes the precise
  2867		 * format-aware path. The estimate-only fallback (sum of live_u64s)
  2868		 * doesn't account for format growth on repack and would let an
  2869		 * infeasible merge through, blowing up later in btree_pack_into_dsts
  2870		 * or sort_into.
  2871		 */
  2872		darray_for_each(srcs, s)
  2873			BUG_ON(!s->b);
  2874	
  2875		nr_dsts = compute_merge(c, b, &srcs, &dsts, &new_f);
  2876		if (nr_dsts >= srcs.nr)
  2877			goto out;
  2878	
  2879		BUG_ON(nr_dsts > 2);
  2880	
  2881		as = bch2_btree_update_start(trans, trans->paths + path, level, nr_dsts == 2,
  2882					     0, BCH_TRANS_COMMIT_no_enospc|flags, 0);
  2883		ret = PTR_ERR_OR_ZERO(as);
  2884		if (ret) {
  2885			as = NULL;
  2886			goto err;
  2887		}
  2888	
  2889		/*
  2890		 * update_start upgraded path's locks to cover parent nodes; re-read
  2891		 * parent and re-verify all srcs still share it. The earlier check in
  2892		 * btree_merge_push_pos() was racy because parents weren't locked.
  2893		 *
  2894		 * On mismatch, identify which side the bad sibling is on by bpos
  2895		 * comparison and poison just that side's cached estimate.
  2896		 */
  2897		struct btree *parent = btree_node_parent(trans->paths + path, b);
  2898		darray_for_each(srcs, s) {
  2899			if (s->path_idx == path)
  2900				continue;
  2901			if (btree_node_parent(trans->paths + s->path_idx, s->b) != parent) {
  2902				enum btree_node_sibling bad_sib =
  2903					bpos_lt(s->b->data->max_key, b->data->min_key)
  2904						? btree_prev_sib
  2905						: btree_next_sib;
  2906				b->sib_u64s[bad_sib] = U16_MAX;
  2907				bch2_btree_update_free(as, trans);
  2908				as = NULL;
  2909				ret = 0;
  2910				goto out;
  2911			}
  2912		}
  2913	
  2914		as->node_start	= srcs.data[0].b->data->min_key;
  2915		as->node_end	= srcs.data[srcs.nr - 1].b->data->max_key;
  2916	
  2917		darray_for_each(srcs, s) {
  2918			ret = bch2_btree_node_lock_write(trans, trans->paths + s->path_idx, &s->b->c);
  2919			if (ret)
  2920				goto err_free_update;
  2921		}
  2922	
  2923		/*
  2924		 * Allocate destination nodes: 1 for plain N->1, 2 for the 3->2 case.
  2925		 * compute_merge() already populated dsts entries with their per-dst
  2926		 * format and max_key — here we just attach trans + a freshly allocated
  2927		 * btree node to each. path_idx is filled in after the pack since each
  2928		 * dst's key.k.p (== max_key) is set inside btree_set_max().
  2929		 */
  2930		darray_for_each(dsts, d) {
  2931			d->trans = trans;
  2932			d->b = bch2_btree_node_alloc(as, trans, level);
  2933		}
  2934	
  2935		if (nr_dsts == 1) {
  2936			struct btree *n = dsts.data[0].b;
  2937			u64 max_seq = 0;
  2938	
  2939			darray_for_each(srcs, s)
  2940				max_seq = max(max_seq, BTREE_NODE_SEQ(s->b->data));
  2941			SET_BTREE_NODE_SEQ(n->data, max_seq + 1);
  2942	
  2943			btree_set_min(n, srcs.data[0].b->data->min_key);
  2944			btree_set_max(n, dsts.data[0].max_key);
  2945	
  2946			n->data->format = dsts.data[0].format;
  2947			btree_node_set_format(n, n->data->format);
  2948	
  2949			darray_for_each(srcs, s)
  2950				bch2_btree_sort_into(c, n, s->b);
  2951	
  2952			ret = bch2_btree_node_check_topology(trans, n);
  2953			BUG_ON(ret);
  2954	
  2955			btree_node_reset_sib_u64s(n);
  2956		} else {
  2957			btree_pack_into_dsts(as, trans, &srcs, &dsts);
  2958		}
  2959	
  2960		/*
  2961		 * Diagnostic: each dst's packed content must fit in btree_buf_bytes
  2962		 * with room for the +8 varint slop in btree_node_write. Mirrors
  2963		 * bch2_btree_node_format_fits()'s strict-less-than check.
  2964		 */
  2965		darray_for_each(dsts, d)
> 2966			BUG_ON(__vstruct_bytes(struct btree_node, d->b->data->u64s) >=
  2967			       btree_buf_bytes(d->b));
  2968	
  2969		darray_for_each(dsts, d) {
  2970			bch2_btree_build_aux_trees(d->b);
  2971			bch2_btree_update_add_new_node(as, d->b);
  2972	
  2973			d->path_idx = bch2_path_get_unlocked_mut(trans, btree,
  2974								 d->b->c.level, d->b->key.k.p, false);
  2975			six_lock_increment(&d->b->c.lock, SIX_LOCK_intent);
  2976			mark_btree_node_locked(trans, trans->paths + d->path_idx,
  2977					       d->b->c.level, BTREE_NODE_WRITE_LOCKED);
  2978			bch2_btree_path_level_init(trans, trans->paths + d->path_idx, d->b);
  2979		}
  2980	
  2981		/*
  2982		 * Conceptually: every src becomes a delete, every dst becomes a new
  2983		 * key, sorted and deduped (a new key at the same .p as a delete
  2984		 * subsumes the delete). For N -> 1 the last src's .p equals the dst's
  2985		 * .p (the dst's max_key was set to the last src's max_key), so the
  2986		 * last src's delete is the only one that gets dropped.
  2987		 *
  2988		 * Both parent_keys (the in-memory parent update) and new_nodes (the
  2989		 * journal record) need each surviving delete; emit them together.
  2990		 */
  2991		struct btree_merge_node *src = srcs.data;
  2992		struct btree_merge_node *dst = dsts.data;
  2993		while (src || dst) {
  2994			if (src && dst && bpos_eq(src->b->key.k.p, dst->b->key.k.p)) {
  2995				bch2_btree_update_add_node(c, &as->new_nodes, dst->b);
  2996				bch2_keylist_add(&as->parent_keys, &dst->b->key);
  2997				src++;
  2998				dst++;
  2999			} else if (src && (dst ? bpos_lt(src->b->key.k.p, dst->b->key.k.p) : true)) {
  3000				struct bkey_i delete;
  3001				bkey_init(&delete.k);
  3002				delete.k.p = src->b->key.k.p;
  3003				bch2_keylist_add(&as->parent_keys, &delete);
  3004				bch2_btree_update_add_key(&as->new_nodes, level, &delete);
  3005				src++;
  3006			} else {
  3007				bch2_btree_update_add_node(c, &as->new_nodes, dst->b);
  3008				bch2_keylist_add(&as->parent_keys, &dst->b->key);
  3009				dst++;
  3010			}
  3011	
  3012			if (src == srcs.data + srcs.nr)
  3013				src = NULL;
  3014			if (dst == dsts.data + dsts.nr)
  3015				dst = NULL;
  3016		}
  3017	
  3018		bch2_trans_verify_paths(trans);
  3019	
  3020		ret = bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys);
  3021		if (ret)
  3022			goto err_free_new_node;
  3023	
  3024		event_inc_trace(c, btree_node_merge, buf, ({
  3025			btree_node_op_log(&buf, c, b, &srcs, &dsts);
  3026		}));
  3027	
  3028		darray_for_each(srcs, s)
  3029			bch2_btree_interior_update_will_free_node(as, s->b);
  3030	
  3031		bch2_trans_verify_paths(trans);
  3032	
  3033		darray_for_each(dsts, d) {
  3034			bch2_btree_update_get_open_buckets(as, d->b);
  3035			bch2_btree_node_write_trans(trans, d->b, SIX_LOCK_write, 0);
  3036		}
  3037	
  3038		darray_for_each(srcs, s)
  3039			bch2_btree_node_free_inmem(trans, trans->paths + s->path_idx, s->b);
  3040	
  3041		darray_for_each(dsts, d)
  3042			bch2_trans_node_add(trans, trans->paths + d->path_idx, d->b);
  3043	
  3044		bch2_trans_verify_paths(trans);
  3045	
  3046		darray_for_each(dsts, d)
  3047			six_unlock_intent(&d->b->c.lock);
  3048	
  3049		bch2_btree_update_done(as, trans);
  3050	
  3051		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_merge], start_time);
  3052	
  3053		if (merge_count)
  3054			(*merge_count)++;
  3055	out:
  3056	err:
  3057		bch2_trans_verify_locks(trans);
  3058		if (ret == -BCH_ERR_journal_reclaim_would_deadlock)
  3059			ret = 0;
  3060		if (!ret)
  3061			ret = bch2_trans_relock(trans);
  3062		return ret;
  3063	err_free_new_node:
  3064		darray_for_each(dsts, d)
  3065			bch2_btree_node_free_never_used(as, trans, d->b);
  3066	err_free_update:
  3067		darray_for_each_reverse(srcs, s)
  3068			if (btree_node_write_locked(trans->paths + s->path_idx, s->b->c.level))
  3069				bch2_btree_node_unlock_write(trans, trans->paths + s->path_idx, s->b);
  3070		if (as)
  3071			bch2_btree_update_free(as, trans);
  3072		goto out;
  3073	}
  3074	

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

^ permalink raw reply	[flat|nested] only message in thread

only message in thread, other threads:[~2026-05-16  4:15 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-05-16  4:15 [koverstreet-bcachefs:master 31/106] fs/bcachefs/btree/interior.c:2966:17: sparse: sparse: restricted __le16 degrades to integer kernel test robot

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.