public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH v3 1/2] binder: Refactor binder_node print synchronization
@ 2025-05-07 21:10 Tiffany Y. Yang
  2025-05-07 21:10 ` [PATCH v3 2/2] binder: Create safe versions of binder log files Tiffany Y. Yang
  2025-05-08 12:27 ` [PATCH v3 1/2] binder: Refactor binder_node print synchronization Alice Ryhl
  0 siblings, 2 replies; 6+ messages in thread
From: Tiffany Y. Yang @ 2025-05-07 21:10 UTC (permalink / raw)
  To: Greg Kroah-Hartman, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Joel Fernandes, Christian Brauner, Carlos Llamas,
	Suren Baghdasaryan
  Cc: linux-kernel, kernel-team, Tiffany Y. Yang

The binder driver outputs information about each dead binder node by
iterating over the dead nodes list, and it prints the state of each live
node in the system by traversing each binder_proc's proc->nodes tree.
Both cases require similar logic to maintain the global lock ordering
while accessing each node.

Create a helper function to synchronize around printing binder nodes in
a list. Opportunistically make minor cosmetic changes to binder print
functions.

Acked-by: Carlos Llamas <cmllamas@google.com>
Signed-off-by: Tiffany Y. Yang <ynaffit@google.com>
---
V2->V3: No change (updated version for patch 2/2)
V1->V2: Updated function comment to note dropped locks

 drivers/android/binder.c | 115 ++++++++++++++++++++++-----------------
 1 file changed, 64 insertions(+), 51 deletions(-)

diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 5fc2c8ee61b1..960d055c27d5 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -6377,10 +6377,10 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
 }
 
 static void print_binder_work_ilocked(struct seq_file *m,
-				     struct binder_proc *proc,
-				     const char *prefix,
-				     const char *transaction_prefix,
-				     struct binder_work *w)
+				      struct binder_proc *proc,
+				      const char *prefix,
+				      const char *transaction_prefix,
+				      struct binder_work *w)
 {
 	struct binder_node *node;
 	struct binder_transaction *t;
@@ -6430,7 +6430,7 @@ static void print_binder_work_ilocked(struct seq_file *m,
 
 static void print_binder_thread_ilocked(struct seq_file *m,
 					struct binder_thread *thread,
-					int print_always)
+					bool print_always)
 {
 	struct binder_transaction *t;
 	struct binder_work *w;
@@ -6505,8 +6505,51 @@ static void print_binder_ref_olocked(struct seq_file *m,
 	binder_node_unlock(ref->node);
 }
 
-static void print_binder_proc(struct seq_file *m,
-			      struct binder_proc *proc, int print_all)
+/**
+ * print_next_binder_node_ilocked() - Print binder_node from a locked list
+ * @m:          struct seq_file for output via seq_printf()
+ * @node:       struct binder_node to print fields of
+ * @prev_node:	struct binder_node we hold a temporary reference to (if any)
+ *
+ * Helper function to handle synchronization around printing a struct
+ * binder_node while iterating through @node->proc->nodes or the dead nodes
+ * list. Caller must hold either @node->proc->inner_lock (for live nodes) or
+ * binder_dead_nodes_lock. This lock will be released during the body of this
+ * function, but it will be reacquired before returning to the caller.
+ *
+ * Return:	pointer to the struct binder_node we hold a tmpref on
+ */
+static struct binder_node *
+print_next_binder_node_ilocked(struct seq_file *m, struct binder_node *node,
+			       struct binder_node *prev_node)
+{
+	/*
+	 * Take a temporary reference on the node so that it isn't removed from
+	 * its proc's tree or the dead nodes list while we print it.
+	 */
+	binder_inc_node_tmpref_ilocked(node);
+	/*
+	 * Live nodes need to drop the inner proc lock and dead nodes need to
+	 * drop the binder_dead_nodes_lock before trying to take the node lock.
+	 */
+	if (node->proc)
+		binder_inner_proc_unlock(node->proc);
+	else
+		spin_unlock(&binder_dead_nodes_lock);
+	if (prev_node)
+		binder_put_node(prev_node);
+	binder_node_inner_lock(node);
+	print_binder_node_nilocked(m, node);
+	binder_node_inner_unlock(node);
+	if (node->proc)
+		binder_inner_proc_lock(node->proc);
+	else
+		spin_lock(&binder_dead_nodes_lock);
+	return node;
+}
+
+static void print_binder_proc(struct seq_file *m, struct binder_proc *proc,
+			      bool print_all)
 {
 	struct binder_work *w;
 	struct rb_node *n;
@@ -6519,31 +6562,17 @@ static void print_binder_proc(struct seq_file *m,
 	header_pos = m->count;
 
 	binder_inner_proc_lock(proc);
-	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+	for (n = rb_first(&proc->threads); n; n = rb_next(n))
 		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
 						rb_node), print_all);
 
-	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+	for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
 		struct binder_node *node = rb_entry(n, struct binder_node,
 						    rb_node);
 		if (!print_all && !node->has_async_transaction)
 			continue;
 
-		/*
-		 * take a temporary reference on the node so it
-		 * survives and isn't removed from the tree
-		 * while we print it.
-		 */
-		binder_inc_node_tmpref_ilocked(node);
-		/* Need to drop inner lock to take node lock */
-		binder_inner_proc_unlock(proc);
-		if (last_node)
-			binder_put_node(last_node);
-		binder_node_inner_lock(node);
-		print_binder_node_nilocked(m, node);
-		binder_node_inner_unlock(node);
-		last_node = node;
-		binder_inner_proc_lock(proc);
+		last_node = print_next_binder_node_ilocked(m, node, last_node);
 	}
 	binder_inner_proc_unlock(proc);
 	if (last_node)
@@ -6551,12 +6580,10 @@ static void print_binder_proc(struct seq_file *m,
 
 	if (print_all) {
 		binder_proc_lock(proc);
-		for (n = rb_first(&proc->refs_by_desc);
-		     n != NULL;
-		     n = rb_next(n))
+		for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n))
 			print_binder_ref_olocked(m, rb_entry(n,
-							    struct binder_ref,
-							    rb_node_desc));
+							     struct binder_ref,
+							     rb_node_desc));
 		binder_proc_unlock(proc);
 	}
 	binder_alloc_print_allocated(m, &proc->alloc);
@@ -6696,7 +6723,7 @@ static void print_binder_proc_stats(struct seq_file *m,
 	count = 0;
 	ready_threads = 0;
 	binder_inner_proc_lock(proc);
-	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+	for (n = rb_first(&proc->threads); n; n = rb_next(n))
 		count++;
 
 	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
@@ -6710,7 +6737,7 @@ static void print_binder_proc_stats(struct seq_file *m,
 			ready_threads,
 			free_async_space);
 	count = 0;
-	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
+	for (n = rb_first(&proc->nodes); n; n = rb_next(n))
 		count++;
 	binder_inner_proc_unlock(proc);
 	seq_printf(m, "  nodes: %d\n", count);
@@ -6718,7 +6745,7 @@ static void print_binder_proc_stats(struct seq_file *m,
 	strong = 0;
 	weak = 0;
 	binder_proc_lock(proc);
-	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
 						  rb_node_desc);
 		count++;
@@ -6756,29 +6783,15 @@ static int state_show(struct seq_file *m, void *unused)
 	spin_lock(&binder_dead_nodes_lock);
 	if (!hlist_empty(&binder_dead_nodes))
 		seq_puts(m, "dead nodes:\n");
-	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
-		/*
-		 * take a temporary reference on the node so it
-		 * survives and isn't removed from the list
-		 * while we print it.
-		 */
-		node->tmp_refs++;
-		spin_unlock(&binder_dead_nodes_lock);
-		if (last_node)
-			binder_put_node(last_node);
-		binder_node_lock(node);
-		print_binder_node_nilocked(m, node);
-		binder_node_unlock(node);
-		last_node = node;
-		spin_lock(&binder_dead_nodes_lock);
-	}
+	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
+		last_node = print_next_binder_node_ilocked(m, node, last_node);
 	spin_unlock(&binder_dead_nodes_lock);
 	if (last_node)
 		binder_put_node(last_node);
 
 	mutex_lock(&binder_procs_lock);
 	hlist_for_each_entry(proc, &binder_procs, proc_node)
-		print_binder_proc(m, proc, 1);
+		print_binder_proc(m, proc, true);
 	mutex_unlock(&binder_procs_lock);
 
 	return 0;
@@ -6807,7 +6820,7 @@ static int transactions_show(struct seq_file *m, void *unused)
 	seq_puts(m, "binder transactions:\n");
 	mutex_lock(&binder_procs_lock);
 	hlist_for_each_entry(proc, &binder_procs, proc_node)
-		print_binder_proc(m, proc, 0);
+		print_binder_proc(m, proc, false);
 	mutex_unlock(&binder_procs_lock);
 
 	return 0;
@@ -6822,7 +6835,7 @@ static int proc_show(struct seq_file *m, void *unused)
 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
 		if (itr->pid == pid) {
 			seq_puts(m, "binder proc state:\n");
-			print_binder_proc(m, itr, 1);
+			print_binder_proc(m, itr, true);
 		}
 	}
 	mutex_unlock(&binder_procs_lock);
-- 
2.49.0.987.g0cc8ee98dc-goog


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH v3 2/2] binder: Create safe versions of binder log files
  2025-05-07 21:10 [PATCH v3 1/2] binder: Refactor binder_node print synchronization Tiffany Y. Yang
@ 2025-05-07 21:10 ` Tiffany Y. Yang
  2025-05-07 23:27   ` Carlos Llamas
  2025-05-08 12:27 ` [PATCH v3 1/2] binder: Refactor binder_node print synchronization Alice Ryhl
  1 sibling, 1 reply; 6+ messages in thread
From: Tiffany Y. Yang @ 2025-05-07 21:10 UTC (permalink / raw)
  To: Greg Kroah-Hartman, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Joel Fernandes, Christian Brauner, Carlos Llamas,
	Suren Baghdasaryan
  Cc: linux-kernel, kernel-team, Tiffany Y. Yang

Binder defines several seq_files that can be accessed via debugfs or
binderfs. Some of these files (e.g., 'state' and 'transactions')
contain more granular information about binder's internal state that
is helpful for debugging, but they also leak userspace address data
through user-defined 'cookie' or 'ptr' values. Consequently, access
to these files must be heavily restricted.

Add two new files, 'state_hashed' and 'transactions_hashed', that
reproduce the information in the original files but use the kernel's
raw pointer obfuscation to hash any potential user addresses. This
approach allows systems to grant broader access to the new files
without having to change the security policy around the existing ones.

In practice, userspace populates these fields with user addresses, but
within the driver, these values only serve as unique identifiers for
their associated binder objects. Consequently, binder logs can
obfuscate these values and still retain meaning. While this strategy
prevents leaking information about the userspace memory layout in the
existing log files, it also decouples log messages about binder
objects from their user-defined identifiers.

Acked-by: Carlos Llamas <cmllamas@google.com>
Signed-off-by: Tiffany Y. Yang <ynaffit@google.com>
---
V2->V3: Cast binder_uintptr_t to long before casting to ptr
V1->V2: Update commit message

 drivers/android/binder.c | 106 +++++++++++++++++++++++++++++----------
 1 file changed, 79 insertions(+), 27 deletions(-)

diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 960d055c27d5..5578e72f435b 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -6380,7 +6380,7 @@ static void print_binder_work_ilocked(struct seq_file *m,
 				      struct binder_proc *proc,
 				      const char *prefix,
 				      const char *transaction_prefix,
-				      struct binder_work *w)
+				      struct binder_work *w, bool hash_ptrs)
 {
 	struct binder_node *node;
 	struct binder_transaction *t;
@@ -6403,9 +6403,15 @@ static void print_binder_work_ilocked(struct seq_file *m,
 		break;
 	case BINDER_WORK_NODE:
 		node = container_of(w, struct binder_node, work);
-		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
-			   prefix, node->debug_id,
-			   (u64)node->ptr, (u64)node->cookie);
+		if (hash_ptrs)
+			seq_printf(m, "%snode work %d: u%p c%p\n",
+				   prefix, node->debug_id,
+				   (void *)(long)node->ptr,
+				   (void *)(long)node->cookie);
+		else
+			seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
+				   prefix, node->debug_id,
+				   (u64)node->ptr, (u64)node->cookie);
 		break;
 	case BINDER_WORK_DEAD_BINDER:
 		seq_printf(m, "%shas dead binder\n", prefix);
@@ -6430,7 +6436,7 @@ static void print_binder_work_ilocked(struct seq_file *m,
 
 static void print_binder_thread_ilocked(struct seq_file *m,
 					struct binder_thread *thread,
-					bool print_always)
+					bool print_always, bool hash_ptrs)
 {
 	struct binder_transaction *t;
 	struct binder_work *w;
@@ -6460,14 +6466,16 @@ static void print_binder_thread_ilocked(struct seq_file *m,
 	}
 	list_for_each_entry(w, &thread->todo, entry) {
 		print_binder_work_ilocked(m, thread->proc, "    ",
-					  "    pending transaction", w);
+					  "    pending transaction",
+					  w, hash_ptrs);
 	}
 	if (!print_always && m->count == header_pos)
 		m->count = start_pos;
 }
 
 static void print_binder_node_nilocked(struct seq_file *m,
-				       struct binder_node *node)
+				       struct binder_node *node,
+				       bool hash_ptrs)
 {
 	struct binder_ref *ref;
 	struct binder_work *w;
@@ -6475,8 +6483,13 @@ static void print_binder_node_nilocked(struct seq_file *m,
 
 	count = hlist_count_nodes(&node->refs);
 
-	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
-		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
+	if (hash_ptrs)
+		seq_printf(m, "  node %d: u%p c%p", node->debug_id,
+			   (void *)(long)node->ptr, (void *)(long)node->cookie);
+	else
+		seq_printf(m, "  node %d: u%016llx c%016llx", node->debug_id,
+			   (u64)node->ptr, (u64)node->cookie);
+	seq_printf(m, " hs %d hw %d ls %d lw %d is %d iw %d tr %d",
 		   node->has_strong_ref, node->has_weak_ref,
 		   node->local_strong_refs, node->local_weak_refs,
 		   node->internal_strong_refs, count, node->tmp_refs);
@@ -6489,7 +6502,8 @@ static void print_binder_node_nilocked(struct seq_file *m,
 	if (node->proc) {
 		list_for_each_entry(w, &node->async_todo, entry)
 			print_binder_work_ilocked(m, node->proc, "    ",
-					  "    pending async transaction", w);
+					  "    pending async transaction",
+					  w, hash_ptrs);
 	}
 }
 
@@ -6510,6 +6524,7 @@ static void print_binder_ref_olocked(struct seq_file *m,
  * @m:          struct seq_file for output via seq_printf()
  * @node:       struct binder_node to print fields of
  * @prev_node:	struct binder_node we hold a temporary reference to (if any)
+ * @hash_ptrs:  whether to hash @node's binder_uintptr_t fields
  *
  * Helper function to handle synchronization around printing a struct
  * binder_node while iterating through @node->proc->nodes or the dead nodes
@@ -6521,7 +6536,7 @@ static void print_binder_ref_olocked(struct seq_file *m,
  */
 static struct binder_node *
 print_next_binder_node_ilocked(struct seq_file *m, struct binder_node *node,
-			       struct binder_node *prev_node)
+			       struct binder_node *prev_node, bool hash_ptrs)
 {
 	/*
 	 * Take a temporary reference on the node so that it isn't removed from
@@ -6539,7 +6554,7 @@ print_next_binder_node_ilocked(struct seq_file *m, struct binder_node *node,
 	if (prev_node)
 		binder_put_node(prev_node);
 	binder_node_inner_lock(node);
-	print_binder_node_nilocked(m, node);
+	print_binder_node_nilocked(m, node, hash_ptrs);
 	binder_node_inner_unlock(node);
 	if (node->proc)
 		binder_inner_proc_lock(node->proc);
@@ -6549,7 +6564,7 @@ print_next_binder_node_ilocked(struct seq_file *m, struct binder_node *node,
 }
 
 static void print_binder_proc(struct seq_file *m, struct binder_proc *proc,
-			      bool print_all)
+			      bool print_all, bool hash_ptrs)
 {
 	struct binder_work *w;
 	struct rb_node *n;
@@ -6564,7 +6579,7 @@ static void print_binder_proc(struct seq_file *m, struct binder_proc *proc,
 	binder_inner_proc_lock(proc);
 	for (n = rb_first(&proc->threads); n; n = rb_next(n))
 		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
-						rb_node), print_all);
+						rb_node), print_all, hash_ptrs);
 
 	for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
 		struct binder_node *node = rb_entry(n, struct binder_node,
@@ -6572,7 +6587,8 @@ static void print_binder_proc(struct seq_file *m, struct binder_proc *proc,
 		if (!print_all && !node->has_async_transaction)
 			continue;
 
-		last_node = print_next_binder_node_ilocked(m, node, last_node);
+		last_node = print_next_binder_node_ilocked(m, node, last_node,
+							   hash_ptrs);
 	}
 	binder_inner_proc_unlock(proc);
 	if (last_node)
@@ -6590,7 +6606,8 @@ static void print_binder_proc(struct seq_file *m, struct binder_proc *proc,
 	binder_inner_proc_lock(proc);
 	list_for_each_entry(w, &proc->todo, entry)
 		print_binder_work_ilocked(m, proc, "  ",
-					  "  pending transaction", w);
+					  "  pending transaction", w,
+					  hash_ptrs);
 	list_for_each_entry(w, &proc->delivered_death, entry) {
 		seq_puts(m, "  has delivered dead binder\n");
 		break;
@@ -6772,7 +6789,7 @@ static void print_binder_proc_stats(struct seq_file *m,
 	print_binder_stats(m, "  ", &proc->stats);
 }
 
-static int state_show(struct seq_file *m, void *unused)
+static void print_binder_state(struct seq_file *m, bool hash_ptrs)
 {
 	struct binder_proc *proc;
 	struct binder_node *node;
@@ -6784,16 +6801,38 @@ static int state_show(struct seq_file *m, void *unused)
 	if (!hlist_empty(&binder_dead_nodes))
 		seq_puts(m, "dead nodes:\n");
 	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
-		last_node = print_next_binder_node_ilocked(m, node, last_node);
+		last_node = print_next_binder_node_ilocked(m, node, last_node,
+							   hash_ptrs);
 	spin_unlock(&binder_dead_nodes_lock);
 	if (last_node)
 		binder_put_node(last_node);
 
 	mutex_lock(&binder_procs_lock);
 	hlist_for_each_entry(proc, &binder_procs, proc_node)
-		print_binder_proc(m, proc, true);
+		print_binder_proc(m, proc, true, hash_ptrs);
 	mutex_unlock(&binder_procs_lock);
+}
+
+static void print_binder_transactions(struct seq_file *m, bool hash_ptrs)
+{
+	struct binder_proc *proc;
+
+	seq_puts(m, "binder transactions:\n");
+	mutex_lock(&binder_procs_lock);
+	hlist_for_each_entry(proc, &binder_procs, proc_node)
+		print_binder_proc(m, proc, false, hash_ptrs);
+	mutex_unlock(&binder_procs_lock);
+}
+
+static int state_show(struct seq_file *m, void *unused)
+{
+	print_binder_state(m, false);
+	return 0;
+}
 
+static int state_hashed_show(struct seq_file *m, void *unused)
+{
+	print_binder_state(m, true);
 	return 0;
 }
 
@@ -6815,14 +6854,13 @@ static int stats_show(struct seq_file *m, void *unused)
 
 static int transactions_show(struct seq_file *m, void *unused)
 {
-	struct binder_proc *proc;
-
-	seq_puts(m, "binder transactions:\n");
-	mutex_lock(&binder_procs_lock);
-	hlist_for_each_entry(proc, &binder_procs, proc_node)
-		print_binder_proc(m, proc, false);
-	mutex_unlock(&binder_procs_lock);
+	print_binder_transactions(m, false);
+	return 0;
+}
 
+static int transactions_hashed_show(struct seq_file *m, void *unused)
+{
+	print_binder_transactions(m, true);
 	return 0;
 }
 
@@ -6835,7 +6873,7 @@ static int proc_show(struct seq_file *m, void *unused)
 	hlist_for_each_entry(itr, &binder_procs, proc_node) {
 		if (itr->pid == pid) {
 			seq_puts(m, "binder proc state:\n");
-			print_binder_proc(m, itr, true);
+			print_binder_proc(m, itr, true, false);
 		}
 	}
 	mutex_unlock(&binder_procs_lock);
@@ -6902,8 +6940,10 @@ const struct file_operations binder_fops = {
 };
 
 DEFINE_SHOW_ATTRIBUTE(state);
+DEFINE_SHOW_ATTRIBUTE(state_hashed);
 DEFINE_SHOW_ATTRIBUTE(stats);
 DEFINE_SHOW_ATTRIBUTE(transactions);
+DEFINE_SHOW_ATTRIBUTE(transactions_hashed);
 DEFINE_SHOW_ATTRIBUTE(transaction_log);
 
 const struct binder_debugfs_entry binder_debugfs_entries[] = {
@@ -6913,6 +6953,12 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
 		.fops = &state_fops,
 		.data = NULL,
 	},
+	{
+		.name = "state_hashed",
+		.mode = 0444,
+		.fops = &state_hashed_fops,
+		.data = NULL,
+	},
 	{
 		.name = "stats",
 		.mode = 0444,
@@ -6925,6 +6971,12 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
 		.fops = &transactions_fops,
 		.data = NULL,
 	},
+	{
+		.name = "transactions_hashed",
+		.mode = 0444,
+		.fops = &transactions_hashed_fops,
+		.data = NULL,
+	},
 	{
 		.name = "transaction_log",
 		.mode = 0444,
-- 
2.49.0.987.g0cc8ee98dc-goog


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH v3 2/2] binder: Create safe versions of binder log files
  2025-05-07 21:10 ` [PATCH v3 2/2] binder: Create safe versions of binder log files Tiffany Y. Yang
@ 2025-05-07 23:27   ` Carlos Llamas
  0 siblings, 0 replies; 6+ messages in thread
From: Carlos Llamas @ 2025-05-07 23:27 UTC (permalink / raw)
  To: Tiffany Y. Yang
  Cc: Greg Kroah-Hartman, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Joel Fernandes, Christian Brauner,
	Suren Baghdasaryan, linux-kernel, kernel-team

On Wed, May 07, 2025 at 09:10:06PM +0000, Tiffany Y. Yang wrote:
> Binder defines several seq_files that can be accessed via debugfs or
> binderfs. Some of these files (e.g., 'state' and 'transactions')
> contain more granular information about binder's internal state that
> is helpful for debugging, but they also leak userspace address data
> through user-defined 'cookie' or 'ptr' values. Consequently, access
> to these files must be heavily restricted.
> 
> Add two new files, 'state_hashed' and 'transactions_hashed', that
> reproduce the information in the original files but use the kernel's
> raw pointer obfuscation to hash any potential user addresses. This
> approach allows systems to grant broader access to the new files
> without having to change the security policy around the existing ones.
> 
> In practice, userspace populates these fields with user addresses, but
> within the driver, these values only serve as unique identifiers for
> their associated binder objects. Consequently, binder logs can
> obfuscate these values and still retain meaning. While this strategy
> prevents leaking information about the userspace memory layout in the
> existing log files, it also decouples log messages about binder
> objects from their user-defined identifiers.
> 
> Acked-by: Carlos Llamas <cmllamas@google.com>
> Signed-off-by: Tiffany Y. Yang <ynaffit@google.com>
> ---
> ---
> V2->V3: Cast binder_uintptr_t to long before casting to ptr

I just tried a W=1 build and I confirm it fixes the 32bit warn. Thanks!

Tested-by: Carlos Llamas <cmllamas@google.com>

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v3 1/2] binder: Refactor binder_node print synchronization
  2025-05-07 21:10 [PATCH v3 1/2] binder: Refactor binder_node print synchronization Tiffany Y. Yang
  2025-05-07 21:10 ` [PATCH v3 2/2] binder: Create safe versions of binder log files Tiffany Y. Yang
@ 2025-05-08 12:27 ` Alice Ryhl
  2025-05-08 19:01   ` Tiffany Yang
  1 sibling, 1 reply; 6+ messages in thread
From: Alice Ryhl @ 2025-05-08 12:27 UTC (permalink / raw)
  To: Tiffany Y. Yang
  Cc: Greg Kroah-Hartman, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Joel Fernandes, Christian Brauner, Carlos Llamas,
	Suren Baghdasaryan, linux-kernel, kernel-team

On Wed, May 07, 2025 at 09:10:05PM +0000, Tiffany Y. Yang wrote:
> +/**
> + * print_next_binder_node_ilocked() - Print binder_node from a locked list
> + * @m:          struct seq_file for output via seq_printf()
> + * @node:       struct binder_node to print fields of
> + * @prev_node:	struct binder_node we hold a temporary reference to (if any)
> + *
> + * Helper function to handle synchronization around printing a struct
> + * binder_node while iterating through @node->proc->nodes or the dead nodes
> + * list. Caller must hold either @node->proc->inner_lock (for live nodes) or
> + * binder_dead_nodes_lock. This lock will be released during the body of this
> + * function, but it will be reacquired before returning to the caller.
> + *
> + * Return:	pointer to the struct binder_node we hold a tmpref on
> + */
> +static struct binder_node *
> +print_next_binder_node_ilocked(struct seq_file *m, struct binder_node *node,
> +			       struct binder_node *prev_node)
> +{
> +	/*
> +	 * Take a temporary reference on the node so that isn't removed from
> +	 * its proc's tree or the dead nodes list while we print it.
> +	 */
> +	binder_inc_node_tmpref_ilocked(node);
> +	/*
> +	 * Live nodes need to drop the inner proc lock and dead nodes need to
> +	 * drop the binder_dead_nodes_lock before trying to take the node lock.
> +	 */
> +	if (node->proc)
> +		binder_inner_proc_unlock(node->proc);
> +	else
> +		spin_unlock(&binder_dead_nodes_lock);
> +	if (prev_node)
> +		binder_put_node(prev_node);

I don't buy this logic. Imagine the following scenario:

1. print_binder_proc is called, and we loop over proc->nodes.
2. We call binder_inner_proc_unlock(node->proc).
3. On another thread, binder_deferred_release() is called.
4. The node is removed from proc->nodes and node->proc is set to NULL.
5. Back in print_next_binder_node_ilocked(), we now call
   spin_lock(&binder_dead_nodes_lock) and return.
6. In print_binder_proc(), we think that we hold the proc lock, but
   actually we hold the dead nodes lock instead. BOOM.

What happens with the current code is that print_binder_proc() takes the
proc lock again after the node was removed from proc->nodes, and then it
exits the loop because rb_next(n) returns NULL when called on a node not
in any rb-tree.

Alice

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v3 1/2] binder: Refactor binder_node print synchronization
  2025-05-08 12:27 ` [PATCH v3 1/2] binder: Refactor binder_node print synchronization Alice Ryhl
@ 2025-05-08 19:01   ` Tiffany Yang
  2025-05-09  8:57     ` Alice Ryhl
  0 siblings, 1 reply; 6+ messages in thread
From: Tiffany Yang @ 2025-05-08 19:01 UTC (permalink / raw)
  To: Alice Ryhl
  Cc: Greg Kroah-Hartman, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Joel Fernandes, Christian Brauner, Carlos Llamas,
	Suren Baghdasaryan, linux-kernel, kernel-team

Alice Ryhl <aliceryhl@google.com> writes:

> On Wed, May 07, 2025 at 09:10:05PM +0000, Tiffany Y. Yang wrote:
>> +/**
>> + * print_next_binder_node_ilocked() - Print binder_node from a locked list
>> + * @m:          struct seq_file for output via seq_printf()
>> + * @node:       struct binder_node to print fields of
>> + * @prev_node:	struct binder_node we hold a temporary reference to (if any)
>> + *
>> + * Helper function to handle synchronization around printing a struct
>> + * binder_node while iterating through @node->proc->nodes or the dead nodes
>> + * list. Caller must hold either @node->proc->inner_lock (for live nodes) or
>> + * binder_dead_nodes_lock. This lock will be released during the body of this
>> + * function, but it will be reacquired before returning to the caller.
>> + *
>> + * Return:	pointer to the struct binder_node we hold a tmpref on
>> + */
>> +static struct binder_node *
>> +print_next_binder_node_ilocked(struct seq_file *m, struct binder_node *node,
>> +			       struct binder_node *prev_node)
>> +{
>> +	/*
>> +	 * Take a temporary reference on the node so that isn't removed from
>> +	 * its proc's tree or the dead nodes list while we print it.
>> +	 */
>> +	binder_inc_node_tmpref_ilocked(node);
>> +	/*
>> +	 * Live nodes need to drop the inner proc lock and dead nodes need to
>> +	 * drop the binder_dead_nodes_lock before trying to take the node lock.
>> +	 */
>> +	if (node->proc)
>> +		binder_inner_proc_unlock(node->proc);
>> +	else
>> +		spin_unlock(&binder_dead_nodes_lock);
>> +	if (prev_node)
>> +		binder_put_node(prev_node);
>
> I don't buy this logic. Imagine the following scenario:
>
> 1. print_binder_proc is called, and we loop over proc->nodes.
> 2. We call binder_inner_proc_unlock(node->proc).
> 3. On another thread, binder_deferred_release() is called.
> 4. The node is removed from proc->nodes and node->proc is set to NULL.
> 5. Back in print_next_binder_node_ilocked(), we now call
>    spin_lock(&binder_dead_nodes_lock) and return.
> 6. In print_binder_proc(), we think that we hold the proc lock, but
>    actually we hold the dead nodes lock instead. BOOM.
>
> What happens with the current code is that print_binder_proc() takes the
> proc lock again after the node was removed from proc->nodes, and then it
> exits the loop because rb_next(n) returns NULL when called on a node not
> in any rb-tree.
>
> Alice


Thanks for catching this!! I think this race could be solved by passing
"proc" in as a parameter (NULL if iterating over the dead_nodes_list),
and locking/unlocking based on that instead of node->proc. WDYT?

-- 
Tiffany Y. Yang

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v3 1/2] binder: Refactor binder_node print synchronization
  2025-05-08 19:01   ` Tiffany Yang
@ 2025-05-09  8:57     ` Alice Ryhl
  0 siblings, 0 replies; 6+ messages in thread
From: Alice Ryhl @ 2025-05-09  8:57 UTC (permalink / raw)
  To: Tiffany Yang
  Cc: Greg Kroah-Hartman, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Joel Fernandes, Christian Brauner, Carlos Llamas,
	Suren Baghdasaryan, linux-kernel, kernel-team

On Thu, May 08, 2025 at 07:01:38PM +0000, Tiffany Yang wrote:
> Alice Ryhl <aliceryhl@google.com> writes:
> > I don't buy this logic. Imagine the following scenario:
> >
> > 1. print_binder_proc is called, and we loop over proc->nodes.
> > 2. We call binder_inner_proc_unlock(node->proc).
> > 3. On another thread, binder_deferred_release() is called.
> > 4. The node is removed from proc->nodes and node->proc is set to NULL.
> > 5. Back in print_next_binder_node_ilocked(), we now call
> >    spin_lock(&binder_dead_nodes_lock) and return.
> > 6. In print_binder_proc(), we think that we hold the proc lock, but
> >    actually we hold the dead nodes lock instead. BOOM.
> >
> > What happens with the current code is that print_binder_proc() takes the
> > proc lock again after the node was removed from proc->nodes, and then it
> > exits the loop because rb_next(n) returns NULL when called on a node not
> > in any rb-tree.
> >
> > Alice
> 
> 
> Thanks for catching this!! I think this race could be solved by passing
> "proc" in as a parameter (NULL if iterating over the dead_nodes_list),
> and locking/unlocking based on that instead of node->proc. WDYT?

I believe that would work.

Alice

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2025-05-09  8:57 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2025-05-07 21:10 [PATCH v3 1/2] binder: Refactor binder_node print synchronization Tiffany Y. Yang
2025-05-07 21:10 ` [PATCH v3 2/2] binder: Create safe versions of binder log files Tiffany Y. Yang
2025-05-07 23:27   ` Carlos Llamas
2025-05-08 12:27 ` [PATCH v3 1/2] binder: Refactor binder_node print synchronization Alice Ryhl
2025-05-08 19:01   ` Tiffany Yang
2025-05-09  8:57     ` Alice Ryhl

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox