From: Xin Li <xin3.li@intel.com>
To: linux-kernel@vger.kernel.org,
platform-driver-x86@vger.kernel.org, iommu@lists.linux.dev,
linux-hyperv@vger.kernel.org, linux-perf-users@vger.kernel.org,
x86@kernel.org
Cc: tglx@linutronix.de, mingo@redhat.com, bp@alien8.de,
dave.hansen@linux.intel.com, hpa@zytor.com, steve.wahl@hpe.com,
mike.travis@hpe.com, dimitri.sivanich@hpe.com,
russ.anderson@hpe.com, dvhart@infradead.org, andy@infradead.org,
joro@8bytes.org, suravee.suthikulpanit@amd.com, will@kernel.org,
robin.murphy@arm.com, kys@microsoft.com, haiyangz@microsoft.com,
wei.liu@kernel.org, decui@microsoft.com, dwmw2@infradead.org,
baolu.lu@linux.intel.com, peterz@infradead.org, acme@kernel.org,
mark.rutland@arm.com, alexander.shishkin@linux.intel.com,
jolsa@kernel.org, namhyung@kernel.org, irogers@google.com,
adrian.hunter@intel.com, xin3.li@intel.com, seanjc@google.com,
jiangshanlai@gmail.com, jgg@ziepe.ca, yangtiezhu@loongson.cn
Subject: [PATCH 1/3] x86/vector: Rename send_cleanup_vector() to vector_schedule_cleanup()
Date: Mon, 19 Jun 2023 16:16:09 -0700
Message-ID: <20230619231611.2230-2-xin3.li@intel.com>
In-Reply-To: <20230619231611.2230-1-xin3.li@intel.com>
From: Thomas Gleixner <tglx@linutronix.de>
Rename send_cleanup_vector() to vector_schedule_cleanup() in preparation
for the next patch, which replaces the vector cleanup IPI with a timer
callback.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Xin Li <xin3.li@intel.com>
---
arch/x86/include/asm/hw_irq.h | 4 ++--
arch/x86/kernel/apic/vector.c | 8 ++++----
arch/x86/platform/uv/uv_irq.c | 2 +-
drivers/iommu/amd/iommu.c | 2 +-
drivers/iommu/hyperv-iommu.c | 4 ++--
drivers/iommu/intel/irq_remapping.c | 2 +-
6 files changed, 11 insertions(+), 11 deletions(-)
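
Note (not part of this patch): purely as context for why the rename makes
sense, below is a rough, illustrative sketch of how a follow-up could arm a
per-CPU timer instead of raising the cleanup IPI. The identifier names, the
prev_cpu field use, the one-jiffy delay and the timer initialization are
assumptions for illustration only, not the actual patch 2/3 code; locking is
omitted for brevity.

/* Assumed to be initialized with timer_setup() during APIC setup. */
static DEFINE_PER_CPU(struct timer_list, vector_cleanup_timer);

static void vector_cleanup_callback(struct timer_list *tmr)
{
	/* Walk this CPU's list of vectors pending cleanup and release them. */
}

static void __vector_schedule_cleanup(struct apic_chip_data *apicd)
{
	unsigned int cpu = apicd->prev_cpu;	/* assumed field for the old CPU */
	struct timer_list *tmr = per_cpu_ptr(&vector_cleanup_timer, cpu);

	/* Arm the previous CPU's timer instead of sending an IPI. */
	if (!timer_pending(tmr)) {
		tmr->expires = jiffies + 1;
		add_timer_on(tmr, cpu);
	}
}
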
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index d465ece58151..551829884734 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -97,10 +97,10 @@ extern struct irq_cfg *irqd_cfg(struct irq_data *irq_data);
extern void lock_vector_lock(void);
extern void unlock_vector_lock(void);
#ifdef CONFIG_SMP
-extern void send_cleanup_vector(struct irq_cfg *);
+extern void vector_schedule_cleanup(struct irq_cfg *);
extern void irq_complete_move(struct irq_cfg *cfg);
#else
-static inline void send_cleanup_vector(struct irq_cfg *c) { }
+static inline void vector_schedule_cleanup(struct irq_cfg *c) { }
static inline void irq_complete_move(struct irq_cfg *c) { }
#endif
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index c1efebd27e6c..aa370bd0d933 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -967,7 +967,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_irq_move_cleanup)
raw_spin_unlock(&vector_lock);
}
-static void __send_cleanup_vector(struct apic_chip_data *apicd)
+static void __vector_schedule_cleanup(struct apic_chip_data *apicd)
{
unsigned int cpu;
@@ -983,13 +983,13 @@ static void __send_cleanup_vector(struct apic_chip_data *apicd)
raw_spin_unlock(&vector_lock);
}
-void send_cleanup_vector(struct irq_cfg *cfg)
+void vector_schedule_cleanup(struct irq_cfg *cfg)
{
struct apic_chip_data *apicd;
apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
if (apicd->move_in_progress)
- __send_cleanup_vector(apicd);
+ __vector_schedule_cleanup(apicd);
}
void irq_complete_move(struct irq_cfg *cfg)
@@ -1007,7 +1007,7 @@ void irq_complete_move(struct irq_cfg *cfg)
* on the same CPU.
*/
if (apicd->cpu == smp_processor_id())
- __send_cleanup_vector(apicd);
+ __vector_schedule_cleanup(apicd);
}
/*
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index ee21d6a36a80..4221259a5870 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -58,7 +58,7 @@ uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
ret = parent->chip->irq_set_affinity(parent, mask, force);
if (ret >= 0) {
uv_program_mmr(cfg, data->chip_data);
- send_cleanup_vector(cfg);
+ vector_schedule_cleanup(cfg);
}
return ret;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index dc1ec6849775..b5900e70de60 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -3658,7 +3658,7 @@ static int amd_ir_set_affinity(struct irq_data *data,
* at the new destination. So, time to cleanup the previous
* vector allocation.
*/
- send_cleanup_vector(cfg);
+ vector_schedule_cleanup(cfg);
return IRQ_SET_MASK_OK_DONE;
}
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
index 8302db7f783e..8a5c17b97310 100644
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -51,7 +51,7 @@ static int hyperv_ir_set_affinity(struct irq_data *data,
if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
return ret;
- send_cleanup_vector(cfg);
+ vector_schedule_cleanup(cfg);
return 0;
}
@@ -257,7 +257,7 @@ static int hyperv_root_ir_set_affinity(struct irq_data *data,
if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
return ret;
- send_cleanup_vector(cfg);
+ vector_schedule_cleanup(cfg);
return 0;
}
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index a1b987335b31..55d899f5a14b 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -1180,7 +1180,7 @@ intel_ir_set_affinity(struct irq_data *data, const struct cpumask *mask,
* at the new destination. So, time to cleanup the previous
* vector allocation.
*/
- send_cleanup_vector(cfg);
+ vector_schedule_cleanup(cfg);
return IRQ_SET_MASK_OK_DONE;
}
--
2.34.1