From: Mostafa Saleh <smostafa@google.com>
To: Jason Gunthorpe <jgg@nvidia.com>
Cc: iommu@lists.linux.dev, Joerg Roedel <joro@8bytes.org>,
linux-arm-kernel@lists.infradead.org,
Robin Murphy <robin.murphy@arm.com>,
Will Deacon <will@kernel.org>, Eric Auger <eric.auger@redhat.com>,
Jean-Philippe Brucker <jean-philippe@linaro.org>,
Moritz Fischer <mdf@kernel.org>,
Michael Shavit <mshavit@google.com>,
Nicolin Chen <nicolinc@nvidia.com>,
patches@lists.linux.dev,
Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>
Subject: Re: [PATCH v5 04/27] iommu/arm-smmu-v3: Add an ops indirection to the STE code
Date: Fri, 22 Mar 2024 18:14:24 +0000
Message-ID: <Zf3KgGj4Tfc8ytgi@google.com>
In-Reply-To: <4-v5-9a37e0c884ce+31e3-smmuv3_newapi_p2_jgg@nvidia.com>
Hi Jason,
On Mon, Mar 04, 2024 at 07:43:52PM -0400, Jason Gunthorpe wrote:
> Prepare to put the CD code into the same mechanism. Add an ops indirection
> around all the STE specific code and make the worker functions independent
> of the entry content being processed.
>
> get_used and sync ops are provided to hook the correct code.
>
> Signed-off-by: Michael Shavit <mshavit@google.com>
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> ---
> drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 172 ++++++++++++--------
> 1 file changed, 104 insertions(+), 68 deletions(-)
>
> diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
> index c60b067c1f553e..b7f947e36f596f 100644
> --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
> +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
> @@ -48,8 +48,20 @@ enum arm_smmu_msi_index {
> ARM_SMMU_MAX_MSIS,
> };
>
> -static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu,
> - ioasid_t sid);
> +struct arm_smmu_entry_writer_ops;
> +struct arm_smmu_entry_writer {
> + const struct arm_smmu_entry_writer_ops *ops;
> + struct arm_smmu_master *master;
> +};
> +
> +struct arm_smmu_entry_writer_ops {
> + unsigned int num_entry_qwords;
> + __le64 v_bit;
> + void (*get_used)(const __le64 *entry, __le64 *used);
> + void (*sync)(struct arm_smmu_entry_writer *writer);
> +};
> +
> +#define NUM_ENTRY_QWORDS (sizeof(struct arm_smmu_ste) / sizeof(u64))
>
> static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
> [EVTQ_MSI_INDEX] = {
> @@ -982,43 +994,42 @@ void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
> * would be nice if this was complete according to the spec, but minimally it
> * has to capture the bits this driver uses.
> */
> -static void arm_smmu_get_ste_used(const struct arm_smmu_ste *ent,
> - struct arm_smmu_ste *used_bits)
> +static void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits)
> {
> - unsigned int cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(ent->data[0]));
> + unsigned int cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(ent[0]));
>
> - used_bits->data[0] = cpu_to_le64(STRTAB_STE_0_V);
> - if (!(ent->data[0] & cpu_to_le64(STRTAB_STE_0_V)))
> + used_bits[0] = cpu_to_le64(STRTAB_STE_0_V);
> + if (!(ent[0] & cpu_to_le64(STRTAB_STE_0_V)))
> return;
>
> - used_bits->data[0] |= cpu_to_le64(STRTAB_STE_0_CFG);
> + used_bits[0] |= cpu_to_le64(STRTAB_STE_0_CFG);
>
> /* S1 translates */
> if (cfg & BIT(0)) {
> - used_bits->data[0] |= cpu_to_le64(STRTAB_STE_0_S1FMT |
> - STRTAB_STE_0_S1CTXPTR_MASK |
> - STRTAB_STE_0_S1CDMAX);
> - used_bits->data[1] |=
> + used_bits[0] |= cpu_to_le64(STRTAB_STE_0_S1FMT |
> + STRTAB_STE_0_S1CTXPTR_MASK |
> + STRTAB_STE_0_S1CDMAX);
> + used_bits[1] |=
> cpu_to_le64(STRTAB_STE_1_S1DSS | STRTAB_STE_1_S1CIR |
> STRTAB_STE_1_S1COR | STRTAB_STE_1_S1CSH |
> STRTAB_STE_1_S1STALLD | STRTAB_STE_1_STRW |
> STRTAB_STE_1_EATS);
> - used_bits->data[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID);
> + used_bits[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID);
> }
>
> /* S2 translates */
> if (cfg & BIT(1)) {
> - used_bits->data[1] |=
> + used_bits[1] |=
> cpu_to_le64(STRTAB_STE_1_EATS | STRTAB_STE_1_SHCFG);
> - used_bits->data[2] |=
> + used_bits[2] |=
> cpu_to_le64(STRTAB_STE_2_S2VMID | STRTAB_STE_2_VTCR |
> STRTAB_STE_2_S2AA64 | STRTAB_STE_2_S2ENDI |
> STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2R);
> - used_bits->data[3] |= cpu_to_le64(STRTAB_STE_3_S2TTB_MASK);
> + used_bits[3] |= cpu_to_le64(STRTAB_STE_3_S2TTB_MASK);
> }
>
> if (cfg == STRTAB_STE_0_CFG_BYPASS)
> - used_bits->data[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG);
> + used_bits[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG);
> }
>
> /*
> @@ -1027,57 +1038,55 @@ static void arm_smmu_get_ste_used(const struct arm_smmu_ste *ent,
> * unused_update is an intermediate value of entry that has unused bits set to
> * their new values.
> */
> -static u8 arm_smmu_entry_qword_diff(const struct arm_smmu_ste *entry,
> - const struct arm_smmu_ste *target,
> - struct arm_smmu_ste *unused_update)
> +static u8 arm_smmu_entry_qword_diff(struct arm_smmu_entry_writer *writer,
> + const __le64 *entry, const __le64 *target,
> + __le64 *unused_update)
> {
> - struct arm_smmu_ste target_used = {};
> - struct arm_smmu_ste cur_used = {};
> + __le64 target_used[NUM_ENTRY_QWORDS] = {};
> + __le64 cur_used[NUM_ENTRY_QWORDS] = {};
This is confusing to me: the function was made generic, so its args are
__le64 * instead of struct arm_smmu_ste *. But NUM_ENTRY_QWORDS is
defined as "(sizeof(struct arm_smmu_ste) / sizeof(u64))", while in the
same function writer->ops->num_entry_qwords is used interchangeably.
I understand that the ops field is not a compile-time constant, so it
can't size the on-stack arrays and the compiler would complain.
But since any num_entry_qwords larger than NUM_ENTRY_QWORDS would
overflow those arrays anyway, and we know STEs and CDs have the same
size, we could simplify the code and make it a constant everywhere.
I see that the next patch redefines this as the max of the STE and CD
sizes, but again, this is fixed by the hardware and never changes, so my
opinion is to simplify the code, as there is no need to generalize this
part.
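A rough sketch of what I mean (untested, and assuming the struct
arm_smmu_cd type from patch 03 keeps the same 64-byte size as the STE):

	/* One constant for both entry types, checked at build time */
	#define NUM_ENTRY_QWORDS (sizeof(struct arm_smmu_ste) / sizeof(u64))
	static_assert(sizeof(struct arm_smmu_cd) == sizeof(struct arm_smmu_ste));

	struct arm_smmu_entry_writer_ops {
		/* num_entry_qwords dropped, callers use NUM_ENTRY_QWORDS */
		__le64 v_bit;
		void (*get_used)(const __le64 *entry, __le64 *used);
		void (*sync)(struct arm_smmu_entry_writer *writer);
	};

	static u8 arm_smmu_entry_qword_diff(struct arm_smmu_entry_writer *writer,
					    const __le64 *entry,
					    const __le64 *target,
					    __le64 *unused_update)
	{
		__le64 target_used[NUM_ENTRY_QWORDS] = {};
		__le64 cur_used[NUM_ENTRY_QWORDS] = {};
		u8 used_qword_diff = 0;
		unsigned int i;

		writer->ops->get_used(entry, cur_used);
		writer->ops->get_used(target, target_used);

		/* The arrays and the loop now agree on a single size */
		for (i = 0; i != NUM_ENTRY_QWORDS; i++) {
			WARN_ON_ONCE(target[i] & ~target_used[i]);
			unused_update[i] = (entry[i] & cur_used[i]) |
					   (target[i] & ~cur_used[i]);
			if ((unused_update[i] & target_used[i]) != target[i])
				used_qword_diff |= 1 << i;
		}
		return used_qword_diff;
	}

That also drops the writer->ops->num_entry_qwords load in the loop, and
the static_assert documents the assumption instead of burying it in the
ops structs.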
> u8 used_qword_diff = 0;
> unsigned int i;
>
> - arm_smmu_get_ste_used(entry, &cur_used);
> - arm_smmu_get_ste_used(target, &target_used);
> + writer->ops->get_used(entry, cur_used);
> + writer->ops->get_used(target, target_used);
>
> - for (i = 0; i != ARRAY_SIZE(target_used.data); i++) {
> + for (i = 0; i != writer->ops->num_entry_qwords; i++) {
> /*
> * Check that masks are up to date, the make functions are not
> * allowed to set a bit to 1 if the used function doesn't say it
> * is used.
> */
> - WARN_ON_ONCE(target->data[i] & ~target_used.data[i]);
> + WARN_ON_ONCE(target[i] & ~target_used[i]);
>
> /* Bits can change because they are not currently being used */
> - unused_update->data[i] = (entry->data[i] & cur_used.data[i]) |
> - (target->data[i] & ~cur_used.data[i]);
> + unused_update[i] = (entry[i] & cur_used[i]) |
> + (target[i] & ~cur_used[i]);
> /*
> * Each bit indicates that a used bit in a qword needs to be
> * changed after unused_update is applied.
> */
> - if ((unused_update->data[i] & target_used.data[i]) !=
> - target->data[i])
> + if ((unused_update[i] & target_used[i]) != target[i])
> used_qword_diff |= 1 << i;
> }
> return used_qword_diff;
> }
>
> -static bool entry_set(struct arm_smmu_device *smmu, ioasid_t sid,
> - struct arm_smmu_ste *entry,
> - const struct arm_smmu_ste *target, unsigned int start,
> +static bool entry_set(struct arm_smmu_entry_writer *writer, __le64 *entry,
> + const __le64 *target, unsigned int start,
> unsigned int len)
> {
> bool changed = false;
> unsigned int i;
>
> for (i = start; len != 0; len--, i++) {
> - if (entry->data[i] != target->data[i]) {
> - WRITE_ONCE(entry->data[i], target->data[i]);
> + if (entry[i] != target[i]) {
> + WRITE_ONCE(entry[i], target[i]);
> changed = true;
> }
> }
>
> if (changed)
> - arm_smmu_sync_ste_for_sid(smmu, sid);
> + writer->ops->sync(writer);
> return changed;
> }
>
> @@ -1107,17 +1116,15 @@ static bool entry_set(struct arm_smmu_device *smmu, ioasid_t sid,
> * V=0 process. This relies on the IGNORED behavior described in the
> * specification.
> */
> -static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
> - struct arm_smmu_ste *entry,
> - const struct arm_smmu_ste *target)
> +static void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer,
> + __le64 *entry, const __le64 *target)
> {
> - unsigned int num_entry_qwords = ARRAY_SIZE(target->data);
> - struct arm_smmu_device *smmu = master->smmu;
> - struct arm_smmu_ste unused_update;
> + unsigned int num_entry_qwords = writer->ops->num_entry_qwords;
> + __le64 unused_update[NUM_ENTRY_QWORDS];
> u8 used_qword_diff;
>
> used_qword_diff =
> - arm_smmu_entry_qword_diff(entry, target, &unused_update);
> + arm_smmu_entry_qword_diff(writer, entry, target, unused_update);
> if (hweight8(used_qword_diff) == 1) {
> /*
> * Only one qword needs its used bits to be changed. This is a
> @@ -1133,22 +1140,21 @@ static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
> * writing it in the next step anyways. This can save a sync
> * when the only change is in that qword.
> */
> - unused_update.data[critical_qword_index] =
> - entry->data[critical_qword_index];
> - entry_set(smmu, sid, entry, &unused_update, 0, num_entry_qwords);
> - entry_set(smmu, sid, entry, target, critical_qword_index, 1);
> - entry_set(smmu, sid, entry, target, 0, num_entry_qwords);
> + unused_update[critical_qword_index] =
> + entry[critical_qword_index];
> + entry_set(writer, entry, unused_update, 0, num_entry_qwords);
> + entry_set(writer, entry, target, critical_qword_index, 1);
> + entry_set(writer, entry, target, 0, num_entry_qwords);
> } else if (used_qword_diff) {
> /*
> * At least two qwords need their inuse bits to be changed. This
> * requires a breaking update, zero the V bit, write all qwords
> * but 0, then set qword 0
> */
> - unused_update.data[0] = entry->data[0] &
> - cpu_to_le64(~STRTAB_STE_0_V);
> - entry_set(smmu, sid, entry, &unused_update, 0, 1);
> - entry_set(smmu, sid, entry, target, 1, num_entry_qwords - 1);
> - entry_set(smmu, sid, entry, target, 0, 1);
> + unused_update[0] = entry[0] & (~writer->ops->v_bit);
> + entry_set(writer, entry, unused_update, 0, 1);
> + entry_set(writer, entry, target, 1, num_entry_qwords - 1);
> + entry_set(writer, entry, target, 0, 1);
> } else {
> /*
> * No inuse bit changed. Sanity check that all unused bits are 0
> @@ -1156,18 +1162,7 @@ static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
> * compute_qword_diff().
> */
> WARN_ON_ONCE(
> - entry_set(smmu, sid, entry, target, 0, num_entry_qwords));
> - }
> -
> - /* It's likely that we'll want to use the new STE soon */
> - if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) {
> - struct arm_smmu_cmdq_ent
> - prefetch_cmd = { .opcode = CMDQ_OP_PREFETCH_CFG,
> - .prefetch = {
> - .sid = sid,
> - } };
> -
> - arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
> + entry_set(writer, entry, target, 0, num_entry_qwords));
> }
> }
>
> @@ -1440,17 +1435,58 @@ arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
> WRITE_ONCE(*dst, cpu_to_le64(val));
> }
>
> -static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
> +struct arm_smmu_ste_writer {
> + struct arm_smmu_entry_writer writer;
> + u32 sid;
> +};
> +
> +static void arm_smmu_ste_writer_sync_entry(struct arm_smmu_entry_writer *writer)
> {
> + struct arm_smmu_ste_writer *ste_writer =
> + container_of(writer, struct arm_smmu_ste_writer, writer);
> struct arm_smmu_cmdq_ent cmd = {
> .opcode = CMDQ_OP_CFGI_STE,
> .cfgi = {
> - .sid = sid,
> + .sid = ste_writer->sid,
> .leaf = true,
> },
> };
>
> - arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
> + arm_smmu_cmdq_issue_cmd_with_sync(writer->master->smmu, &cmd);
> +}
> +
> +static const struct arm_smmu_entry_writer_ops arm_smmu_ste_writer_ops = {
> + .sync = arm_smmu_ste_writer_sync_entry,
> + .get_used = arm_smmu_get_ste_used,
> + .v_bit = cpu_to_le64(STRTAB_STE_0_V),
> + .num_entry_qwords = sizeof(struct arm_smmu_ste) / sizeof(u64),
> +};
> +
> +static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
> + struct arm_smmu_ste *ste,
> + const struct arm_smmu_ste *target)
> +{
> + struct arm_smmu_device *smmu = master->smmu;
> + struct arm_smmu_ste_writer ste_writer = {
> + .writer = {
> + .ops = &arm_smmu_ste_writer_ops,
> + .master = master,
> + },
> + .sid = sid,
> + };
> +
> + arm_smmu_write_entry(&ste_writer.writer, ste->data, target->data);
> +
> + /* It's likely that we'll want to use the new STE soon */
> + if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) {
> + struct arm_smmu_cmdq_ent
> + prefetch_cmd = { .opcode = CMDQ_OP_PREFETCH_CFG,
> + .prefetch = {
> + .sid = sid,
> + } };
> +
> + arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
> + }
> }
>
> static void arm_smmu_make_abort_ste(struct arm_smmu_ste *target)
> --
> 2.43.2
>
Thanks,
Mostafa