From: Helge Deller <deller@gmx.de>
To: Richard Henderson <richard.henderson@linaro.org>, qemu-devel@nongnu.org
Subject: Re: [PATCH 11/13] linux-user: Use WITH_MMAP_LOCK_GUARD in target_{shmat, shmdt}
Date: Fri, 1 Sep 2023 16:58:26 +0200	[thread overview]
Message-ID: <15405ae0-1422-05f6-eb82-38e8dc9af2bc@gmx.de> (raw)
In-Reply-To: <20230824010237.1379735-12-richard.henderson@linaro.org>

On 8/24/23 03:02, Richard Henderson wrote:
> Move the CF_PARALLEL setting outside of the mmap lock.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Helge Deller <deller@gmx.de>

> ---
>   linux-user/mmap.c | 98 ++++++++++++++++++++++-------------------------
>   1 file changed, 46 insertions(+), 52 deletions(-)
>
> diff --git a/linux-user/mmap.c b/linux-user/mmap.c
> index 3aeacd1ecd..f45b2d307c 100644
> --- a/linux-user/mmap.c
> +++ b/linux-user/mmap.c
> @@ -1017,9 +1017,8 @@ abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
>   {
>       CPUState *cpu = env_cpu(cpu_env);
>       abi_ulong raddr;
> -    void *host_raddr;
>       struct shmid_ds shm_info;
> -    int i, ret;
> +    int ret;
>       abi_ulong shmlba;
>
>       /* shmat pointers are always untagged */
> @@ -1044,7 +1043,43 @@ abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
>           return -TARGET_EINVAL;
>       }
>
> -    mmap_lock();
> +    WITH_MMAP_LOCK_GUARD() {
> +        void *host_raddr;
> +
> +        if (shmaddr) {
> +            host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
> +        } else {
> +            abi_ulong mmap_start;
> +
> +            /* In order to use the host shmat, we need to honor host SHMLBA.  */
> +            mmap_start = mmap_find_vma(0, shm_info.shm_segsz,
> +                                       MAX(SHMLBA, shmlba));
> +
> +            if (mmap_start == -1) {
> +                return -TARGET_ENOMEM;
> +            }
> +            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
> +                               shmflg | SHM_REMAP);
> +        }
> +
> +        if (host_raddr == (void *)-1) {
> +            return get_errno(-1);
> +        }
> +        raddr = h2g(host_raddr);
> +
> +        page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
> +                       PAGE_VALID | PAGE_RESET | PAGE_READ |
> +                       (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
> +
> +        for (int i = 0; i < N_SHM_REGIONS; i++) {
> +            if (!shm_regions[i].in_use) {
> +                shm_regions[i].in_use = true;
> +                shm_regions[i].start = raddr;
> +                shm_regions[i].size = shm_info.shm_segsz;
> +                break;
> +            }
> +        }
> +    }
>
>       /*
>        * We're mapping shared memory, so ensure we generate code for parallel
> @@ -1057,65 +1092,24 @@ abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
>           tb_flush(cpu);
>       }
>
> -    if (shmaddr) {
> -        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
> -    } else {
> -        abi_ulong mmap_start;
> -
> -        /* In order to use the host shmat, we need to honor host SHMLBA.  */
> -        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
> -
> -        if (mmap_start == -1) {
> -            errno = ENOMEM;
> -            host_raddr = (void *)-1;
> -        } else {
> -            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
> -                               shmflg | SHM_REMAP);
> -        }
> -    }
> -
> -    if (host_raddr == (void *)-1) {
> -        mmap_unlock();
> -        return get_errno((intptr_t)host_raddr);
> -    }
> -    raddr = h2g((uintptr_t)host_raddr);
> -
> -    page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
> -                   PAGE_VALID | PAGE_RESET | PAGE_READ |
> -                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
> -
> -    for (i = 0; i < N_SHM_REGIONS; i++) {
> -        if (!shm_regions[i].in_use) {
> -            shm_regions[i].in_use = true;
> -            shm_regions[i].start = raddr;
> -            shm_regions[i].size = shm_info.shm_segsz;
> -            break;
> -        }
> -    }
> -
> -    mmap_unlock();
>       return raddr;
>   }
>
>   abi_long target_shmdt(abi_ulong shmaddr)
>   {
> -    int i;
>       abi_long rv;
>
>       /* shmdt pointers are always untagged */
>
> -    mmap_lock();
> -
> -    for (i = 0; i < N_SHM_REGIONS; ++i) {
> -        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
> -            shm_regions[i].in_use = false;
> -            page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
> -            break;
> +    WITH_MMAP_LOCK_GUARD() {
> +        for (int i = 0; i < N_SHM_REGIONS; ++i) {
> +            if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
> +                shm_regions[i].in_use = false;
> +                page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
> +                break;
> +            }
>           }
> +        rv = get_errno(shmdt(g2h_untagged(shmaddr)));
>       }
> -    rv = get_errno(shmdt(g2h_untagged(shmaddr)));
> -
> -    mmap_unlock();
> -
>       return rv;
>   }
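
A side note for anyone not familiar with the guard macro: the early
returns inside the WITH_MMAP_LOCK_GUARD() block (e.g. the
-TARGET_ENOMEM path above) are safe because the macro is a scoped-lock
construct, so the lock is released automatically whenever control
leaves the block. A minimal, self-contained sketch of the general
pattern is below; it is only an illustration with hypothetical demo_*
names, not QEMU's actual definition of WITH_MMAP_LOCK_GUARD().

#include <pthread.h>

static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Cleanup handler: runs when the guard variable goes out of scope. */
static void demo_guard_unlock(int *unused)
{
    (void)unused;
    pthread_mutex_unlock(&demo_mutex);
}

/*
 * Run the body exactly once with demo_mutex held; the cleanup
 * attribute unlocks on every exit path, including early return.
 */
#define WITH_DEMO_LOCK_GUARD() \
    for (int _guard __attribute__((cleanup(demo_guard_unlock))) = \
             (pthread_mutex_lock(&demo_mutex), 0); \
         !_guard; _guard = 1)

int demo_protected_op(int fail)
{
    WITH_DEMO_LOCK_GUARD() {
        if (fail) {
            return -1;   /* lock still released by the cleanup handler */
        }
        /* ... critical section ... */
    }
    return 0;
}

QEMU's real macro is built on its own locking primitives, but the
control flow is the same, which is why the patch can drop the explicit
mmap_unlock() calls on the error paths.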


