* Linux 6.12.86
@ 2026-05-07 4:52 Greg Kroah-Hartman
2026-05-07 4:52 ` Greg Kroah-Hartman
0 siblings, 1 reply; 2+ messages in thread
From: Greg Kroah-Hartman @ 2026-05-07 4:52 UTC (permalink / raw)
To: linux-kernel, akpm, torvalds, stable; +Cc: lwn, jslaby, Greg Kroah-Hartman
I'm announcing the release of the 6.12.86 kernel.
All users of the 6.12 kernel series must upgrade.
The updated 6.12.y git tree can be found at:
git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-6.12.y
and can be browsed at the normal kernel.org git web browser:
https://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git;a=summary
thanks,
greg k-h
------------
Makefile | 3
arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi | 5
arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi | 20 -
arch/arm64/crypto/aes-modes.S | 4
arch/arm64/mm/mmu.c | 36 +-
arch/loongarch/kernel/cpu-probe.c | 7
arch/loongarch/kernel/syscall.c | 3
arch/parisc/kernel/syscalls/syscall.tbl | 2
arch/um/drivers/cow_user.c | 8
arch/x86/kvm/hyperv.h | 8
arch/x86/kvm/svm/hyperv.h | 9
arch/x86/kvm/svm/nested.c | 55 ++--
arch/x86/kvm/svm/svm.c | 32 ++
arch/x86/kvm/svm/svm.h | 1
arch/x86/kvm/x86.c | 62 ++--
arch/x86/mm/pti.c | 5
block/bio-integrity.c | 2
block/bio.c | 14 -
block/blk-zoned.c | 12
block/blk.h | 19 +
certs/extract-cert.c | 6
crypto/authencesn.c | 5
crypto/pcrypt.c | 7
drivers/base/core.c | 39 ++
drivers/base/dd.c | 20 +
drivers/block/rbd.c | 6
drivers/block/zram/zram_drv.c | 3
drivers/bus/imx-weim.c | 2
drivers/bus/mhi/host/pci_generic.c | 2
drivers/char/tpm/tpm-chip.c | 2
drivers/char/tpm/tpm2-cmd.c | 6
drivers/char/tpm/tpm_tis_core.c | 11
drivers/crypto/atmel-aes.c | 2
drivers/crypto/atmel-ecc.c | 1
drivers/crypto/atmel-i2c.c | 4
drivers/crypto/atmel-sha204a.c | 37 +-
drivers/crypto/atmel-tdes.c | 8
drivers/crypto/ccree/cc_hash.c | 1
drivers/crypto/hisilicon/sec/sec_algs.c | 2
drivers/crypto/nx/nx-842.h | 4
drivers/crypto/talitos.c | 254 ++++++++++++-------
drivers/firmware/google/framebuffer-coreboot.c | 12
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 43 +--
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 3
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 52 +++
drivers/gpu/drm/nouveau/nouveau_drm.c | 2
drivers/gpu/drm/nouveau/nouveau_gem.c | 2
drivers/gpu/drm/tiny/arcpgu.c | 3
drivers/greybus/gb-beagleplay.c | 112 +++++++-
drivers/hid/hid-apple.c | 2
drivers/hwmon/powerz.c | 11
drivers/hwmon/pt5161l.c | 4
drivers/i2c/i2c-core-of.c | 2
drivers/iio/adc/ad7768-1.c | 9
drivers/iio/adc/ti-ads7950.c | 11
drivers/iio/frequency/admv1013.c | 90 +++---
drivers/infiniband/core/addr.c | 3
drivers/infiniband/hw/mana/qp.c | 15 +
drivers/infiniband/sw/rxe/rxe_recv.c | 3
drivers/leds/rgb/leds-qcom-lpg.c | 7
drivers/md/dm-raid1.c | 6
drivers/md/raid10.c | 4
drivers/md/raid5-cache.c | 48 ++-
drivers/md/raid5.c | 8
drivers/media/i2c/imx219.c | 3
drivers/media/platform/amphion/vpu_v4l2.c | 9
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c | 1
drivers/media/rc/igorplugusb.c | 16 -
drivers/media/rc/ttusbir.c | 13
drivers/mfd/mfd-core.c | 12
drivers/mfd/stpmic1.c | 20 +
drivers/misc/ibmasm/ibmasmfs.c | 7
drivers/misc/ibmasm/lowlevel.c | 12
drivers/misc/ibmasm/remote.c | 5
drivers/mmc/core/block.c | 12
drivers/mmc/core/queue.h | 3
drivers/mmc/host/sdhci-of-dwcmshc.c | 19 +
drivers/mtd/devices/docg3.c | 3
drivers/mtd/spi-nor/sst.c | 13
drivers/net/bonding/bond_main.c | 12
drivers/net/can/usb/ucan.c | 2
drivers/net/ethernet/micrel/ks8851.h | 6
drivers/net/ethernet/micrel/ks8851_common.c | 69 ++---
drivers/net/ethernet/micrel/ks8851_par.c | 15 -
drivers/net/ethernet/micrel/ks8851_spi.c | 11
drivers/net/ethernet/microsoft/mana/mana_en.c | 11
drivers/net/ethernet/wangxun/txgbe/txgbe_main.c | 3
drivers/net/netconsole.c | 2
drivers/net/phy/mdio_bus.c | 4
drivers/net/wireless/marvell/mwifiex/init.c | 2
drivers/net/wireless/mediatek/mt76/mt792x_regs.h | 4
drivers/net/wireless/mediatek/mt76/mt792x_usb.c | 51 +++
drivers/net/wireless/realtek/rtl8xxxu/core.c | 28 --
drivers/net/wireless/realtek/rtw88/pci.c | 3
drivers/nvme/host/core.c | 2
drivers/nvme/host/pci.c | 2
drivers/of/base.c | 2
drivers/of/dynamic.c | 2
drivers/of/platform.c | 2
drivers/of/unittest.c | 4
drivers/pci/endpoint/functions/pci-epf-mhi.c | 4
drivers/pci/endpoint/functions/pci-epf-ntb.c | 56 ----
drivers/power/supply/axp288_charger.c | 19 -
drivers/pwm/pwm-imx-tpm.c | 9
drivers/remoteproc/xlnx_r5_remoteproc.c | 20 -
drivers/rtc/rtc-ntxec.c | 2
drivers/scsi/sd.c | 1
drivers/spi/spi-ch341.c | 36 +-
drivers/spi/spi-imx.c | 4
drivers/spi/spi.c | 63 ++--
drivers/thermal/thermal_core.c | 7
drivers/usb/chipidea/core.c | 45 +--
drivers/usb/chipidea/otg.c | 7
drivers/usb/host/xhci.c | 1
drivers/vfio/cdx/intr.c | 13
drivers/vfio/cdx/main.c | 19 +
drivers/vfio/cdx/private.h | 3
fs/ceph/dir.c | 6
fs/erofs/dir.c | 28 +-
fs/ext2/inode.c | 14 -
fs/ext4/xattr.c | 6
fs/f2fs/data.c | 4
fs/f2fs/f2fs.h | 2
fs/f2fs/segment.c | 6
fs/f2fs/super.c | 11
fs/jbd2/revoke.c | 8
fs/notify/inotify/inotify_user.c | 1
fs/ntfs3/run.c | 18 +
fs/ocfs2/aops.c | 74 +++--
fs/smb/client/cifsacl.c | 116 ++++++--
fs/smb/server/connection.c | 28 --
fs/smb/server/connection.h | 6
fs/smb/server/smb2pdu.c | 4
fs/smb/server/transport_rdma.c | 5
fs/smb/server/transport_tcp.c | 25 +
fs/userfaultfd.c | 2
fs/xfs/xfs_buf.c | 1
include/linux/device.h | 45 +++
include/linux/fwnode.h | 44 ++-
include/linux/hugetlb_inline.h | 4
include/linux/padata.h | 4
include/linux/randomize_kstack.h | 26 +
include/linux/sched.h | 4
include/linux/tpm_eventlog.h | 9
include/linux/usb.h | 3
include/net/mana/mana.h | 1
include/net/mctp.h | 3
include/trace/events/rxrpc.h | 6
init/main.c | 1
io_uring/poll.c | 15 -
io_uring/timeout.c | 4
kernel/fork.c | 2
kernel/locking/rtmutex.c | 13
kernel/padata.c | 136 ++--------
kernel/sched/core.c | 2
kernel/sched/rt.c | 2
kernel/sched/sched.h | 2
kernel/taskstats.c | 1
lib/test_hmm.c | 86 +++---
lib/ts_kmp.c | 18 +
mm/damon/core.c | 3
mm/internal.h | 10
mm/memory_hotplug.c | 10
mm/migrate.c | 152 ++++++-----
mm/mlock.c | 10
mm/mmap.c | 4
mm/zsmalloc.c | 1
net/bluetooth/hci_event.c | 18 +
net/bridge/br_arp_nd_proxy.c | 8
net/bridge/br_fdb.c | 28 +-
net/caif/cfsrvl.c | 14 -
net/ceph/auth.c | 2
net/ipv4/icmp.c | 5
net/ipv4/inet_connection_sock.c | 3
net/ipv6/exthdrs.c | 9
net/ipv6/rpl_iptunnel.c | 9
net/ipv6/seg6_iptunnel.c | 12
net/mctp/route.c | 8
net/netfilter/nft_bitwise.c | 3
net/qrtr/ns.c | 86 +++++-
net/rds/rdma.c | 4
net/rxrpc/ar-internal.h | 1
net/rxrpc/call_event.c | 25 +
net/rxrpc/conn_event.c | 14 -
net/rxrpc/io_thread.c | 24 -
net/rxrpc/rxkad.c | 112 +++-----
net/rxrpc/skbuff.c | 9
net/smc/smc_clc.c | 4
net/strparser/strparser.c | 8
rust/kernel/init/macros.rs | 7
scripts/check-uapi.sh | 7
security/apparmor/lsm.c | 16 -
sound/aoa/codecs/onyx.c | 104 ++-----
sound/aoa/codecs/tas.c | 113 ++------
sound/aoa/core/gpio-feature.c | 20 -
sound/aoa/core/gpio-pmf.c | 26 -
sound/aoa/soundbus/i2sbus/core.c | 12
sound/aoa/soundbus/i2sbus/pcm.c | 143 +++++-----
sound/core/control.c | 4
sound/core/misc.c | 13
sound/core/seq/oss/seq_oss_rw.c | 6
sound/drivers/pcmtest.c | 19 +
sound/pci/ctxfi/ctatc.c | 3
sound/usb/6fire/control.c | 10
sound/usb/caiaq/control.c | 52 ++-
sound/usb/caiaq/device.c | 35 +-
sound/usb/caiaq/input.c | 2
sound/usb/endpoint.c | 6
sound/usb/format.c | 2
sound/usb/mixer.c | 7
sound/usb/mixer_quirks.c | 12
tools/accounting/getdelays.c | 41 ++-
tools/accounting/procacct.c | 40 ++
tools/perf/arch/loongarch/annotate/instructions.c | 1
tools/perf/util/disasm.c | 1
tools/testing/ktest/ktest.pl | 2
tools/testing/selftests/landlock/net_test.c | 2
tools/testing/selftests/mqueue/setting | 1
tools/testing/selftests/mqueue/settings | 1
219 files changed, 2463 insertions(+), 1512 deletions(-)
Aditya Garg (1):
HID: apple: ensure the keyboard backlight is off if suspending
Alex Williamson (1):
vfio/cdx: Serialize VFIO_DEVICE_SET_IRQS with a per-device mutex
Alistair Popple (1):
lib: test_hmm: evict device pages on file close to avoid use-after-free
Andrea Mayer (2):
seg6: fix seg6 lwtunnel output redirect for L2 reduced encap mode
net: ipv6: fix NOREF dst use in seg6 and rpl lwtunnels
Anshuman Khandual (1):
arm64/mm: Enable batched TLB flush in unmap_hotplug_range()
Anthony Yznaga (1):
mm: prevent droppable mappings from being locked
Antoniu Miclaus (2):
iio: frequency: admv1013: add dev variable
iio: frequency: admv1013: fix NULL pointer dereference on str
Ao Zhou (1):
net: rds: fix MR cleanup on copy error
Arjan van de Ven (1):
drm/amdgpu: fix zero-size GDS range init on RDNA4
Arnd Bergmann (2):
tpm: avoid -Wunused-but-set-variable
check-uapi: link into shared objects
Ben Levinsky (1):
remoteproc: xlnx: Only access buffer information if IPI is buffered
Bin Liu (1):
mmc: block: use single block write in retry
Breno Leitao (1):
netconsole: avoid out-of-bounds access on empty string in trim_newline()
Brian Mak (1):
mfd: core: Preserve OF node when ACPI handle is present
Cengiz Can (1):
apparmor: use target task's context in apparmor_getprocattr()
Chao Yu (1):
f2fs: fix to do sanity check on dcc->discard_cmd_cnt conditionally
Chen Ni (1):
media: i2c: imx219: Check return value of devm_gpiod_get_optional() in imx219_probe()
Chen Zhao (1):
IB/core: Fix zero dmac race in neighbor resolution
Chia-Ming Chang (2):
md/raid5: fix soft lockup in retry_aligned_read()
inotify: fix watch count leak when fsnotify_add_inode_mark_locked() fails
Cássio Gabriel (9):
ALSA: usb-audio: stop parsing UAC2 rates at MAX_NR_RATES
ALSA: usb-audio: Avoid false E-MU sample-rate notifications
ALSA: usb-audio: Fix Audio Advantage Micro II SPDIF switch
ALSA: aoa: i2sbus: fix OF node lifetime handling
ALSA: seq_oss: return full count for successful SEQ_FULLSIZE writes
ALSA: caiaq: Fix control_put() result and cache rollback
ALSA: 6fire: Fix input volume change detection
ALSA: pcmtest: Fix resource leaks in module init error paths
ALSA: aoa: i2sbus: clear stale prepared state
DaeMyung Kang (1):
ksmbd: reset rcount per connection in ksmbd_conn_wait_idle_sess_id()
Damien Le Moal (1):
block: fix zone write plugs refcount handling in disk_zone_wplug_schedule_bio_work()
Daniel Hodges (2):
PCI: epf-mhi: Return 0, not remaining timeout, when eDMA ops complete
wifi: mwifiex: fix use-after-free in mwifiex_adapter_cleanup()
Dave Hansen (1):
x86/cpu: Disable FRED when PTI is forced on
David (Ming Qiang) Wu (1):
amdgpu/jpeg: fix deepsleep register for jpeg 5_0_0 and 5_0_2
David Carlier (1):
drm/nouveau: fix nvkm_device leak on aperture removal failure
David Hildenbrand (2):
mm/migrate: factor out movable_ops page handling into migrate_movable_ops_page()
mm/migrate: move movable_ops page handling out of move_to_new_folio()
David Howells (5):
rxrpc: Fix memory leaks in rxkad_verify_response()
rxrpc: Fix rxkad crypto unalignment handling
rxrpc: Fix re-decryption of RESPONSE packets
rxrpc: Fix potential UAF after skb_unshare() failure
rxrpc: Fix rxrpc_input_call_event() to only unshare DATA packets
David Lechner (1):
iio: adc: ti-ads7950: use iio_push_to_buffers_with_ts_unaligned()
Dawei Feng (1):
rbd: fix null-ptr-deref when device_add_disk() fails
Deepanshu Kartikey (2):
ext4: fix bounds check in check_xattrs() to prevent out-of-bounds access
ALSA: caiaq: fix usb_dev refcount leak on probe failure
Denis M. Karpov (1):
userfaultfd: allow registration of ranges below mmap_min_addr
Douglas Anderson (3):
driver core: Don't let a device probe until it's ready
device property: Make modifications of fwnode "flags" thread safe
driver core: Add kernel-doc for DEV_FLAG_COUNT enum value
Eric Biggers (1):
crypto: arm64/aes - Fix 32-bit aes_mac_update() arg treated as 64-bit
Fan Wu (1):
media: mtk-jpeg: fix use-after-free in release path due to uncancelled work
Fedor Pchelkin (1):
wifi: rtw88: check for PCI upstream bridge existence
Francesco Dolcini (1):
arm64: dts: ti: am62-verdin: Enable pullup for eMMC data pins
Gao Xiang (1):
erofs: fix the out-of-bounds nameoff handling for trailing dirents
Greg Kroah-Hartman (5):
LoongArch: Add spectre boundary for syscall dispatch table
drm/nouveau: fix u32 overflow in pushbuf reloc bounds check
leds: qcom-lpg: Check for array overflow when selecting the high resolution
ipv6: rpl: reserve mac_len headroom when recompressed SRH grows
Linux 6.12.86
Guangshuo Li (1):
ALSA: pcmtest: fix reference leak on failed device registration
Gunnar Kudrjavets (2):
tpm: Fix auth session leak in tpm2_get_random() error path
tpm: Use kfree_sensitive() to free auth session in tpm_dev_release()
Gustavo A. R. Silva (1):
crypto: nx - Fix packed layout in struct nx842_crypto_header
Haoxiang Li (2):
xfs: fix a resource leak in xfs_alloc_buftarg()
crypto: ccree - fix a memory leak in cc_mac_digest()
Harin Lee (1):
ALSA: ctxfi: Add fallback to default RSR for S/PDIF
Helge Deller (1):
parisc: _llseek syscall is only available for 32-bit userspace
Heming Zhao (1):
ocfs2: split transactions in dio completion to avoid credit exhaustion
Herbert Xu (3):
padata: Fix pd UAF once and for all
padata: Remove comment for reorder_work
crypto: pcrypt - Fix handling of MAY_BACKLOG requests
Huacai Chen (1):
LoongArch: Show CPU vulnerabilities correctly
Jacqueline Wong (2):
tpm: tpm_tis: add error logging for data transfer
tpm: tpm_tis: stop transmit if retries are exhausted
James Kim (1):
mtd: docg3: fix use-after-free in docg3_release()
Jens Axboe (2):
io_uring/poll: ensure EPOLL_ONESHOT is propagated for EPOLL_URING_WAKE
io_uring/poll: fix multishot recv missing EOF on wakeup race
Jesse.Zhang (1):
drm/amdgpu: Limit BO list entry count to prevent resource exhaustion
Jiawen Wu (1):
net: txgbe: fix firmware version check
Jinjiang Tu (1):
mm/memory_hotplug: fix hwpoisoned large folio handling in do_migrate_range()
Johan Hovold (5):
spi: imx: fix use-after-free on unbind
spi: ch341: fix memory leaks on probe failures
rtc: ntxec: fix OF node reference imbalance
can: ucan: fix devres lifetime
spi: fix resource leaks on device setup failure
Jonathan Santos (1):
iio: adc: ad7768-1: fix one-shot mode data acquisition
Joseph Salisbury (1):
sched: Use u64 for bandwidth ratio calculations
Josh Hunt (1):
md/raid10: fix deadlock with check operation and nowait requests
Josh Law (1):
lib/ts_kmp: fix integer overflow in pattern length calculation
Junrui Luo (2):
md/raid5: validate payload size before accessing journal metadata
dm mirror: fix integer overflow in create_dirty_log()
Kai Ma (1):
netfilter: reject zero shift in nft_bitwise
Keenan Dong (1):
rtmutex: Use waiter::task instead of current in remove_waiter()
Kevin Cheng (2):
KVM: SVM: Inject #UD for INVLPGA if EFER.SVME=0
KVM: nSVM: Raise #UD if unhandled VMMCALL isn't intercepted by L1
Koichiro Den (1):
PCI: endpoint: pci-epf-ntb: Remove duplicate resource teardown
Krzysztof Kozlowski (1):
power: supply: axp288_charger: Do not cancel work before initializing it
Long Li (1):
RDMA/mana_ib: Disable RX steering on RSS QP destroy
Longxuan Yu (1):
io_uring/poll: fix signed comparison in io_poll_get_ownership()
Luca Ceresoli (1):
drm/arcpgu: fix device node leak
Luxiao Xu (1):
net: strparser: fix skb_head leak in strp_abort_strp()
Manivannan Sadhasivam (5):
net: qrtr: ns: Fix use-after-free in driver remove()
net: qrtr: ns: Free the node during ctrl_cmd_bye()
net: qrtr: ns: Limit the maximum server registration per node
net: qrtr: ns: Limit the maximum number of lookups
net: qrtr: ns: Limit the total number of nodes
Marek Vasut (3):
mfd: stpmic1: Attempt system shutdown twice in case PMIC is confused
net: ks8851: Reinstate disabling of BHs around IRQ handler
net: ks8851: Avoid excess softirq scheduling
Max Kellermann (1):
ceph: only d_add() negative dentries when they are unhashed
Michael Bommarito (2):
um: drivers: call kernel_strrchr() explicitly in cow_user.c
smb: client: validate the whole DACL before rewriting it in cifsacl
Michal Pecio (1):
usb: xhci: Make usb_host_endpoint.hcpriv survive endpoint_disable()
Mickaël Salaün (1):
selftests/landlock: Fix format warning for __u64 in net_test
Miguel Ojeda (2):
kbuild: rust: allow `clippy::uninlined_format_args`
rust: init: fix `clippy::undocumented_unsafe_blocks` warnings
Ming Qian (1):
media: amphion: Fix race between m2m job_abort and device_run
Naman Jain (1):
block: relax pgmap check in bio_add_page for compatible zone device pages
Namjae Jeon (2):
ksmbd: use msleep instead of schedule_timeout_interruptible()
ksmbd: replace connection list with hash table
Nathan Chancellor (1):
extract-cert: Wrap key_pass with '#ifdef USE_PKCS11_ENGINE'
Oliver Neukum (2):
media: rc: ttusbir: respect DMA coherency rules
media: rc: igorplugusb: heed coherency rules
Paul Louvel (2):
crypto: talitos - fix SEC1 32k ahash request limitation
crypto: talitos - rename first/last to first_desc/last_desc
Pavel Begunkov (1):
io_uring/timeout: check unused sqe fields
Pengpeng Hou (1):
greybus: gb-beagleplay: bound bootloader receive buffering
Prasanna Kumar T S M (1):
vfio/cdx: Fix NULL pointer dereference in interrupt trigger path
Qiang Yu (1):
bus: mhi: host: pci_generic: Switch to async power up to avoid boot delays
Rafael J. Wysocki (1):
thermal: core: Fix thermal zone governor cleanup issues
Raphael Zimmer (1):
libceph: Prevent potential null-ptr-deref in ceph_handle_auth_reply()
Robert Beckett (2):
nvme-pci: add NVME_QUIRK_DISABLE_WRITE_ZEROES for Kingston OM3SGP4
nvme: respect NVME_QUIRK_DISABLE_WRITE_ZEROES when wzsl is set
Robert Marko (1):
arm64: dts: marvell: uDPU: add ethernet aliases
Rong Bao (1):
perf annotate: Use jump__delete when freeing LoongArch jumps
Rong Zhang (1):
Revert "ALSA: usb: Increase volume range that triggers a warning"
Ruide Cao (1):
ipv4: icmp: validate reply type before using icmp_pointers
Ruijie Li (1):
net/smc: avoid early lgr access in smc_clc_wait_msg
Ryan Roberts (1):
randomize_kstack: Maintain kstack_offset per task
Sanjaikumar V S (1):
mtd: spi-nor: sst: Fix write enable before AAI sequence
Sanman Pradhan (2):
hwmon: (powerz) Fix missing usb_kill_urb() on signal interrupt
hwmon: (pt5161l) Fix bugs in pt5161l_read_block_data()
Sean Christopherson (3):
KVM: x86: Defer non-architectural deliver of exception payload to userspace read
KVM: SVM: Explicitly mark vmcb01 dirty after modifying VMCB intercepts
KVM: nSVM: Always intercept VMMCALL when L2 is active
Sean Wang (2):
wifi: mt76: mt792x: describe USB WFSYS reset with a descriptor
wifi: mt76: mt792x: fix mt7925u USB WFSYS reset handling
SeongJae Park (1):
mm/damon/core: use time_in_range_open() for damos quota window start
Sergey Senozhatsky (1):
zram: do not forget to endio for partial discard requests
Shawn Lin (1):
mmc: sdhci-of-dwcmshc: Disable clock before DLL configuration
Shigeru Yoshida (1):
mm/zsmalloc: copy KMSAN metadata in zs_page_migrate()
Shuvam Pandey (1):
Bluetooth: hci_event: fix potential UAF in SSP passkey handlers
Simon Liebold (1):
selftests/mqueue: Fix incorrectly named file
Sohei Koyama (1):
ext4: fix missing brelse() in ext4_xattr_inode_dec_ref_all()
Steven Rostedt (1):
ktest: Fix the month in the name of the failure directory
Takashi Iwai (6):
ALSA: usb-audio: Evaluate packsize caps at the right place
ALSA: core: Fix potential data race at fasync handling
ALSA: caiaq: Handle probe errors properly
ALSA: aoa: Use guard() for mutex locks
ALSA: caiaq: Fix potentially leftover ep1_in_urb at error path
ALSA: caiaq: Don't abort when no input device is available
Thomas Fourier (1):
crypto: hisilicon - Fix dma_unmap_single() direction
Thomas Zimmermann (2):
firmware: google: framebuffer: Do not mark framebuffer as busy
firmware: google: framebuffer: Do not unregister platform device
Thorsten Blum (8):
crypto: atmel-sha204a - Fix OTP sysfs read and error handling
crypto: atmel-aes - Fix 3-page memory leak in atmel_aes_buff_cleanup
crypto: atmel-ecc - Release client on allocation failure
crypto: atmel-tdes - fix DMA sync direction
crypto: atmel-sha204a - Fix error codes in OTP reads
crypto: atmel-sha204a - Fix potential UAF and memory leak in remove path
crypto: atmel-sha204a - Fix uninitialized data access on OTP read error
ALSA: aoa: Skip devices with no codecs in i2sbus_resume()
Tobias Gaertner (2):
ntfs3: add buffer boundary checks to run_unpack()
ntfs3: fix integer overflow in run_unpack() volume boundary check
Tvrtko Ursulin (1):
drm/amdgpu: Use vmemdup_array_user in amdgpu_bo_create_list_entry_array
Tyllis Xu (3):
misc: ibmasm: fix OOB MMIO read in ibmasm_handle_mouse_interrupt()
ibmasm: fix OOB reads in command_file_write due to missing size checks
ibmasm: fix heap over-read in ibmasm_send_i2o_message()
Usama Arif (1):
mm: migrate: requeue destination folio on deferred split queue
Vasiliy Kovalev (1):
ext2: reject inodes with zero i_nlink and valid mode in ext2_iget()
Viorel Suman (OSS) (1):
pwm: imx-tpm: Count the number of enabled channels in probe
Weigang He (1):
greybus: gb-beagleplay: fix sleep in atomic context in hdlc_tx_frames()
Wentao Liang (2):
of: unittest: fix use-after-free in of_unittest_changeset()
of: unittest: fix use-after-free in testdrv_probe()
Xiang Mei (1):
net: bonding: fix use-after-free in bond_xmit_broadcast()
Xu Yang (2):
usb: chipidea: otg: not wait vbus drop if use role_switch
usb: chipidea: core: allow ci_irq_handler() handle both ID and VBUS change
Yang Xiuwei (1):
scsi: sd: fix missing put_disk() when device_add(&disk_dev) fails
Yi Cong (1):
wifi: rtl8xxxu: fix potential use of uninitialized value
Yiyang Chen (2):
tools/accounting: handle truncated taskstats netlink messages
taskstats: set version in TGID exit notifications
Yongpeng Yang (1):
f2fs: fix UAF caused by decrementing sbi->nr_pages[] in f2fs_write_end_io()
Yosry Ahmed (11):
KVM: nSVM: Mark all of vmcb02 dirty when restoring nested state
KVM: nSVM: Sync NextRIP to cached vmcb12 after VMRUN of L2
KVM: nSVM: Sync interrupt shadow to cached vmcb12 after VMRUN of L2
KVM: nSVM: Ensure AVIC is inhibited when restoring a vCPU to guest mode
KVM: nSVM: Use vcpu->arch.cr2 when updating vmcb12 on nested #VMEXIT
KVM: nSVM: Always inject a #GP if mapping VMCB12 fails on nested VMRUN
KVM: nSVM: Clear GIF on nested #VMEXIT(INVALID)
KVM: nSVM: Clear EVENTINJ fields in vmcb12 on nested #VMEXIT
KVM: nSVM: Clear tracking of L1->L2 NMI and soft IRQ on nested #VMEXIT
KVM: nSVM: Add missing consistency check for EFER, CR0, CR4, and CS
KVM: nSVM: Add missing consistency check for nCR3 validity
Yuan Zhaoming (1):
net: mctp: fix don't require received header reserved bits to be zero
Yucheng Lu (1):
crypto: authencesn - reject short ahash digests during instance creation
Zhang Yi (1):
jbd2: fix deadlock in jbd2_journal_cancel_revoke()
Zhengchuan Liang (2):
net: caif: clear client service pointer on teardown
net: bridge: use a stable FDB dst snapshot in RCU readers
Zhenzhong Wu (1):
tcp: call sk_data_ready() after listener migration
Ziqing Chen (1):
ALSA: control: Validate buf_len before strnlen() in snd_ctl_elem_init_enum_names()
hkbinbin (1):
RDMA/rxe: Validate pad and ICRC before payload_size() in rxe_rcv
^ permalink raw reply [flat|nested] 2+ messages in thread
* Re: Linux 6.12.86
2026-05-07 4:52 Linux 6.12.86 Greg Kroah-Hartman
@ 2026-05-07 4:52 ` Greg Kroah-Hartman
0 siblings, 0 replies; 2+ messages in thread
From: Greg Kroah-Hartman @ 2026-05-07 4:52 UTC (permalink / raw)
To: linux-kernel, akpm, torvalds, stable; +Cc: lwn, jslaby, Greg Kroah-Hartman
diff --git a/Makefile b/Makefile
index a31dcebc60ba..f5053b825039 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 12
-SUBLEVEL = 85
+SUBLEVEL = 86
EXTRAVERSION =
NAME = Baby Opossum Posse
@@ -459,6 +459,7 @@ export rust_common_flags := --edition=2021 \
-Aclippy::needless_lifetimes \
-Wclippy::no_mangle_with_rust_abi \
-Wclippy::undocumented_unsafe_blocks \
+ -Aclippy::uninlined_format_args \
-Wclippy::unnecessary_safety_comment \
-Wclippy::unnecessary_safety_doc \
-Wrustdoc::missing_crate_level_docs \
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi
index 242820845707..cd856c0aba71 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi
@@ -15,6 +15,11 @@
#include "armada-372x.dtsi"
/ {
+ aliases {
+ ethernet0 = ð0;
+ ethernet1 = ð1;
+ };
+
chosen {
stdout-path = "serial0:115200n8";
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
index 86e7f98d430e..7c90a4e488a4 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
@@ -566,16 +566,16 @@ AM62X_IOPAD(0x15c, PIN_INPUT, 0) /* (AB22) MDIO0_MDIO */ /* ETH_1_MDIO, SODIMM
/* On-module eMMC */
pinctrl_sdhci0: main-mmc0-default-pins {
pinctrl-single,pins = <
- AM62X_IOPAD(0x220, PIN_INPUT, 0) /* (Y3) MMC0_CMD */
- AM62X_IOPAD(0x218, PIN_INPUT, 0) /* (AB1) MMC0_CLK */
- AM62X_IOPAD(0x214, PIN_INPUT, 0) /* (AA2) MMC0_DAT0 */
- AM62X_IOPAD(0x210, PIN_INPUT, 0) /* (AA1) MMC0_DAT1 */
- AM62X_IOPAD(0x20c, PIN_INPUT, 0) /* (AA3) MMC0_DAT2 */
- AM62X_IOPAD(0x208, PIN_INPUT, 0) /* (Y4) MMC0_DAT3 */
- AM62X_IOPAD(0x204, PIN_INPUT, 0) /* (AB2) MMC0_DAT4 */
- AM62X_IOPAD(0x200, PIN_INPUT, 0) /* (AC1) MMC0_DAT5 */
- AM62X_IOPAD(0x1fc, PIN_INPUT, 0) /* (AD2) MMC0_DAT6 */
- AM62X_IOPAD(0x1f8, PIN_INPUT, 0) /* (AC2) MMC0_DAT7 */
+ AM62X_IOPAD(0x220, PIN_INPUT, 0) /* (Y3) MMC0_CMD */
+ AM62X_IOPAD(0x218, PIN_INPUT, 0) /* (AB1) MMC0_CLK */
+ AM62X_IOPAD(0x214, PIN_INPUT, 0) /* (AA2) MMC0_DAT0 */
+ AM62X_IOPAD(0x210, PIN_INPUT_PULLUP, 0) /* (AA1) MMC0_DAT1 */
+ AM62X_IOPAD(0x20c, PIN_INPUT_PULLUP, 0) /* (AA3) MMC0_DAT2 */
+ AM62X_IOPAD(0x208, PIN_INPUT_PULLUP, 0) /* (Y4) MMC0_DAT3 */
+ AM62X_IOPAD(0x204, PIN_INPUT_PULLUP, 0) /* (AB2) MMC0_DAT4 */
+ AM62X_IOPAD(0x200, PIN_INPUT_PULLUP, 0) /* (AC1) MMC0_DAT5 */
+ AM62X_IOPAD(0x1fc, PIN_INPUT_PULLUP, 0) /* (AD2) MMC0_DAT6 */
+ AM62X_IOPAD(0x1f8, PIN_INPUT_PULLUP, 0) /* (AC2) MMC0_DAT7 */
>;
};
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index 0e834a2c062c..e793478f37c1 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -838,7 +838,7 @@ AES_FUNC_START(aes_mac_update)
encrypt_block v0, w2, x1, x7, w8
eor v0.16b, v0.16b, v4.16b
cmp w3, wzr
- csinv x5, x6, xzr, eq
+ csinv w5, w6, wzr, eq
cbz w5, .Lmacout
encrypt_block v0, w2, x1, x7, w8
st1 {v0.16b}, [x4] /* return dg */
@@ -852,7 +852,7 @@ AES_FUNC_START(aes_mac_update)
eor v0.16b, v0.16b, v1.16b /* ..and xor with dg */
subs w3, w3, #1
- csinv x5, x6, xzr, eq
+ csinv w5, w6, wzr, eq
cbz w5, .Lmacout
.Lmacenc:
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index ea80e271301e..872227e4fada 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -869,10 +869,14 @@ static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
WARN_ON(!pte_present(pte));
__pte_clear(&init_mm, addr, ptep);
- flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
- if (free_mapped)
+ if (free_mapped) {
+ /* CONT blocks are not supported in the vmemmap */
+ WARN_ON(pte_cont(pte));
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
free_hotplug_page_range(pte_page(pte),
PAGE_SIZE, altmap);
+ }
+ /* unmap_hotplug_range() flushes TLB for !free_mapped */
} while (addr += PAGE_SIZE, addr < end);
}
@@ -893,15 +897,14 @@ static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
WARN_ON(!pmd_present(pmd));
if (pmd_sect(pmd)) {
pmd_clear(pmdp);
-
- /*
- * One TLBI should be sufficient here as the PMD_SIZE
- * range is mapped with a single block entry.
- */
- flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
- if (free_mapped)
+ if (free_mapped) {
+ /* CONT blocks are not supported in the vmemmap */
+ WARN_ON(pmd_cont(pmd));
+ flush_tlb_kernel_range(addr, addr + PMD_SIZE);
free_hotplug_page_range(pmd_page(pmd),
PMD_SIZE, altmap);
+ }
+ /* unmap_hotplug_range() flushes TLB for !free_mapped */
continue;
}
WARN_ON(!pmd_table(pmd));
@@ -926,15 +929,12 @@ static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr,
WARN_ON(!pud_present(pud));
if (pud_sect(pud)) {
pud_clear(pudp);
-
- /*
- * One TLBI should be sufficient here as the PUD_SIZE
- * range is mapped with a single block entry.
- */
- flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
- if (free_mapped)
+ if (free_mapped) {
+ flush_tlb_kernel_range(addr, addr + PUD_SIZE);
free_hotplug_page_range(pud_page(pud),
PUD_SIZE, altmap);
+ }
+ /* unmap_hotplug_range() flushes TLB for !free_mapped */
continue;
}
WARN_ON(!pud_table(pud));
@@ -964,6 +964,7 @@ static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr,
static void unmap_hotplug_range(unsigned long addr, unsigned long end,
bool free_mapped, struct vmem_altmap *altmap)
{
+ unsigned long start = addr;
unsigned long next;
pgd_t *pgdp, pgd;
@@ -985,6 +986,9 @@ static void unmap_hotplug_range(unsigned long addr, unsigned long end,
WARN_ON(!pgd_present(pgd));
unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap);
} while (addr = next, addr < end);
+
+ if (!free_mapped)
+ flush_tlb_kernel_range(start, end);
}
static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c
index cbce099037b2..9ad4cd7e6b4d 100644
--- a/arch/loongarch/kernel/cpu-probe.c
+++ b/arch/loongarch/kernel/cpu-probe.c
@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
+#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/export.h>
@@ -352,3 +353,9 @@ void cpu_probe(void)
cpu_report();
}
+
+ssize_t cpu_show_spectre_v1(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Mitigation: __user pointer sanitization\n");
+}
diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
index 168bd97540f8..d0257935078e 100644
--- a/arch/loongarch/kernel/syscall.c
+++ b/arch/loongarch/kernel/syscall.c
@@ -9,6 +9,7 @@
#include <linux/entry-common.h>
#include <linux/errno.h>
#include <linux/linkage.h>
+#include <linux/nospec.h>
#include <linux/objtool.h>
#include <linux/randomize_kstack.h>
#include <linux/syscalls.h>
@@ -61,7 +62,7 @@ void noinstr __no_stack_protector do_syscall(struct pt_regs *regs)
add_random_kstack_offset();
if (nr < NR_syscalls) {
- syscall_fn = sys_call_table[nr];
+ syscall_fn = sys_call_table[array_index_nospec(nr, NR_syscalls)];
regs->regs[4] = syscall_fn(regs->orig_a0, regs->regs[5], regs->regs[6],
regs->regs[7], regs->regs[8], regs->regs[9]);
}
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index 66dc406b12e4..99054f90b19a 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -154,7 +154,7 @@
# 137 was afs_syscall
138 common setfsuid sys_setfsuid
139 common setfsgid sys_setfsgid
-140 common _llseek sys_llseek
+140 32 _llseek sys_llseek
141 common getdents sys_getdents compat_sys_getdents
142 common _newselect sys_select compat_sys_select
143 common flock sys_flock
diff --git a/arch/um/drivers/cow_user.c b/arch/um/drivers/cow_user.c
index 29b46581ddd1..dc1d1bcd85ec 100644
--- a/arch/um/drivers/cow_user.c
+++ b/arch/um/drivers/cow_user.c
@@ -15,6 +15,12 @@
#include "cow.h"
#include "cow_sys.h"
+/*
+ * arch/um/Makefile remaps strrchr to kernel_strrchr; call the kernel
+ * name directly to avoid glibc >= 2.43's C23 strrchr macro.
+ */
+char *kernel_strrchr(const char *, int);
+
#define PATH_LEN_V1 256
/* unsigned time_t works until year 2106 */
@@ -153,7 +159,7 @@ static int absolutize(char *to, int size, char *from)
errno);
return -1;
}
- slash = strrchr(from, '/');
+ slash = kernel_strrchr(from, '/');
if (slash != NULL) {
*slash = '\0';
if (chdir(from)) {
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index 913bfc96959c..8d1ae8277038 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -304,14 +304,6 @@ static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
{
return false;
}
-static inline bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
-{
- return false;
-}
-static inline bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
-{
- return false;
-}
static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
{
return 0;
diff --git a/arch/x86/kvm/svm/hyperv.h b/arch/x86/kvm/svm/hyperv.h
index d3f8bfc05832..f70d076911a6 100644
--- a/arch/x86/kvm/svm/hyperv.h
+++ b/arch/x86/kvm/svm/hyperv.h
@@ -41,10 +41,17 @@ static inline bool nested_svm_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu)
return hv_vcpu->vp_assist_page.nested_control.features.directhypercall;
}
+static inline bool nested_svm_is_l2_tlb_flush_hcall(struct kvm_vcpu *vcpu)
+{
+ return guest_hv_cpuid_has_l2_tlb_flush(vcpu) &&
+ nested_svm_l2_tlb_flush_enabled(vcpu) &&
+ kvm_hv_is_tlb_flush_hcall(vcpu);
+}
+
void svm_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu);
#else /* CONFIG_KVM_HYPERV */
static inline void nested_svm_hv_update_vm_vp_ids(struct kvm_vcpu *vcpu) {}
-static inline bool nested_svm_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu)
+static inline bool nested_svm_is_l2_tlb_flush_hcall(struct kvm_vcpu *vcpu)
{
return false;
}
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 862758eeac84..70b0b8322ad0 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -129,11 +129,13 @@ void recalc_intercepts(struct vcpu_svm *svm)
struct vmcb_ctrl_area_cached *g;
unsigned int i;
- vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+ vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_INTERCEPTS);
if (!is_guest_mode(&svm->vcpu))
return;
+ vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+
c = &svm->vmcb->control;
h = &svm->vmcb01.ptr->control;
g = &svm->nested.ctl;
@@ -157,13 +159,6 @@ void recalc_intercepts(struct vcpu_svm *svm)
vmcb_clr_intercept(c, INTERCEPT_VINTR);
}
- /*
- * We want to see VMMCALLs from a nested guest only when Hyper-V L2 TLB
- * flush feature is enabled.
- */
- if (!nested_svm_l2_tlb_flush_enabled(&svm->vcpu))
- vmcb_clr_intercept(c, INTERCEPT_VMMCALL);
-
for (i = 0; i < MAX_INTERCEPT; i++)
c->intercepts[i] |= g->intercepts[i];
@@ -269,6 +264,10 @@ static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
return false;
+ if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
+ !kvm_vcpu_is_legal_gpa(vcpu, control->nested_cr3)))
+ return false;
+
if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
MSRPM_SIZE)))
return false;
@@ -308,6 +307,10 @@ static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
CC(!(save->cr0 & X86_CR0_PE)) ||
CC(!kvm_vcpu_is_legal_cr3(vcpu, save->cr3)))
return false;
+
+ if (CC((save->cs.attrib & SVM_SELECTOR_L_MASK) &&
+ (save->cs.attrib & SVM_SELECTOR_DB_MASK)))
+ return false;
}
/* Note, SVM doesn't have any additional restrictions on CR4. */
@@ -396,6 +399,8 @@ static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to,
* Copy only fields that are validated, as we need them
* to avoid TOC/TOU races.
*/
+ to->cs = from->cs;
+
to->efer = from->efer;
to->cr0 = from->cr0;
to->cr3 = from->cr3;
@@ -420,6 +425,7 @@ void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
u32 mask;
svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;
+ svm->nested.ctl.int_state = svm->vmcb->control.int_state;
/* Only a few fields of int_ctl are written by the processor. */
mask = V_IRQ_MASK | V_TPR_MASK;
@@ -862,12 +868,9 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
}
vmcb12_gpa = svm->vmcb->save.rax;
- ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
- if (ret == -EINVAL) {
+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map)) {
kvm_inject_gp(vcpu, 0);
return 1;
- } else if (ret) {
- return kvm_skip_emulated_instruction(vcpu);
}
ret = kvm_skip_emulated_instruction(vcpu);
@@ -886,6 +889,9 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
vmcb12->control.exit_code_hi = -1u;
vmcb12->control.exit_info_1 = 0;
vmcb12->control.exit_info_2 = 0;
+ vmcb12->control.event_inj = 0;
+ vmcb12->control.event_inj_err = 0;
+ svm_set_gif(svm, false);
goto out;
}
@@ -912,8 +918,6 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
out_exit_err:
svm->nested.nested_run_pending = 0;
- svm->nmi_l1_to_l2 = false;
- svm->soft_int_injected = false;
svm->vmcb->control.exit_code = SVM_EXIT_ERR;
svm->vmcb->control.exit_code_hi = -1u;
@@ -1004,7 +1008,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
vmcb12->save.efer = svm->vcpu.arch.efer;
vmcb12->save.cr0 = kvm_read_cr0(vcpu);
vmcb12->save.cr3 = kvm_read_cr3(vcpu);
- vmcb12->save.cr2 = vmcb02->save.cr2;
+ vmcb12->save.cr2 = vcpu->arch.cr2;
vmcb12->save.cr4 = svm->vcpu.arch.cr4;
vmcb12->save.rflags = kvm_get_rflags(vcpu);
vmcb12->save.rip = kvm_rip_read(vcpu);
@@ -1026,9 +1030,9 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
vmcb12->control.next_rip = vmcb02->control.next_rip;
+ vmcb12->control.event_inj = 0;
+ vmcb12->control.event_inj_err = 0;
vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
- vmcb12->control.event_inj = svm->nested.ctl.event_inj;
- vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;
if (!kvm_pause_in_guest(vcpu->kvm)) {
vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;
@@ -1153,6 +1157,10 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
kvm_queue_exception(&(svm->vcpu), DB_VECTOR);
+ /* Drop tracking for L1->L2 injected NMIs and soft IRQs */
+ svm->nmi_l1_to_l2 = false;
+ svm->soft_int_injected = false;
+
/*
* Un-inhibit the AVIC right away, so that other vCPUs can start
* to benefit from it right away.
@@ -1517,9 +1525,7 @@ int nested_svm_exit_special(struct vcpu_svm *svm)
}
case SVM_EXIT_VMMCALL:
/* Hyper-V L2 TLB flush hypercall is handled by L0 */
- if (guest_hv_cpuid_has_l2_tlb_flush(vcpu) &&
- nested_svm_l2_tlb_flush_enabled(vcpu) &&
- kvm_hv_is_tlb_flush_hcall(vcpu))
+ if (nested_svm_is_l2_tlb_flush_hcall(vcpu))
return NESTED_EXIT_HOST;
break;
default:
@@ -1754,6 +1760,12 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
svm_switch_vmcb(svm, &svm->nested.vmcb02);
nested_vmcb02_prepare_control(svm, svm->vmcb->save.rip, svm->vmcb->save.cs.base);
+ /*
+ * Any previously restored state (e.g. KVM_SET_SREGS) would mark fields
+ * dirty in vmcb01 instead of vmcb02, so mark all of vmcb02 dirty here.
+ */
+ vmcb_mark_all_dirty(svm->vmcb);
+
/*
* While the nested guest CR3 is already checked and set by
* KVM_SET_SREGS, it was set when nested state was yet loaded,
@@ -1767,6 +1779,9 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
svm->nested.force_msr_bitmap_recalc = true;
+ if (kvm_vcpu_apicv_active(vcpu))
+ kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
+
kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
ret = 0;
out_free:
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index fb9e62a167b8..6ca9bd96c34e 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -49,6 +49,7 @@
#include "svm.h"
#include "svm_ops.h"
+#include "hyperv.h"
#include "kvm_onhyperv.h"
#include "svm_onhyperv.h"
@@ -2555,6 +2556,9 @@ static int invlpga_interception(struct kvm_vcpu *vcpu)
gva_t gva = kvm_rax_read(vcpu);
u32 asid = kvm_rcx_read(vcpu);
+ if (nested_svm_check_permissions(vcpu))
+ return 1;
+
/* FIXME: Handle an address size prefix. */
if (!is_long_mode(vcpu))
gva = (u32)gva;
@@ -3374,6 +3378,22 @@ static int invpcid_interception(struct kvm_vcpu *vcpu)
return kvm_handle_invpcid(vcpu, type, gva);
}
+static int vmmcall_interception(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Inject a #UD if L2 is active and the VMMCALL isn't a Hyper-V TLB
+ * hypercall, as VMMCALL #UDs if it's not intercepted, and this path is
+ * reachable if and only if L1 doesn't want to intercept VMMCALL or has
+ * enabled L0 (KVM) handling of Hyper-V L2 TLB flush hypercalls.
+ */
+ if (is_guest_mode(vcpu) && !nested_svm_is_l2_tlb_flush_hcall(vcpu)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
+ return kvm_emulate_hypercall(vcpu);
+}
+
static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[SVM_EXIT_READ_CR0] = cr_interception,
[SVM_EXIT_READ_CR3] = cr_interception,
@@ -3424,7 +3444,7 @@ static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[SVM_EXIT_TASK_SWITCH] = task_switch_interception,
[SVM_EXIT_SHUTDOWN] = shutdown_interception,
[SVM_EXIT_VMRUN] = vmrun_interception,
- [SVM_EXIT_VMMCALL] = kvm_emulate_hypercall,
+ [SVM_EXIT_VMMCALL] = vmmcall_interception,
[SVM_EXIT_VMLOAD] = vmload_interception,
[SVM_EXIT_VMSAVE] = vmsave_interception,
[SVM_EXIT_STGI] = stgi_interception,
@@ -4445,6 +4465,16 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
svm_complete_interrupts(vcpu);
+ /*
+ * Update the cache after completing interrupts to get an accurate
+ * NextRIP, e.g. when re-injecting a soft interrupt.
+ *
+ * FIXME: Rework svm_get_nested_state() to not pull data from the
+ * cache (except for maybe int_ctl).
+ */
+ if (is_guest_mode(vcpu))
+ svm->nested.ctl.next_rip = svm->vmcb->control.next_rip;
+
return svm_exit_handlers_fastpath(vcpu);
}
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index cfb43f8b0c75..cf3be6355388 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -136,6 +136,7 @@ struct kvm_vmcb_info {
};
struct vmcb_save_area_cached {
+ struct vmcb_seg cs;
u64 efer;
u64 cr4;
u64 cr3;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e44b5f7a33a5..a1ee8bd3ca15 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -875,9 +875,6 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
vcpu->arch.exception.error_code = error_code;
vcpu->arch.exception.has_payload = has_payload;
vcpu->arch.exception.payload = payload;
- if (!is_guest_mode(vcpu))
- kvm_deliver_exception_payload(vcpu,
- &vcpu->arch.exception);
return;
}
@@ -5328,18 +5325,8 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
return 0;
}
-static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
- struct kvm_vcpu_events *events)
+static struct kvm_queued_exception *kvm_get_exception_to_save(struct kvm_vcpu *vcpu)
{
- struct kvm_queued_exception *ex;
-
- process_nmi(vcpu);
-
-#ifdef CONFIG_KVM_SMM
- if (kvm_check_request(KVM_REQ_SMI, vcpu))
- process_smi(vcpu);
-#endif
-
/*
* KVM's ABI only allows for one exception to be migrated. Luckily,
* the only time there can be two queued exceptions is if there's a
@@ -5350,21 +5337,46 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
if (vcpu->arch.exception_vmexit.pending &&
!vcpu->arch.exception.pending &&
!vcpu->arch.exception.injected)
- ex = &vcpu->arch.exception_vmexit;
- else
- ex = &vcpu->arch.exception;
+ return &vcpu->arch.exception_vmexit;
+
+ return &vcpu->arch.exception;
+}
+
+static void kvm_handle_exception_payload_quirk(struct kvm_vcpu *vcpu)
+{
+ struct kvm_queued_exception *ex = kvm_get_exception_to_save(vcpu);
/*
- * In guest mode, payload delivery should be deferred if the exception
- * will be intercepted by L1, e.g. KVM should not modifying CR2 if L1
- * intercepts #PF, ditto for DR6 and #DBs. If the per-VM capability,
- * KVM_CAP_EXCEPTION_PAYLOAD, is not set, userspace may or may not
- * propagate the payload and so it cannot be safely deferred. Deliver
- * the payload if the capability hasn't been requested.
+ * If KVM_CAP_EXCEPTION_PAYLOAD is disabled, then (prematurely) deliver
+ * the pending exception payload when userspace saves *any* vCPU state
+ * that interacts with exception payloads to avoid breaking userspace.
+ *
+ * Architecturally, KVM must not deliver an exception payload until the
+ * exception is actually injected, e.g. to avoid losing pending #DB
+ * information (which VMX tracks in the VMCS), and to avoid clobbering
+ * state if the exception is never injected for whatever reason. But
+ * if KVM_CAP_EXCEPTION_PAYLOAD isn't enabled, then userspace may or
+ * may not propagate the payload across save+restore, and so KVM can't
+ * safely defer delivery of the payload.
*/
if (!vcpu->kvm->arch.exception_payload_enabled &&
ex->pending && ex->has_payload)
kvm_deliver_exception_payload(vcpu, ex);
+}
+
+static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events)
+{
+ struct kvm_queued_exception *ex = kvm_get_exception_to_save(vcpu);
+
+ process_nmi(vcpu);
+
+#ifdef CONFIG_KVM_SMM
+ if (kvm_check_request(KVM_REQ_SMI, vcpu))
+ process_smi(vcpu);
+#endif
+
+ kvm_handle_exception_payload_quirk(vcpu);
memset(events, 0, sizeof(*events));
@@ -5549,6 +5561,8 @@ static int kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
vcpu->arch.guest_state_protected)
return -EINVAL;
+ kvm_handle_exception_payload_quirk(vcpu);
+
memset(dbgregs, 0, sizeof(*dbgregs));
BUILD_BUG_ON(ARRAY_SIZE(vcpu->arch.db) != ARRAY_SIZE(dbgregs->db));
@@ -11782,6 +11796,8 @@ static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
if (vcpu->arch.guest_state_protected)
goto skip_protected_regs;
+ kvm_handle_exception_payload_quirk(vcpu);
+
kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 5f0d579932c6..2b6e32c5cea4 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -412,6 +412,11 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
BUG();
}
}
+
+ if (cpu_feature_enabled(X86_FEATURE_FRED)) {
+ pr_debug("PTI enabled, disabling FRED\n");
+ setup_clear_cpu_cap(X86_FEATURE_FRED);
+ }
}
#ifdef CONFIG_X86_64
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 456026c4a3c9..6641ecbf6967 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -167,6 +167,8 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
struct bio_vec *bv = &bip->bip_vec[bip->bip_vcnt - 1];
bool same_page = false;
+ if (!zone_device_pages_compatible(bv->bv_page, page))
+ return 0;
if (bvec_try_merge_hw_page(q, bv, page, len, offset,
&same_page)) {
bip->bip_iter.bi_size += len;
diff --git a/block/bio.c b/block/bio.c
index b919f3fa2f2d..a081e2ddf9cf 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1155,11 +1155,15 @@ int bio_add_page(struct bio *bio, struct page *page,
if (bio->bi_iter.bi_size > UINT_MAX - len)
return 0;
- if (bio->bi_vcnt > 0 &&
- bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
- page, len, offset, &same_page)) {
- bio->bi_iter.bi_size += len;
- return len;
+ if (bio->bi_vcnt > 0) {
+ struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
+
+ if (!zone_device_pages_compatible(bv->bv_page, page))
+ return 0;
+ if (bvec_try_merge_page(bv, page, len, offset, &same_page)) {
+ bio->bi_iter.bi_size += len;
+ return len;
+ }
}
if (bio->bi_vcnt >= bio->bi_max_vecs)
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 7e04ed9b2c0b..f63070f0e440 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -811,13 +811,17 @@ static void disk_zone_wplug_schedule_bio_work(struct gendisk *disk,
struct blk_zone_wplug *zwplug)
{
/*
- * Take a reference on the zone write plug and schedule the submission
- * of the next plugged BIO. blk_zone_wplug_bio_work() will release the
- * reference we take here.
+ * Schedule the submission of the next plugged BIO. Taking a reference
+ * to the zone write plug is required as the bio_work belongs to the
+ * plug, and thus we must ensure that the write plug does not go away
+ * while the work is being scheduled but has not run yet.
+ * blk_zone_wplug_bio_work() will release the reference we take here,
+ * and we also drop this reference if the work is already scheduled.
*/
WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED));
refcount_inc(&zwplug->ref);
- queue_work(disk->zone_wplugs_wq, &zwplug->bio_work);
+ if (!queue_work(disk->zone_wplugs_wq, &zwplug->bio_work))
+ disk_put_zone_wplug(zwplug);
}
static inline void disk_zone_wplug_add_bio(struct gendisk *disk,
diff --git a/block/blk.h b/block/blk.h
index e7d7c5c63652..8af4f7101c8a 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -124,6 +124,25 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
return true;
}
+/*
+ * Check if two pages from potentially different zone device pgmaps can
+ * coexist as separate bvec entries in the same bio.
+ *
+ * The block DMA iterator (blk_dma_map_iter_start) caches the P2PDMA mapping
+ * state from the first segment and applies it to all subsequent segments, so
+ * P2PDMA pages from different pgmaps must not be mixed in the same bio.
+ *
+ * Other zone device types (FS_DAX, GENERIC) use the same dma_map_phys() path
+ * as normal RAM. PRIVATE and COHERENT pages never appear in bios.
+ */
+static inline bool zone_device_pages_compatible(const struct page *a,
+ const struct page *b)
+{
+ if (is_pci_p2pdma_page(a) || is_pci_p2pdma_page(b))
+ return zone_device_pages_have_same_pgmap(a, b);
+ return true;
+}
+
static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
struct bio_vec *bprv, unsigned int offset)
{
diff --git a/certs/extract-cert.c b/certs/extract-cert.c
index 7d6d468ed612..54ecd1024274 100644
--- a/certs/extract-cert.c
+++ b/certs/extract-cert.c
@@ -43,7 +43,9 @@ void format(void)
exit(2);
}
+#ifdef USE_PKCS11_ENGINE
static const char *key_pass;
+#endif
static BIO *wb;
static char *cert_dst;
static bool verbose;
@@ -135,7 +137,9 @@ int main(int argc, char **argv)
if (verbose_env && strchr(verbose_env, '1'))
verbose = true;
- key_pass = getenv("KBUILD_SIGN_PIN");
+#ifdef USE_PKCS11_ENGINE
+ key_pass = getenv("KBUILD_SIGN_PIN");
+#endif
if (argc != 3)
format();
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index c01cc3087919..ac9eab13a3cb 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -390,6 +390,11 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
auth = crypto_spawn_ahash_alg(&ctx->auth);
auth_base = &auth->base;
+ if (auth->digestsize > 0 && auth->digestsize < 4) {
+ err = -EINVAL;
+ goto err_free_inst;
+ }
+
err = crypto_grab_skcipher(&ctx->enc, aead_crypto_instance(inst),
crypto_attr_alg_name(tb[2]), 0, mask);
if (err)
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 7fc79e7dce44..71a0c74eb634 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -69,6 +69,9 @@ static void pcrypt_aead_done(void *data, int err)
struct pcrypt_request *preq = aead_request_ctx(req);
struct padata_priv *padata = pcrypt_request_padata(preq);
+ if (err == -EINPROGRESS)
+ return;
+
padata->info = err;
padata_do_serial(padata);
@@ -82,7 +85,7 @@ static void pcrypt_aead_enc(struct padata_priv *padata)
ret = crypto_aead_encrypt(req);
- if (ret == -EINPROGRESS)
+ if (ret == -EINPROGRESS || ret == -EBUSY)
return;
padata->info = ret;
@@ -133,7 +136,7 @@ static void pcrypt_aead_dec(struct padata_priv *padata)
ret = crypto_aead_decrypt(req);
- if (ret == -EINPROGRESS)
+ if (ret == -EINPROGRESS || ret == -EBUSY)
return;
padata->info = ret;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 09139e265c9b..d233a245e19b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -182,7 +182,7 @@ void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
if (fwnode->dev)
return;
- fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
+ fwnode_set_flag(fwnode, FWNODE_FLAG_NOT_DEVICE);
fwnode_links_purge_consumers(fwnode);
fwnode_for_each_available_child_node(fwnode, child)
@@ -228,7 +228,7 @@ static void __fw_devlink_pickup_dangling_consumers(struct fwnode_handle *fwnode,
if (fwnode->dev && fwnode->dev->bus)
return;
- fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
+ fwnode_set_flag(fwnode, FWNODE_FLAG_NOT_DEVICE);
__fwnode_links_move_consumers(fwnode, new_sup);
fwnode_for_each_available_child_node(fwnode, child)
@@ -1013,7 +1013,7 @@ static void device_links_missing_supplier(struct device *dev)
static bool dev_is_best_effort(struct device *dev)
{
return (fw_devlink_best_effort && dev->can_match) ||
- (dev->fwnode && (dev->fwnode->flags & FWNODE_FLAG_BEST_EFFORT));
+ (dev->fwnode && fwnode_test_flag(dev->fwnode, FWNODE_FLAG_BEST_EFFORT));
}
static struct fwnode_handle *fwnode_links_check_suppliers(
@@ -1724,11 +1724,11 @@ bool fw_devlink_is_strict(void)
static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode)
{
- if (fwnode->flags & FWNODE_FLAG_LINKS_ADDED)
+ if (fwnode_test_flag(fwnode, FWNODE_FLAG_LINKS_ADDED))
return;
fwnode_call_int_op(fwnode, add_links);
- fwnode->flags |= FWNODE_FLAG_LINKS_ADDED;
+ fwnode_set_flag(fwnode, FWNODE_FLAG_LINKS_ADDED);
}
static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
@@ -1888,7 +1888,7 @@ static bool fwnode_init_without_drv(struct fwnode_handle *fwnode)
struct device *dev;
bool ret;
- if (!(fwnode->flags & FWNODE_FLAG_INITIALIZED))
+ if (!fwnode_test_flag(fwnode, FWNODE_FLAG_INITIALIZED))
return false;
dev = get_dev_from_fwnode(fwnode);
@@ -2004,10 +2004,10 @@ static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
* We aren't trying to find all cycles. Just a cycle between con and
* sup_handle.
*/
- if (sup_handle->flags & FWNODE_FLAG_VISITED)
+ if (fwnode_test_flag(sup_handle, FWNODE_FLAG_VISITED))
return false;
- sup_handle->flags |= FWNODE_FLAG_VISITED;
+ fwnode_set_flag(sup_handle, FWNODE_FLAG_VISITED);
/* Termination condition. */
if (sup_handle == con_handle) {
@@ -2077,7 +2077,7 @@ static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
}
out:
- sup_handle->flags &= ~FWNODE_FLAG_VISITED;
+ fwnode_clear_flag(sup_handle, FWNODE_FLAG_VISITED);
put_device(sup_dev);
put_device(con_dev);
put_device(par_dev);
@@ -2130,7 +2130,7 @@ static int fw_devlink_create_devlink(struct device *con,
* When such a flag is set, we can't create device links where P is the
* supplier of C as that would delay the probe of C.
*/
- if (sup_handle->flags & FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD &&
+ if (fwnode_test_flag(sup_handle, FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD) &&
fwnode_is_ancestor_of(sup_handle, con->fwnode))
return -EINVAL;
@@ -2153,7 +2153,7 @@ static int fw_devlink_create_devlink(struct device *con,
else
flags = FW_DEVLINK_FLAGS_PERMISSIVE;
- if (sup_handle->flags & FWNODE_FLAG_NOT_DEVICE)
+ if (fwnode_test_flag(sup_handle, FWNODE_FLAG_NOT_DEVICE))
sup_dev = fwnode_get_next_parent_dev(sup_handle);
else
sup_dev = get_dev_from_fwnode(sup_handle);
@@ -2165,7 +2165,7 @@ static int fw_devlink_create_devlink(struct device *con,
* supplier device indefinitely.
*/
if (sup_dev->links.status == DL_DEV_NO_DRIVER &&
- sup_handle->flags & FWNODE_FLAG_INITIALIZED) {
+ fwnode_test_flag(sup_handle, FWNODE_FLAG_INITIALIZED)) {
dev_dbg(con,
"Not linking %pfwf - dev might never probe\n",
sup_handle);
@@ -3691,6 +3691,21 @@ int device_add(struct device *dev)
fw_devlink_link_device(dev);
}
+ /*
+ * The moment the device was linked into the bus's "klist_devices" in
+ * bus_add_device() then it's possible that probe could have been
+ * attempted in a different thread via userspace loading a driver
+ * matching the device. "ready_to_probe" being unset would have
+ * blocked those attempts. Now that all of the above initialization has
+ * happened, unblock probe. If probe happens through another thread
+ * after this point but before bus_probe_device() runs then it's fine.
+ * bus_probe_device() -> device_initial_probe() -> __device_attach()
+ * will notice (under device_lock) that the device is already bound.
+ */
+ device_lock(dev);
+ dev_set_ready_to_probe(dev);
+ device_unlock(dev);
+
bus_probe_device(dev);
/*
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 70d6ded3dd0a..7524555ff6cf 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -844,6 +844,26 @@ static int __driver_probe_device(const struct device_driver *drv, struct device
if (dev->driver)
return -EBUSY;
+ /*
+ * In device_add(), the "struct device" gets linked into the subsystem's
+ * list of devices and broadcast to userspace (via uevent) before we're
+ * quite ready to probe. Those open pathways to driver probe before
+ * we've finished enough of device_add() to reliably support probe.
+ * Detect this and tell other pathways to try again later. device_add()
+ * itself will also try to probe immediately after setting
+ * "ready_to_probe".
+ */
+ if (!dev_ready_to_probe(dev))
+ return dev_err_probe(dev, -EPROBE_DEFER, "Device not ready to probe\n");
+
+ /*
+ * Set can_match = true after calling dev_ready_to_probe(), so
+ * driver_deferred_probe_add() won't actually add the device to the
+ * deferred probe list when dev_ready_to_probe() returns false.
+ *
+ * When dev_ready_to_probe() returns false, it means that device_add()
+ * will do another probe() attempt for us.
+ */
dev->can_match = true;
dev_dbg(dev, "bus: '%s': %s: matched device with driver %s\n",
drv->bus->name, __func__, drv->name);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 28e60fc7e2dc..9f9e4e0fc95d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -7166,7 +7166,7 @@ static ssize_t do_rbd_add(const char *buf, size_t count)
rc = device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
if (rc)
- goto err_out_cleanup_disk;
+ goto err_out_device;
spin_lock(&rbd_dev_list_lock);
list_add_tail(&rbd_dev->node, &rbd_dev_list);
@@ -7180,8 +7180,8 @@ static ssize_t do_rbd_add(const char *buf, size_t count)
module_put(THIS_MODULE);
return rc;
-err_out_cleanup_disk:
- rbd_free_disk(rbd_dev);
+err_out_device:
+ device_del(&rbd_dev->dev);
err_out_image_lock:
rbd_dev_image_unlock(rbd_dev);
rbd_dev_device_release(rbd_dev);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 76b326ddd75c..cbb613f7968b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -2017,7 +2017,7 @@ static void zram_bio_discard(struct zram *zram, struct bio *bio)
*/
if (offset) {
if (n <= (PAGE_SIZE - offset))
- return;
+ goto end_bio;
n -= (PAGE_SIZE - offset);
index++;
@@ -2032,6 +2032,7 @@ static void zram_bio_discard(struct zram *zram, struct bio *bio)
n -= PAGE_SIZE;
}
+end_bio:
bio_endio(bio);
}
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 83d623d97f5f..f735e0462c55 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -332,7 +332,7 @@ static int of_weim_notify(struct notifier_block *nb, unsigned long action,
* fw_devlink doesn't skip adding consumers to this
* device.
*/
- rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
+ fwnode_clear_flag(&rd->dn->fwnode, FWNODE_FLAG_NOT_DEVICE);
if (!of_platform_device_create(rd->dn, NULL, &pdev->dev)) {
dev_err(&pdev->dev,
"Failed to create child device '%pOF'\n",
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 73889a7dcc13..833d0c940477 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -1235,7 +1235,7 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_unregister;
}
- err = mhi_sync_power_up(mhi_cntrl);
+ err = mhi_async_power_up(mhi_cntrl);
if (err) {
dev_err(&pdev->dev, "failed to power up MHI controller\n");
goto err_unprepare;
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index dfeb28866a32..192063a20043 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -283,7 +283,7 @@ static void tpm_dev_release(struct device *dev)
kfree(chip->work_space.context_buf);
kfree(chip->work_space.session_buf);
#ifdef CONFIG_TCG_TPM2_HMAC
- kfree(chip->auth);
+ kfree_sensitive(chip->auth);
#endif
kfree(chip);
}
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index d7aabb66a4d1..c710128f49b1 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -338,10 +338,8 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
NULL, 0);
tpm_buf_append_u16(&buf, num_bytes);
err = tpm_buf_fill_hmac_session(chip, &buf);
- if (err) {
- tpm_buf_destroy(&buf);
- return err;
- }
+ if (err)
+ goto out;
err = tpm_transmit_cmd(chip, &buf,
offsetof(struct tpm2_get_random_out,
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 59e992dc65c4..78c6a21bc92b 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -472,6 +472,8 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
status = tpm_tis_status(chip);
if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
rc = -EIO;
+ dev_err(&chip->dev, "TPM_STS_DATA_EXPECT should be set. sts = 0x%08x\n",
+ status);
goto out_err;
}
}
@@ -492,6 +494,8 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
status = tpm_tis_status(chip);
if (!itpm && (status & TPM_STS_DATA_EXPECT) != 0) {
rc = -EIO;
+ dev_err(&chip->dev, "TPM_STS_DATA_EXPECT should be unset. sts = 0x%08x\n",
+ status);
goto out_err;
}
@@ -553,11 +557,16 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
break;
else if (rc != -EAGAIN && rc != -EIO)
/* Data transfer failed, not recoverable */
- return rc;
+ goto out_err;
usleep_range(priv->timeout_min, priv->timeout_max);
}
+ if (rc == -EAGAIN || rc == -EIO) {
+ dev_err(&chip->dev, "Exhausted %d tpm_tis_send_data retries\n", TPM_RETRY);
+ goto out_err;
+ }
+
/* go and do it */
rc = tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_GO);
if (rc < 0)
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 0dd90785db9a..5f53936eb905 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -2130,7 +2130,7 @@ static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
- free_page((unsigned long)dd->buf);
+ free_pages((unsigned long)dd->buf, ATMEL_AES_BUFFER_ORDER);
}
static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index 590ea984c622..813d8517e1c3 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -261,6 +261,7 @@ static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm)
if (IS_ERR(fallback)) {
dev_err(&ctx->client->dev, "Failed to allocate transformation for '%s': %ld\n",
alg, PTR_ERR(fallback));
+ atmel_ecc_i2c_client_free(ctx->client);
return PTR_ERR(fallback);
}
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c
index a895e4289efa..a85dfdc6b360 100644
--- a/drivers/crypto/atmel-i2c.c
+++ b/drivers/crypto/atmel-i2c.c
@@ -72,8 +72,8 @@ EXPORT_SYMBOL(atmel_i2c_init_read_config_cmd);
int atmel_i2c_init_read_otp_cmd(struct atmel_i2c_cmd *cmd, u16 addr)
{
- if (addr < 0 || addr > OTP_ZONE_SIZE)
- return -1;
+ if (addr >= OTP_ZONE_SIZE / 4)
+ return -EINVAL;
cmd->word_addr = COMMAND;
cmd->opcode = OPCODE_READ;
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index 63a3f5042a48..1add8dcd306f 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include "atmel-i2c.h"
@@ -95,19 +96,24 @@ static int atmel_sha204a_rng_read(struct hwrng *rng, void *data, size_t max,
static int atmel_sha204a_otp_read(struct i2c_client *client, u16 addr, u8 *otp)
{
struct atmel_i2c_cmd cmd;
- int ret = -1;
+ int ret;
- if (atmel_i2c_init_read_otp_cmd(&cmd, addr) < 0) {
+ ret = atmel_i2c_init_read_otp_cmd(&cmd, addr);
+ if (ret < 0) {
dev_err(&client->dev, "failed, invalid otp address %04X\n",
addr);
return ret;
}
ret = atmel_i2c_send_receive(client, &cmd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to read otp at %04X\n", addr);
+ return ret;
+ }
if (cmd.data[0] == 0xff) {
dev_err(&client->dev, "failed, device not ready\n");
- return -EINVAL;
+ return -EIO;
}
memcpy(otp, cmd.data+1, 4);
@@ -120,21 +126,22 @@ static ssize_t otp_show(struct device *dev,
{
u16 addr;
u8 otp[OTP_ZONE_SIZE];
- char *str = buf;
struct i2c_client *client = to_i2c_client(dev);
- int i;
+ ssize_t len = 0;
+ int i, ret;
- for (addr = 0; addr < OTP_ZONE_SIZE/4; addr++) {
- if (atmel_sha204a_otp_read(client, addr, otp + addr * 4) < 0) {
+ for (addr = 0; addr < OTP_ZONE_SIZE / 4; addr++) {
+ ret = atmel_sha204a_otp_read(client, addr, otp + addr * 4);
+ if (ret < 0) {
dev_err(dev, "failed to read otp zone\n");
- break;
+ return ret;
}
}
- for (i = 0; i < addr*2; i++)
- str += sprintf(str, "%02X", otp[i]);
- str += sprintf(str, "\n");
- return str - buf;
+ for (i = 0; i < OTP_ZONE_SIZE; i++)
+ len += sysfs_emit_at(buf, len, "%02X", otp[i]);
+ len += sysfs_emit_at(buf, len, "\n");
+ return len;
}
static DEVICE_ATTR_RO(otp);
@@ -191,10 +198,8 @@ static void atmel_sha204a_remove(struct i2c_client *client)
{
struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
- if (atomic_read(&i2c_priv->tfm_count)) {
- dev_emerg(&client->dev, "Device is busy, will remove it anyhow\n");
- return;
- }
+ devm_hwrng_unregister(&client->dev, &i2c_priv->hwrng);
+ atmel_i2c_flush_queue();
sysfs_remove_group(&client->dev.kobj, &atmel_sha204a_groups);
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index d15b2e943447..813ed47e60cc 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -294,8 +294,8 @@ static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
} else {
- dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
- dd->dma_size, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
+ dd->dma_size, DMA_FROM_DEVICE);
/* copy data */
count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
@@ -619,8 +619,8 @@ static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
} else {
- dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
- dd->dma_size, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
+ dd->dma_size, DMA_FROM_DEVICE);
/* copy data */
count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index f418162932fe..ef9bde93a695 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -1448,6 +1448,7 @@ static int cc_mac_digest(struct ahash_request *req)
if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
req->nbytes, 1, flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_result(dev, state, digestsize, req->result);
cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index 1189effcdad0..512190b31b99 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -844,7 +844,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
if (crypto_skcipher_ivsize(atfm))
dma_unmap_single(info->dev, sec_req->dma_iv,
crypto_skcipher_ivsize(atfm),
- DMA_BIDIRECTIONAL);
+ DMA_TO_DEVICE);
err_unmap_out_sg:
if (split)
sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h
index 887d4ce3cb49..169f822fcae3 100644
--- a/drivers/crypto/nx/nx-842.h
+++ b/drivers/crypto/nx/nx-842.h
@@ -158,7 +158,7 @@ struct nx842_crypto_header_group {
struct nx842_crypto_header {
/* New members MUST be added within the struct_group() macro below. */
- struct_group_tagged(nx842_crypto_header_hdr, hdr,
+ __struct_group(nx842_crypto_header_hdr, hdr, __packed,
__be16 magic; /* NX842_CRYPTO_MAGIC */
__be16 ignore; /* decompressed end bytes to ignore */
u8 groups; /* total groups in this header */
@@ -166,7 +166,7 @@ struct nx842_crypto_header {
struct nx842_crypto_header_group group[];
} __packed;
static_assert(offsetof(struct nx842_crypto_header, group) == sizeof(struct nx842_crypto_header_hdr),
- "struct member likely outside of struct_group_tagged()");
+ "struct member likely outside of __struct_group()");
#define NX842_CRYPTO_GROUP_MAX (0x20)
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 511ddcb0efd4..7447af59f7bb 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -12,6 +12,7 @@
* All rights reserved.
*/
+#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -868,20 +869,28 @@ struct talitos_ahash_req_ctx {
u8 buf[2][HASH_MAX_BLOCK_SIZE];
int buf_idx;
unsigned int swinit;
- unsigned int first;
- unsigned int last;
+ unsigned int first_desc;
+ unsigned int last_desc;
+ unsigned int last_request;
unsigned int to_hash_later;
unsigned int nbuf;
struct scatterlist bufsl[2];
struct scatterlist *psrc;
+
+ struct scatterlist request_bufsl[2];
+ struct ahash_request *areq;
+ struct scatterlist *request_sl;
+ unsigned int remaining_ahash_request_bytes;
+ unsigned int current_ahash_request_bytes;
+ struct work_struct sec1_ahash_process_remaining;
};
struct talitos_export_state {
u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
u8 buf[HASH_MAX_BLOCK_SIZE];
unsigned int swinit;
- unsigned int first;
- unsigned int last;
+ unsigned int first_desc;
+ unsigned int last_desc;
unsigned int to_hash_later;
unsigned int nbuf;
};
@@ -1713,7 +1722,7 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
if (desc->next_desc &&
desc->ptr[5].ptr != desc2->ptr[5].ptr)
unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
- if (req_ctx->last)
+ if (req_ctx->last_desc)
memcpy(areq->result, req_ctx->hw_context,
crypto_ahash_digestsize(tfm));
@@ -1750,7 +1759,7 @@ static void ahash_done(struct device *dev,
container_of(desc, struct talitos_edesc, desc);
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- if (!req_ctx->last && req_ctx->to_hash_later) {
+ if (!req_ctx->last_desc && req_ctx->to_hash_later) {
/* Position any partial block for next update/final/finup */
req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
req_ctx->nbuf = req_ctx->to_hash_later;
@@ -1759,7 +1768,20 @@ static void ahash_done(struct device *dev,
kfree(edesc);
- ahash_request_complete(areq, err);
+ if (err) {
+ ahash_request_complete(areq, err);
+ return;
+ }
+
+ req_ctx->remaining_ahash_request_bytes -=
+ req_ctx->current_ahash_request_bytes;
+
+ if (!req_ctx->remaining_ahash_request_bytes) {
+ ahash_request_complete(areq, 0);
+ return;
+ }
+
+ schedule_work(&req_ctx->sec1_ahash_process_remaining);
}
/*
@@ -1803,7 +1825,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
/* first DWORD empty */
/* hash context in */
- if (!req_ctx->first || req_ctx->swinit) {
+ if (!req_ctx->first_desc || req_ctx->swinit) {
map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
req_ctx->hw_context_size,
req_ctx->hw_context,
@@ -1811,7 +1833,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
req_ctx->swinit = 0;
}
/* Indicate next op is not the first. */
- req_ctx->first = 0;
+ req_ctx->first_desc = 0;
/* HMAC key */
if (ctx->keylen)
@@ -1844,7 +1866,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
/* fifth DWORD empty */
/* hash/HMAC out -or- hash context out */
- if (req_ctx->last)
+ if (req_ctx->last_desc)
map_single_talitos_ptr(dev, &desc->ptr[5],
crypto_ahash_digestsize(tfm),
req_ctx->hw_context, DMA_FROM_DEVICE);
@@ -1886,7 +1908,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
if (sg_count > 1)
sync_needed = true;
copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
- if (req_ctx->last)
+ if (req_ctx->last_desc)
map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
req_ctx->hw_context_size,
req_ctx->hw_context,
@@ -1925,60 +1947,7 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
nbytes, 0, 0, 0, areq->base.flags, false);
}
-static int ahash_init(struct ahash_request *areq)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
- struct device *dev = ctx->dev;
- struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- unsigned int size;
- dma_addr_t dma;
-
- /* Initialize the context */
- req_ctx->buf_idx = 0;
- req_ctx->nbuf = 0;
- req_ctx->first = 1; /* first indicates h/w must init its context */
- req_ctx->swinit = 0; /* assume h/w init of context */
- size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
- ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
- : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
- req_ctx->hw_context_size = size;
-
- dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
- DMA_TO_DEVICE);
- dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
-
- return 0;
-}
-
-/*
- * on h/w without explicit sha224 support, we initialize h/w context
- * manually with sha224 constants, and tell it to run sha256.
- */
-static int ahash_init_sha224_swinit(struct ahash_request *areq)
-{
- struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
-
- req_ctx->hw_context[0] = SHA224_H0;
- req_ctx->hw_context[1] = SHA224_H1;
- req_ctx->hw_context[2] = SHA224_H2;
- req_ctx->hw_context[3] = SHA224_H3;
- req_ctx->hw_context[4] = SHA224_H4;
- req_ctx->hw_context[5] = SHA224_H5;
- req_ctx->hw_context[6] = SHA224_H6;
- req_ctx->hw_context[7] = SHA224_H7;
-
- /* init 64-bit count */
- req_ctx->hw_context[8] = 0;
- req_ctx->hw_context[9] = 0;
-
- ahash_init(areq);
- req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
-
- return 0;
-}
-
-static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+static int ahash_process_req_one(struct ahash_request *areq, unsigned int nbytes)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
@@ -1995,14 +1964,14 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
bool is_sec1 = has_ftr_sec1(priv);
u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
- if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
+ if (!req_ctx->last_desc && (nbytes + req_ctx->nbuf <= blocksize)) {
/* Buffer up to one whole block */
- nents = sg_nents_for_len(areq->src, nbytes);
+ nents = sg_nents_for_len(req_ctx->request_sl, nbytes);
if (nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
return nents;
}
- sg_copy_to_buffer(areq->src, nents,
+ sg_copy_to_buffer(req_ctx->request_sl, nents,
ctx_buf + req_ctx->nbuf, nbytes);
req_ctx->nbuf += nbytes;
return 0;
@@ -2012,7 +1981,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
nbytes_to_hash = nbytes + req_ctx->nbuf;
to_hash_later = nbytes_to_hash & (blocksize - 1);
- if (req_ctx->last)
+ if (req_ctx->last_desc)
to_hash_later = 0;
else if (to_hash_later)
/* There is a partial block. Hash the full block(s) now */
@@ -2029,7 +1998,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
sg_init_table(req_ctx->bufsl, nsg);
sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
if (nsg > 1)
- sg_chain(req_ctx->bufsl, 2, areq->src);
+ sg_chain(req_ctx->bufsl, 2, req_ctx->request_sl);
req_ctx->psrc = req_ctx->bufsl;
} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
int offset;
@@ -2038,26 +2007,26 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
offset = blocksize - req_ctx->nbuf;
else
offset = nbytes_to_hash - req_ctx->nbuf;
- nents = sg_nents_for_len(areq->src, offset);
+ nents = sg_nents_for_len(req_ctx->request_sl, offset);
if (nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
return nents;
}
- sg_copy_to_buffer(areq->src, nents,
+ sg_copy_to_buffer(req_ctx->request_sl, nents,
ctx_buf + req_ctx->nbuf, offset);
req_ctx->nbuf += offset;
- req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
+ req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, req_ctx->request_sl,
offset);
} else
- req_ctx->psrc = areq->src;
+ req_ctx->psrc = req_ctx->request_sl;
if (to_hash_later) {
- nents = sg_nents_for_len(areq->src, nbytes);
+ nents = sg_nents_for_len(req_ctx->request_sl, nbytes);
if (nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
return nents;
}
- sg_pcopy_to_buffer(areq->src, nents,
+ sg_pcopy_to_buffer(req_ctx->request_sl, nents,
req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
to_hash_later,
nbytes - to_hash_later);
@@ -2065,36 +2034,145 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
req_ctx->to_hash_later = to_hash_later;
/* Allocate extended descriptor */
- edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
+ edesc = ahash_edesc_alloc(req_ctx->areq, nbytes_to_hash);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
edesc->desc.hdr = ctx->desc_hdr_template;
/* On last one, request SEC to pad; otherwise continue */
- if (req_ctx->last)
+ if (req_ctx->last_desc)
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
else
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
/* request SEC to INIT hash. */
- if (req_ctx->first && !req_ctx->swinit)
+ if (req_ctx->first_desc && !req_ctx->swinit)
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
/* When the tfm context has a keylen, it's an HMAC.
* A first or last (ie. not middle) descriptor must request HMAC.
*/
- if (ctx->keylen && (req_ctx->first || req_ctx->last))
+ if (ctx->keylen && (req_ctx->first_desc || req_ctx->last_desc))
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
- return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
+ return common_nonsnoop_hash(edesc, req_ctx->areq, nbytes_to_hash, ahash_done);
+}
+
+static void sec1_ahash_process_remaining(struct work_struct *work)
+{
+ struct talitos_ahash_req_ctx *req_ctx =
+ container_of(work, struct talitos_ahash_req_ctx,
+ sec1_ahash_process_remaining);
+ int err = 0;
+
+ req_ctx->request_sl = scatterwalk_ffwd(req_ctx->request_bufsl,
+ req_ctx->request_sl, TALITOS1_MAX_DATA_LEN);
+
+ if (req_ctx->remaining_ahash_request_bytes > TALITOS1_MAX_DATA_LEN)
+ req_ctx->current_ahash_request_bytes = TALITOS1_MAX_DATA_LEN;
+ else {
+ req_ctx->current_ahash_request_bytes =
+ req_ctx->remaining_ahash_request_bytes;
+
+ if (req_ctx->last_request)
+ req_ctx->last_desc = 1;
+ }
+
+ err = ahash_process_req_one(req_ctx->areq,
+ req_ctx->current_ahash_request_bytes);
+
+ if (err != -EINPROGRESS)
+ ahash_request_complete(req_ctx->areq, err);
+}
+
+static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = ctx->dev;
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+
+ req_ctx->areq = areq;
+ req_ctx->request_sl = areq->src;
+ req_ctx->remaining_ahash_request_bytes = nbytes;
+
+ if (is_sec1) {
+ if (nbytes > TALITOS1_MAX_DATA_LEN)
+ nbytes = TALITOS1_MAX_DATA_LEN;
+ else if (req_ctx->last_request)
+ req_ctx->last_desc = 1;
+ }
+
+ req_ctx->current_ahash_request_bytes = nbytes;
+
+ return ahash_process_req_one(req_ctx->areq,
+ req_ctx->current_ahash_request_bytes);
+}
+
+static int ahash_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = ctx->dev;
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ unsigned int size;
+ dma_addr_t dma;
+
+ /* Initialize the context */
+ req_ctx->buf_idx = 0;
+ req_ctx->nbuf = 0;
+ req_ctx->first_desc = 1; /* first_desc indicates h/w must init its context */
+ req_ctx->swinit = 0; /* assume h/w init of context */
+ size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+ ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+ : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+ req_ctx->hw_context_size = size;
+ req_ctx->last_request = 0;
+ req_ctx->last_desc = 0;
+ INIT_WORK(&req_ctx->sec1_ahash_process_remaining, sec1_ahash_process_remaining);
+
+ dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
+
+ return 0;
+}
+
+/*
+ * on h/w without explicit sha224 support, we initialize h/w context
+ * manually with sha224 constants, and tell it to run sha256.
+ */
+static int ahash_init_sha224_swinit(struct ahash_request *areq)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ req_ctx->hw_context[0] = SHA224_H0;
+ req_ctx->hw_context[1] = SHA224_H1;
+ req_ctx->hw_context[2] = SHA224_H2;
+ req_ctx->hw_context[3] = SHA224_H3;
+ req_ctx->hw_context[4] = SHA224_H4;
+ req_ctx->hw_context[5] = SHA224_H5;
+ req_ctx->hw_context[6] = SHA224_H6;
+ req_ctx->hw_context[7] = SHA224_H7;
+
+ /* init 64-bit count */
+ req_ctx->hw_context[8] = 0;
+ req_ctx->hw_context[9] = 0;
+
+ ahash_init(areq);
+ req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
+
+ return 0;
}
static int ahash_update(struct ahash_request *areq)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- req_ctx->last = 0;
+ req_ctx->last_request = 0;
return ahash_process_req(areq, areq->nbytes);
}
@@ -2103,7 +2181,7 @@ static int ahash_final(struct ahash_request *areq)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- req_ctx->last = 1;
+ req_ctx->last_request = 1;
return ahash_process_req(areq, 0);
}
@@ -2112,7 +2190,7 @@ static int ahash_finup(struct ahash_request *areq)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- req_ctx->last = 1;
+ req_ctx->last_request = 1;
return ahash_process_req(areq, areq->nbytes);
}
@@ -2146,8 +2224,8 @@ static int ahash_export(struct ahash_request *areq, void *out)
req_ctx->hw_context_size);
memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
export->swinit = req_ctx->swinit;
- export->first = req_ctx->first;
- export->last = req_ctx->last;
+ export->first_desc = req_ctx->first_desc;
+ export->last_desc = req_ctx->last_desc;
export->to_hash_later = req_ctx->to_hash_later;
export->nbuf = req_ctx->nbuf;
@@ -2172,8 +2250,8 @@ static int ahash_import(struct ahash_request *areq, const void *in)
memcpy(req_ctx->hw_context, export->hw_context, size);
memcpy(req_ctx->buf[0], export->buf, export->nbuf);
req_ctx->swinit = export->swinit;
- req_ctx->first = export->first;
- req_ctx->last = export->last;
+ req_ctx->first_desc = export->first_desc;
+ req_ctx->last_desc = export->last_desc;
req_ctx->to_hash_later = export->to_hash_later;
req_ctx->nbuf = export->nbuf;
diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c
index daadd71d8ddd..a9715e0f2e14 100644
--- a/drivers/firmware/google/framebuffer-coreboot.c
+++ b/drivers/firmware/google/framebuffer-coreboot.c
@@ -53,7 +53,7 @@ static int framebuffer_probe(struct coreboot_device *dev)
return -ENODEV;
memset(&res, 0, sizeof(res));
- res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ res.flags = IORESOURCE_MEM;
res.name = "Coreboot Framebuffer";
res.start = fb->physical_address;
length = PAGE_ALIGN(fb->y_resolution * fb->bytes_per_line);
@@ -67,19 +67,10 @@ static int framebuffer_probe(struct coreboot_device *dev)
sizeof(pdata));
if (IS_ERR(pdev))
pr_warn("coreboot: could not register framebuffer\n");
- else
- dev_set_drvdata(&dev->dev, pdev);
return PTR_ERR_OR_ZERO(pdev);
}
-static void framebuffer_remove(struct coreboot_device *dev)
-{
- struct platform_device *pdev = dev_get_drvdata(&dev->dev);
-
- platform_device_unregister(pdev);
-}
-
static const struct coreboot_device_id framebuffer_ids[] = {
{ .tag = CB_TAG_FRAMEBUFFER },
{ /* sentinel */ }
@@ -88,7 +79,6 @@ MODULE_DEVICE_TABLE(coreboot, framebuffer_ids);
static struct coreboot_driver framebuffer_driver = {
.probe = framebuffer_probe,
- .remove = framebuffer_remove,
.drv = {
.name = "framebuffer",
},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 702f6610d024..ded22f244ada 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -36,6 +36,7 @@
#define AMDGPU_BO_LIST_MAX_PRIORITY 32u
#define AMDGPU_BO_LIST_NUM_BUCKETS (AMDGPU_BO_LIST_MAX_PRIORITY + 1)
+#define AMDGPU_BO_LIST_MAX_ENTRIES (128 * 1024)
static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
{
@@ -184,43 +185,39 @@ void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
struct drm_amdgpu_bo_list_entry **info_param)
{
- const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
+ const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
+ const uint32_t bo_info_size = in->bo_info_size;
+ const uint32_t bo_number = in->bo_number;
struct drm_amdgpu_bo_list_entry *info;
- int r;
- info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
- if (!info)
- return -ENOMEM;
+ if (bo_number > AMDGPU_BO_LIST_MAX_ENTRIES)
+ return -EINVAL;
/* copy the handle array from userspace to a kernel buffer */
- r = -EFAULT;
- if (likely(info_size == in->bo_info_size)) {
- unsigned long bytes = in->bo_number *
- in->bo_info_size;
-
- if (copy_from_user(info, uptr, bytes))
- goto error_free;
-
+ if (likely(info_size == bo_info_size)) {
+ info = vmemdup_array_user(uptr, bo_number, info_size);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
} else {
- unsigned long bytes = min(in->bo_info_size, info_size);
+ const uint32_t bytes = min(bo_info_size, info_size);
unsigned i;
- memset(info, 0, in->bo_number * info_size);
- for (i = 0; i < in->bo_number; ++i) {
- if (copy_from_user(&info[i], uptr, bytes))
- goto error_free;
+ info = kvmalloc_array(bo_number, info_size, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
- uptr += in->bo_info_size;
+ memset(info, 0, bo_number * info_size);
+ for (i = 0; i < bo_number; ++i, uptr += bo_info_size) {
+ if (copy_from_user(&info[i], uptr, bytes)) {
+ kvfree(info);
+ return -EFAULT;
+ }
}
}
*info_param = info;
return 0;
-
-error_free:
- kvfree(info);
- return r;
}
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index af729cd521ed..40dd04a4f7df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -75,6 +75,9 @@ static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
unsigned int type,
uint64_t size_in_page)
{
+ if (!size_in_page)
+ return 0;
+
return ttm_range_man_init(&adev->mman.bdev, type,
false, size_in_page);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index aa5815bd633e..ae9b95dd8602 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -670,15 +670,35 @@ static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring)
*/
void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
{
- if (!amdgpu_sriov_vf(ring->adev)) {
+ struct amdgpu_device *adev = ring->adev;
+
+ if (!amdgpu_sriov_vf(adev)) {
+ int jpeg_inst = GET_INST(JPEG, ring->me);
+ uint32_t value = 0x80004000; /* default DS14 */
+
amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x62a04); /* PCTL0_MMHUB_DEEPSLEEP_IB */
+
+ /* PCTL0__MMHUB_DEEPSLEEP_IB could be different on different mmhub version */
+ switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
+ case IP_VERSION(4, 1, 0):
+ amdgpu_ring_write(ring, 0x69004);
+ value = 0x80010000;
+ break;
+ case IP_VERSION(4, 2, 0):
+ amdgpu_ring_write(ring, 0x60804);
+ if (jpeg_inst & 1)
+ value = 0x80010000;
+ break;
+ default:
+ amdgpu_ring_write(ring, 0x62a04);
+ break;
+ }
amdgpu_ring_write(ring,
PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0,
0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x80004000);
+ amdgpu_ring_write(ring, value);
}
}
@@ -691,15 +711,35 @@ void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
*/
void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring)
{
- if (!amdgpu_sriov_vf(ring->adev)) {
+ struct amdgpu_device *adev = ring->adev;
+
+ if (!amdgpu_sriov_vf(adev)) {
+ int jpeg_inst = GET_INST(JPEG, ring->me);
+ uint32_t value = 0x00004000; /* default DS14 */
+
amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x62a04);
+
+ /* PCTL0__MMHUB_DEEPSLEEP_IB could be different on different mmhub version */
+ switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
+ case IP_VERSION(4, 1, 0):
+ amdgpu_ring_write(ring, 0x69004);
+ value = 0x00010000;
+ break;
+ case IP_VERSION(4, 2, 0):
+ amdgpu_ring_write(ring, 0x60804);
+ if (jpeg_inst & 1)
+ value = 0x00010000;
+ break;
+ default:
+ amdgpu_ring_write(ring, 0x62a04);
+ break;
+ }
amdgpu_ring_write(ring,
PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0,
0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x00004000);
+ amdgpu_ring_write(ring, value);
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 6e5adab03471..2216db0aa9d5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -853,7 +853,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
/* Remove conflicting drivers (vesafb, efifb etc). */
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver_pci);
if (ret)
- return ret;
+ goto fail_nvkm;
pci_set_master(pdev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 67e3c99de73a..1771fe338f8d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -686,7 +686,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
}
nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
- if (unlikely(r->reloc_bo_offset + 4 >
+ if (unlikely((u64)r->reloc_bo_offset + 4 >
nvbo->bo.base.size)) {
NV_PRINTK(err, cli, "reloc outside of bo\n");
ret = -EINVAL;
diff --git a/drivers/gpu/drm/tiny/arcpgu.c b/drivers/gpu/drm/tiny/arcpgu.c
index 4f8f3172379e..874150f758be 100644
--- a/drivers/gpu/drm/tiny/arcpgu.c
+++ b/drivers/gpu/drm/tiny/arcpgu.c
@@ -248,7 +248,8 @@ DEFINE_DRM_GEM_DMA_FOPS(arcpgu_drm_ops);
static int arcpgu_load(struct arcpgu_drm_private *arcpgu)
{
struct platform_device *pdev = to_platform_device(arcpgu->drm.dev);
- struct device_node *encoder_node = NULL, *endpoint_node = NULL;
+ struct device_node *encoder_node __free(device_node) = NULL;
+ struct device_node *endpoint_node = NULL;
struct drm_connector *connector = NULL;
struct drm_device *drm = &arcpgu->drm;
struct resource *res;
diff --git a/drivers/greybus/gb-beagleplay.c b/drivers/greybus/gb-beagleplay.c
index 2a207eab4045..59cfef3c2b43 100644
--- a/drivers/greybus/gb-beagleplay.c
+++ b/drivers/greybus/gb-beagleplay.c
@@ -242,30 +242,26 @@ static void hdlc_write(struct gb_beagleplay *bg)
}
/**
- * hdlc_append() - Queue HDLC data for sending.
+ * hdlc_append() - Queue a single HDLC byte for sending.
* @bg: beagleplay greybus driver
* @value: hdlc byte to transmit
*
- * Assumes that producer lock as been acquired.
+ * Caller must hold tx_producer_lock and must have ensured sufficient
+ * space in the circular buffer before calling (see hdlc_tx_frames()).
*/
static void hdlc_append(struct gb_beagleplay *bg, u8 value)
{
- int tail, head = bg->tx_circ_buf.head;
+ int head = bg->tx_circ_buf.head;
+ int tail = READ_ONCE(bg->tx_circ_buf.tail);
- while (true) {
- tail = READ_ONCE(bg->tx_circ_buf.tail);
-
- if (CIRC_SPACE(head, tail, TX_CIRC_BUF_SIZE) >= 1) {
- bg->tx_circ_buf.buf[head] = value;
+ lockdep_assert_held(&bg->tx_producer_lock);
+ if (WARN_ON_ONCE(CIRC_SPACE(head, tail, TX_CIRC_BUF_SIZE) < 1))
+ return;
- /* Finish producing HDLC byte */
- smp_store_release(&bg->tx_circ_buf.head,
- (head + 1) & (TX_CIRC_BUF_SIZE - 1));
- return;
- }
- dev_warn(&bg->sd->dev, "Tx circ buf full");
- usleep_range(3000, 5000);
- }
+ bg->tx_circ_buf.buf[head] = value;
+ /* Ensure buffer write is visible before advancing head. */
+ smp_store_release(&bg->tx_circ_buf.head,
+ (head + 1) & (TX_CIRC_BUF_SIZE - 1));
}
static void hdlc_append_escaped(struct gb_beagleplay *bg, u8 value)
@@ -313,13 +309,90 @@ static void hdlc_transmit(struct work_struct *work)
spin_unlock_bh(&bg->tx_consumer_lock);
}
+/**
+ * hdlc_encoded_length() - Calculate worst-case encoded length of an HDLC frame.
+ * @payloads: array of payload buffers
+ * @count: number of payloads
+ *
+ * Returns the maximum number of bytes needed in the circular buffer.
+ */
+static size_t hdlc_encoded_length(const struct hdlc_payload payloads[],
+ size_t count)
+{
+ size_t i, payload_len = 0;
+
+ for (i = 0; i < count; i++)
+ payload_len += payloads[i].len;
+
+ /*
+ * Worst case: every data byte needs escaping (doubles in size).
+ * data bytes = address(1) + control(1) + payload + crc(2)
+ * framing = opening flag(1) + closing flag(1)
+ */
+ return 2 + (1 + 1 + payload_len + 2) * 2;
+}
+
+#define HDLC_TX_BUF_WAIT_RETRIES 500
+#define HDLC_TX_BUF_WAIT_US_MIN 3000
+#define HDLC_TX_BUF_WAIT_US_MAX 5000
+
+/**
+ * hdlc_tx_frames() - Encode and queue an HDLC frame for transmission.
+ * @bg: beagleplay greybus driver
+ * @address: HDLC address field
+ * @control: HDLC control field
+ * @payloads: array of payload buffers
+ * @count: number of payloads
+ *
+ * Sleeps outside the spinlock until enough circular-buffer space is
+ * available, then verifies space under the lock and writes the entire
+ * frame atomically. Either a complete frame is enqueued or nothing is
+ * written, avoiding both sleeping in atomic context and partial frames.
+ */
static void hdlc_tx_frames(struct gb_beagleplay *bg, u8 address, u8 control,
const struct hdlc_payload payloads[], size_t count)
{
+ size_t needed = hdlc_encoded_length(payloads, count);
+ int retries = HDLC_TX_BUF_WAIT_RETRIES;
size_t i;
+ int head, tail;
+
+ /* Wait outside the lock for sufficient buffer space. */
+ while (retries--) {
+ /* Pairs with smp_store_release() in hdlc_append(). */
+ head = smp_load_acquire(&bg->tx_circ_buf.head);
+ tail = READ_ONCE(bg->tx_circ_buf.tail);
+
+ if (CIRC_SPACE(head, tail, TX_CIRC_BUF_SIZE) >= needed)
+ break;
+
+ /* Kick the consumer and sleep — no lock held. */
+ schedule_work(&bg->tx_work);
+ usleep_range(HDLC_TX_BUF_WAIT_US_MIN, HDLC_TX_BUF_WAIT_US_MAX);
+ }
+
+ if (retries < 0) {
+ dev_warn_ratelimited(&bg->sd->dev,
+ "Tx circ buf full, dropping frame\n");
+ return;
+ }
spin_lock(&bg->tx_producer_lock);
+ /*
+ * Re-check under the lock. Should not fail since
+ * tx_producer_lock serialises all producers and the
+ * consumer only frees space, but guard against it.
+ */
+ head = bg->tx_circ_buf.head;
+ tail = READ_ONCE(bg->tx_circ_buf.tail);
+ if (unlikely(CIRC_SPACE(head, tail, TX_CIRC_BUF_SIZE) < needed)) {
+ spin_unlock(&bg->tx_producer_lock);
+ dev_warn_ratelimited(&bg->sd->dev,
+ "Tx circ buf space lost, dropping frame\n");
+ return;
+ }
+
hdlc_append_tx_frame(bg);
hdlc_append_tx_u8(bg, address);
hdlc_append_tx_u8(bg, control);
@@ -535,6 +608,13 @@ static size_t cc1352_bootloader_rx(struct gb_beagleplay *bg, const u8 *data,
int ret;
size_t off = 0;
+ if (count > sizeof(bg->rx_buffer) - bg->rx_buffer_len) {
+ dev_warn(&bg->sd->dev,
+ "dropping oversized bootloader receive chunk");
+ bg->rx_buffer_len = 0;
+ return count;
+ }
+
memcpy(bg->rx_buffer + bg->rx_buffer_len, data, count);
bg->rx_buffer_len += count;
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 7e11f2932ff0..03ffd669eb6d 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -832,6 +832,7 @@ static int apple_backlight_init(struct hid_device *hdev)
asc->backlight->cdev.name = "apple::kbd_backlight";
asc->backlight->cdev.max_brightness = rep->backlight_on_max;
asc->backlight->cdev.brightness_set_blocking = apple_backlight_led_set;
+ asc->backlight->cdev.flags = LED_CORE_SUSPENDRESUME;
ret = apple_backlight_set(hdev, 0, 0);
if (ret < 0) {
@@ -900,6 +901,7 @@ static int apple_magic_backlight_init(struct hid_device *hdev)
backlight->cdev.name = ":white:" LED_FUNCTION_KBD_BACKLIGHT;
backlight->cdev.max_brightness = backlight->brightness->field[0]->logical_maximum;
backlight->cdev.brightness_set_blocking = apple_magic_backlight_led_set;
+ backlight->cdev.flags = LED_CORE_SUSPENDRESUME;
apple_magic_backlight_set(backlight, 0, 0);
diff --git a/drivers/hwmon/powerz.c b/drivers/hwmon/powerz.c
index 9e1dfe59aa56..da6dd48ac67c 100644
--- a/drivers/hwmon/powerz.c
+++ b/drivers/hwmon/powerz.c
@@ -112,6 +112,7 @@ static void powerz_usb_cmd_complete(struct urb *urb)
static int powerz_read_data(struct usb_device *udev, struct powerz_priv *priv)
{
+ long rc;
int ret;
if (!priv->urb)
@@ -133,8 +134,14 @@ static int powerz_read_data(struct usb_device *udev, struct powerz_priv *priv)
if (ret)
return ret;
- if (!wait_for_completion_interruptible_timeout
- (&priv->completion, msecs_to_jiffies(5))) {
+ rc = wait_for_completion_interruptible_timeout(&priv->completion,
+ msecs_to_jiffies(5));
+ if (rc < 0) {
+ usb_kill_urb(priv->urb);
+ return rc;
+ }
+
+ if (rc == 0) {
usb_kill_urb(priv->urb);
return -EIO;
}
diff --git a/drivers/hwmon/pt5161l.c b/drivers/hwmon/pt5161l.c
index a9f0b23f9e76..f95750522699 100644
--- a/drivers/hwmon/pt5161l.c
+++ b/drivers/hwmon/pt5161l.c
@@ -124,7 +124,7 @@ static int pt5161l_read_block_data(struct pt5161l_data *data, u32 address,
int ret, tries;
u8 remain_len = len;
u8 curr_len;
- u8 wbuf[16], rbuf[24];
+ u8 wbuf[16], rbuf[I2C_SMBUS_BLOCK_MAX];
u8 cmd = 0x08; /* [7]:pec_en, [4:2]:func, [1]:start, [0]:end */
u8 config = 0x00; /* [6]:cfg_type, [4:1]:burst_len, [0]:address bit16 */
@@ -154,7 +154,7 @@ static int pt5161l_read_block_data(struct pt5161l_data *data, u32 address,
break;
}
if (tries >= 3)
- return ret;
+ return ret < 0 ? ret : -EIO;
memcpy(val, rbuf, curr_len);
val += curr_len;
diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
index a6c407d36800..50e97e2ed2cf 100644
--- a/drivers/i2c/i2c-core-of.c
+++ b/drivers/i2c/i2c-core-of.c
@@ -182,7 +182,7 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
* Clear the flag before adding the device so that fw_devlink
* doesn't skip adding consumers to this device.
*/
- rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
+ fwnode_clear_flag(&rd->dn->fwnode, FWNODE_FLAG_NOT_DEVICE);
client = of_i2c_register_device(adap, rd->dn);
if (IS_ERR(client)) {
dev_err(&adap->dev, "failed to create client for '%pOF'\n",
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index 7956948166ab..e54a45170150 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -241,12 +241,17 @@ static int ad7768_scan_direct(struct iio_dev *indio_dev)
struct ad7768_state *st = iio_priv(indio_dev);
int readval, ret;
- reinit_completion(&st->completion);
-
ret = ad7768_set_mode(st, AD7768_ONE_SHOT);
if (ret < 0)
return ret;
+ reinit_completion(&st->completion);
+
+ /* One-shot mode requires a SYNC pulse to generate a new sample */
+ ret = ad7768_send_sync_pulse(st);
+ if (ret)
+ return ret;
+
ret = wait_for_completion_timeout(&st->completion,
msecs_to_jiffies(1000));
if (!ret)
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index af28672aa803..c2fd992906c2 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -47,8 +47,6 @@
#define TI_ADS7950_MAX_CHAN 16
#define TI_ADS7950_NUM_GPIOS 4
-#define TI_ADS7950_TIMESTAMP_SIZE (sizeof(int64_t) / sizeof(__be16))
-
/* val = value, dec = left shift, bits = number of bits of the mask */
#define TI_ADS7950_EXTRACT(val, dec, bits) \
(((val) >> (dec)) & ((1 << (bits)) - 1))
@@ -105,8 +103,7 @@ struct ti_ads7950_state {
* DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
- u16 rx_buf[TI_ADS7950_MAX_CHAN + 2 + TI_ADS7950_TIMESTAMP_SIZE]
- __aligned(IIO_DMA_MINALIGN);
+ u16 rx_buf[TI_ADS7950_MAX_CHAN + 2] __aligned(IIO_DMA_MINALIGN);
u16 tx_buf[TI_ADS7950_MAX_CHAN + 2];
u16 single_tx;
u16 single_rx;
@@ -313,8 +310,10 @@ static irqreturn_t ti_ads7950_trigger_handler(int irq, void *p)
if (ret < 0)
goto out;
- iio_push_to_buffers_with_timestamp(indio_dev, &st->rx_buf[2],
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts_unaligned(indio_dev, &st->rx_buf[2],
+ sizeof(*st->rx_buf) *
+ TI_ADS7950_MAX_CHAN,
+ iio_get_time_ns(indio_dev));
out:
mutex_unlock(&st->slock);
diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c
index 8ef583680ad0..635bcd7556f0 100644
--- a/drivers/iio/frequency/admv1013.c
+++ b/drivers/iio/frequency/admv1013.c
@@ -85,9 +85,9 @@ enum {
};
enum {
- ADMV1013_SE_MODE_POS = 6,
- ADMV1013_SE_MODE_NEG = 9,
- ADMV1013_SE_MODE_DIFF = 12
+ ADMV1013_SE_MODE_POS,
+ ADMV1013_SE_MODE_NEG,
+ ADMV1013_SE_MODE_DIFF,
};
struct admv1013_state {
@@ -470,10 +470,23 @@ static int admv1013_init(struct admv1013_state *st, int vcm_uv)
if (ret)
return ret;
- data = FIELD_PREP(ADMV1013_QUAD_SE_MODE_MSK, st->quad_se_mode);
+ switch (st->quad_se_mode) {
+ case ADMV1013_SE_MODE_POS:
+ data = 6;
+ break;
+ case ADMV1013_SE_MODE_NEG:
+ data = 9;
+ break;
+ case ADMV1013_SE_MODE_DIFF:
+ data = 12;
+ break;
+ default:
+ return -EINVAL;
+ }
ret = __admv1013_spi_update_bits(st, ADMV1013_REG_QUAD,
- ADMV1013_QUAD_SE_MODE_MSK, data);
+ ADMV1013_QUAD_SE_MODE_MSK,
+ FIELD_PREP(ADMV1013_QUAD_SE_MODE_MSK, data));
if (ret)
return ret;
@@ -514,43 +527,39 @@ static void admv1013_powerdown(void *data)
admv1013_spi_update_bits(data, ADMV1013_REG_ENABLE, enable_reg_msk, enable_reg);
}
+static const char * const admv1013_input_modes[] = {
+ [ADMV1013_IQ_MODE] = "iq",
+ [ADMV1013_IF_MODE] = "if",
+};
+
+static const char * const admv1013_quad_se_modes[] = {
+ [ADMV1013_SE_MODE_POS] = "se-pos",
+ [ADMV1013_SE_MODE_NEG] = "se-neg",
+ [ADMV1013_SE_MODE_DIFF] = "diff",
+};
+
static int admv1013_properties_parse(struct admv1013_state *st)
{
int ret;
- const char *str;
- struct spi_device *spi = st->spi;
+ struct device *dev = &st->spi->dev;
- st->det_en = device_property_read_bool(&spi->dev, "adi,detector-enable");
+ st->det_en = device_property_read_bool(dev, "adi,detector-enable");
- ret = device_property_read_string(&spi->dev, "adi,input-mode", &str);
- if (ret)
- st->input_mode = ADMV1013_IQ_MODE;
+ ret = device_property_match_property_string(dev, "adi,input-mode",
+ admv1013_input_modes,
+ ARRAY_SIZE(admv1013_input_modes));
+ st->input_mode = ret >= 0 ? ret : ADMV1013_IQ_MODE;
- if (!strcmp(str, "iq"))
- st->input_mode = ADMV1013_IQ_MODE;
- else if (!strcmp(str, "if"))
- st->input_mode = ADMV1013_IF_MODE;
- else
- return -EINVAL;
-
- ret = device_property_read_string(&spi->dev, "adi,quad-se-mode", &str);
- if (ret)
- st->quad_se_mode = ADMV1013_SE_MODE_DIFF;
-
- if (!strcmp(str, "diff"))
- st->quad_se_mode = ADMV1013_SE_MODE_DIFF;
- else if (!strcmp(str, "se-pos"))
- st->quad_se_mode = ADMV1013_SE_MODE_POS;
- else if (!strcmp(str, "se-neg"))
- st->quad_se_mode = ADMV1013_SE_MODE_NEG;
- else
- return -EINVAL;
+ ret = device_property_match_property_string(dev, "adi,quad-se-mode",
+ admv1013_quad_se_modes,
+ ARRAY_SIZE(admv1013_quad_se_modes));
+ st->quad_se_mode = ret >= 0 ? ret : ADMV1013_SE_MODE_DIFF;
- ret = devm_regulator_bulk_get_enable(&st->spi->dev,
+ ret = devm_regulator_bulk_get_enable(dev,
ARRAY_SIZE(admv1013_vcc_regs),
admv1013_vcc_regs);
if (ret) {
- dev_err_probe(&spi->dev, ret,
+ dev_err_probe(dev, ret,
"Failed to request VCC regulators\n");
return ret;
}
@@ -562,9 +571,10 @@ static int admv1013_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct admv1013_state *st;
+ struct device *dev = &spi->dev;
int ret, vcm_uv;
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
@@ -581,20 +591,20 @@ static int admv1013_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = devm_regulator_get_enable_read_voltage(&spi->dev, "vcm");
+ ret = devm_regulator_get_enable_read_voltage(dev, "vcm");
if (ret < 0)
- return dev_err_probe(&spi->dev, ret,
+ return dev_err_probe(dev, ret,
"failed to get the common-mode voltage\n");
vcm_uv = ret;
- st->clkin = devm_clk_get_enabled(&spi->dev, "lo_in");
+ st->clkin = devm_clk_get_enabled(dev, "lo_in");
if (IS_ERR(st->clkin))
- return dev_err_probe(&spi->dev, PTR_ERR(st->clkin),
+ return dev_err_probe(dev, PTR_ERR(st->clkin),
"failed to get the LO input clock\n");
st->nb.notifier_call = admv1013_freq_change;
- ret = devm_clk_notifier_register(&spi->dev, st->clkin, &st->nb);
+ ret = devm_clk_notifier_register(dev, st->clkin, &st->nb);
if (ret)
return ret;
@@ -606,11 +616,11 @@ static int admv1013_probe(struct spi_device *spi)
return ret;
}
- ret = devm_add_action_or_reset(&spi->dev, admv1013_powerdown, st);
+ ret = devm_add_action_or_reset(dev, admv1013_powerdown, st);
if (ret)
return ret;
- return devm_iio_device_register(&spi->dev, indio_dev);
+ return devm_iio_device_register(dev, indio_dev);
}
static const struct spi_device_id admv1013_id[] = {
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 27eac10638cb..0008fa46ef47 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -321,11 +321,14 @@ static int dst_fetch_ha(const struct dst_entry *dst,
if (!n)
return -ENODATA;
+ read_lock_bh(&n->lock);
if (!(n->nud_state & NUD_VALID)) {
+ read_unlock_bh(&n->lock);
neigh_event_send(n, NULL);
ret = -ENODATA;
} else {
neigh_ha_snapshot(dev_addr->dst_dev_addr, n, dst->dev);
+ read_unlock_bh(&n->lock);
}
neigh_release(n);
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index 48fef989318b..84b8666af606 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -601,6 +601,21 @@ static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,
ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
mpc = netdev_priv(ndev);
+ /* Disable vPort RX steering before destroying RX WQ objects.
+ * Otherwise firmware still routes traffic to the destroyed queues,
+ * which can cause bogus completions on reused CQ IDs when the
+ * ethernet driver later creates new queues on mana_open().
+ *
+ * Unlike the ethernet teardown path, mana_fence_rqs() cannot be
+ * used here because the fence completion CQE is delivered on the
+ * CQ which is polled by userspace (e.g. DPDK), so there is no way
+ * for the kernel to wait for fence completion.
+ *
+ * This is best effort — if it fails there is not much we can do,
+ * and mana_cfg_vport_steering() already logs the error.
+ */
+ mana_disable_vport_rx(mpc);
+
for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
ibwq = ind_tbl->ind_tbl[i];
wq = container_of(ibwq, struct mana_ib_wq, ibwq);
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 5861e4244049..f79214738c2b 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -330,7 +330,8 @@ void rxe_rcv(struct sk_buff *skb)
pkt->qp = NULL;
pkt->mask |= rxe_opcode[pkt->opcode].mask;
- if (unlikely(skb->len < header_size(pkt)))
+ if (unlikely(pkt->paylen < header_size(pkt) + bth_pad(pkt) +
+ RXE_ICRC_SIZE))
goto drop;
err = hdr_check(pkt);
diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
index 98c60e971b48..4c28fb7d4da0 100644
--- a/drivers/leds/rgb/leds-qcom-lpg.c
+++ b/drivers/leds/rgb/leds-qcom-lpg.c
@@ -1272,7 +1272,12 @@ static int lpg_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
return ret;
if (chan->subtype == LPG_SUBTYPE_HI_RES_PWM) {
- refclk = lpg_clk_rates_hi_res[FIELD_GET(PWM_CLK_SELECT_HI_RES_MASK, val)];
+ unsigned int clk_idx = FIELD_GET(PWM_CLK_SELECT_HI_RES_MASK, val);
+
+ if (clk_idx >= ARRAY_SIZE(lpg_clk_rates_hi_res))
+ return -EINVAL;
+
+ refclk = lpg_clk_rates_hi_res[clk_idx];
resolution = lpg_pwm_resolution_hi_res[FIELD_GET(PWM_SIZE_HI_RES_MASK, val)];
} else {
refclk = lpg_clk_rates[FIELD_GET(PWM_CLK_SELECT_MASK, val)];
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 94b6c43dfa5c..93e3470a701c 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -993,13 +993,13 @@ static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
return NULL;
}
- *args_used = 2 + param_count;
-
- if (argc < *args_used) {
+ if (param_count > argc - 2) {
ti->error = "Insufficient mirror log arguments";
return NULL;
}
+ *args_used = 2 + param_count;
+
dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
argv + 2);
if (!dl) {
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index db07c99c4d94..4b02313854b6 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1182,7 +1182,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
}
if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors)) {
- raid_end_bio_io(r10_bio);
+ free_r10bio(r10_bio);
return;
}
@@ -1381,7 +1381,7 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
sectors = r10_bio->sectors;
if (!regular_request_wait(mddev, conf, bio, sectors)) {
- raid_end_bio_io(r10_bio);
+ free_r10bio(r10_bio);
return;
}
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 011246e16a99..da6f91d4f3b8 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -2003,15 +2003,27 @@ r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
return -ENOMEM;
while (mb_offset < le32_to_cpu(mb->meta_size)) {
+ sector_t payload_len;
+
payload = (void *)mb + mb_offset;
payload_flush = (void *)mb + mb_offset;
if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
+ payload_len = sizeof(struct r5l_payload_data_parity) +
+ (sector_t)sizeof(__le32) *
+ (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
+ if (mb_offset + payload_len > le32_to_cpu(mb->meta_size))
+ goto mismatch;
if (r5l_recovery_verify_data_checksum(
log, ctx, page, log_offset,
payload->checksum[0]) < 0)
goto mismatch;
} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) {
+ payload_len = sizeof(struct r5l_payload_data_parity) +
+ (sector_t)sizeof(__le32) *
+ (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
+ if (mb_offset + payload_len > le32_to_cpu(mb->meta_size))
+ goto mismatch;
if (r5l_recovery_verify_data_checksum(
log, ctx, page, log_offset,
payload->checksum[0]) < 0)
@@ -2024,22 +2036,18 @@ r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
payload->checksum[1]) < 0)
goto mismatch;
} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
- /* nothing to do for R5LOG_PAYLOAD_FLUSH here */
+ payload_len = sizeof(struct r5l_payload_flush) +
+ (sector_t)le32_to_cpu(payload_flush->size);
+ if (mb_offset + payload_len > le32_to_cpu(mb->meta_size))
+ goto mismatch;
} else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */
goto mismatch;
- if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
- mb_offset += sizeof(struct r5l_payload_flush) +
- le32_to_cpu(payload_flush->size);
- } else {
- /* DATA or PARITY payload */
+ if (le16_to_cpu(payload->header.type) != R5LOG_PAYLOAD_FLUSH) {
log_offset = r5l_ring_add(log, log_offset,
le32_to_cpu(payload->size));
- mb_offset += sizeof(struct r5l_payload_data_parity) +
- sizeof(__le32) *
- (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
}
-
+ mb_offset += payload_len;
}
put_page(page);
@@ -2090,6 +2098,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
while (mb_offset < le32_to_cpu(mb->meta_size)) {
+ sector_t payload_len;
int dd;
payload = (void *)mb + mb_offset;
@@ -2098,6 +2107,12 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
int i, count;
+ payload_len = sizeof(struct r5l_payload_flush) +
+ (sector_t)le32_to_cpu(payload_flush->size);
+ if (mb_offset + payload_len >
+ le32_to_cpu(mb->meta_size))
+ return -EINVAL;
+
count = le32_to_cpu(payload_flush->size) / sizeof(__le64);
for (i = 0; i < count; ++i) {
stripe_sect = le64_to_cpu(payload_flush->flush_stripes[i]);
@@ -2111,12 +2126,17 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
}
}
- mb_offset += sizeof(struct r5l_payload_flush) +
- le32_to_cpu(payload_flush->size);
+ mb_offset += payload_len;
continue;
}
/* DATA or PARITY payload */
+ payload_len = sizeof(struct r5l_payload_data_parity) +
+ (sector_t)sizeof(__le32) *
+ (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
+ if (mb_offset + payload_len > le32_to_cpu(mb->meta_size))
+ return -EINVAL;
+
stripe_sect = (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) ?
raid5_compute_sector(
conf, le64_to_cpu(payload->location), 0, &dd,
@@ -2181,9 +2201,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
log_offset = r5l_ring_add(log, log_offset,
le32_to_cpu(payload->size));
- mb_offset += sizeof(struct r5l_payload_data_parity) +
- sizeof(__le32) *
- (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
+ mb_offset += payload_len;
}
return 0;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 507994304674..f71ed0f53825 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6625,7 +6625,13 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio,
}
if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) {
- raid5_release_stripe(sh);
+ int hash;
+
+ spin_lock_irq(&conf->device_lock);
+ hash = sh->hash_lock_index;
+ __release_stripe(conf, sh,
+ &conf->temp_inactive_list[hash]);
+ spin_unlock_irq(&conf->device_lock);
conf->retry_read_aligned = raid_bio;
conf->retry_read_offset = scnt;
return handled;
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index e0714abe8540..d057c74f7172 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -1176,6 +1176,9 @@ static int imx219_probe(struct i2c_client *client)
/* Request optional enable pin */
imx219->reset_gpio = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_HIGH);
+ if (IS_ERR(imx219->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(imx219->reset_gpio),
+ "failed to get reset gpio\n");
/*
* The sensor must be powered for imx219_identify_module()
diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c
index 72e23f95d6b7..24426782a98f 100644
--- a/drivers/media/platform/amphion/vpu_v4l2.c
+++ b/drivers/media/platform/amphion/vpu_v4l2.c
@@ -448,17 +448,14 @@ static void vpu_m2m_device_run(void *priv)
{
}
-static void vpu_m2m_job_abort(void *priv)
+static int vpu_m2m_job_ready(void *priv)
{
- struct vpu_inst *inst = priv;
- struct v4l2_m2m_ctx *m2m_ctx = inst->fh.m2m_ctx;
-
- v4l2_m2m_job_finish(m2m_ctx->m2m_dev, m2m_ctx);
+ return 0;
}
static const struct v4l2_m2m_ops vpu_m2m_ops = {
.device_run = vpu_m2m_device_run,
- .job_abort = vpu_m2m_job_abort
+ .job_ready = vpu_m2m_job_ready,
};
static int vpu_vb2_queue_setup(struct vb2_queue *vq,
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
index ff2694676355..7560b9e38394 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
@@ -1213,6 +1213,7 @@ static int mtk_jpeg_release(struct file *file)
struct mtk_jpeg_dev *jpeg = video_drvdata(file);
struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(file->private_data);
+ cancel_work_sync(&ctx->jpeg_work);
mutex_lock(&jpeg->lock);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
diff --git a/drivers/media/rc/igorplugusb.c b/drivers/media/rc/igorplugusb.c
index 1464ef9c55bc..f3616607d4f5 100644
--- a/drivers/media/rc/igorplugusb.c
+++ b/drivers/media/rc/igorplugusb.c
@@ -34,7 +34,7 @@ struct igorplugusb {
struct device *dev;
struct urb *urb;
- struct usb_ctrlrequest request;
+ struct usb_ctrlrequest *request;
struct timer_list timer;
@@ -122,7 +122,7 @@ static void igorplugusb_cmd(struct igorplugusb *ir, int cmd)
{
int ret;
- ir->request.bRequest = cmd;
+ ir->request->bRequest = cmd;
ir->urb->transfer_flags = 0;
ret = usb_submit_urb(ir->urb, GFP_ATOMIC);
if (ret && ret != -EPERM)
@@ -164,13 +164,17 @@ static int igorplugusb_probe(struct usb_interface *intf,
if (!ir)
return -ENOMEM;
+ ir->request = kzalloc(sizeof(*ir->request), GFP_KERNEL);
+ if (!ir->request)
+ goto fail;
+
ir->dev = &intf->dev;
timer_setup(&ir->timer, igorplugusb_timer, 0);
- ir->request.bRequest = GET_INFRACODE;
- ir->request.bRequestType = USB_TYPE_VENDOR | USB_DIR_IN;
- ir->request.wLength = cpu_to_le16(MAX_PACKET);
+ ir->request->bRequest = GET_INFRACODE;
+ ir->request->bRequestType = USB_TYPE_VENDOR | USB_DIR_IN;
+ ir->request->wLength = cpu_to_le16(MAX_PACKET);
ir->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!ir->urb)
@@ -228,6 +232,7 @@ static int igorplugusb_probe(struct usb_interface *intf,
usb_free_urb(ir->urb);
rc_free_device(ir->rc);
kfree(ir->buf_in);
+ kfree(ir->request);
return ret;
}
@@ -243,6 +248,7 @@ static void igorplugusb_disconnect(struct usb_interface *intf)
usb_unpoison_urb(ir->urb);
usb_free_urb(ir->urb);
kfree(ir->buf_in);
+ kfree(ir->request);
}
static const struct usb_device_id igorplugusb_table[] = {
diff --git a/drivers/media/rc/ttusbir.c b/drivers/media/rc/ttusbir.c
index 560a26f3965c..dde446a95eaa 100644
--- a/drivers/media/rc/ttusbir.c
+++ b/drivers/media/rc/ttusbir.c
@@ -32,7 +32,7 @@ struct ttusbir {
struct led_classdev led;
struct urb *bulk_urb;
- uint8_t bulk_buffer[5];
+ u8 *bulk_buffer;
int bulk_out_endp, iso_in_endp;
bool led_on, is_led_on;
atomic_t led_complete;
@@ -186,13 +186,16 @@ static int ttusbir_probe(struct usb_interface *intf,
struct rc_dev *rc;
int i, j, ret;
int altsetting = -1;
+ u8 *buffer;
tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ buffer = kzalloc(5, GFP_KERNEL);
rc = rc_allocate_device(RC_DRIVER_IR_RAW);
- if (!tt || !rc) {
+ if (!tt || !rc || !buffer) {
ret = -ENOMEM;
goto out;
}
+ tt->bulk_buffer = buffer;
/* find the correct alt setting */
for (i = 0; i < intf->num_altsetting && altsetting == -1; i++) {
@@ -281,8 +284,8 @@ static int ttusbir_probe(struct usb_interface *intf,
tt->bulk_buffer[3] = 0x01;
usb_fill_bulk_urb(tt->bulk_urb, tt->udev, usb_sndbulkpipe(tt->udev,
- tt->bulk_out_endp), tt->bulk_buffer, sizeof(tt->bulk_buffer),
- ttusbir_bulk_complete, tt);
+ tt->bulk_out_endp), tt->bulk_buffer, 5,
+ ttusbir_bulk_complete, tt);
tt->led.name = "ttusbir:green:power";
tt->led.default_trigger = "rc-feedback";
@@ -351,6 +354,7 @@ static int ttusbir_probe(struct usb_interface *intf,
kfree(tt);
}
rc_free_device(rc);
+ kfree(buffer);
return ret;
}
@@ -373,6 +377,7 @@ static void ttusbir_disconnect(struct usb_interface *intf)
}
usb_kill_urb(tt->bulk_urb);
usb_free_urb(tt->bulk_urb);
+ kfree(tt->bulk_buffer);
usb_set_intfdata(intf, NULL);
kfree(tt);
}
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index c55223ce4327..256058a6e247 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -88,7 +88,17 @@ static void mfd_acpi_add_device(const struct mfd_cell *cell,
}
}
- device_set_node(&pdev->dev, acpi_fwnode_handle(adev ?: parent));
+ /*
+ * NOTE: The fwnode design doesn't allow proper stacking/sharing. This
+ * should eventually turn into a device fwnode API call that will allow
+ * prepending to a list of fwnodes (with ACPI taking precedence).
+ *
+ * set_primary_fwnode() is used here, instead of device_set_node(), as
+ * device_set_node() will overwrite the existing fwnode, which may be an
+ * OF node that was populated earlier. To support a use case where ACPI
+ * and OF is used in conjunction, we call set_primary_fwnode() instead.
+ */
+ set_primary_fwnode(&pdev->dev, acpi_fwnode_handle(adev ?: parent));
}
#else
static inline void mfd_acpi_add_device(const struct mfd_cell *cell,
diff --git a/drivers/mfd/stpmic1.c b/drivers/mfd/stpmic1.c
index d8a603d95aa6..a98c983ff206 100644
--- a/drivers/mfd/stpmic1.c
+++ b/drivers/mfd/stpmic1.c
@@ -16,6 +16,8 @@
#include <dt-bindings/mfd/st,stpmic1.h>
+#define STPMIC1_MAX_RETRIES 2
+
#define STPMIC1_MAIN_IRQ 0
static const struct regmap_range stpmic1_readable_ranges[] = {
@@ -121,9 +123,23 @@ static const struct regmap_irq_chip stpmic1_regmap_irq_chip = {
static int stpmic1_power_off(struct sys_off_data *data)
{
struct stpmic1 *ddata = data->cb_data;
+ int ret;
+
+ /*
+ * Attempt to shut down again, in case the first attempt failed.
+ * The STPMIC1 might get confused and the first regmap_update_bits()
+ * returns with -ETIMEDOUT / -110 . If that or similar transient
+ * failure occurs, try to shut down again. If the second attempt
+ * fails, there is some bigger problem, report it to user.
+ */
+ for (int retries = 0; retries < STPMIC1_MAX_RETRIES; retries++) {
+ ret = regmap_update_bits(ddata->regmap, MAIN_CR, SOFTWARE_SWITCH_OFF,
+ SOFTWARE_SWITCH_OFF);
+ if (!ret)
+ return NOTIFY_DONE;
+ }
- regmap_update_bits(ddata->regmap, MAIN_CR,
- SOFTWARE_SWITCH_OFF, SOFTWARE_SWITCH_OFF);
+ dev_err(ddata->dev, "Failed to access PMIC I2C bus (%d)\n", ret);
return NOTIFY_DONE;
}
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index c44de892a61e..b8b22717c05e 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -303,6 +303,8 @@ static ssize_t command_file_write(struct file *file, const char __user *ubuff, s
return -EINVAL;
if (count == 0 || count > IBMASM_CMD_MAX_BUFFER_SIZE)
return 0;
+ if (count < sizeof(struct dot_command_header))
+ return -EINVAL;
if (*offset != 0)
return 0;
@@ -319,6 +321,11 @@ static ssize_t command_file_write(struct file *file, const char __user *ubuff, s
return -EFAULT;
}
+ if (count < get_dot_command_size(cmd->buffer)) {
+ command_put(cmd);
+ return -EINVAL;
+ }
+
spin_lock_irqsave(&command_data->sp->lock, flags);
if (command_data->command) {
spin_unlock_irqrestore(&command_data->sp->lock, flags);
diff --git a/drivers/misc/ibmasm/lowlevel.c b/drivers/misc/ibmasm/lowlevel.c
index 6922dc6c10db..5313230f36ad 100644
--- a/drivers/misc/ibmasm/lowlevel.c
+++ b/drivers/misc/ibmasm/lowlevel.c
@@ -19,17 +19,21 @@ static struct i2o_header header = I2O_HEADER_TEMPLATE;
int ibmasm_send_i2o_message(struct service_processor *sp)
{
u32 mfa;
- unsigned int command_size;
+ size_t command_size;
struct i2o_message *message;
struct command *command = sp->current_command;
+ command_size = get_dot_command_size(command->buffer);
+ if (command_size > command->buffer_size)
+ return 1;
+ if (command_size > I2O_COMMAND_SIZE)
+ command_size = I2O_COMMAND_SIZE;
+
mfa = get_mfa_inbound(sp->base_address);
if (!mfa)
return 1;
- command_size = get_dot_command_size(command->buffer);
- header.message_size = outgoing_message_size(command_size);
-
+ header.message_size = outgoing_message_size((unsigned int)command_size);
message = get_i2o_message(sp->base_address, mfa);
memcpy_toio(&message->header, &header, sizeof(struct i2o_header));
diff --git a/drivers/misc/ibmasm/remote.c b/drivers/misc/ibmasm/remote.c
index ec816d3b38cb..521531738c9a 100644
--- a/drivers/misc/ibmasm/remote.c
+++ b/drivers/misc/ibmasm/remote.c
@@ -177,6 +177,11 @@ void ibmasm_handle_mouse_interrupt(struct service_processor *sp)
writer = get_queue_writer(sp);
while (reader != writer) {
+ if (reader >= REMOTE_QUEUE_SIZE || writer >= REMOTE_QUEUE_SIZE) {
+ set_queue_reader(sp, 0);
+ break;
+ }
+
memcpy_fromio(&input, get_queue_entry(sp, reader),
sizeof(struct remote_input));
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 08b8276e1da9..d91b3a9cf376 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1440,6 +1440,9 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
rq_data_dir(req) == WRITE &&
(md->flags & MMC_BLK_REL_WR);
+ if (mqrq->flags & MQRQ_XFER_SINGLE_BLOCK)
+ recovery_mode = 1;
+
memset(brq, 0, sizeof(struct mmc_blk_request));
mmc_crypto_prepare_req(mqrq);
@@ -1579,10 +1582,13 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
err = 0;
if (err) {
- if (mqrq->retries++ < MMC_CQE_RETRIES)
+ if (mqrq->retries++ < MMC_CQE_RETRIES) {
+ if (rq_data_dir(req) == WRITE)
+ mqrq->flags |= MQRQ_XFER_SINGLE_BLOCK;
blk_mq_requeue_request(req, true);
- else
+ } else {
blk_mq_end_request(req, BLK_STS_IOERR);
+ }
} else if (mrq->data) {
if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered))
blk_mq_requeue_request(req, true);
@@ -2120,6 +2126,8 @@ static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req)
} else if (!blk_rq_bytes(req)) {
__blk_mq_end_request(req, BLK_STS_IOERR);
} else if (mqrq->retries++ < MMC_MAX_RETRIES) {
+ if (rq_data_dir(req) == WRITE)
+ mqrq->flags |= MQRQ_XFER_SINGLE_BLOCK;
blk_mq_requeue_request(req, true);
} else {
if (mmc_card_removed(mq->card))
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 1498840a4ea0..c254e6580afd 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -61,6 +61,8 @@ enum mmc_drv_op {
MMC_DRV_OP_GET_EXT_CSD,
};
+#define MQRQ_XFER_SINGLE_BLOCK BIT(0)
+
struct mmc_queue_req {
struct mmc_blk_request brq;
struct scatterlist *sg;
@@ -69,6 +71,7 @@ struct mmc_queue_req {
void *drv_op_data;
unsigned int ioc_count;
int retries;
+ u32 flags;
};
struct mmc_queue {
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index 9852f95e0c26..da42f27fc178 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -649,12 +649,15 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
extra &= ~BIT(0);
sdhci_writel(host, extra, reg);
+ /* Disable clock while config DLL */
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
if (clock <= 52000000) {
if (host->mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
host->mmc->ios.timing == MMC_TIMING_MMC_HS400) {
dev_err(mmc_dev(host->mmc),
"Can't reduce the clock below 52MHz in HS200/HS400 mode");
- return;
+ goto enable_clk;
}
/*
@@ -674,7 +677,7 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
DLL_STRBIN_DELAY_NUM_SEL |
DLL_STRBIN_DELAY_NUM_DEFAULT << DLL_STRBIN_DELAY_NUM_OFFSET;
sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_STRBIN);
- return;
+ goto enable_clk;
}
/* Reset DLL */
@@ -701,7 +704,7 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
500 * USEC_PER_MSEC);
if (err) {
dev_err(mmc_dev(host->mmc), "DLL lock timeout!\n");
- return;
+ goto enable_clk;
}
extra = 0x1 << 16 | /* tune clock stop en */
@@ -734,6 +737,16 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
DLL_STRBIN_TAPNUM_DEFAULT |
DLL_STRBIN_TAPNUM_FROM_SW;
sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_STRBIN);
+
+enable_clk:
+ /*
+ * The sdclk frequency select bits in SDHCI_CLOCK_CONTROL are not functional
+ * on Rockchip's SDHCI implementation. Instead, the clock frequency is fully
+ * controlled via external clk provider by calling clk_set_rate(). Consequently,
+ * passing 0 to sdhci_enable_clk() only re-enables the already-configured clock,
+ * which matches the hardware's actual behavior.
+ */
+ sdhci_enable_clk(host, 0);
}
static void rk35xx_sdhci_reset(struct sdhci_host *host, u8 mask)
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index a2b643af7019..e37fb1155647 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -2049,7 +2049,6 @@ static int __init docg3_probe(struct platform_device *pdev)
static void docg3_release(struct platform_device *pdev)
{
struct docg3_cascade *cascade = platform_get_drvdata(pdev);
- struct docg3 *docg3 = cascade->floors[0]->priv;
int floor;
doc_unregister_sysfs(pdev, cascade);
@@ -2057,7 +2056,7 @@ static void docg3_release(struct platform_device *pdev)
if (cascade->floors[floor])
doc_release_device(cascade->floors[floor]);
- bch_free(docg3->cascade->bch);
+ bch_free(cascade->bch);
}
#ifdef CONFIG_OF
diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
index 175211fe6a5e..db02c14ba16f 100644
--- a/drivers/mtd/spi-nor/sst.c
+++ b/drivers/mtd/spi-nor/sst.c
@@ -203,6 +203,8 @@ static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
/* Start write from odd address. */
if (to % 2) {
+ bool needs_write_enable = (len > 1);
+
/* write one byte. */
ret = sst_nor_write_data(nor, to, 1, buf);
if (ret < 0)
@@ -210,6 +212,17 @@ static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
to++;
actual++;
+
+ /*
+ * Byte program clears the write enable latch. If more
+ * data needs to be written using the AAI sequence,
+ * re-enable writes.
+ */
+ if (needs_write_enable) {
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto out;
+ }
}
/* Write out most of the data here. */
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 5035cfa74f1a..20043f1094df 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -5322,18 +5322,22 @@ static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave = NULL;
- struct list_head *iter;
+ struct bond_up_slave *slaves;
bool xmit_suc = false;
bool skb_used = false;
+ int slaves_count, i;
- bond_for_each_slave_rcu(bond, slave, iter) {
+ slaves = rcu_dereference(bond->all_slaves);
+
+ slaves_count = slaves ? READ_ONCE(slaves->count) : 0;
+ for (i = 0; i < slaves_count; i++) {
+ struct slave *slave = slaves->arr[i];
struct sk_buff *skb2;
if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP))
continue;
- if (bond_is_last_slave(bond, slave)) {
+ if (i + 1 == slaves_count) {
skb2 = skb;
skb_used = true;
} else {
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 6c90b4a7d955..c3ebb648d8b0 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -1399,7 +1399,7 @@ static int ucan_probe(struct usb_interface *intf,
*/
/* Prepare Memory for control transfers */
- ctl_msg_buffer = devm_kzalloc(&udev->dev,
+ ctl_msg_buffer = devm_kzalloc(&intf->dev,
sizeof(union ucan_ctl_payload),
GFP_KERNEL);
if (!ctl_msg_buffer) {
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index 31f75b4a67fd..b795a3a60571 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -408,10 +408,8 @@ struct ks8851_net {
struct gpio_desc *gpio;
struct mii_bus *mii_bus;
- void (*lock)(struct ks8851_net *ks,
- unsigned long *flags);
- void (*unlock)(struct ks8851_net *ks,
- unsigned long *flags);
+ void (*lock)(struct ks8851_net *ks);
+ void (*unlock)(struct ks8851_net *ks);
unsigned int (*rdreg16)(struct ks8851_net *ks,
unsigned int reg);
void (*wrreg16)(struct ks8851_net *ks,
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 7fa1820db9cc..b1e9d1495c01 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -28,25 +28,23 @@
/**
* ks8851_lock - register access lock
* @ks: The chip state
- * @flags: Spinlock flags
*
* Claim chip register access lock
*/
-static void ks8851_lock(struct ks8851_net *ks, unsigned long *flags)
+static void ks8851_lock(struct ks8851_net *ks)
{
- ks->lock(ks, flags);
+ ks->lock(ks);
}
/**
* ks8851_unlock - register access unlock
* @ks: The chip state
- * @flags: Spinlock flags
*
* Release chip register access lock
*/
-static void ks8851_unlock(struct ks8851_net *ks, unsigned long *flags)
+static void ks8851_unlock(struct ks8851_net *ks)
{
- ks->unlock(ks, flags);
+ ks->unlock(ks);
}
/**
@@ -129,11 +127,10 @@ static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
static int ks8851_write_mac_addr(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
- unsigned long flags;
u16 val;
int i;
- ks8851_lock(ks, &flags);
+ ks8851_lock(ks);
/*
* Wake up chip in case it was powered off when stopped; otherwise,
@@ -149,7 +146,7 @@ static int ks8851_write_mac_addr(struct net_device *dev)
if (!netif_running(dev))
ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
- ks8851_unlock(ks, &flags);
+ ks8851_unlock(ks);
return 0;
}
@@ -163,12 +160,11 @@ static int ks8851_write_mac_addr(struct net_device *dev)
static void ks8851_read_mac_addr(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
- unsigned long flags;
u8 addr[ETH_ALEN];
u16 reg;
int i;
- ks8851_lock(ks, &flags);
+ ks8851_lock(ks);
for (i = 0; i < ETH_ALEN; i += 2) {
reg = ks8851_rdreg16(ks, KS_MAR(i));
@@ -177,7 +173,7 @@ static void ks8851_read_mac_addr(struct net_device *dev)
}
eth_hw_addr_set(dev, addr);
- ks8851_unlock(ks, &flags);
+ ks8851_unlock(ks);
}
/**
@@ -328,11 +324,10 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
{
struct ks8851_net *ks = _ks;
struct sk_buff_head rxq;
- unsigned long flags;
unsigned int status;
struct sk_buff *skb;
- ks8851_lock(ks, &flags);
+ ks8851_lock(ks);
status = ks8851_rdreg16(ks, KS_ISR);
ks8851_wrreg16(ks, KS_ISR, status);
@@ -389,14 +384,17 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
ks8851_wrreg16(ks, KS_RXCR1, rxc->rxcr1);
}
- ks8851_unlock(ks, &flags);
+ ks8851_unlock(ks);
if (status & IRQ_LCI)
mii_check_link(&ks->mii);
- if (status & IRQ_RXI)
+ if (status & IRQ_RXI) {
+ local_bh_disable();
while ((skb = __skb_dequeue(&rxq)))
netif_rx(skb);
+ local_bh_enable();
+ }
return IRQ_HANDLED;
}
@@ -421,7 +419,6 @@ static void ks8851_flush_tx_work(struct ks8851_net *ks)
static int ks8851_net_open(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
- unsigned long flags;
int ret;
ret = request_threaded_irq(dev->irq, NULL, ks8851_irq,
@@ -434,7 +431,7 @@ static int ks8851_net_open(struct net_device *dev)
/* lock the card, even if we may not actually be doing anything
* else at the moment */
- ks8851_lock(ks, &flags);
+ ks8851_lock(ks);
netif_dbg(ks, ifup, ks->netdev, "opening\n");
@@ -487,7 +484,7 @@ static int ks8851_net_open(struct net_device *dev)
netif_dbg(ks, ifup, ks->netdev, "network device up\n");
- ks8851_unlock(ks, &flags);
+ ks8851_unlock(ks);
mii_check_link(&ks->mii);
return 0;
}
@@ -503,23 +500,22 @@ static int ks8851_net_open(struct net_device *dev)
static int ks8851_net_stop(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
- unsigned long flags;
netif_info(ks, ifdown, dev, "shutting down\n");
netif_stop_queue(dev);
- ks8851_lock(ks, &flags);
+ ks8851_lock(ks);
/* turn off the IRQs and ack any outstanding */
ks8851_wrreg16(ks, KS_IER, 0x0000);
ks8851_wrreg16(ks, KS_ISR, 0xffff);
- ks8851_unlock(ks, &flags);
+ ks8851_unlock(ks);
/* stop any outstanding work */
ks8851_flush_tx_work(ks);
flush_work(&ks->rxctrl_work);
- ks8851_lock(ks, &flags);
+ ks8851_lock(ks);
/* shutdown RX process */
ks8851_wrreg16(ks, KS_RXCR1, 0x0000);
@@ -528,7 +524,7 @@ static int ks8851_net_stop(struct net_device *dev)
/* set powermode to soft power down to save power */
ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
- ks8851_unlock(ks, &flags);
+ ks8851_unlock(ks);
/* ensure any queued tx buffers are dumped */
while (!skb_queue_empty(&ks->txq)) {
@@ -582,14 +578,13 @@ static netdev_tx_t ks8851_start_xmit(struct sk_buff *skb,
static void ks8851_rxctrl_work(struct work_struct *work)
{
struct ks8851_net *ks = container_of(work, struct ks8851_net, rxctrl_work);
- unsigned long flags;
- ks8851_lock(ks, &flags);
+ ks8851_lock(ks);
/* need to shutdown RXQ before modifying filter parameters */
ks8851_wrreg16(ks, KS_RXCR1, 0x00);
- ks8851_unlock(ks, &flags);
+ ks8851_unlock(ks);
}
static void ks8851_set_rx_mode(struct net_device *dev)
@@ -796,7 +791,6 @@ static int ks8851_set_eeprom(struct net_device *dev,
{
struct ks8851_net *ks = netdev_priv(dev);
int offset = ee->offset;
- unsigned long flags;
int len = ee->len;
u16 tmp;
@@ -810,7 +804,7 @@ static int ks8851_set_eeprom(struct net_device *dev,
if (!(ks->rc_ccr & CCR_EEPROM))
return -ENOENT;
- ks8851_lock(ks, &flags);
+ ks8851_lock(ks);
ks8851_eeprom_claim(ks);
@@ -833,7 +827,7 @@ static int ks8851_set_eeprom(struct net_device *dev,
eeprom_93cx6_wren(&ks->eeprom, false);
ks8851_eeprom_release(ks);
- ks8851_unlock(ks, &flags);
+ ks8851_unlock(ks);
return 0;
}
@@ -843,7 +837,6 @@ static int ks8851_get_eeprom(struct net_device *dev,
{
struct ks8851_net *ks = netdev_priv(dev);
int offset = ee->offset;
- unsigned long flags;
int len = ee->len;
/* must be 2 byte aligned */
@@ -853,7 +846,7 @@ static int ks8851_get_eeprom(struct net_device *dev,
if (!(ks->rc_ccr & CCR_EEPROM))
return -ENOENT;
- ks8851_lock(ks, &flags);
+ ks8851_lock(ks);
ks8851_eeprom_claim(ks);
@@ -861,7 +854,7 @@ static int ks8851_get_eeprom(struct net_device *dev,
eeprom_93cx6_multiread(&ks->eeprom, offset/2, (__le16 *)data, len/2);
ks8851_eeprom_release(ks);
- ks8851_unlock(ks, &flags);
+ ks8851_unlock(ks);
return 0;
}
@@ -920,7 +913,6 @@ static int ks8851_phy_reg(int reg)
static int ks8851_phy_read_common(struct net_device *dev, int phy_addr, int reg)
{
struct ks8851_net *ks = netdev_priv(dev);
- unsigned long flags;
int result;
int ksreg;
@@ -928,9 +920,9 @@ static int ks8851_phy_read_common(struct net_device *dev, int phy_addr, int reg)
if (ksreg < 0)
return ksreg;
- ks8851_lock(ks, &flags);
+ ks8851_lock(ks);
result = ks8851_rdreg16(ks, ksreg);
- ks8851_unlock(ks, &flags);
+ ks8851_unlock(ks);
return result;
}
@@ -965,14 +957,13 @@ static void ks8851_phy_write(struct net_device *dev,
int phy, int reg, int value)
{
struct ks8851_net *ks = netdev_priv(dev);
- unsigned long flags;
int ksreg;
ksreg = ks8851_phy_reg(reg);
if (ksreg >= 0) {
- ks8851_lock(ks, &flags);
+ ks8851_lock(ks);
ks8851_wrreg16(ks, ksreg, value);
- ks8851_unlock(ks, &flags);
+ ks8851_unlock(ks);
}
}
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 381b9cd285eb..7c89ed64b233 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -55,29 +55,27 @@ struct ks8851_net_par {
/**
* ks8851_lock_par - register access lock
* @ks: The chip state
- * @flags: Spinlock flags
*
* Claim chip register access lock
*/
-static void ks8851_lock_par(struct ks8851_net *ks, unsigned long *flags)
+static void ks8851_lock_par(struct ks8851_net *ks)
{
struct ks8851_net_par *ksp = to_ks8851_par(ks);
- spin_lock_irqsave(&ksp->lock, *flags);
+ spin_lock_bh(&ksp->lock);
}
/**
* ks8851_unlock_par - register access unlock
* @ks: The chip state
- * @flags: Spinlock flags
*
* Release chip register access lock
*/
-static void ks8851_unlock_par(struct ks8851_net *ks, unsigned long *flags)
+static void ks8851_unlock_par(struct ks8851_net *ks)
{
struct ks8851_net_par *ksp = to_ks8851_par(ks);
- spin_unlock_irqrestore(&ksp->lock, *flags);
+ spin_unlock_bh(&ksp->lock);
}
/**
@@ -233,7 +231,6 @@ static netdev_tx_t ks8851_start_xmit_par(struct sk_buff *skb,
{
struct ks8851_net *ks = netdev_priv(dev);
netdev_tx_t ret = NETDEV_TX_OK;
- unsigned long flags;
unsigned int txqcr;
u16 txmir;
int err;
@@ -241,7 +238,7 @@ static netdev_tx_t ks8851_start_xmit_par(struct sk_buff *skb,
netif_dbg(ks, tx_queued, ks->netdev,
"%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
- ks8851_lock_par(ks, &flags);
+ ks8851_lock_par(ks);
txmir = ks8851_rdreg16_par(ks, KS_TXMIR) & 0x1fff;
@@ -262,7 +259,7 @@ static netdev_tx_t ks8851_start_xmit_par(struct sk_buff *skb,
ret = NETDEV_TX_BUSY;
}
- ks8851_unlock_par(ks, &flags);
+ ks8851_unlock_par(ks);
return ret;
}
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 3062cc0f9199..a7a198507374 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -73,11 +73,10 @@ struct ks8851_net_spi {
/**
* ks8851_lock_spi - register access lock
* @ks: The chip state
- * @flags: Spinlock flags
*
* Claim chip register access lock
*/
-static void ks8851_lock_spi(struct ks8851_net *ks, unsigned long *flags)
+static void ks8851_lock_spi(struct ks8851_net *ks)
{
struct ks8851_net_spi *kss = to_ks8851_spi(ks);
@@ -87,11 +86,10 @@ static void ks8851_lock_spi(struct ks8851_net *ks, unsigned long *flags)
/**
* ks8851_unlock_spi - register access unlock
* @ks: The chip state
- * @flags: Spinlock flags
*
* Release chip register access lock
*/
-static void ks8851_unlock_spi(struct ks8851_net *ks, unsigned long *flags)
+static void ks8851_unlock_spi(struct ks8851_net *ks)
{
struct ks8851_net_spi *kss = to_ks8851_spi(ks);
@@ -311,7 +309,6 @@ static void ks8851_tx_work(struct work_struct *work)
struct ks8851_net_spi *kss;
unsigned short tx_space;
struct ks8851_net *ks;
- unsigned long flags;
struct sk_buff *txb;
bool last;
@@ -319,7 +316,7 @@ static void ks8851_tx_work(struct work_struct *work)
ks = &kss->ks8851;
last = skb_queue_empty(&ks->txq);
- ks8851_lock_spi(ks, &flags);
+ ks8851_lock_spi(ks);
while (!last) {
txb = skb_dequeue(&ks->txq);
@@ -345,7 +342,7 @@ static void ks8851_tx_work(struct work_struct *work)
ks->tx_space = tx_space;
spin_unlock_bh(&ks->statelock);
- ks8851_unlock_spi(ks, &flags);
+ ks8851_unlock_spi(ks);
}
/**
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 969dd4430356..e527139936de 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -2392,6 +2392,13 @@ static void mana_rss_table_init(struct mana_port_context *apc)
ethtool_rxfh_indir_default(i, apc->num_queues);
}
+int mana_disable_vport_rx(struct mana_port_context *apc)
+{
+ return mana_cfg_vport_steering(apc, TRI_STATE_FALSE, false, false,
+ false);
+}
+EXPORT_SYMBOL_NS(mana_disable_vport_rx, NET_MANA);
+
int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
bool update_hash, bool update_tab)
{
@@ -2676,12 +2683,14 @@ static int mana_dealloc_queues(struct net_device *ndev)
*/
apc->rss_state = TRI_STATE_FALSE;
- err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
+ err = mana_disable_vport_rx(apc);
if (err) {
netdev_err(ndev, "Failed to disable vPort: %d\n", err);
return err;
}
+ mana_fence_rqs(apc);
+
mana_destroy_vport(apc);
return 0;
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 9ede260b85dc..32fc1bec2728 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -668,7 +668,8 @@ static int txgbe_probe(struct pci_dev *pdev,
"0x%08x", etrack_id);
}
- if (etrack_id < 0x20010)
+ if (wx->mac.type == wx_mac_sp &&
+ ((etrack_id & 0xfffff) < 0x20010))
dev_warn(&pdev->dev, "Please upgrade the firmware to 0x20010 or above.\n");
txgbe = devm_kzalloc(&pdev->dev, sizeof(*txgbe), GFP_KERNEL);
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index de20928f7402..2f20f9ed3a0d 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -300,6 +300,8 @@ static void trim_newline(char *s, size_t maxlen)
size_t len;
len = strnlen(s, maxlen);
+ if (!len)
+ return;
if (s[len - 1] == '\n')
s[len - 1] = '\0';
}
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index d80b80ba20a1..1d6216a96d7f 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -705,8 +705,8 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
return -EINVAL;
if (bus->parent && bus->parent->of_node)
- bus->parent->of_node->fwnode.flags |=
- FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD;
+ fwnode_set_flag(&bus->parent->of_node->fwnode,
+ FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD);
WARN(bus->state != MDIOBUS_ALLOCATED &&
bus->state != MDIOBUS_UNREGISTERED,
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index 8b61e45cd667..4caf54954894 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -390,7 +390,7 @@ static void mwifiex_invalidate_lists(struct mwifiex_adapter *adapter)
static void
mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
{
- del_timer(&adapter->wakeup_timer);
+ del_timer_sync(&adapter->wakeup_timer);
cancel_delayed_work_sync(&adapter->devdump_work);
mwifiex_cancel_all_pending_cmd(adapter);
wake_up_interruptible(&adapter->cmd_wait_q.wait);
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_regs.h b/drivers/net/wireless/mediatek/mt76/mt792x_regs.h
index 458cfd0260b1..b0c6dfa55cc6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_regs.h
@@ -390,6 +390,10 @@
#define MT_CBTOP_RGU_WF_SUBSYS_RST MT_CBTOP_RGU(0x600)
#define MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH BIT(0)
+#define MT7925_CBTOP_RGU_WF_SUBSYS_RST 0x70028600
+#define MT7925_WFSYS_INIT_DONE_ADDR 0x184c1604
+#define MT7925_WFSYS_INIT_DONE 0x00001d1e
+
#define MT_HW_BOUND 0x70010020
#define MT_HW_CHIPID 0x70010200
#define MT_HW_REV 0x70010204
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
index 76272a03b22e..98d1d14342cd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
@@ -206,6 +206,33 @@ static void mt792xu_epctl_rst_opt(struct mt792x_dev *dev, bool reset)
mt792xu_uhw_wr(&dev->mt76, MT_SSUSB_EPCTL_CSR_EP_RST_OPT, val);
}
+struct mt792xu_wfsys_desc {
+ u32 rst_reg;
+ u32 done_reg;
+ u32 done_mask;
+ u32 done_val;
+ u32 delay_ms;
+ bool need_status_sel;
+};
+
+static const struct mt792xu_wfsys_desc mt7921_wfsys_desc = {
+ .rst_reg = MT_CBTOP_RGU_WF_SUBSYS_RST,
+ .done_reg = MT_UDMA_CONN_INFRA_STATUS,
+ .done_mask = MT_UDMA_CONN_WFSYS_INIT_DONE,
+ .done_val = MT_UDMA_CONN_WFSYS_INIT_DONE,
+ .delay_ms = 0,
+ .need_status_sel = true,
+};
+
+static const struct mt792xu_wfsys_desc mt7925_wfsys_desc = {
+ .rst_reg = MT7925_CBTOP_RGU_WF_SUBSYS_RST,
+ .done_reg = MT7925_WFSYS_INIT_DONE_ADDR,
+ .done_mask = U32_MAX,
+ .done_val = MT7925_WFSYS_INIT_DONE,
+ .delay_ms = 20,
+ .need_status_sel = false,
+};
+
int mt792xu_dma_init(struct mt792x_dev *dev, bool resume)
{
int err;
@@ -236,25 +263,33 @@ EXPORT_SYMBOL_GPL(mt792xu_dma_init);
int mt792xu_wfsys_reset(struct mt792x_dev *dev)
{
+ const struct mt792xu_wfsys_desc *desc = is_mt7925(&dev->mt76) ?
+ &mt7925_wfsys_desc :
+ &mt7921_wfsys_desc;
u32 val;
int i;
mt792xu_epctl_rst_opt(dev, false);
- val = mt792xu_uhw_rr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST);
+ val = mt792xu_uhw_rr(&dev->mt76, desc->rst_reg);
val |= MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH;
- mt792xu_uhw_wr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST, val);
+ mt792xu_uhw_wr(&dev->mt76, desc->rst_reg, val);
- usleep_range(10, 20);
+ if (desc->delay_ms)
+ msleep(desc->delay_ms);
+ else
+ usleep_range(10, 20);
- val = mt792xu_uhw_rr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST);
+ val = mt792xu_uhw_rr(&dev->mt76, desc->rst_reg);
val &= ~MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH;
- mt792xu_uhw_wr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST, val);
+ mt792xu_uhw_wr(&dev->mt76, desc->rst_reg, val);
+
+ if (desc->need_status_sel)
+ mt792xu_uhw_wr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS_SEL, 0);
- mt792xu_uhw_wr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS_SEL, 0);
for (i = 0; i < MT792x_WFSYS_INIT_RETRY_COUNT; i++) {
- val = mt792xu_uhw_rr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS);
- if (val & MT_UDMA_CONN_WFSYS_INIT_DONE)
+ val = mt792xu_uhw_rr(&dev->mt76, desc->done_reg);
+ if ((val & desc->done_mask) == desc->done_val)
break;
msleep(100);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
index 81350c4dee53..e2abcc85c434 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
@@ -4799,20 +4799,6 @@ static const struct ieee80211_rate rtl8xxxu_legacy_ratetable[] = {
{.bitrate = 540, .hw_value = 0x0b,},
};
-static void rtl8xxxu_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss)
-{
- if (rate <= DESC_RATE_54M)
- return;
-
- if (rate >= DESC_RATE_MCS0 && rate <= DESC_RATE_MCS15) {
- if (rate < DESC_RATE_MCS8)
- *nss = 1;
- else
- *nss = 2;
- *mcs = rate - DESC_RATE_MCS0;
- }
-}
-
static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg)
{
struct ieee80211_hw *hw = priv->hw;
@@ -4922,23 +4908,25 @@ static void rtl8xxxu_set_aifs(struct rtl8xxxu_priv *priv, u8 slot_time)
void rtl8xxxu_update_ra_report(struct rtl8xxxu_ra_report *rarpt,
u8 rate, u8 sgi, u8 bw)
{
- u8 mcs, nss;
-
rarpt->txrate.flags = 0;
if (rate <= DESC_RATE_54M) {
rarpt->txrate.legacy = rtl8xxxu_legacy_ratetable[rate].bitrate;
- } else {
- rtl8xxxu_desc_to_mcsrate(rate, &mcs, &nss);
+ } else if (rate >= DESC_RATE_MCS0 && rate <= DESC_RATE_MCS15) {
rarpt->txrate.flags |= RATE_INFO_FLAGS_MCS;
+ if (rate < DESC_RATE_MCS8)
+ rarpt->txrate.nss = 1;
+ else
+ rarpt->txrate.nss = 2;
- rarpt->txrate.mcs = mcs;
- rarpt->txrate.nss = nss;
+ rarpt->txrate.mcs = rate - DESC_RATE_MCS0;
if (sgi)
rarpt->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
rarpt->txrate.bw = bw;
+ } else {
+ return;
}
rarpt->bit_rate = cfg80211_calculate_bitrate(&rarpt->txrate);
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index fab9bb9257dd..7e33518ce311 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -1767,7 +1767,8 @@ int rtw_pci_probe(struct pci_dev *pdev,
}
/* Disable PCIe ASPM L1 while doing NAPI poll for 8821CE */
- if (rtwdev->chip->id == RTW_CHIP_TYPE_8821C && bridge->vendor == PCI_VENDOR_ID_INTEL)
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8821C &&
+ bridge && bridge->vendor == PCI_VENDOR_ID_INTEL)
rtwpci->rx_no_aspm = true;
rtw_pci_phy_cfg(rtwdev);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index de4b9e9db45d..122de4e35d45 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3169,7 +3169,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
ctrl->dmrl = id->dmrl;
ctrl->dmrsl = le32_to_cpu(id->dmrsl);
- if (id->wzsl)
+ if (id->wzsl && !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
free_data:
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c04858da28ea..8eb1e4d48c43 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3716,6 +3716,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x2646, 0x501E), /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x2646, 0x502F), /* KINGSTON OM3SGP4xxxxK NVMe SSD */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x1f40, 0x1202), /* Netac Technologies Co. NV3000 NVMe SSD */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1f40, 0x5236), /* Netac Technologies Co. NV7000 NVMe SSD */
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 576f119c2832..83f4065d009e 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1793,7 +1793,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
if (name)
of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
if (of_stdout)
- of_stdout->fwnode.flags |= FWNODE_FLAG_BEST_EFFORT;
+ fwnode_set_flag(&of_stdout->fwnode, FWNODE_FLAG_BEST_EFFORT);
}
if (!of_aliases)
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 492f0354a792..f5f624fc327f 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -225,7 +225,7 @@ static void __of_attach_node(struct device_node *np)
np->sibling = np->parent->child;
np->parent->child = np;
of_node_clear_flag(np, OF_DETACHED);
- np->fwnode.flags |= FWNODE_FLAG_NOT_DEVICE;
+ fwnode_set_flag(&np->fwnode, FWNODE_FLAG_NOT_DEVICE);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 11124e965f65..7fe456dddce3 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -744,7 +744,7 @@ static int of_platform_notify(struct notifier_block *nb,
* Clear the flag before adding the device so that fw_devlink
* doesn't skip adding consumers to this device.
*/
- rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
+ fwnode_clear_flag(&rd->dn->fwnode, FWNODE_FLAG_NOT_DEVICE);
/* pdev_parent may be NULL when no bus platform device */
pdev_parent = of_find_device_by_node(parent);
pdev = of_platform_device_create(rd->dn, NULL,
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index ba223736237e..96d2e7b63db3 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -887,8 +887,6 @@ static void __init of_unittest_changeset(void)
unittest(!of_changeset_apply(&chgset), "apply failed\n");
- of_node_put(nchangeset);
-
/* Make sure node names are constructed correctly */
unittest((np = of_find_node_by_path("/testcase-data/changeset/n2/n21")),
"'%pOF' not added\n", n21);
@@ -910,6 +908,7 @@ static void __init of_unittest_changeset(void)
if (!ret)
unittest(strcmp(propstr, "hello") == 0, "original value not in updated property after revert");
+ of_node_put(nchangeset);
of_changeset_destroy(&chgset);
of_node_put(n1);
@@ -4096,7 +4095,6 @@ static int testdrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
size = info->dtbo_end - info->dtbo_begin;
ret = of_overlay_fdt_apply(info->dtbo_begin, size, &ovcs_id, dn);
- of_node_put(dn);
if (ret)
return ret;
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
index 6643a88c7a0c..2f077d0b7957 100644
--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -367,6 +367,8 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl,
dev_err(dev, "DMA transfer timeout\n");
dmaengine_terminate_sync(chan);
ret = -ETIMEDOUT;
+ } else {
+ ret = 0;
}
err_unmap:
@@ -438,6 +440,8 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl,
dev_err(dev, "DMA transfer timeout\n");
dmaengine_terminate_sync(chan);
ret = -ETIMEDOUT;
+ } else {
+ ret = 0;
}
err_unmap:
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
index e01a98e74d21..7702ebb81d99 100644
--- a/drivers/pci/endpoint/functions/pci-epf-ntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c
@@ -1494,47 +1494,6 @@ static int epf_ntb_db_mw_bar_init(struct epf_ntb *ntb,
return ret;
}
-/**
- * epf_ntb_epc_destroy_interface() - Cleanup NTB EPC interface
- * @ntb: NTB device that facilitates communication between HOST1 and HOST2
- * @type: PRIMARY interface or SECONDARY interface
- *
- * Unbind NTB function device from EPC and relinquish reference to pci_epc
- * for each of the interface.
- */
-static void epf_ntb_epc_destroy_interface(struct epf_ntb *ntb,
- enum pci_epc_interface_type type)
-{
- struct epf_ntb_epc *ntb_epc;
- struct pci_epc *epc;
- struct pci_epf *epf;
-
- if (type < 0)
- return;
-
- epf = ntb->epf;
- ntb_epc = ntb->epc[type];
- if (!ntb_epc)
- return;
- epc = ntb_epc->epc;
- pci_epc_remove_epf(epc, epf, type);
- pci_epc_put(epc);
-}
-
-/**
- * epf_ntb_epc_destroy() - Cleanup NTB EPC interface
- * @ntb: NTB device that facilitates communication between HOST1 and HOST2
- *
- * Wrapper for epf_ntb_epc_destroy_interface() to cleanup all the NTB interfaces
- */
-static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
-{
- enum pci_epc_interface_type type;
-
- for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++)
- epf_ntb_epc_destroy_interface(ntb, type);
-}
-
/**
* epf_ntb_epc_create_interface() - Create and initialize NTB EPC interface
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
@@ -1614,15 +1573,8 @@ static int epf_ntb_epc_create(struct epf_ntb *ntb)
ret = epf_ntb_epc_create_interface(ntb, epf->sec_epc,
SECONDARY_INTERFACE);
- if (ret) {
+ if (ret)
dev_err(dev, "SECONDARY intf: Fail to create NTB EPC\n");
- goto err_epc_create;
- }
-
- return 0;
-
-err_epc_create:
- epf_ntb_epc_destroy_interface(ntb, PRIMARY_INTERFACE);
return ret;
}
@@ -1887,7 +1839,7 @@ static int epf_ntb_bind(struct pci_epf *epf)
ret = epf_ntb_init_epc_bar(ntb);
if (ret) {
dev_err(dev, "Failed to create NTB EPC\n");
- goto err_bar_init;
+ return ret;
}
ret = epf_ntb_config_spad_bar_alloc_interface(ntb);
@@ -1909,9 +1861,6 @@ static int epf_ntb_bind(struct pci_epf *epf)
err_bar_alloc:
epf_ntb_config_spad_bar_free(ntb);
-err_bar_init:
- epf_ntb_epc_destroy(ntb);
-
return ret;
}
@@ -1927,7 +1876,6 @@ static void epf_ntb_unbind(struct pci_epf *epf)
epf_ntb_epc_cleanup(ntb);
epf_ntb_config_spad_bar_free(ntb);
- epf_ntb_epc_destroy(ntb);
}
#define EPF_NTB_R(_name) \
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index ac05942e4e6a..ca52c2c82b2c 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -10,6 +10,7 @@
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/module.h>
+#include <linux/devm-helpers.h>
#include <linux/device.h>
#include <linux/regmap.h>
#include <linux/workqueue.h>
@@ -821,14 +822,6 @@ static int charger_init_hw_regs(struct axp288_chrg_info *info)
return 0;
}
-static void axp288_charger_cancel_work(void *data)
-{
- struct axp288_chrg_info *info = data;
-
- cancel_work_sync(&info->otg.work);
- cancel_work_sync(&info->cable.work);
-}
-
static int axp288_charger_probe(struct platform_device *pdev)
{
int ret, i, pirq;
@@ -911,12 +904,12 @@ static int axp288_charger_probe(struct platform_device *pdev)
}
/* Cancel our work on cleanup, register this before the notifiers */
- ret = devm_add_action(dev, axp288_charger_cancel_work, info);
+ ret = devm_work_autocancel(dev, &info->cable.work,
+ axp288_charger_extcon_evt_worker);
if (ret)
return ret;
/* Register for extcon notification */
- INIT_WORK(&info->cable.work, axp288_charger_extcon_evt_worker);
info->cable.nb.notifier_call = axp288_charger_handle_cable_evt;
ret = devm_extcon_register_notifier_all(dev, info->cable.edev,
&info->cable.nb);
@@ -926,8 +919,12 @@ static int axp288_charger_probe(struct platform_device *pdev)
}
schedule_work(&info->cable.work);
+ ret = devm_work_autocancel(dev, &info->otg.work,
+ axp288_charger_otg_evt_worker);
+ if (ret)
+ return ret;
+
/* Register for OTG notification */
- INIT_WORK(&info->otg.work, axp288_charger_otg_evt_worker);
info->otg.id_nb.notifier_call = axp288_charger_handle_otg_evt;
if (info->otg.cable) {
ret = devm_extcon_register_notifier(dev, info->otg.cable,
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
index 5b399de16d60..80fdb3303400 100644
--- a/drivers/pwm/pwm-imx-tpm.c
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -352,7 +352,7 @@ static int pwm_imx_tpm_probe(struct platform_device *pdev)
struct clk *clk;
void __iomem *base;
int ret;
- unsigned int npwm;
+ unsigned int i, npwm;
u32 val;
base = devm_platform_ioremap_resource(pdev, 0);
@@ -382,6 +382,13 @@ static int pwm_imx_tpm_probe(struct platform_device *pdev)
mutex_init(&tpm->lock);
+ /* count the enabled channels */
+ for (i = 0; i < npwm; ++i) {
+ val = readl(base + PWM_IMX_TPM_CnSC(i));
+ if (FIELD_GET(PWM_IMX_TPM_CnSC_ELS, val))
+ ++tpm->enable_count;
+ }
+
ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret)
return dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index c165422d0651..6a64e5909f6a 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -232,17 +232,19 @@ static void zynqmp_r5_mb_rx_cb(struct mbox_client *cl, void *msg)
ipi = container_of(cl, struct mbox_info, mbox_cl);
- /* copy data from ipi buffer to r5_core */
+ /* copy data from ipi buffer to r5_core if IPI is buffered. */
ipi_msg = (struct zynqmp_ipi_message *)msg;
- buf_msg = (struct zynqmp_ipi_message *)ipi->rx_mc_buf;
- len = ipi_msg->len;
- if (len > IPI_BUF_LEN_MAX) {
- dev_warn(cl->dev, "msg size exceeded than %d\n",
- IPI_BUF_LEN_MAX);
- len = IPI_BUF_LEN_MAX;
+ if (ipi_msg) {
+ buf_msg = (struct zynqmp_ipi_message *)ipi->rx_mc_buf;
+ len = ipi_msg->len;
+ if (len > IPI_BUF_LEN_MAX) {
+ dev_warn(cl->dev, "msg size exceeded than %d\n",
+ IPI_BUF_LEN_MAX);
+ len = IPI_BUF_LEN_MAX;
+ }
+ buf_msg->len = len;
+ memcpy(buf_msg->data, ipi_msg->data, len);
}
- buf_msg->len = len;
- memcpy(buf_msg->data, ipi_msg->data, len);
/* received and processed interrupt ack */
if (mbox_send_message(ipi->rx_chan, NULL) < 0)
diff --git a/drivers/rtc/rtc-ntxec.c b/drivers/rtc/rtc-ntxec.c
index 850ca49186fd..d28ddb34e19e 100644
--- a/drivers/rtc/rtc-ntxec.c
+++ b/drivers/rtc/rtc-ntxec.c
@@ -110,7 +110,7 @@ static int ntxec_rtc_probe(struct platform_device *pdev)
struct rtc_device *dev;
struct ntxec_rtc *rtc;
- pdev->dev.of_node = pdev->dev.parent->of_node;
+ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3745cf856917..f37f031971df 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3982,6 +3982,7 @@ static int sd_probe(struct device *dev)
error = device_add(&sdkp->disk_dev);
if (error) {
put_device(&sdkp->disk_dev);
+ put_disk(gd);
goto out;
}
diff --git a/drivers/spi/spi-ch341.c b/drivers/spi/spi-ch341.c
index 0db74e95552f..850b428b8f64 100644
--- a/drivers/spi/spi-ch341.c
+++ b/drivers/spi/spi-ch341.c
@@ -173,17 +173,17 @@ static int ch341_probe(struct usb_interface *intf,
ch341->tx_buf =
devm_kzalloc(&udev->dev, CH341_PACKET_LENGTH, GFP_KERNEL);
- if (!ch341->tx_buf)
- return -ENOMEM;
+ if (!ch341->tx_buf) {
+ ret = -ENOMEM;
+ goto err_free_urb;
+ }
usb_fill_bulk_urb(ch341->rx_urb, udev, ch341->read_pipe, ch341->rx_buf,
ch341->rx_len, ch341_recv, ch341);
ret = usb_submit_urb(ch341->rx_urb, GFP_KERNEL);
- if (ret) {
- usb_free_urb(ch341->rx_urb);
- return -ENOMEM;
- }
+ if (ret)
+ goto err_free_urb;
ctrl->bus_num = -1;
ctrl->mode_bits = SPI_CPHA;
@@ -195,21 +195,34 @@ static int ch341_probe(struct usb_interface *intf,
ret = ch341_config_stream(ch341);
if (ret)
- return ret;
+ goto err_kill_urb;
ret = ch341_enable_pins(ch341, true);
if (ret)
- return ret;
+ goto err_kill_urb;
ret = spi_register_controller(ctrl);
if (ret)
- return ret;
+ goto err_disable_pins;
ch341->spidev = spi_new_device(ctrl, &chip);
- if (!ch341->spidev)
- return -ENOMEM;
+ if (!ch341->spidev) {
+ ret = -ENOMEM;
+ goto err_unregister;
+ }
return 0;
+
+err_unregister:
+ spi_unregister_controller(ctrl);
+err_disable_pins:
+ ch341_enable_pins(ch341, false);
+err_kill_urb:
+ usb_kill_urb(ch341->rx_urb);
+err_free_urb:
+ usb_free_urb(ch341->rx_urb);
+
+ return ret;
}
static void ch341_disconnect(struct usb_interface *intf)
@@ -219,6 +232,7 @@ static void ch341_disconnect(struct usb_interface *intf)
spi_unregister_device(ch341->spidev);
spi_unregister_controller(ch341->ctrl);
ch341_enable_pins(ch341, false);
+ usb_kill_urb(ch341->rx_urb);
usb_free_urb(ch341->rx_urb);
}
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 94d0f7695d07..6779ebdec94c 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1893,6 +1893,8 @@ static void spi_imx_remove(struct platform_device *pdev)
struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
int ret;
+ spi_controller_get(controller);
+
spi_unregister_controller(controller);
ret = pm_runtime_get_sync(spi_imx->dev);
@@ -1906,6 +1908,8 @@ static void spi_imx_remove(struct platform_device *pdev)
pm_runtime_disable(spi_imx->dev);
spi_imx_sdma_exit(spi_imx);
+
+ spi_controller_put(controller);
}
static int spi_imx_runtime_resume(struct device *dev)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 0c3200d08fe4..5a6c2f5672ae 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -42,6 +42,8 @@ EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
#include "internals.h"
+static int __spi_setup(struct spi_device *spi, bool initial_setup);
+
static DEFINE_IDR(spi_master_idr);
static void spidev_release(struct device *dev)
@@ -735,7 +737,7 @@ static int __spi_add_device(struct spi_device *spi)
* normally rely on the device being setup. Devices
* using SPI_CS_HIGH can't coexist well otherwise...
*/
- status = spi_setup(spi);
+ status = __spi_setup(spi, true);
if (status < 0) {
dev_err(dev, "can't setup %s, status %d\n",
dev_name(&spi->dev), status);
@@ -3879,27 +3881,7 @@ static int spi_set_cs_timing(struct spi_device *spi)
return status;
}
-/**
- * spi_setup - setup SPI mode and clock rate
- * @spi: the device whose settings are being modified
- * Context: can sleep, and no requests are queued to the device
- *
- * SPI protocol drivers may need to update the transfer mode if the
- * device doesn't work with its default. They may likewise need
- * to update clock rates or word sizes from initial values. This function
- * changes those settings, and must be called from a context that can sleep.
- * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
- * effect the next time the device is selected and data is transferred to
- * or from it. When this function returns, the SPI device is deselected.
- *
- * Note that this call will fail if the protocol driver specifies an option
- * that the underlying controller or its driver does not support. For
- * example, not all hardware supports wire transfers using nine bit words,
- * LSB-first wire encoding, or active-high chipselects.
- *
- * Return: zero on success, else a negative error code.
- */
-int spi_setup(struct spi_device *spi)
+static int __spi_setup(struct spi_device *spi, bool initial_setup)
{
unsigned bad_bits, ugly_bits;
int status;
@@ -3984,7 +3966,7 @@ int spi_setup(struct spi_device *spi)
status = spi_set_cs_timing(spi);
if (status) {
mutex_unlock(&spi->controller->io_mutex);
- return status;
+ goto err_cleanup;
}
if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
@@ -3993,7 +3975,7 @@ int spi_setup(struct spi_device *spi)
mutex_unlock(&spi->controller->io_mutex);
dev_err(&spi->controller->dev, "Failed to power device: %d\n",
status);
- return status;
+ goto err_cleanup;
}
/*
@@ -4030,6 +4012,37 @@ int spi_setup(struct spi_device *spi)
status);
return status;
+
+err_cleanup:
+ if (initial_setup)
+ spi_cleanup(spi);
+
+ return status;
+}
+
+/**
+ * spi_setup - setup SPI mode and clock rate
+ * @spi: the device whose settings are being modified
+ * Context: can sleep, and no requests are queued to the device
+ *
+ * SPI protocol drivers may need to update the transfer mode if the
+ * device doesn't work with its default. They may likewise need
+ * to update clock rates or word sizes from initial values. This function
+ * changes those settings, and must be called from a context that can sleep.
+ * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
+ * effect the next time the device is selected and data is transferred to
+ * or from it. When this function returns, the SPI device is deselected.
+ *
+ * Note that this call will fail if the protocol driver specifies an option
+ * that the underlying controller or its driver does not support. For
+ * example, not all hardware supports wire transfers using nine bit words,
+ * LSB-first wire encoding, or active-high chipselects.
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int spi_setup(struct spi_device *spi)
+{
+ return __spi_setup(spi, false);
}
EXPORT_SYMBOL_GPL(spi_setup);
@@ -4811,7 +4824,7 @@ static int of_spi_notify(struct notifier_block *nb, unsigned long action,
* Clear the flag before adding the device so that fw_devlink
* doesn't skip adding consumers to this device.
*/
- rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
+ fwnode_clear_flag(&rd->dn->fwnode, FWNODE_FLAG_NOT_DEVICE);
spi = of_register_spi_device(ctlr, rd->dn);
put_device(&ctlr->dev);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 8ce1134e15e5..1eaddce11aed 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -917,6 +917,7 @@ static void thermal_release(struct device *dev)
sizeof("thermal_zone") - 1)) {
tz = to_thermal_zone(dev);
thermal_zone_destroy_device_groups(tz);
+ thermal_set_governor(tz, NULL);
mutex_destroy(&tz->lock);
complete(&tz->removal);
} else if (!strncmp(dev_name(dev), "cooling_device",
@@ -1483,8 +1484,10 @@ thermal_zone_device_register_with_trips(const char *type,
/* sys I/F */
/* Add nodes that are always present via .groups */
result = thermal_zone_create_device_groups(tz);
- if (result)
+ if (result) {
+ thermal_set_governor(tz, NULL);
goto remove_id;
+ }
/* A new thermal zone needs to be updated anyway. */
atomic_set(&tz->need_update, 1);
@@ -1630,8 +1633,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
cancel_delayed_work_sync(&tz->poll_queue);
- thermal_set_governor(tz, NULL);
-
thermal_remove_hwmon_sysfs(tz);
ida_free(&thermal_tz_ida, tz->id);
ida_destroy(&tz->ida);
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 5aa16dbfc289..c60390a1d591 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -543,30 +543,31 @@ static irqreturn_t ci_irq_handler(int irq, void *data)
if (ret == IRQ_HANDLED)
return ret;
}
- }
- /*
- * Handle id change interrupt, it indicates device/host function
- * switch.
- */
- if (ci->is_otg && (otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS)) {
- ci->id_event = true;
- /* Clear ID change irq status */
- hw_write_otgsc(ci, OTGSC_IDIS, OTGSC_IDIS);
- ci_otg_queue_work(ci);
- return IRQ_HANDLED;
- }
+ /*
+ * Handle id change interrupt, it indicates device/host function
+ * switch.
+ */
+ if ((otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS)) {
+ ci->id_event = true;
+ /* Clear ID change irq status */
+ hw_write_otgsc(ci, OTGSC_IDIS, OTGSC_IDIS);
+ }
- /*
- * Handle vbus change interrupt, it indicates device connection
- * and disconnection events.
- */
- if (ci->is_otg && (otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS)) {
- ci->b_sess_valid_event = true;
- /* Clear BSV irq */
- hw_write_otgsc(ci, OTGSC_BSVIS, OTGSC_BSVIS);
- ci_otg_queue_work(ci);
- return IRQ_HANDLED;
+ /*
+ * Handle vbus change interrupt, it indicates device connection
+ * and disconnection events.
+ */
+ if ((otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS)) {
+ ci->b_sess_valid_event = true;
+ /* Clear BSV irq */
+ hw_write_otgsc(ci, OTGSC_BSVIS, OTGSC_BSVIS);
+ }
+
+ if (ci->id_event || ci->b_sess_valid_event) {
+ ci_otg_queue_work(ci);
+ return IRQ_HANDLED;
+ }
}
/* Handle device/host interrupt */
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index 647e98f4e351..fecc7d7e2f0d 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -130,6 +130,9 @@ enum ci_role ci_otg_role(struct ci_hdrc *ci)
void ci_handle_vbus_change(struct ci_hdrc *ci)
{
+ if (ci->role != CI_ROLE_GADGET)
+ return;
+
if (!ci->is_otg) {
if (ci->platdata->flags & CI_HDRC_FORCE_VBUS_ACTIVE_ALWAYS)
usb_gadget_vbus_connect(&ci->gadget);
@@ -187,8 +190,8 @@ void ci_handle_id_switch(struct ci_hdrc *ci)
ci_role_stop(ci);
- if (role == CI_ROLE_GADGET &&
- IS_ERR(ci->platdata->vbus_extcon.edev))
+ if (role == CI_ROLE_GADGET && !ci->role_switch &&
+ IS_ERR(ci->platdata->vbus_extcon.edev))
/*
* Wait vbus lower than OTGSC_BSV before connecting
* to host. If connecting status is from an external
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 45e786852920..f04ca52bff29 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3080,7 +3080,6 @@ static void xhci_endpoint_disable(struct usb_hcd *hcd,
xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
ep->ep_state);
done:
- host_ep->hcpriv = NULL;
spin_unlock_irqrestore(&xhci->lock, flags);
}
diff --git a/drivers/vfio/cdx/intr.c b/drivers/vfio/cdx/intr.c
index 986fa2a45fa4..a588d6fc478b 100644
--- a/drivers/vfio/cdx/intr.c
+++ b/drivers/vfio/cdx/intr.c
@@ -152,6 +152,8 @@ static int vfio_cdx_set_msi_trigger(struct vfio_cdx_device *vdev,
if (start + count > cdx_dev->num_msi)
return -EINVAL;
+ guard(mutex)(&vdev->cdx_irqs_lock);
+
if (!count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
vfio_cdx_msi_disable(vdev);
return 0;
@@ -175,6 +177,10 @@ static int vfio_cdx_set_msi_trigger(struct vfio_cdx_device *vdev,
return ret;
}
+ /* Ensure MSI is configured before accessing cdx_irqs */
+ if (!vdev->config_msi)
+ return -EINVAL;
+
for (i = start; i < start + count; i++) {
if (!vdev->cdx_irqs[i].trigger)
continue;
@@ -206,12 +212,5 @@ int vfio_cdx_set_irqs_ioctl(struct vfio_cdx_device *vdev,
/* Free All IRQs for the given device */
void vfio_cdx_irqs_cleanup(struct vfio_cdx_device *vdev)
{
- /*
- * Device does not support any interrupt or the interrupts
- * were not configured
- */
- if (!vdev->cdx_irqs)
- return;
-
vfio_cdx_set_msi_trigger(vdev, 0, 0, 0, VFIO_IRQ_SET_DATA_NONE, NULL);
}
diff --git a/drivers/vfio/cdx/main.c b/drivers/vfio/cdx/main.c
index 67465fad5b4b..4cf2e4fb02c8 100644
--- a/drivers/vfio/cdx/main.c
+++ b/drivers/vfio/cdx/main.c
@@ -8,6 +8,23 @@
#include "private.h"
+static int vfio_cdx_init_dev(struct vfio_device *core_vdev)
+{
+ struct vfio_cdx_device *vdev =
+ container_of(core_vdev, struct vfio_cdx_device, vdev);
+
+ mutex_init(&vdev->cdx_irqs_lock);
+ return 0;
+}
+
+static void vfio_cdx_release_dev(struct vfio_device *core_vdev)
+{
+ struct vfio_cdx_device *vdev =
+ container_of(core_vdev, struct vfio_cdx_device, vdev);
+
+ mutex_destroy(&vdev->cdx_irqs_lock);
+}
+
static int vfio_cdx_open_device(struct vfio_device *core_vdev)
{
struct vfio_cdx_device *vdev =
@@ -281,6 +298,8 @@ static int vfio_cdx_mmap(struct vfio_device *core_vdev,
static const struct vfio_device_ops vfio_cdx_ops = {
.name = "vfio-cdx",
+ .init = vfio_cdx_init_dev,
+ .release = vfio_cdx_release_dev,
.open_device = vfio_cdx_open_device,
.close_device = vfio_cdx_close_device,
.ioctl = vfio_cdx_ioctl,
diff --git a/drivers/vfio/cdx/private.h b/drivers/vfio/cdx/private.h
index dc56729b3114..04fc00dc8692 100644
--- a/drivers/vfio/cdx/private.h
+++ b/drivers/vfio/cdx/private.h
@@ -6,6 +6,8 @@
#ifndef VFIO_CDX_PRIVATE_H
#define VFIO_CDX_PRIVATE_H
+#include <linux/mutex.h>
+
#define VFIO_CDX_OFFSET_SHIFT 40
static inline u64 vfio_cdx_index_to_offset(u32 index)
@@ -31,6 +33,7 @@ struct vfio_cdx_region {
struct vfio_cdx_device {
struct vfio_device vdev;
struct vfio_cdx_region *regions;
+ struct mutex cdx_irqs_lock;
struct vfio_cdx_irq *cdx_irqs;
u32 flags;
#define BME_SUPPORT BIT(0)
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index a0850916d353..181ac356ca53 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -769,7 +769,8 @@ struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
d_drop(dentry);
err = -ENOENT;
} else {
- d_add(dentry, NULL);
+ if (d_unhashed(dentry))
+ d_add(dentry, NULL);
}
}
}
@@ -840,7 +841,8 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
spin_unlock(&ci->i_ceph_lock);
doutc(cl, " dir %llx.%llx complete, -ENOENT\n",
ceph_vinop(dir));
- d_add(dentry, NULL);
+ if (d_unhashed(dentry))
+ d_add(dentry, NULL);
di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
return NULL;
}
diff --git a/fs/erofs/dir.c b/fs/erofs/dir.c
index c3b90abdee37..19efbff53048 100644
--- a/fs/erofs/dir.c
+++ b/fs/erofs/dir.c
@@ -18,20 +18,18 @@ static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx,
const char *de_name = (char *)dentry_blk + nameoff;
unsigned int de_namelen;
- /* the last dirent in the block? */
- if (de + 1 >= end)
- de_namelen = strnlen(de_name, maxsize - nameoff);
- else
+ /* non-trailing dirent in the directory block? */
+ if (de + 1 < end)
de_namelen = le16_to_cpu(de[1].nameoff) - nameoff;
+ else if (maxsize <= nameoff)
+ goto err_bogus;
+ else
+ de_namelen = strnlen(de_name, maxsize - nameoff);
- /* a corrupted entry is found */
- if (nameoff + de_namelen > maxsize ||
- de_namelen > EROFS_NAME_LEN) {
- erofs_err(dir->i_sb, "bogus dirent @ nid %llu",
- EROFS_I(dir)->nid);
- DBG_BUGON(1);
- return -EFSCORRUPTED;
- }
+ /* a corrupted entry is found (including negative namelen) */
+ if (!in_range32(de_namelen, 1, EROFS_NAME_LEN) ||
+ nameoff + de_namelen > maxsize)
+ goto err_bogus;
if (!dir_emit(ctx, de_name, de_namelen,
le64_to_cpu(de->nid), d_type))
@@ -40,6 +38,10 @@ static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx,
ctx->pos += sizeof(struct erofs_dirent);
}
return 0;
+err_bogus:
+ erofs_err(dir->i_sb, "bogus dirent @ nid %llu", EROFS_I(dir)->nid);
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
}
static int erofs_readdir(struct file *f, struct dir_context *ctx)
@@ -67,7 +69,7 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
}
nameoff = le16_to_cpu(de->nameoff);
- if (nameoff < sizeof(struct erofs_dirent) || nameoff >= bsz) {
+ if (!nameoff || nameoff >= bsz || (nameoff % sizeof(*de))) {
erofs_err(sb, "invalid de[0].nameoff %u @ nid %llu",
nameoff, EROFS_I(dir)->nid);
err = -EFSCORRUPTED;
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 177b1f852b63..3fcf8191da42 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1429,9 +1429,17 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
* the test is that same one that e2fsck uses
* NeilBrown 1999oct15
*/
- if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) {
- /* this inode is deleted */
- ret = -ESTALE;
+ if (inode->i_nlink == 0) {
+ if (inode->i_mode == 0 || ei->i_dtime) {
+ /* this inode is deleted */
+ ret = -ESTALE;
+ } else {
+ ext2_error(sb, __func__,
+ "inode %lu has zero i_nlink with mode 0%o and no dtime, "
+ "filesystem may be corrupt",
+ ino, inode->i_mode);
+ ret = -EFSCORRUPTED;
+ }
goto bad_inode;
}
inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index d62fec12600a..3f3102643852 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -226,7 +226,7 @@ check_xattrs(struct inode *inode, struct buffer_head *bh,
/* Find the end of the names list */
while (!IS_LAST_ENTRY(e)) {
struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
- if ((void *)next >= end) {
+ if ((void *)next + sizeof(u32) > end) {
err_str = "e_name out of bounds";
goto errout;
}
@@ -1165,7 +1165,7 @@ ext4_xattr_inode_dec_ref_all(handle_t *handle, struct inode *parent,
{
struct inode *ea_inode;
struct ext4_xattr_entry *entry;
- struct ext4_iloc iloc;
+ struct ext4_iloc iloc = { .bh = NULL };
bool dirty = false;
unsigned int ea_ino;
int err;
@@ -1260,6 +1260,8 @@ ext4_xattr_inode_dec_ref_all(handle_t *handle, struct inode *parent,
ext4_warning_inode(parent,
"handle dirty metadata err=%d", err);
}
+
+ brelse(iloc.bh);
}
/*
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 7bd6f9f108ec..598f82386c5f 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -355,6 +355,8 @@ static void f2fs_write_end_io(struct bio *bio)
f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
page_folio(page)->index != nid_of_node(page));
+ if (f2fs_in_warm_node_list(sbi, page))
+ f2fs_del_fsync_node_entry(sbi, page);
dec_page_count(sbi, type);
@@ -366,8 +368,6 @@ static void f2fs_write_end_io(struct bio *bio)
wq_has_sleeper(&sbi->cp_wait))
wake_up(&sbi->cp_wait);
- if (f2fs_in_warm_node_list(sbi, page))
- f2fs_del_fsync_node_entry(sbi, page);
clear_page_private_gcing(page);
end_page_writeback(page);
}
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index a6b06ac2751d..ab0d9ed02092 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -3782,7 +3782,7 @@ bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
int f2fs_start_discard_thread(struct f2fs_sb_info *sbi);
void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
-bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
+bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi, bool need_check);
void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
struct cp_control *cpc);
void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 819b92fd94e6..a1561b9ead42 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1885,7 +1885,7 @@ void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
*
* Return true if issued all discard cmd or no discard cmd need issue, otherwise return false.
*/
-bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
+bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi, bool need_check)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct discard_policy dpolicy;
@@ -1902,7 +1902,7 @@ bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
/* just to make sure there is no pending discard commands */
__wait_all_discard_cmd(sbi, NULL);
- f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
+ f2fs_bug_on(sbi, need_check && atomic_read(&dcc->discard_cmd_cnt));
return !dropped;
}
@@ -2371,7 +2371,7 @@ static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
* Recovery can cache discard commands, so in error path of
* fill_super(), it needs to give a chance to handle them.
*/
- f2fs_issue_discard_timeout(sbi);
+ f2fs_issue_discard_timeout(sbi, true);
kfree(dcc);
SM_I(sbi)->dcc_info = NULL;
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 3001ad8df5d1..f25a259f37f1 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1628,7 +1628,7 @@ static void f2fs_put_super(struct super_block *sb)
}
/* be sure to wait for any on-going discard commands */
- done = f2fs_issue_discard_timeout(sbi);
+ done = f2fs_issue_discard_timeout(sbi, true);
if (f2fs_realtime_discard_enable(sbi) && !sbi->discard_blks && done) {
struct cp_control cpc = {
.reason = CP_UMOUNT | CP_TRIMMED,
@@ -1767,7 +1767,7 @@ static int f2fs_unfreeze(struct super_block *sb)
* will recover after removal of snapshot.
*/
if (test_opt(sbi, DISCARD) && !f2fs_hw_support_discard(sbi))
- f2fs_issue_discard_timeout(sbi);
+ f2fs_issue_discard_timeout(sbi, true);
clear_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
return 0;
@@ -2535,7 +2535,12 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
need_stop_discard = true;
} else {
f2fs_stop_discard_thread(sbi);
- f2fs_issue_discard_timeout(sbi);
+ /*
+ * f2fs_ioc_fitrim() won't race w/ "remount ro"
+ * so it's safe to check discard_cmd_cnt in
+ * f2fs_issue_discard_timeout().
+ */
+ f2fs_issue_discard_timeout(sbi, *flags & SB_RDONLY);
need_restart_discard = true;
}
}
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index f68fc8c255f0..5a557f6f921a 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -429,6 +429,7 @@ int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
int need_cancel;
int did_revoke = 0; /* akpm: debug */
struct buffer_head *bh = jh2bh(jh);
+ struct address_space *bh_mapping = bh->b_folio->mapping;
jbd2_debug(4, "journal_head %p, cancelling revoke\n", jh);
@@ -466,13 +467,14 @@ int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
* buffer_head? If so, we'd better make sure we clear the
* revoked status on any hashed alias too, otherwise the revoke
* state machine will get very upset later on. */
- if (need_cancel) {
+ if (need_cancel && !sb_is_blkdev_sb(bh_mapping->host->i_sb)) {
struct buffer_head *bh2;
+
bh2 = __find_get_block_nonatomic(bh->b_bdev, bh->b_blocknr,
bh->b_size);
if (bh2) {
- if (bh2 != bh)
- clear_buffer_revoked(bh2);
+ WARN_ON_ONCE(bh2 == bh);
+ clear_buffer_revoked(bh2);
__brelse(bh2);
}
}
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 0794dcaf1e47..26839972f609 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -621,6 +621,7 @@ static int inotify_new_watch(struct fsnotify_group *group,
if (ret) {
/* we failed to get on the inode, get off the idr */
inotify_remove_from_idr(group, tmp_i_mark);
+ dec_inotify_watches(group->inotify_data.ucounts);
goto out_err;
}
diff --git a/fs/ntfs3/run.c b/fs/ntfs3/run.c
index 66fb690d7751..744c1a317f8e 100644
--- a/fs/ntfs3/run.c
+++ b/fs/ntfs3/run.c
@@ -963,6 +963,9 @@ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
if (size_size > sizeof(len))
return -EINVAL;
+ if (run_buf + size_size > run_last)
+ return -EINVAL;
+
len = run_unpack_s64(run_buf, size_size, 0);
/* Skip size_size. */
run_buf += size_size;
@@ -975,6 +978,9 @@ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
else if (offset_size <= sizeof(s64)) {
s64 dlcn;
+ if (run_buf + offset_size > run_last)
+ return -EINVAL;
+
/* Initial value of dlcn is -1 or 0. */
dlcn = (run_buf[offset_size - 1] & 0x80) ? (s64)-1 : 0;
dlcn = run_unpack_s64(run_buf, offset_size, dlcn);
@@ -1014,9 +1020,15 @@ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
return -EOPNOTSUPP;
}
#endif
- if (lcn != SPARSE_LCN64 && lcn + len > sbi->used.bitmap.nbits) {
- /* LCN range is out of volume. */
- return -EINVAL;
+ if (lcn != SPARSE_LCN64) {
+ u64 lcn_end;
+
+ if (check_add_overflow(lcn, len, &lcn_end))
+ return -EINVAL;
+ if (lcn_end > sbi->used.bitmap.nbits) {
+ /* LCN range is out of volume. */
+ return -EINVAL;
+ }
}
if (!run)
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index de40a07bcb52..ba58572dfe63 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -37,6 +37,8 @@
#include "namei.h"
#include "sysfile.h"
+#define OCFS2_DIO_MARK_EXTENT_BATCH 200
+
static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
@@ -2301,7 +2303,7 @@ static int ocfs2_dio_end_io_write(struct inode *inode,
struct ocfs2_alloc_context *meta_ac = NULL;
handle_t *handle = NULL;
loff_t end = offset + bytes;
- int ret = 0, credits = 0;
+ int ret = 0, credits = 0, batch = 0;
ocfs2_init_dealloc_ctxt(&dealloc);
@@ -2318,18 +2320,6 @@ static int ocfs2_dio_end_io_write(struct inode *inode,
goto out;
}
- /* Delete orphan before acquire i_rwsem. */
- if (dwc->dw_orphaned) {
- BUG_ON(dwc->dw_writer_pid != task_pid_nr(current));
-
- end = end > i_size_read(inode) ? end : 0;
-
- ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
- !!end, end);
- if (ret < 0)
- mlog_errno(ret);
- }
-
down_write(&oi->ip_alloc_sem);
di = (struct ocfs2_dinode *)di_bh->b_data;
@@ -2350,24 +2340,25 @@ static int ocfs2_dio_end_io_write(struct inode *inode,
credits = ocfs2_calc_extend_credits(inode->i_sb, &di->id2.i_list);
- handle = ocfs2_start_trans(osb, credits);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- mlog_errno(ret);
- goto unlock;
- }
- ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (ret) {
- mlog_errno(ret);
- goto commit;
- }
-
list_for_each_entry(ue, &dwc->dw_zero_list, ue_node) {
+ if (!handle) {
+ handle = ocfs2_start_trans(osb, credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto unlock;
+ }
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto commit;
+ }
+ }
ret = ocfs2_assure_trans_credits(handle, credits);
if (ret < 0) {
mlog_errno(ret);
- break;
+ goto commit;
}
ret = ocfs2_mark_extent_written(inode, &et, handle,
ue->ue_cpos, 1,
@@ -2375,19 +2366,44 @@ static int ocfs2_dio_end_io_write(struct inode *inode,
meta_ac, &dealloc);
if (ret < 0) {
mlog_errno(ret);
- break;
+ goto commit;
+ }
+
+ if (++batch == OCFS2_DIO_MARK_EXTENT_BATCH) {
+ ocfs2_commit_trans(osb, handle);
+ handle = NULL;
+ batch = 0;
}
}
if (end > i_size_read(inode)) {
+ if (!handle) {
+ handle = ocfs2_start_trans(osb, credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto unlock;
+ }
+ }
ret = ocfs2_set_inode_size(handle, inode, di_bh, end);
if (ret < 0)
mlog_errno(ret);
}
+
commit:
- ocfs2_commit_trans(osb, handle);
+ if (handle)
+ ocfs2_commit_trans(osb, handle);
unlock:
up_write(&oi->ip_alloc_sem);
+
+ /* everything looks good, let's start the cleanup */
+ if (!ret && dwc->dw_orphaned) {
+ BUG_ON(dwc->dw_writer_pid != task_pid_nr(current));
+
+ ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh, 0, 0);
+ if (ret < 0)
+ mlog_errno(ret);
+ }
ocfs2_inode_unlock(inode, 1);
brelse(di_bh);
out:
diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c
index 3c709b213b92..871fba0762ee 100644
--- a/fs/smb/client/cifsacl.c
+++ b/fs/smb/client/cifsacl.c
@@ -758,6 +758,77 @@ static void dump_ace(struct smb_ace *pace, char *end_of_acl)
}
#endif
+static int validate_dacl(struct smb_acl *pdacl, char *end_of_acl)
+{
+ int i, ace_hdr_size, ace_size, min_ace_size;
+ u16 dacl_size, num_aces;
+ char *acl_base, *end_of_dacl;
+ struct smb_ace *pace;
+
+ if (!pdacl)
+ return 0;
+
+ if (end_of_acl < (char *)pdacl + sizeof(struct smb_acl)) {
+ cifs_dbg(VFS, "ACL too small to parse DACL\n");
+ return -EINVAL;
+ }
+
+ dacl_size = le16_to_cpu(pdacl->size);
+ if (dacl_size < sizeof(struct smb_acl) ||
+ end_of_acl < (char *)pdacl + dacl_size) {
+ cifs_dbg(VFS, "ACL too small to parse DACL\n");
+ return -EINVAL;
+ }
+
+ num_aces = le16_to_cpu(pdacl->num_aces);
+ if (!num_aces)
+ return 0;
+
+ ace_hdr_size = offsetof(struct smb_ace, sid) +
+ offsetof(struct smb_sid, sub_auth);
+ min_ace_size = ace_hdr_size + sizeof(__le32);
+ if (num_aces > (dacl_size - sizeof(struct smb_acl)) / min_ace_size) {
+ cifs_dbg(VFS, "ACL too small to parse DACL\n");
+ return -EINVAL;
+ }
+
+ end_of_dacl = (char *)pdacl + dacl_size;
+ acl_base = (char *)pdacl;
+ ace_size = sizeof(struct smb_acl);
+
+ for (i = 0; i < num_aces; ++i) {
+ if (end_of_dacl - acl_base < ace_size) {
+ cifs_dbg(VFS, "ACL too small to parse ACE\n");
+ return -EINVAL;
+ }
+
+ pace = (struct smb_ace *)(acl_base + ace_size);
+ acl_base = (char *)pace;
+
+ if (end_of_dacl - acl_base < ace_hdr_size ||
+ pace->sid.num_subauth == 0 ||
+ pace->sid.num_subauth > SID_MAX_SUB_AUTHORITIES) {
+ cifs_dbg(VFS, "ACL too small to parse ACE\n");
+ return -EINVAL;
+ }
+
+ ace_size = ace_hdr_size + sizeof(__le32) * pace->sid.num_subauth;
+ if (end_of_dacl - acl_base < ace_size ||
+ le16_to_cpu(pace->size) < ace_size) {
+ cifs_dbg(VFS, "ACL too small to parse ACE\n");
+ return -EINVAL;
+ }
+
+ ace_size = le16_to_cpu(pace->size);
+ if (end_of_dacl - acl_base < ace_size) {
+ cifs_dbg(VFS, "ACL too small to parse ACE\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl,
struct smb_sid *pownersid, struct smb_sid *pgrpsid,
struct cifs_fattr *fattr, bool mode_from_special_sid)
@@ -765,7 +836,7 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl,
int i;
u16 num_aces = 0;
int acl_size;
- char *acl_base;
+ char *acl_base, *end_of_dacl;
struct smb_ace **ppace;
/* BB need to add parm so we can store the SID BB */
@@ -777,12 +848,8 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl,
return;
}
- /* validate that we do not go past end of acl */
- if (end_of_acl < (char *)pdacl + sizeof(struct smb_acl) ||
- end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
- cifs_dbg(VFS, "ACL too small to parse DACL\n");
+ if (validate_dacl(pdacl, end_of_acl))
return;
- }
cifs_dbg(NOISY, "DACL revision %d size %d num aces %d\n",
le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
@@ -793,6 +860,7 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl,
user/group/other have no permissions */
fattr->cf_mode &= ~(0777);
+ end_of_dacl = (char *)pdacl + le16_to_cpu(pdacl->size);
acl_base = (char *)pdacl;
acl_size = sizeof(struct smb_acl);
@@ -800,36 +868,16 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl,
if (num_aces > 0) {
umode_t denied_mode = 0;
- if (num_aces > (le16_to_cpu(pdacl->size) - sizeof(struct smb_acl)) /
- (offsetof(struct smb_ace, sid) +
- offsetof(struct smb_sid, sub_auth) + sizeof(__le16)))
- return;
-
ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *),
GFP_KERNEL);
if (!ppace)
return;
for (i = 0; i < num_aces; ++i) {
- if (end_of_acl - acl_base < acl_size)
- break;
-
ppace[i] = (struct smb_ace *) (acl_base + acl_size);
- acl_base = (char *)ppace[i];
- acl_size = offsetof(struct smb_ace, sid) +
- offsetof(struct smb_sid, sub_auth);
-
- if (end_of_acl - acl_base < acl_size ||
- ppace[i]->sid.num_subauth == 0 ||
- ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES ||
- (end_of_acl - acl_base <
- acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) ||
- (le16_to_cpu(ppace[i]->size) <
- acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth))
- break;
#ifdef CONFIG_CIFS_DEBUG2
- dump_ace(ppace[i], end_of_acl);
+ dump_ace(ppace[i], end_of_dacl);
#endif
if (mode_from_special_sid &&
ppace[i]->sid.num_subauth >= 3 &&
@@ -872,6 +920,7 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl,
(void *)ppace[i],
sizeof(struct smb_ace)); */
+ acl_base = (char *)ppace[i];
acl_size = le16_to_cpu(ppace[i]->size);
}
@@ -1295,10 +1344,9 @@ static int build_sec_desc(struct smb_ntsd *pntsd, struct smb_ntsd *pnntsd,
dacloffset = le32_to_cpu(pntsd->dacloffset);
if (dacloffset) {
dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset);
- if (end_of_acl < (char *)dacl_ptr + le16_to_cpu(dacl_ptr->size)) {
- cifs_dbg(VFS, "Server returned illegal ACL size\n");
- return -EINVAL;
- }
+ rc = validate_dacl(dacl_ptr, end_of_acl);
+ if (rc)
+ return rc;
}
owner_sid_ptr = (struct smb_sid *)((char *)pntsd +
@@ -1669,6 +1717,12 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
dacloffset = le32_to_cpu(pntsd->dacloffset);
if (dacloffset) {
dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset);
+ rc = validate_dacl(dacl_ptr, (char *)pntsd + secdesclen);
+ if (rc) {
+ kfree(pntsd);
+ cifs_put_tlink(tlink);
+ return rc;
+ }
if (mode_from_sid)
nsecdesclen +=
le16_to_cpu(dacl_ptr->num_aces) * sizeof(struct smb_ace);
diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
index 2537b52eea3e..d38d9c0272c2 100644
--- a/fs/smb/server/connection.c
+++ b/fs/smb/server/connection.c
@@ -19,7 +19,7 @@ static DEFINE_MUTEX(init_lock);
static struct ksmbd_conn_ops default_conn_ops;
-LIST_HEAD(conn_list);
+DEFINE_HASHTABLE(conn_list, CONN_HASH_BITS);
DECLARE_RWSEM(conn_list_lock);
/**
@@ -33,7 +33,7 @@ DECLARE_RWSEM(conn_list_lock);
void ksmbd_conn_free(struct ksmbd_conn *conn)
{
down_write(&conn_list_lock);
- list_del(&conn->conns_list);
+ hash_del(&conn->hlist);
up_write(&conn_list_lock);
xa_destroy(&conn->sessions);
@@ -78,7 +78,6 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
init_waitqueue_head(&conn->req_running_q);
init_waitqueue_head(&conn->r_count_q);
- INIT_LIST_HEAD(&conn->conns_list);
INIT_LIST_HEAD(&conn->requests);
INIT_LIST_HEAD(&conn->async_requests);
spin_lock_init(&conn->request_lock);
@@ -91,19 +90,17 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
init_rwsem(&conn->session_lock);
- down_write(&conn_list_lock);
- list_add(&conn->conns_list, &conn_list);
- up_write(&conn_list_lock);
return conn;
}
bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
{
struct ksmbd_conn *t;
+ int bkt;
bool ret = false;
down_read(&conn_list_lock);
- list_for_each_entry(t, &conn_list, conns_list) {
+ hash_for_each(conn_list, bkt, t, hlist) {
if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
continue;
@@ -164,9 +161,10 @@ void ksmbd_conn_unlock(struct ksmbd_conn *conn)
void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
{
struct ksmbd_conn *conn;
+ int bkt;
down_read(&conn_list_lock);
- list_for_each_entry(conn, &conn_list, conns_list) {
+ hash_for_each(conn_list, bkt, conn, hlist) {
if (conn->binding || xa_load(&conn->sessions, sess_id))
WRITE_ONCE(conn->status, status);
}
@@ -182,17 +180,16 @@ int ksmbd_conn_wait_idle_sess_id(struct ksmbd_conn *curr_conn, u64 sess_id)
{
struct ksmbd_conn *conn;
int rc, retry_count = 0, max_timeout = 120;
- int rcount = 1;
+ int rcount, bkt;
retry_idle:
if (retry_count >= max_timeout)
return -EIO;
down_read(&conn_list_lock);
- list_for_each_entry(conn, &conn_list, conns_list) {
+ hash_for_each(conn_list, bkt, conn, hlist) {
if (conn->binding || xa_load(&conn->sessions, sess_id)) {
- if (conn == curr_conn)
- rcount = 2;
+ rcount = (conn == curr_conn) ? 2 : 1;
if (atomic_read(&conn->req_running) >= rcount) {
rc = wait_event_timeout(conn->req_running_q,
atomic_read(&conn->req_running) < rcount,
@@ -480,10 +477,11 @@ static void stop_sessions(void)
{
struct ksmbd_conn *conn;
struct ksmbd_transport *t;
+ int bkt;
again:
down_read(&conn_list_lock);
- list_for_each_entry(conn, &conn_list, conns_list) {
+ hash_for_each(conn_list, bkt, conn, hlist) {
t = conn->transport;
ksmbd_conn_set_exiting(conn);
if (t->ops->shutdown) {
@@ -494,8 +492,8 @@ static void stop_sessions(void)
}
up_read(&conn_list_lock);
- if (!list_empty(&conn_list)) {
- schedule_timeout_interruptible(HZ / 10); /* 100ms */
+ if (!hash_empty(conn_list)) {
+ msleep(100);
goto again;
}
}
diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
index 2aa8084bb593..78633ad1175c 100644
--- a/fs/smb/server/connection.h
+++ b/fs/smb/server/connection.h
@@ -52,11 +52,12 @@ struct ksmbd_conn {
u8 inet6_addr[16];
#endif
};
+ unsigned int inet_hash;
char *request_buf;
struct ksmbd_transport *transport;
struct nls_table *local_nls;
struct unicode_map *um;
- struct list_head conns_list;
+ struct hlist_node hlist;
struct rw_semaphore session_lock;
/* smb session 1 per user */
struct xarray sessions;
@@ -151,7 +152,8 @@ struct ksmbd_transport {
#define KSMBD_TCP_SEND_TIMEOUT (5 * HZ)
#define KSMBD_TCP_PEER_SOCKADDR(c) ((struct sockaddr *)&((c)->peer_addr))
-extern struct list_head conn_list;
+#define CONN_HASH_BITS 12
+extern DECLARE_HASHTABLE(conn_list, CONN_HASH_BITS);
extern struct rw_semaphore conn_list_lock;
bool ksmbd_conn_alive(struct ksmbd_conn *conn);
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index bc0574b6f2c3..29fbdada7259 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -7427,7 +7427,7 @@ int smb2_lock(struct ksmbd_work *work)
int nolock = 0;
LIST_HEAD(lock_list);
LIST_HEAD(rollback_list);
- int prior_lock = 0;
+ int prior_lock = 0, bkt;
WORK_BUFFERS(work, req, rsp);
@@ -7537,7 +7537,7 @@ int smb2_lock(struct ksmbd_work *work)
nolock = 1;
/* check locks in connection list */
down_read(&conn_list_lock);
- list_for_each_entry(conn, &conn_list, conns_list) {
+ hash_for_each(conn_list, bkt, conn, hlist) {
spin_lock(&conn->llist_lock);
list_for_each_entry_safe(cmp_lock, tmp2, &conn->lock_list, clist) {
if (file_inode(cmp_lock->fl->c.flc_file) !=
diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
index d15c33480e0a..3976009596d9 100644
--- a/fs/smb/server/transport_rdma.c
+++ b/fs/smb/server/transport_rdma.c
@@ -381,6 +381,11 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
conn = ksmbd_conn_alloc();
if (!conn)
goto err;
+
+ down_write(&conn_list_lock);
+ hash_add(conn_list, &conn->hlist, 0);
+ up_write(&conn_list_lock);
+
conn->transport = KSMBD_TRANS(t);
KSMBD_TRANS(t)->conn = conn;
KSMBD_TRANS(t)->ops = &ksmbd_smb_direct_transport_ops;
diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
index 4dfb0ccdaf98..d731bab98568 100644
--- a/fs/smb/server/transport_tcp.c
+++ b/fs/smb/server/transport_tcp.c
@@ -89,13 +89,21 @@ static struct tcp_transport *alloc_transport(struct socket *client_sk)
}
#if IS_ENABLED(CONFIG_IPV6)
- if (client_sk->sk->sk_family == AF_INET6)
+ if (client_sk->sk->sk_family == AF_INET6) {
memcpy(&conn->inet6_addr, &client_sk->sk->sk_v6_daddr, 16);
- else
+ conn->inet_hash = ipv6_addr_hash(&client_sk->sk->sk_v6_daddr);
+ } else {
conn->inet_addr = inet_sk(client_sk->sk)->inet_daddr;
+ conn->inet_hash = ipv4_addr_hash(inet_sk(client_sk->sk)->inet_daddr);
+ }
#else
conn->inet_addr = inet_sk(client_sk->sk)->inet_daddr;
+ conn->inet_hash = ipv4_addr_hash(inet_sk(client_sk->sk)->inet_daddr);
#endif
+ down_write(&conn_list_lock);
+ hash_add(conn_list, &conn->hlist, conn->inet_hash);
+ up_write(&conn_list_lock);
+
conn->transport = KSMBD_TRANS(t);
KSMBD_TRANS(t)->conn = conn;
KSMBD_TRANS(t)->ops = &ksmbd_tcp_transport_ops;
@@ -242,7 +250,7 @@ static int ksmbd_kthread_fn(void *p)
struct socket *client_sk = NULL;
struct interface *iface = (struct interface *)p;
struct ksmbd_conn *conn;
- int ret;
+ int ret, inet_hash;
unsigned int max_ip_conns;
while (!kthread_should_stop()) {
@@ -267,9 +275,18 @@ static int ksmbd_kthread_fn(void *p)
/*
* Limits repeated connections from clients with the same IP.
*/
+#if IS_ENABLED(CONFIG_IPV6)
+ if (client_sk->sk->sk_family == AF_INET6)
+ inet_hash = ipv6_addr_hash(&client_sk->sk->sk_v6_daddr);
+ else
+ inet_hash = ipv4_addr_hash(inet_sk(client_sk->sk)->inet_daddr);
+#else
+ inet_hash = ipv4_addr_hash(inet_sk(client_sk->sk)->inet_daddr);
+#endif
+
max_ip_conns = 0;
down_read(&conn_list_lock);
- list_for_each_entry(conn, &conn_list, conns_list) {
+ hash_for_each_possible(conn_list, conn, hlist, inet_hash) {
#if IS_ENABLED(CONFIG_IPV6)
if (client_sk->sk->sk_family == AF_INET6) {
if (memcmp(&client_sk->sk->sk_v6_daddr,
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 10df55aea512..29c9941e5ea7 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1215,8 +1215,6 @@ static __always_inline int validate_unaligned_range(
return -EINVAL;
if (!len)
return -EINVAL;
- if (start < mmap_min_addr)
- return -EINVAL;
if (start >= task_size)
return -EINVAL;
if (len > task_size - start)
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 6bcbdc8bf186..afcdfe317b7e 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -2127,6 +2127,7 @@ xfs_alloc_buftarg(
return btp;
error_free:
+ fs_put_dax(btp->bt_daxdev, mp);
kfree(btp);
return NULL;
}
diff --git a/include/linux/device.h b/include/linux/device.h
index b678bcca224c..ecbce2530eb7 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -499,6 +499,22 @@ struct device_physical_location {
bool lid;
};
+/**
+ * enum struct_device_flags - Flags in struct device
+ *
+ * Each flag should have a set of accessor functions created via
+ * __create_dev_flag_accessors() for each access.
+ *
+ * @DEV_FLAG_READY_TO_PROBE: If set then device_add() has finished enough
+ * initialization that probe could be called.
+ * @DEV_FLAG_COUNT: Number of defined struct_device_flags.
+ */
+enum struct_device_flags {
+ DEV_FLAG_READY_TO_PROBE = 0,
+
+ DEV_FLAG_COUNT
+};
+
/**
* struct device - The basic device structure
* @parent: The device's "parent" device, the device to which it is attached.
@@ -594,6 +610,7 @@ struct device_physical_location {
* @dma_skip_sync: DMA sync operations can be skipped for coherent buffers.
* @dma_iommu: Device is using default IOMMU implementation for DMA and
* doesn't rely on dma_ops structure.
+ * @flags: DEV_FLAG_XXX flags. Use atomic bitfield operations to modify.
*
* At the lowest level, every device in a Linux system is represented by an
* instance of struct device. The device structure contains the information
@@ -716,8 +733,36 @@ struct device {
#ifdef CONFIG_IOMMU_DMA
bool dma_iommu:1;
#endif
+
+ DECLARE_BITMAP(flags, DEV_FLAG_COUNT);
};
+#define __create_dev_flag_accessors(accessor_name, flag_name) \
+static inline bool dev_##accessor_name(const struct device *dev) \
+{ \
+ return test_bit(flag_name, dev->flags); \
+} \
+static inline void dev_set_##accessor_name(struct device *dev) \
+{ \
+ set_bit(flag_name, dev->flags); \
+} \
+static inline void dev_clear_##accessor_name(struct device *dev) \
+{ \
+ clear_bit(flag_name, dev->flags); \
+} \
+static inline void dev_assign_##accessor_name(struct device *dev, bool value) \
+{ \
+ assign_bit(flag_name, dev->flags, value); \
+} \
+static inline bool dev_test_and_set_##accessor_name(struct device *dev) \
+{ \
+ return test_and_set_bit(flag_name, dev->flags); \
+}
+
+__create_dev_flag_accessors(ready_to_probe, DEV_FLAG_READY_TO_PROBE);
+
+#undef __create_dev_flag_accessors
+
/**
* struct device_link - Device link representation.
* @supplier: The device on the supplier end of the link.
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 487d4bd9b0c9..1455e24ac29e 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -10,6 +10,7 @@
#define _LINUX_FWNODE_H_
#include <linux/bits.h>
+#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/types.h>
@@ -37,12 +38,12 @@ struct device;
* suppliers. Only enforce ordering with suppliers that have
* drivers.
*/
-#define FWNODE_FLAG_LINKS_ADDED BIT(0)
-#define FWNODE_FLAG_NOT_DEVICE BIT(1)
-#define FWNODE_FLAG_INITIALIZED BIT(2)
-#define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD BIT(3)
-#define FWNODE_FLAG_BEST_EFFORT BIT(4)
-#define FWNODE_FLAG_VISITED BIT(5)
+#define FWNODE_FLAG_LINKS_ADDED 0
+#define FWNODE_FLAG_NOT_DEVICE 1
+#define FWNODE_FLAG_INITIALIZED 2
+#define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD 3
+#define FWNODE_FLAG_BEST_EFFORT 4
+#define FWNODE_FLAG_VISITED 5
struct fwnode_handle {
struct fwnode_handle *secondary;
@@ -52,7 +53,7 @@ struct fwnode_handle {
struct device *dev;
struct list_head suppliers;
struct list_head consumers;
- u8 flags;
+ unsigned long flags;
};
/*
@@ -204,16 +205,37 @@ static inline void fwnode_init(struct fwnode_handle *fwnode,
INIT_LIST_HEAD(&fwnode->suppliers);
}
+static inline void fwnode_set_flag(struct fwnode_handle *fwnode,
+ unsigned int bit)
+{
+ set_bit(bit, &fwnode->flags);
+}
+
+static inline void fwnode_clear_flag(struct fwnode_handle *fwnode,
+ unsigned int bit)
+{
+ clear_bit(bit, &fwnode->flags);
+}
+
+static inline void fwnode_assign_flag(struct fwnode_handle *fwnode,
+ unsigned int bit, bool value)
+{
+ assign_bit(bit, &fwnode->flags, value);
+}
+
+static inline bool fwnode_test_flag(struct fwnode_handle *fwnode,
+ unsigned int bit)
+{
+ return test_bit(bit, &fwnode->flags);
+}
+
static inline void fwnode_dev_initialized(struct fwnode_handle *fwnode,
bool initialized)
{
if (IS_ERR_OR_NULL(fwnode))
return;
- if (initialized)
- fwnode->flags |= FWNODE_FLAG_INITIALIZED;
- else
- fwnode->flags &= ~FWNODE_FLAG_INITIALIZED;
+ fwnode_assign_flag(fwnode, FWNODE_FLAG_INITIALIZED, initialized);
}
int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup,
diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h
index 0660a03d37d9..846185ea626c 100644
--- a/include/linux/hugetlb_inline.h
+++ b/include/linux/hugetlb_inline.h
@@ -6,14 +6,14 @@
#include <linux/mm.h>
-static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
+static inline bool is_vm_hugetlb_page(const struct vm_area_struct *vma)
{
return !!(vma->vm_flags & VM_HUGETLB);
}
#else
-static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
+static inline bool is_vm_hugetlb_page(const struct vm_area_struct *vma)
{
return false;
}
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 0146daf34430..765f2778e264 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -90,8 +90,6 @@ struct padata_cpumask {
* @processed: Number of already processed objects.
* @cpu: Next CPU to be processed.
* @cpumask: The cpumasks in use for parallel and serial workers.
- * @reorder_work: work struct for reordering.
- * @lock: Reorder lock.
*/
struct parallel_data {
struct padata_shell *ps;
@@ -102,8 +100,6 @@ struct parallel_data {
unsigned int processed;
int cpu;
struct padata_cpumask cpumask;
- struct work_struct reorder_work;
- spinlock_t ____cacheline_aligned lock;
};
/**
diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h
index 1d982dbdd0d0..5d3916ca747c 100644
--- a/include/linux/randomize_kstack.h
+++ b/include/linux/randomize_kstack.h
@@ -9,7 +9,6 @@
DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
randomize_kstack_offset);
-DECLARE_PER_CPU(u32, kstack_offset);
/*
* Do not use this anywhere else in the kernel. This is used here because
@@ -50,15 +49,14 @@ DECLARE_PER_CPU(u32, kstack_offset);
* add_random_kstack_offset - Increase stack utilization by previously
* chosen random offset
*
- * This should be used in the syscall entry path when interrupts and
- * preempt are disabled, and after user registers have been stored to
- * the stack. For testing the resulting entropy, please see:
- * tools/testing/selftests/lkdtm/stack-entropy.sh
+ * This should be used in the syscall entry path after user registers have been
+ * stored to the stack. Preemption may be enabled. For testing the resulting
+ * entropy, please see: tools/testing/selftests/lkdtm/stack-entropy.sh
*/
#define add_random_kstack_offset() do { \
if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
&randomize_kstack_offset)) { \
- u32 offset = raw_cpu_read(kstack_offset); \
+ u32 offset = current->kstack_offset; \
u8 *ptr = __kstack_alloca(KSTACK_OFFSET_MAX(offset)); \
/* Keep allocation even after "ptr" loses scope. */ \
asm volatile("" :: "r"(ptr) : "memory"); \
@@ -69,9 +67,9 @@ DECLARE_PER_CPU(u32, kstack_offset);
* choose_random_kstack_offset - Choose the random offset for the next
* add_random_kstack_offset()
*
- * This should only be used during syscall exit when interrupts and
- * preempt are disabled. This position in the syscall flow is done to
- * frustrate attacks from userspace attempting to learn the next offset:
+ * This should only be used during syscall exit. Preemption may be enabled. This
+ * position in the syscall flow is done to frustrate attacks from userspace
+ * attempting to learn the next offset:
* - Maximize the timing uncertainty visible from userspace: if the
* offset is chosen at syscall entry, userspace has much more control
* over the timing between choosing offsets. "How long will we be in
@@ -85,14 +83,20 @@ DECLARE_PER_CPU(u32, kstack_offset);
#define choose_random_kstack_offset(rand) do { \
if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
&randomize_kstack_offset)) { \
- u32 offset = raw_cpu_read(kstack_offset); \
+ u32 offset = current->kstack_offset; \
offset = ror32(offset, 5) ^ (rand); \
- raw_cpu_write(kstack_offset, offset); \
+ current->kstack_offset = offset; \
} \
} while (0)
+
+static inline void random_kstack_task_init(struct task_struct *tsk)
+{
+ tsk->kstack_offset = 0;
+}
#else /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
#define add_random_kstack_offset() do { } while (0)
#define choose_random_kstack_offset(rand) do { } while (0)
+#define random_kstack_task_init(tsk) do { } while (0)
#endif /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index af143d3af85f..321e088f9ee7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1558,6 +1558,10 @@ struct task_struct {
unsigned long prev_lowest_stack;
#endif
+#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
+ u32 kstack_offset;
+#endif
+
#ifdef CONFIG_X86_MCE
void __user *mce_vaddr;
__u64 mce_kflags;
diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h
index 7d68a5cc5881..6e5be15029fb 100644
--- a/include/linux/tpm_eventlog.h
+++ b/include/linux/tpm_eventlog.h
@@ -131,11 +131,16 @@ struct tcg_algorithm_info {
};
#ifndef TPM_MEMREMAP
-#define TPM_MEMREMAP(start, size) NULL
+static inline void *TPM_MEMREMAP(unsigned long start, size_t size)
+{
+ return NULL;
+}
#endif
#ifndef TPM_MEMUNMAP
-#define TPM_MEMUNMAP(start, size) do{} while(0)
+static inline void TPM_MEMUNMAP(void *mapping, size_t size)
+{
+}
#endif
/**
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 2c0827cce489..049b012638be 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -53,7 +53,8 @@ struct ep_device;
* @ssp_isoc_ep_comp: SuperSpeedPlus isoc companion descriptor for this endpoint
* @urb_list: urbs queued to this endpoint; maintained by usbcore
* @hcpriv: for use by HCD; typically holds hardware dma queue head (QH)
- * with one or more transfer descriptors (TDs) per urb
+ * with one or more transfer descriptors (TDs) per urb; must be preserved
+ * by core while BW is allocated for the endpoint
* @ep_dev: ep_device for sysfs info
* @extra: descriptors following this endpoint in the configuration
* @extralen: how many bytes of "extra" are valid
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index ac9a4b0bd49b..ab10b0cc1dc2 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -473,6 +473,7 @@ struct mana_port_context {
netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
bool update_hash, bool update_tab);
+int mana_disable_vport_rx(struct mana_port_context *apc);
int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
diff --git a/include/net/mctp.h b/include/net/mctp.h
index 28d59ae94ca3..5819f2ef7718 100644
--- a/include/net/mctp.h
+++ b/include/net/mctp.h
@@ -26,6 +26,9 @@ struct mctp_hdr {
#define MCTP_VER_MIN 1
#define MCTP_VER_MAX 1
+/* Definitions for ver field */
+#define MCTP_HDR_VER_MASK GENMASK(3, 0)
+
/* Definitions for flags_seq_tag field */
#define MCTP_HDR_FLAG_SOM BIT(7)
#define MCTP_HDR_FLAG_EOM BIT(6)
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 3eb806f7bc6a..9377acad0c5f 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -36,6 +36,7 @@
EM(rxkad_abort_1_short_encdata, "rxkad1-short-encdata") \
EM(rxkad_abort_1_short_header, "rxkad1-short-hdr") \
EM(rxkad_abort_2_short_check, "rxkad2-short-check") \
+ EM(rxkad_abort_2_crypto_unaligned, "rxkad2-crypto-unaligned") \
EM(rxkad_abort_2_short_data, "rxkad2-short-data") \
EM(rxkad_abort_2_short_header, "rxkad2-short-hdr") \
EM(rxkad_abort_2_short_len, "rxkad2-short-len") \
@@ -126,8 +127,7 @@
E_(rxrpc_call_poke_timer_now, "Timer-now")
#define rxrpc_skb_traces \
- EM(rxrpc_skb_eaten_by_unshare, "ETN unshare ") \
- EM(rxrpc_skb_eaten_by_unshare_nomem, "ETN unshar-nm") \
+ EM(rxrpc_skb_get_call_rx, "GET call-rx ") \
EM(rxrpc_skb_get_conn_secured, "GET conn-secd") \
EM(rxrpc_skb_get_conn_work, "GET conn-work") \
EM(rxrpc_skb_get_last_nack, "GET last-nack") \
@@ -152,6 +152,7 @@
EM(rxrpc_skb_see_recvmsg, "SEE recvmsg ") \
EM(rxrpc_skb_see_reject, "SEE reject ") \
EM(rxrpc_skb_see_rotate, "SEE rotate ") \
+ EM(rxrpc_skb_see_unshare_nomem, "SEE unshar-nm") \
E_(rxrpc_skb_see_version, "SEE version ")
#define rxrpc_local_traces \
@@ -235,7 +236,6 @@
EM(rxrpc_conn_put_unidle, "PUT unidle ") \
EM(rxrpc_conn_put_work, "PUT work ") \
EM(rxrpc_conn_queue_challenge, "QUE chall ") \
- EM(rxrpc_conn_queue_retry_work, "QUE retry-wk") \
EM(rxrpc_conn_queue_rx_work, "QUE rx-work ") \
EM(rxrpc_conn_see_new_service_conn, "SEE new-svc ") \
EM(rxrpc_conn_see_reap_service, "SEE reap-svc") \
diff --git a/init/main.c b/init/main.c
index 821df1f05e9c..dca88ac54c43 100644
--- a/init/main.c
+++ b/init/main.c
@@ -835,7 +835,6 @@ static inline void initcall_debug_enable(void)
#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
DEFINE_STATIC_KEY_MAYBE_RO(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
randomize_kstack_offset);
-DEFINE_PER_CPU(u32, kstack_offset);
static int __init early_randomize_kstack_offset(char *buf)
{
diff --git a/io_uring/poll.c b/io_uring/poll.c
index ec5058f0813a..002f1ae830b8 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -93,7 +93,7 @@ static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
*/
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
- if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
+ if (unlikely((unsigned int)atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
return io_poll_get_ownership_slowpath(req);
return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}
@@ -295,6 +295,7 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
v &= ~IO_POLL_RETRY_FLAG;
}
+ v &= IO_POLL_REF_MASK;
}
/* the mask was stashed in __io_poll_execute */
@@ -327,7 +328,12 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
return IOU_POLL_REMOVE_POLL_USE_RES;
}
} else {
- int ret = io_poll_issue(req, ts);
+ int ret;
+
+ /* multiple refs and HUP, ensure we loop once more */
+ if ((req->cqe.res & (POLLHUP | POLLRDHUP)) && v != 1)
+ v--;
+ ret = io_poll_issue(req, ts);
if (ret == IOU_STOP_MULTISHOT)
return IOU_POLL_REMOVE_POLL_USE_RES;
else if (ret == IOU_REQUEUE)
@@ -343,7 +349,6 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
* Release all references, retry if someone tried to restart
* task_work while we were executing it.
*/
- v &= IO_POLL_REF_MASK;
} while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK);
io_napi_add(req);
@@ -446,8 +451,10 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
* disable multishot as there is a circular dependency between
* CQ posting and triggering the event.
*/
- if (mask & EPOLL_URING_WAKE)
+ if (mask & EPOLL_URING_WAKE) {
poll->events |= EPOLLONESHOT;
+ req->apoll_events |= EPOLLONESHOT;
+ }
/* optional, saves extra locking for removal in tw handler */
if (mask && poll->events & EPOLLONESHOT) {
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index b215b2fbddd0..9cc5776103fe 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -424,6 +424,8 @@ int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
return -EINVAL;
+ if (sqe->addr3 || sqe->__pad2[0])
+ return -EINVAL;
if (sqe->buf_index || sqe->len || sqe->splice_fd_in)
return -EINVAL;
@@ -496,6 +498,8 @@ static int __io_timeout_prep(struct io_kiocb *req,
unsigned flags;
u32 off = READ_ONCE(sqe->off);
+ if (sqe->addr3 || sqe->__pad2[0])
+ return -EINVAL;
if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in)
return -EINVAL;
if (off && is_timeout_link)
diff --git a/kernel/fork.c b/kernel/fork.c
index a01cf3a904bf..29532a57e0cd 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -96,6 +96,7 @@
#include <linux/thread_info.h>
#include <linux/stackleak.h>
#include <linux/kasan.h>
+#include <linux/randomize_kstack.h>
#include <linux/scs.h>
#include <linux/io_uring.h>
#include <linux/bpf.h>
@@ -2419,6 +2420,7 @@ __latent_entropy struct task_struct *copy_process(
if (retval)
goto bad_fork_cleanup_io;
+ random_kstack_task_init(p);
stackleak_task_init(p);
if (pid != &init_struct_pid) {
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index ebebd0eec7f6..66f9fb25d4be 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1531,20 +1531,23 @@ static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
*
* Must be called with lock->wait_lock held and interrupts disabled. It must
* have just failed to try_to_take_rt_mutex().
+ *
+ * When invoked from rt_mutex_start_proxy_lock() waiter::task != current !
*/
static void __sched remove_waiter(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter)
{
bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
struct task_struct *owner = rt_mutex_owner(lock);
+ struct task_struct *waiter_task = waiter->task;
struct rt_mutex_base *next_lock;
lockdep_assert_held(&lock->wait_lock);
- raw_spin_lock(¤t->pi_lock);
- rt_mutex_dequeue(lock, waiter);
- current->pi_blocked_on = NULL;
- raw_spin_unlock(¤t->pi_lock);
+ scoped_guard(raw_spinlock, &waiter_task->pi_lock) {
+ rt_mutex_dequeue(lock, waiter);
+ waiter_task->pi_blocked_on = NULL;
+ }
/*
* Only update priority if the waiter was the highest priority
@@ -1580,7 +1583,7 @@ static void __sched remove_waiter(struct rt_mutex_base *lock,
raw_spin_unlock_irq(&lock->wait_lock);
rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
- next_lock, NULL, current);
+ next_lock, NULL, waiter_task);
raw_spin_lock_irq(&lock->wait_lock);
}
diff --git a/kernel/padata.c b/kernel/padata.c
index c3810f5bd715..e61bdc248551 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -261,20 +261,17 @@ EXPORT_SYMBOL(padata_do_parallel);
* be parallel processed by another cpu and is not yet present in
* the cpu's reorder queue.
*/
-static struct padata_priv *padata_find_next(struct parallel_data *pd,
- bool remove_object)
+static struct padata_priv *padata_find_next(struct parallel_data *pd, int cpu,
+ unsigned int processed)
{
struct padata_priv *padata;
struct padata_list *reorder;
- int cpu = pd->cpu;
reorder = per_cpu_ptr(pd->reorder_list, cpu);
spin_lock(&reorder->lock);
- if (list_empty(&reorder->list)) {
- spin_unlock(&reorder->lock);
- return NULL;
- }
+ if (list_empty(&reorder->list))
+ goto notfound;
padata = list_entry(reorder->list.next, struct padata_priv, list);
@@ -282,101 +279,52 @@ static struct padata_priv *padata_find_next(struct parallel_data *pd,
* Checks the rare case where two or more parallel jobs have hashed to
* the same CPU and one of the later ones finishes first.
*/
- if (padata->seq_nr != pd->processed) {
- spin_unlock(&reorder->lock);
- return NULL;
- }
-
- if (remove_object) {
- list_del_init(&padata->list);
- ++pd->processed;
- /* When sequence wraps around, reset to the first CPU. */
- if (unlikely(pd->processed == 0))
- pd->cpu = cpumask_first(pd->cpumask.pcpu);
- else
- pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
- }
+ if (padata->seq_nr != processed)
+ goto notfound;
+ list_del_init(&padata->list);
spin_unlock(&reorder->lock);
return padata;
+
+notfound:
+ pd->processed = processed;
+ pd->cpu = cpu;
+ spin_unlock(&reorder->lock);
+ return NULL;
}
-static void padata_reorder(struct parallel_data *pd)
+static void padata_reorder(struct padata_priv *padata)
{
+ struct parallel_data *pd = padata->pd;
struct padata_instance *pinst = pd->ps->pinst;
- int cb_cpu;
- struct padata_priv *padata;
- struct padata_serial_queue *squeue;
- struct padata_list *reorder;
+ unsigned int processed;
+ int cpu;
- /*
- * We need to ensure that only one cpu can work on dequeueing of
- * the reorder queue the time. Calculating in which percpu reorder
- * queue the next object will arrive takes some time. A spinlock
- * would be highly contended. Also it is not clear in which order
- * the objects arrive to the reorder queues. So a cpu could wait to
- * get the lock just to notice that there is nothing to do at the
- * moment. Therefore we use a trylock and let the holder of the lock
- * care for all the objects enqueued during the holdtime of the lock.
- */
- if (!spin_trylock_bh(&pd->lock))
- return;
+ processed = pd->processed;
+ cpu = pd->cpu;
- while (1) {
- padata = padata_find_next(pd, true);
+ do {
+ struct padata_serial_queue *squeue;
+ int cb_cpu;
- /*
- * If the next object that needs serialization is parallel
- * processed by another cpu and is still on it's way to the
- * cpu's reorder queue, nothing to do for now.
- */
- if (!padata)
- break;
+ cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
+ processed++;
cb_cpu = padata->cb_cpu;
squeue = per_cpu_ptr(pd->squeue, cb_cpu);
spin_lock(&squeue->serial.lock);
list_add_tail(&padata->list, &squeue->serial.list);
- spin_unlock(&squeue->serial.lock);
-
queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
- }
- spin_unlock_bh(&pd->lock);
-
- /*
- * The next object that needs serialization might have arrived to
- * the reorder queues in the meantime.
- *
- * Ensure reorder queue is read after pd->lock is dropped so we see
- * new objects from another task in padata_do_serial. Pairs with
- * smp_mb in padata_do_serial.
- */
- smp_mb();
-
- reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
- if (!list_empty(&reorder->list) && padata_find_next(pd, false)) {
/*
- * Other context(eg. the padata_serial_worker) can finish the request.
- * To avoid UAF issue, add pd ref here, and put pd ref after reorder_work finish.
+ * If the next object that needs serialization is parallel
+ * processed by another cpu and is still on it's way to the
+ * cpu's reorder queue, end the loop.
*/
- padata_get_pd(pd);
- if (!queue_work(pinst->serial_wq, &pd->reorder_work))
- padata_put_pd(pd);
- }
-}
-
-static void invoke_padata_reorder(struct work_struct *work)
-{
- struct parallel_data *pd;
-
- local_bh_disable();
- pd = container_of(work, struct parallel_data, reorder_work);
- padata_reorder(pd);
- local_bh_enable();
- /* Pairs with putting the reorder_work in the serial_wq */
- padata_put_pd(pd);
+ padata = padata_find_next(pd, cpu, processed);
+ spin_unlock(&squeue->serial.lock);
+ } while (padata);
}
static void padata_serial_worker(struct work_struct *serial_work)
@@ -427,6 +375,7 @@ void padata_do_serial(struct padata_priv *padata)
struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
struct padata_priv *cur;
struct list_head *pos;
+ bool gotit = true;
spin_lock(&reorder->lock);
/* Sort in ascending order of sequence number. */
@@ -436,17 +385,14 @@ void padata_do_serial(struct padata_priv *padata)
if ((signed int)(cur->seq_nr - padata->seq_nr) < 0)
break;
}
- list_add(&padata->list, pos);
+ if (padata->seq_nr != pd->processed) {
+ gotit = false;
+ list_add(&padata->list, pos);
+ }
spin_unlock(&reorder->lock);
- /*
- * Ensure the addition to the reorder list is ordered correctly
- * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
- * in padata_reorder.
- */
- smp_mb();
-
- padata_reorder(pd);
+ if (gotit)
+ padata_reorder(padata);
}
EXPORT_SYMBOL(padata_do_serial);
@@ -643,9 +589,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
padata_init_squeues(pd);
pd->seq_nr = -1;
refcount_set(&pd->refcnt, 1);
- spin_lock_init(&pd->lock);
pd->cpu = cpumask_first(pd->cpumask.pcpu);
- INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
return pd;
@@ -1155,12 +1099,6 @@ void padata_free_shell(struct padata_shell *ps)
if (!ps)
return;
- /*
- * Wait for all _do_serial calls to finish to avoid touching
- * freed pd's and ps's.
- */
- synchronize_rcu();
-
mutex_lock(&ps->pinst->lock);
list_del(&ps->list);
pd = rcu_dereference_protected(ps->pd, 1);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b1895b330ff0..df76b32a013f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4794,7 +4794,7 @@ void sched_post_fork(struct task_struct *p)
scx_post_fork(p);
}
-unsigned long to_ratio(u64 period, u64 runtime)
+u64 to_ratio(u64 period, u64 runtime)
{
if (runtime == RUNTIME_INF)
return BW_UNIT;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ffcce501ed40..fb6edf97e60c 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2707,7 +2707,7 @@ static int tg_rt_schedulable(struct task_group *tg, void *data)
{
struct rt_schedulable_data *d = data;
struct task_group *child;
- unsigned long total, sum = 0;
+ u64 total, sum = 0;
u64 period, runtime;
period = ktime_to_ns(tg->rt_bandwidth.rt_period);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 62f90dcb10a1..a09e2d25edd5 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2698,7 +2698,7 @@ extern void init_dl_entity(struct sched_dl_entity *dl_se);
#define MAX_BW_BITS (64 - BW_SHIFT)
#define MAX_BW ((1ULL << MAX_BW_BITS) - 1)
-extern unsigned long to_ratio(u64 period, u64 runtime);
+extern u64 to_ratio(u64 period, u64 runtime);
extern void init_entity_runnable_average(struct sched_entity *se);
extern void post_init_entity_util_avg(struct task_struct *p);
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 0700f40c53ac..f0702a21a15f 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -655,6 +655,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead)
goto err;
memcpy(stats, tsk->signal->stats, sizeof(*stats));
+ stats->version = TASKSTATS_VERSION;
send:
send_cpu_listeners(rep_skb, listeners);
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 056f2e411d7b..850d23469ef7 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -183,11 +183,60 @@ static int dmirror_fops_open(struct inode *inode, struct file *filp)
return 0;
}
+static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
+{
+ unsigned long start_pfn = chunk->pagemap.range.start >> PAGE_SHIFT;
+ unsigned long end_pfn = chunk->pagemap.range.end >> PAGE_SHIFT;
+ unsigned long npages = end_pfn - start_pfn + 1;
+ unsigned long i;
+ unsigned long *src_pfns;
+ unsigned long *dst_pfns;
+
+ src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
+ dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
+
+ migrate_device_range(src_pfns, start_pfn, npages);
+ for (i = 0; i < npages; i++) {
+ struct page *dpage, *spage;
+
+ spage = migrate_pfn_to_page(src_pfns[i]);
+ if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ if (WARN_ON(!is_device_private_page(spage) &&
+ !is_device_coherent_page(spage)))
+ continue;
+ spage = BACKING_PAGE(spage);
+ dpage = alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_NOFAIL);
+ lock_page(dpage);
+ copy_highpage(dpage, spage);
+ dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
+ if (src_pfns[i] & MIGRATE_PFN_WRITE)
+ dst_pfns[i] |= MIGRATE_PFN_WRITE;
+ }
+ migrate_device_pages(src_pfns, dst_pfns, npages);
+ migrate_device_finalize(src_pfns, dst_pfns, npages);
+ kvfree(src_pfns);
+ kvfree(dst_pfns);
+}
+
static int dmirror_fops_release(struct inode *inode, struct file *filp)
{
struct dmirror *dmirror = filp->private_data;
+ struct dmirror_device *mdevice = dmirror->mdevice;
+ int i;
mmu_interval_notifier_remove(&dmirror->notifier);
+
+ if (mdevice->devmem_chunks) {
+ for (i = 0; i < mdevice->devmem_count; i++) {
+ struct dmirror_chunk *devmem =
+ mdevice->devmem_chunks[i];
+
+ dmirror_device_evict_chunk(devmem);
+ }
+ }
+
xa_destroy(&dmirror->pt);
kfree(dmirror);
return 0;
@@ -1214,43 +1263,6 @@ static int dmirror_snapshot(struct dmirror *dmirror,
return ret;
}
-static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
-{
- unsigned long start_pfn = chunk->pagemap.range.start >> PAGE_SHIFT;
- unsigned long end_pfn = chunk->pagemap.range.end >> PAGE_SHIFT;
- unsigned long npages = end_pfn - start_pfn + 1;
- unsigned long i;
- unsigned long *src_pfns;
- unsigned long *dst_pfns;
-
- src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
- dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
-
- migrate_device_range(src_pfns, start_pfn, npages);
- for (i = 0; i < npages; i++) {
- struct page *dpage, *spage;
-
- spage = migrate_pfn_to_page(src_pfns[i]);
- if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
- continue;
-
- if (WARN_ON(!is_device_private_page(spage) &&
- !is_device_coherent_page(spage)))
- continue;
- spage = BACKING_PAGE(spage);
- dpage = alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_NOFAIL);
- lock_page(dpage);
- copy_highpage(dpage, spage);
- dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
- if (src_pfns[i] & MIGRATE_PFN_WRITE)
- dst_pfns[i] |= MIGRATE_PFN_WRITE;
- }
- migrate_device_pages(src_pfns, dst_pfns, npages);
- migrate_device_finalize(src_pfns, dst_pfns, npages);
- kvfree(src_pfns);
- kvfree(dst_pfns);
-}
-
/* Removes free pages from the free list so they can't be re-allocated */
static void dmirror_remove_free_pages(struct dmirror_chunk *devmem)
{
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
index 5520dc28255a..29466c1803c9 100644
--- a/lib/ts_kmp.c
+++ b/lib/ts_kmp.c
@@ -94,8 +94,22 @@ static struct ts_config *kmp_init(const void *pattern, unsigned int len,
struct ts_config *conf;
struct ts_kmp *kmp;
int i;
- unsigned int prefix_tbl_len = len * sizeof(unsigned int);
- size_t priv_size = sizeof(*kmp) + len + prefix_tbl_len;
+ unsigned int prefix_tbl_len;
+ size_t priv_size;
+
+ /* Zero-length patterns would make kmp_find() read beyond kmp->pattern. */
+ if (unlikely(!len))
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * kmp->pattern is stored immediately after the prefix_tbl[] table.
+ * Reject lengths that would wrap while sizing either region.
+ */
+ if (unlikely(check_mul_overflow(len, sizeof(*kmp->prefix_tbl),
+ &prefix_tbl_len) ||
+ check_add_overflow(sizeof(*kmp), (size_t)len, &priv_size) ||
+ check_add_overflow(priv_size, prefix_tbl_len, &priv_size)))
+ return ERR_PTR(-EINVAL);
conf = alloc_ts_config(priv_size, gfp_mask);
if (IS_ERR(conf))
diff --git a/mm/damon/core.c b/mm/damon/core.c
index ed2b75023181..d81748460861 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -1601,7 +1601,8 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
quota->charged_from = jiffies;
/* New charge window starts */
- if (time_after_eq(jiffies, quota->charged_from +
+ if (!time_in_range_open(jiffies, quota->charged_from,
+ quota->charged_from +
msecs_to_jiffies(quota->reset_interval))) {
if (quota->esz && quota->charged_sz >= quota->esz)
s->stat.qt_exceeds++;
diff --git a/mm/internal.h b/mm/internal.h
index b7b942767c70..3bfc1dc2d7ea 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1015,6 +1015,16 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
}
return fpin;
}
+
+static inline bool vma_supports_mlock(const struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & (VM_SPECIAL | VM_DROPPABLE))
+ return false;
+ if (vma_is_dax(vma) || is_vm_hugetlb_page(vma))
+ return false;
+ return vma != get_gate_vma(current->mm);
+}
+
#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void mlock_new_folio(struct folio *folio) { }
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0a42e9a8caba..16d788547b9b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1802,8 +1802,14 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
goto put_folio;
if (folio_contain_hwpoisoned_page(folio)) {
- if (WARN_ON(folio_test_lru(folio)))
- folio_isolate_lru(folio);
+ /*
+ * unmap_poisoned_folio() cannot handle large folios
+ * in all cases yet.
+ */
+ if (folio_test_large(folio) && !folio_test_hugetlb(folio))
+ goto put_folio;
+ if (folio_test_lru(folio) && !folio_isolate_lru(folio))
+ goto put_folio;
if (folio_mapped(folio)) {
folio_lock(folio);
unmap_poisoned_folio(folio, pfn, false);
diff --git a/mm/migrate.c b/mm/migrate.c
index 6247317d6600..f3d1dc8d72b7 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -130,6 +130,47 @@ static void putback_movable_folio(struct folio *folio)
folio_clear_isolated(folio);
}
+/**
+ * migrate_movable_ops_page - migrate an isolated movable_ops page
+ * @dst: The destination page.
+ * @src: The source page.
+ * @mode: The migration mode.
+ *
+ * Migrate an isolated movable_ops page.
+ *
+ * If the src page was already released by its owner, the src page is
+ * un-isolated (putback) and migration succeeds; the migration core will be the
+ * owner of both pages.
+ *
+ * If the src page was not released by its owner and the migration was
+ * successful, the owner of the src page and the dst page are swapped and
+ * the src page is un-isolated.
+ *
+ * If migration fails, the ownership stays unmodified and the src page
+ * remains isolated: migration may be retried later or the page can be putback.
+ *
+ * TODO: migration core will treat both pages as folios and lock them before
+ * this call to unlock them after this call. Further, the folio refcounts on
+ * src and dst are also released by migration core. These pages will not be
+ * folios in the future, so that must be reworked.
+ *
+ * Returns MIGRATEPAGE_SUCCESS on success, otherwise a negative error
+ * code.
+ */
+static int migrate_movable_ops_page(struct page *dst, struct page *src,
+ enum migrate_mode mode)
+{
+ int rc = MIGRATEPAGE_SUCCESS;
+
+ VM_WARN_ON_ONCE_PAGE(!PageIsolated(src), src);
+	/* If the page was released by its owner, there is nothing to do. */
+ if (PageMovable(src))
+ rc = page_movable_ops(src)->migrate_page(dst, src, mode);
+ if (rc == MIGRATEPAGE_SUCCESS)
+ ClearPageIsolated(src);
+ return rc;
+}
+
/*
* Put previously isolated pages back onto the appropriate lists
* from where they were once taken off for compaction/migration.
@@ -1006,11 +1047,12 @@ static int fallback_migrate_folio(struct address_space *mapping,
}
/*
- * Move a page to a newly allocated page
- * The page is locked and all ptes have been successfully removed.
+ * Move a src folio to a newly allocated dst folio.
+ *
+ * The src and dst folios are locked and the src folio was unmapped from
+ * the page tables.
*
- * The new page will have replaced the old page if this function
- * is successful.
+ * On success, the src folio was replaced by the dst folio.
*
* Return value:
* < 0 - error code
@@ -1019,78 +1061,40 @@ static int fallback_migrate_folio(struct address_space *mapping,
static int move_to_new_folio(struct folio *dst, struct folio *src,
enum migrate_mode mode)
{
+ struct address_space *mapping = folio_mapping(src);
int rc = -EAGAIN;
- bool is_lru = !__folio_test_movable(src);
VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
- if (likely(is_lru)) {
- struct address_space *mapping = folio_mapping(src);
-
- if (!mapping)
- rc = migrate_folio(mapping, dst, src, mode);
- else if (mapping_inaccessible(mapping))
- rc = -EOPNOTSUPP;
- else if (mapping->a_ops->migrate_folio)
- /*
- * Most folios have a mapping and most filesystems
- * provide a migrate_folio callback. Anonymous folios
- * are part of swap space which also has its own
- * migrate_folio callback. This is the most common path
- * for page migration.
- */
- rc = mapping->a_ops->migrate_folio(mapping, dst, src,
- mode);
- else
- rc = fallback_migrate_folio(mapping, dst, src, mode);
- } else {
- const struct movable_operations *mops;
-
+ if (!mapping)
+ rc = migrate_folio(mapping, dst, src, mode);
+ else if (mapping_inaccessible(mapping))
+ rc = -EOPNOTSUPP;
+ else if (mapping->a_ops->migrate_folio)
/*
- * In case of non-lru page, it could be released after
- * isolation step. In that case, we shouldn't try migration.
+ * Most folios have a mapping and most filesystems
+ * provide a migrate_folio callback. Anonymous folios
+ * are part of swap space which also has its own
+ * migrate_folio callback. This is the most common path
+ * for page migration.
*/
- VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
- if (!folio_test_movable(src)) {
- rc = MIGRATEPAGE_SUCCESS;
- folio_clear_isolated(src);
- goto out;
- }
-
- mops = folio_movable_ops(src);
- rc = mops->migrate_page(&dst->page, &src->page, mode);
- WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
- !folio_test_isolated(src));
- }
+ rc = mapping->a_ops->migrate_folio(mapping, dst, src,
+ mode);
+ else
+ rc = fallback_migrate_folio(mapping, dst, src, mode);
- /*
- * When successful, old pagecache src->mapping must be cleared before
- * src is freed; but stats require that PageAnon be left as PageAnon.
- */
if (rc == MIGRATEPAGE_SUCCESS) {
- if (__folio_test_movable(src)) {
- VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
-
- /*
- * We clear PG_movable under page_lock so any compactor
- * cannot try to migrate this page.
- */
- folio_clear_isolated(src);
- }
-
/*
- * Anonymous and movable src->mapping will be cleared by
- * free_pages_prepare so don't reset it here for keeping
- * the type to work PageAnon, for example.
+ * For pagecache folios, src->mapping must be cleared before src
+ * is freed. Anonymous folios must stay anonymous until freed.
*/
- if (!folio_mapping_flags(src))
+ if (!folio_test_anon(src))
src->mapping = NULL;
if (likely(!folio_is_zone_device(dst)))
flush_dcache_folio(dst);
}
-out:
return rc;
}
@@ -1341,20 +1345,31 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
int rc;
int old_page_state = 0;
struct anon_vma *anon_vma = NULL;
- bool is_lru = !__folio_test_movable(src);
+ bool src_deferred_split = false;
+ bool src_partially_mapped = false;
struct list_head *prev;
__migrate_folio_extract(dst, &old_page_state, &anon_vma);
prev = dst->lru.prev;
list_del(&dst->lru);
+ if (unlikely(__folio_test_movable(src))) {
+ rc = migrate_movable_ops_page(&dst->page, &src->page, mode);
+ if (rc)
+ goto out;
+ goto out_unlock_both;
+ }
+
+ if (folio_order(src) > 1 &&
+ !data_race(list_empty(&src->_deferred_list))) {
+ src_deferred_split = true;
+ src_partially_mapped = folio_test_partially_mapped(src);
+ }
+
rc = move_to_new_folio(dst, src, mode);
if (rc)
goto out;
- if (unlikely(!is_lru))
- goto out_unlock_both;
-
/*
* When successful, push dst to LRU immediately: so that if it
* turns out to be an mlocked page, remove_migration_ptes() will
@@ -1371,6 +1386,15 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
if (old_page_state & PAGE_WAS_MAPPED)
remove_migration_ptes(src, dst, 0);
+ /*
+ * Requeue the destination folio on the deferred split queue if
+ * the source was on the queue. The source is unqueued in
+ * __folio_migrate_mapping(), so we recorded the state from
+ * before move_to_new_folio().
+ */
+ if (src_deferred_split)
+ deferred_split_folio(dst, src_partially_mapped);
+
out_unlock_both:
folio_unlock(dst);
set_page_owner_migrate_reason(&dst->page, reason);
diff --git a/mm/mlock.c b/mm/mlock.c
index 8c8d522efdd5..d16bf4dbd06d 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -472,10 +472,12 @@ static int mlock_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma,
int ret = 0;
vm_flags_t oldflags = vma->vm_flags;
- if (newflags == oldflags || (oldflags & VM_SPECIAL) ||
- is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
- vma_is_dax(vma) || vma_is_secretmem(vma) || (oldflags & VM_DROPPABLE))
- /* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
+ if (newflags == oldflags || vma_is_secretmem(vma) ||
+ !vma_supports_mlock(vma))
+ /*
+ * Don't set VM_LOCKED or VM_LOCKONFAULT and don't count.
+ * For secretmem, don't allow the memory to be unlocked.
+ */
goto out;
vma = vma_modify_flags(vmi, *prev, vma, start, end, newflags);
diff --git a/mm/mmap.c b/mm/mmap.c
index 6183805f6f9e..d361b1058da1 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1547,9 +1547,7 @@ static unsigned long __mmap_region(struct file *file, unsigned long addr,
vm_stat_account(mm, vm_flags, pglen);
if (vm_flags & VM_LOCKED) {
- if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
- is_vm_hugetlb_page(vma) ||
- vma == get_gate_vma(current->mm))
+ if (!vma_supports_mlock(vma))
vm_flags_clear(vma, VM_LOCKED_MASK);
else
mm->locked_vm += pglen;
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index e4326af00e5e..76adbce8d42b 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1808,6 +1808,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
*/
d_addr = kmap_atomic(newpage);
copy_page(d_addr, s_addr);
+ kmsan_copy_page_meta(newpage, page);
kunmap_atomic(d_addr);
for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 0dd021c881cc..7246a26723d0 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -5412,9 +5412,11 @@ static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "");
+ hci_dev_lock(hdev);
+
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (!conn)
- return;
+ goto unlock;
conn->passkey_notify = __le32_to_cpu(ev->passkey);
conn->passkey_entered = 0;
@@ -5423,6 +5425,9 @@ static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
conn->dst_type, conn->passkey_notify,
conn->passkey_entered);
+
+unlock:
+ hci_dev_unlock(hdev);
}
static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
@@ -5433,14 +5438,16 @@ static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "");
+ hci_dev_lock(hdev);
+
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (!conn)
- return;
+ goto unlock;
switch (ev->type) {
case HCI_KEYPRESS_STARTED:
conn->passkey_entered = 0;
- return;
+ goto unlock;
case HCI_KEYPRESS_ENTERED:
conn->passkey_entered++;
@@ -5455,13 +5462,16 @@ static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
break;
case HCI_KEYPRESS_COMPLETED:
- return;
+ goto unlock;
}
if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
conn->dst_type, conn->passkey_notify,
conn->passkey_entered);
+
+unlock:
+ hci_dev_unlock(hdev);
}
static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
diff --git a/net/bridge/br_arp_nd_proxy.c b/net/bridge/br_arp_nd_proxy.c
index f033a5167560..985aaf7ff156 100644
--- a/net/bridge/br_arp_nd_proxy.c
+++ b/net/bridge/br_arp_nd_proxy.c
@@ -199,11 +199,12 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
f = br_fdb_find_rcu(br, n->ha, vid);
if (f) {
+ const struct net_bridge_port *dst = READ_ONCE(f->dst);
bool replied = false;
if ((p && (p->flags & BR_PROXYARP)) ||
- (f->dst && (f->dst->flags & BR_PROXYARP_WIFI)) ||
- br_is_neigh_suppress_enabled(f->dst, vid)) {
+ (dst && (dst->flags & BR_PROXYARP_WIFI)) ||
+ br_is_neigh_suppress_enabled(dst, vid)) {
if (!vid)
br_arp_send(br, p, skb->dev, sip, tip,
sha, n->ha, sha, 0, 0);
@@ -463,9 +464,10 @@ void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br,
f = br_fdb_find_rcu(br, n->ha, vid);
if (f) {
+ const struct net_bridge_port *dst = READ_ONCE(f->dst);
bool replied = false;
- if (br_is_neigh_suppress_enabled(f->dst, vid)) {
+ if (br_is_neigh_suppress_enabled(dst, vid)) {
if (vid != 0)
br_nd_send(br, p, skb, n,
skb->vlan_proto,
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 9dd405b64fcc..39cc761012d4 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -243,6 +243,7 @@ struct net_device *br_fdb_find_port(const struct net_device *br_dev,
const unsigned char *addr,
__u16 vid)
{
+ const struct net_bridge_port *dst;
struct net_bridge_fdb_entry *f;
struct net_device *dev = NULL;
struct net_bridge *br;
@@ -255,8 +256,11 @@ struct net_device *br_fdb_find_port(const struct net_device *br_dev,
br = netdev_priv(br_dev);
rcu_read_lock();
f = br_fdb_find_rcu(br, addr, vid);
- if (f && f->dst)
- dev = f->dst->dev;
+ if (f) {
+ dst = READ_ONCE(f->dst);
+ if (dst)
+ dev = dst->dev;
+ }
rcu_read_unlock();
return dev;
@@ -353,7 +357,7 @@ static void fdb_delete_local(struct net_bridge *br,
vg = nbp_vlan_group(op);
if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
(!vid || br_vlan_find(vg, vid))) {
- f->dst = op;
+ WRITE_ONCE(f->dst, op);
clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
return;
}
@@ -364,7 +368,7 @@ static void fdb_delete_local(struct net_bridge *br,
/* Maybe bridge device has same hw addr? */
if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
(!vid || (v && br_vlan_should_use(v)))) {
- f->dst = NULL;
+ WRITE_ONCE(f->dst, NULL);
clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
return;
}
@@ -827,6 +831,7 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
int br_fdb_fillbuf(struct net_bridge *br, void *buf,
unsigned long maxnum, unsigned long skip)
{
+ const struct net_bridge_port *dst;
struct net_bridge_fdb_entry *f;
struct __fdb_entry *fe = buf;
unsigned long delta;
@@ -843,7 +848,8 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
continue;
/* ignore pseudo entry for local MAC address */
- if (!f->dst)
+ dst = READ_ONCE(f->dst);
+ if (!dst)
continue;
if (skip) {
@@ -855,8 +861,8 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN);
/* due to ABI compat need to split into hi/lo */
- fe->port_no = f->dst->port_no;
- fe->port_hi = f->dst->port_no >> 8;
+ fe->port_no = dst->port_no;
+ fe->port_hi = dst->port_no >> 8;
fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
if (!test_bit(BR_FDB_STATIC, &f->flags)) {
@@ -981,9 +987,11 @@ int br_fdb_dump(struct sk_buff *skb,
rcu_read_lock();
hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+ const struct net_bridge_port *dst = READ_ONCE(f->dst);
+
if (*idx < cb->args[2])
goto skip;
- if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) {
+ if (filter_dev && (!dst || dst->dev != filter_dev)) {
if (filter_dev != dev)
goto skip;
/* !f->dst is a special case for bridge
@@ -991,10 +999,10 @@ int br_fdb_dump(struct sk_buff *skb,
* Therefore need a little more filtering
* we only want to dump the !f->dst case
*/
- if (f->dst)
+ if (dst)
goto skip;
}
- if (!filter_dev && f->dst)
+ if (!filter_dev && dst)
goto skip;
err = fdb_fill_info(skb, br, f,
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index 9cef9496a707..9a474d99bae8 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -197,10 +197,20 @@ bool cfsrvl_phyid_match(struct cflayer *layer, int phyid)
void caif_free_client(struct cflayer *adap_layer)
{
+ struct cflayer *serv_layer;
struct cfsrvl *servl;
- if (adap_layer == NULL || adap_layer->dn == NULL)
+
+ if (!adap_layer)
+ return;
+
+ serv_layer = adap_layer->dn;
+ if (!serv_layer)
return;
- servl = container_obj(adap_layer->dn);
+
+ layer_set_dn(adap_layer, NULL);
+ layer_set_up(serv_layer, NULL);
+
+ servl = container_obj(serv_layer);
servl->release(&servl->layer);
}
EXPORT_SYMBOL(caif_free_client);
diff --git a/net/ceph/auth.c b/net/ceph/auth.c
index 0d75679c6a7e..23d109cb0c6b 100644
--- a/net/ceph/auth.c
+++ b/net/ceph/auth.c
@@ -245,7 +245,7 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac,
ac->protocol = 0;
ac->ops = NULL;
}
- if (ac->protocol != protocol) {
+ if (!ac->protocol) {
ret = init_protocol(ac, protocol);
if (ret) {
pr_err("auth protocol '%s' init failed: %d\n",
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 8e53b595a419..4f55b6041feb 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -64,6 +64,7 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
+#include <linux/nospec.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
@@ -361,7 +362,9 @@ static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd,
to, len);
skb->csum = csum_block_add(skb->csum, csum, odd);
- if (icmp_pointers[icmp_param->data.icmph.type].error)
+ if (icmp_param->data.icmph.type <= NR_ICMP_TYPES &&
+ icmp_pointers[array_index_nospec(icmp_param->data.icmph.type,
+ NR_ICMP_TYPES + 1)].error)
nf_ct_attach(skb, icmp_param->skb);
return 0;
}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 2691d69f3350..bf4e5f49030b 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -1486,16 +1486,19 @@ void inet_csk_listen_stop(struct sock *sk)
if (nreq) {
refcount_set(&nreq->rsk_refcnt, 1);
+ rcu_read_lock();
if (inet_csk_reqsk_queue_add(nsk, nreq, child)) {
__NET_INC_STATS(sock_net(nsk),
LINUX_MIB_TCPMIGRATEREQSUCCESS);
reqsk_migrate_reset(req);
+ READ_ONCE(nsk->sk_data_ready)(nsk);
} else {
__NET_INC_STATS(sock_net(nsk),
LINUX_MIB_TCPMIGRATEREQFAILURE);
reqsk_migrate_reset(nreq);
__reqsk_free(nreq);
}
+ rcu_read_unlock();
/* inet_csk_reqsk_queue_add() has already
* called inet_child_forget() on failure case.
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 8a30dd83cf0b..d09ae48030b3 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -491,6 +491,7 @@ static int ipv6_rpl_srh_rcv(struct sk_buff *skb)
struct net *net = dev_net(skb->dev);
struct inet6_dev *idev;
struct ipv6hdr *oldhdr;
+ unsigned int chdr_len;
unsigned char *buf;
int accept_rpl_seg;
int i, err;
@@ -594,8 +595,10 @@ static int ipv6_rpl_srh_rcv(struct sk_buff *skb)
skb_pull(skb, ((hdr->hdrlen + 1) << 3));
skb_postpull_rcsum(skb, oldhdr,
sizeof(struct ipv6hdr) + ((hdr->hdrlen + 1) << 3));
- if (unlikely(!hdr->segments_left)) {
- if (pskb_expand_head(skb, sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3), 0,
+ chdr_len = sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3);
+ if (unlikely(!hdr->segments_left ||
+ skb_headroom(skb) < chdr_len + skb->mac_len)) {
+ if (pskb_expand_head(skb, chdr_len + skb->mac_len, 0,
GFP_ATOMIC)) {
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
@@ -605,7 +608,7 @@ static int ipv6_rpl_srh_rcv(struct sk_buff *skb)
oldhdr = ipv6_hdr(skb);
}
- skb_push(skb, ((chdr->hdrlen + 1) << 3) + sizeof(struct ipv6hdr));
+ skb_push(skb, chdr_len);
skb_reset_network_header(skb);
skb_mac_header_rebuild(skb);
skb_set_transport_header(skb, sizeof(struct ipv6hdr));
diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
index c7942cf65567..4e10adcd70e8 100644
--- a/net/ipv6/rpl_iptunnel.c
+++ b/net/ipv6/rpl_iptunnel.c
@@ -287,7 +287,16 @@ static int rpl_input(struct sk_buff *skb)
if (!dst) {
ip6_route_input(skb);
+
+ /* ip6_route_input() sets a NOREF dst; force a refcount on it
+ * before caching or further use.
+ */
+ skb_dst_force(skb);
dst = skb_dst(skb);
+ if (unlikely(!dst)) {
+ err = -ENETUNREACH;
+ goto drop;
+ }
/* cache only if we don't create a dst reference loop */
if (!dst->error && lwtst != dst->lwtstate) {
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index c292f38549fc..fb5a701f8d25 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -500,7 +500,16 @@ static int seg6_input_core(struct net *net, struct sock *sk,
if (!dst) {
ip6_route_input(skb);
+
+ /* ip6_route_input() sets a NOREF dst; force a refcount on it
+ * before caching or further use.
+ */
+ skb_dst_force(skb);
dst = skb_dst(skb);
+ if (unlikely(!dst)) {
+ err = -ENETUNREACH;
+ goto drop;
+ }
/* cache only if we don't create a dst reference loop */
if (!dst->error && lwtst != dst->lwtstate) {
@@ -715,7 +724,8 @@ static int seg6_build_state(struct net *net, struct nlattr *nla,
newts->type = LWTUNNEL_ENCAP_SEG6;
newts->flags |= LWTUNNEL_STATE_INPUT_REDIRECT;
- if (tuninfo->mode != SEG6_IPTUN_MODE_L2ENCAP)
+ if (tuninfo->mode != SEG6_IPTUN_MODE_L2ENCAP &&
+ tuninfo->mode != SEG6_IPTUN_MODE_L2ENCAP_RED)
newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
newts->headroom = seg6_lwt_headroom(tuninfo);
diff --git a/net/mctp/route.c b/net/mctp/route.c
index 08bbd861dc42..ccba2abbbbfb 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -388,6 +388,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
unsigned long f;
u8 tag, flags;
int rc;
+ u8 ver;
msk = NULL;
rc = -EINVAL;
@@ -411,7 +412,8 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
netid = mctp_cb(skb)->net;
skb_pull(skb, sizeof(struct mctp_hdr));
- if (mh->ver != 1)
+ ver = mh->ver & MCTP_HDR_VER_MASK;
+ if (ver < MCTP_VER_MIN || ver > MCTP_VER_MAX)
goto out;
flags = mh->flags_seq_tag & (MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM);
@@ -1197,6 +1199,7 @@ static int mctp_pkttype_receive(struct sk_buff *skb, struct net_device *dev,
struct mctp_skb_cb *cb;
struct mctp_route *rt;
struct mctp_hdr *mh;
+ u8 ver;
rcu_read_lock();
mdev = __mctp_dev_get(dev);
@@ -1214,7 +1217,8 @@ static int mctp_pkttype_receive(struct sk_buff *skb, struct net_device *dev,
/* We have enough for a header; decode and route */
mh = mctp_hdr(skb);
- if (mh->ver < MCTP_VER_MIN || mh->ver > MCTP_VER_MAX)
+ ver = mh->ver & MCTP_HDR_VER_MASK;
+ if (ver < MCTP_VER_MIN || ver > MCTP_VER_MAX)
goto err_drop;
/* source must be valid unicast or null; drop reserved ranges and
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
index 7de95674fd8c..2cfb0104680c 100644
--- a/net/netfilter/nft_bitwise.c
+++ b/net/netfilter/nft_bitwise.c
@@ -149,7 +149,8 @@ static int nft_bitwise_init_shift(struct nft_bitwise *priv,
if (err < 0)
return err;
- if (priv->data.data[0] >= BITS_PER_TYPE(u32)) {
+ if (!priv->data.data[0] ||
+ priv->data.data[0] >= BITS_PER_TYPE(u32)) {
nft_data_release(&priv->data, desc.type);
return -EINVAL;
}
diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
index 3de9350cbf30..a10e8de4f7e1 100644
--- a/net/qrtr/ns.c
+++ b/net/qrtr/ns.c
@@ -22,8 +22,10 @@ static struct {
struct socket *sock;
struct sockaddr_qrtr bcast_sq;
struct list_head lookups;
+ u32 lookup_count;
struct workqueue_struct *workqueue;
struct work_struct work;
+ void (*saved_data_ready)(struct sock *sk);
int local_node;
} qrtr_ns;
@@ -67,8 +69,26 @@ struct qrtr_server {
struct qrtr_node {
unsigned int id;
struct xarray servers;
+ u32 server_count;
};
+/* Max server limit is chosen based on the current platform requirements. If the
+ * requirement changes in the future, this value can be increased.
+ */
+#define QRTR_NS_MAX_SERVERS 256
+
+/* Max lookup limit is chosen based on the current platform requirements. If the
+ * requirement changes in the future, this value can be increased.
+ */
+#define QRTR_NS_MAX_LOOKUPS 64
+
+/* Max nodes limit is chosen based on the current platform requirements.
+ * If the requirement changes in the future, this value can be increased.
+ */
+#define QRTR_NS_MAX_NODES 64
+
+static u8 node_count;
+
static struct qrtr_node *node_get(unsigned int node_id)
{
struct qrtr_node *node;
@@ -77,6 +97,11 @@ static struct qrtr_node *node_get(unsigned int node_id)
if (node)
return node;
+ if (node_count >= QRTR_NS_MAX_NODES) {
+ pr_err_ratelimited("QRTR clients exceed max node limit!\n");
+ return NULL;
+ }
+
/* If node didn't exist, allocate and insert it to the tree */
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
@@ -90,6 +115,8 @@ static struct qrtr_node *node_get(unsigned int node_id)
return NULL;
}
+ node_count++;
+
return node;
}
@@ -229,6 +256,17 @@ static struct qrtr_server *server_add(unsigned int service,
if (!service || !port)
return NULL;
+ node = node_get(node_id);
+ if (!node)
+ return NULL;
+
+ /* Make sure the new servers per port are capped at the maximum value */
+ old = xa_load(&node->servers, port);
+ if (!old && node->server_count >= QRTR_NS_MAX_SERVERS) {
+ pr_err_ratelimited("QRTR client node %u exceeds max server limit!\n", node_id);
+ return NULL;
+ }
+
srv = kzalloc(sizeof(*srv), GFP_KERNEL);
if (!srv)
return NULL;
@@ -238,10 +276,6 @@ static struct qrtr_server *server_add(unsigned int service,
srv->node = node_id;
srv->port = port;
- node = node_get(node_id);
- if (!node)
- goto err;
-
/* Delete the old server on the same port */
old = xa_store(&node->servers, port, srv, GFP_KERNEL);
if (old) {
@@ -252,6 +286,8 @@ static struct qrtr_server *server_add(unsigned int service,
} else {
kfree(old);
}
+ } else {
+ node->server_count++;
}
trace_qrtr_ns_server_add(srv->service, srv->instance,
@@ -292,6 +328,7 @@ static int server_del(struct qrtr_node *node, unsigned int port, bool bcast)
}
kfree(srv);
+ node->server_count--;
return 0;
}
@@ -341,7 +378,7 @@ static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
struct qrtr_node *node;
unsigned long index;
struct kvec iv;
- int ret;
+ int ret = 0;
iv.iov_base = &pkt;
iv.iov_len = sizeof(pkt);
@@ -356,8 +393,10 @@ static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
/* Advertise the removal of this client to all local servers */
local_node = node_get(qrtr_ns.local_node);
- if (!local_node)
- return 0;
+ if (!local_node) {
+ ret = 0;
+ goto delete_node;
+ }
memset(&pkt, 0, sizeof(pkt));
pkt.cmd = cpu_to_le32(QRTR_TYPE_BYE);
@@ -374,10 +413,19 @@ static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
if (ret < 0 && ret != -ENODEV) {
pr_err("failed to send bye cmd\n");
- return ret;
+ goto delete_node;
}
}
- return 0;
+
+ /* Ignore -ENODEV */
+ ret = 0;
+
+delete_node:
+ xa_erase(&nodes, from->sq_node);
+ kfree(node);
+ node_count--;
+
+ return ret;
}
static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
@@ -417,6 +465,7 @@ static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
list_del(&lookup->li);
kfree(lookup);
+ qrtr_ns.lookup_count--;
}
/* Remove the server belonging to this port but don't broadcast
@@ -534,6 +583,11 @@ static int ctrl_cmd_new_lookup(struct sockaddr_qrtr *from,
if (from->sq_node != qrtr_ns.local_node)
return -EINVAL;
+ if (qrtr_ns.lookup_count >= QRTR_NS_MAX_LOOKUPS) {
+ pr_err_ratelimited("QRTR client node exceeds max lookup limit!\n");
+ return -ENOSPC;
+ }
+
lookup = kzalloc(sizeof(*lookup), GFP_KERNEL);
if (!lookup)
return -ENOMEM;
@@ -542,6 +596,7 @@ static int ctrl_cmd_new_lookup(struct sockaddr_qrtr *from,
lookup->service = service;
lookup->instance = instance;
list_add_tail(&lookup->li, &qrtr_ns.lookups);
+ qrtr_ns.lookup_count++;
memset(&filter, 0, sizeof(filter));
filter.service = service;
@@ -582,6 +637,7 @@ static void ctrl_cmd_del_lookup(struct sockaddr_qrtr *from,
list_del(&lookup->li);
kfree(lookup);
+ qrtr_ns.lookup_count--;
}
}
@@ -670,7 +726,7 @@ static void qrtr_ns_worker(struct work_struct *work)
}
if (ret < 0)
- pr_err("failed while handling packet from %d:%d",
+ pr_err_ratelimited("failed while handling packet from %d:%d",
sq.sq_node, sq.sq_port);
}
@@ -709,6 +765,7 @@ int qrtr_ns_init(void)
goto err_sock;
}
+ qrtr_ns.saved_data_ready = qrtr_ns.sock->sk->sk_data_ready;
qrtr_ns.sock->sk->sk_data_ready = qrtr_ns_data_ready;
sq.sq_port = QRTR_PORT_CTRL;
@@ -749,6 +806,10 @@ int qrtr_ns_init(void)
return 0;
err_wq:
+ write_lock_bh(&qrtr_ns.sock->sk->sk_callback_lock);
+ qrtr_ns.sock->sk->sk_data_ready = qrtr_ns.saved_data_ready;
+ write_unlock_bh(&qrtr_ns.sock->sk->sk_callback_lock);
+
destroy_workqueue(qrtr_ns.workqueue);
err_sock:
sock_release(qrtr_ns.sock);
@@ -758,7 +819,12 @@ EXPORT_SYMBOL_GPL(qrtr_ns_init);
void qrtr_ns_remove(void)
{
+ write_lock_bh(&qrtr_ns.sock->sk->sk_callback_lock);
+ qrtr_ns.sock->sk->sk_data_ready = qrtr_ns.saved_data_ready;
+ write_unlock_bh(&qrtr_ns.sock->sk->sk_callback_lock);
+
cancel_work_sync(&qrtr_ns.work);
+ synchronize_net();
destroy_workqueue(qrtr_ns.workqueue);
/* sock_release() expects the two references that were put during
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 00dbcd4d28e6..34d9333e4229 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -326,10 +326,6 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
if (args->cookie_addr &&
put_user(cookie, (u64 __user *)(unsigned long)args->cookie_addr)) {
- if (!need_odp) {
- unpin_user_pages(pages, nr_pages);
- kfree(sg);
- }
ret = -EFAULT;
goto out;
}
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 1494d162444d..63cd5217b4ee 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -1260,7 +1260,6 @@ int rxrpc_server_keyring(struct rxrpc_sock *, sockptr_t, int);
void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
-void rxrpc_eaten_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_purge_queue(struct sk_buff_head *);
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 7bbb68504766..62ddaa129ce5 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -342,8 +342,29 @@ bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
if (skb && skb->mark == RXRPC_SKB_MARK_ERROR)
goto out;
- if (skb)
- rxrpc_input_call_packet(call, skb);
+ if (skb) {
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+ if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
+ sp->hdr.securityIndex != 0 &&
+ skb_cloned(skb)) {
+ /* Unshare the packet so that it can be modified for
+ * in-place decryption.
+ */
+ struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
+
+ if (nskb) {
+ rxrpc_new_skb(nskb, rxrpc_skb_new_unshared);
+ rxrpc_input_call_packet(call, nskb);
+ rxrpc_free_skb(nskb, rxrpc_skb_put_input);
+ } else {
+ /* OOM - Drop the packet. */
+ rxrpc_see_skb(skb, rxrpc_skb_see_unshare_nomem);
+ }
+ } else {
+ rxrpc_input_call_packet(call, skb);
+ }
+ }
/* If we see our async-event poke, check for timeout trippage. */
now = ktime_get_real();
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 6ef2dc1aa8cc..82cc72123c9c 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -344,7 +344,6 @@ void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn, bool force)
static void rxrpc_do_process_connection(struct rxrpc_connection *conn)
{
struct sk_buff *skb;
- int ret;
if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
rxrpc_secure_connection(conn);
@@ -353,17 +352,8 @@ static void rxrpc_do_process_connection(struct rxrpc_connection *conn)
* connection that each one has when we've finished with it */
while ((skb = skb_dequeue(&conn->rx_queue))) {
rxrpc_see_skb(skb, rxrpc_skb_see_conn_work);
- ret = rxrpc_process_event(conn, skb);
- switch (ret) {
- case -ENOMEM:
- case -EAGAIN:
- skb_queue_head(&conn->rx_queue, skb);
- rxrpc_queue_conn(conn, rxrpc_conn_queue_retry_work);
- break;
- default:
- rxrpc_free_skb(skb, rxrpc_skb_put_conn_work);
- break;
- }
+ rxrpc_process_event(conn, skb);
+ rxrpc_free_skb(skb, rxrpc_skb_put_conn_work);
}
}
diff --git a/net/rxrpc/io_thread.c b/net/rxrpc/io_thread.c
index 83c5f715c6b7..8270cac0488d 100644
--- a/net/rxrpc/io_thread.c
+++ b/net/rxrpc/io_thread.c
@@ -178,13 +178,12 @@ static bool rxrpc_extract_abort(struct sk_buff *skb)
/*
* Process packets received on the local endpoint
*/
-static bool rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
+static bool rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff *skb)
{
struct rxrpc_connection *conn;
struct sockaddr_rxrpc peer_srx;
struct rxrpc_skb_priv *sp;
struct rxrpc_peer *peer = NULL;
- struct sk_buff *skb = *_skb;
bool ret = false;
skb_pull(skb, sizeof(struct udphdr));
@@ -230,25 +229,6 @@ static bool rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
return rxrpc_bad_message(skb, rxrpc_badmsg_zero_call);
if (sp->hdr.seq == 0)
return rxrpc_bad_message(skb, rxrpc_badmsg_zero_seq);
-
- /* Unshare the packet so that it can be modified for in-place
- * decryption.
- */
- if (sp->hdr.securityIndex != 0) {
- skb = skb_unshare(skb, GFP_ATOMIC);
- if (!skb) {
- rxrpc_eaten_skb(*_skb, rxrpc_skb_eaten_by_unshare_nomem);
- *_skb = NULL;
- return just_discard;
- }
-
- if (skb != *_skb) {
- rxrpc_eaten_skb(*_skb, rxrpc_skb_eaten_by_unshare);
- *_skb = skb;
- rxrpc_new_skb(skb, rxrpc_skb_new_unshared);
- sp = rxrpc_skb(skb);
- }
- }
break;
case RXRPC_PACKET_TYPE_CHALLENGE:
@@ -490,7 +470,7 @@ int rxrpc_io_thread(void *data)
switch (skb->mark) {
case RXRPC_SKB_MARK_PACKET:
skb->priority = 0;
- if (!rxrpc_input_packet(local, &skb))
+ if (!rxrpc_input_packet(local, skb))
rxrpc_reject_packet(local, skb);
trace_rxrpc_rx_done(skb->mark, skb->priority);
rxrpc_free_skb(skb, rxrpc_skb_put_input);
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index ef2bf442f323..73bbe8cd391b 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -494,6 +494,9 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
rxkad_abort_2_short_header);
+ /* Don't let the crypto algo see a misaligned length. */
+ sp->len = round_down(sp->len, 8);
+
/* Decrypt the skbuff in-place. TODO: We really want to decrypt
* directly into the target buffer.
*/
@@ -527,8 +530,10 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
if (sg != _sg)
kfree(sg);
if (ret < 0) {
- WARN_ON_ONCE(ret != -ENOMEM);
- return ret;
+ if (ret == -ENOMEM)
+ return ret;
+ return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
+ rxkad_abort_2_crypto_unaligned);
}
/* Extract the decrypted packet length */
@@ -1048,7 +1053,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
struct rxrpc_crypt session_key;
struct key *server_key;
time64_t expiry;
- void *ticket;
+ void *ticket = NULL;
u32 version, kvno, ticket_len, level;
__be32 csum;
int ret, i;
@@ -1074,13 +1079,13 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
ret = -ENOMEM;
response = kzalloc(sizeof(struct rxkad_response), GFP_NOFS);
if (!response)
- goto temporary_error;
+ goto error;
if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
response, sizeof(*response)) < 0) {
- rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO,
- rxkad_abort_resp_short);
- goto protocol_error;
+ ret = rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO,
+ rxkad_abort_resp_short);
+ goto error;
}
version = ntohl(response->version);
@@ -1090,62 +1095,62 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
trace_rxrpc_rx_response(conn, sp->hdr.serial, version, kvno, ticket_len);
if (version != RXKAD_VERSION) {
- rxrpc_abort_conn(conn, skb, RXKADINCONSISTENCY, -EPROTO,
- rxkad_abort_resp_version);
- goto protocol_error;
+ ret = rxrpc_abort_conn(conn, skb, RXKADINCONSISTENCY, -EPROTO,
+ rxkad_abort_resp_version);
+ goto error;
}
if (ticket_len < 4 || ticket_len > MAXKRB5TICKETLEN) {
- rxrpc_abort_conn(conn, skb, RXKADTICKETLEN, -EPROTO,
- rxkad_abort_resp_tkt_len);
- goto protocol_error;
+ ret = rxrpc_abort_conn(conn, skb, RXKADTICKETLEN, -EPROTO,
+ rxkad_abort_resp_tkt_len);
+ goto error;
}
if (kvno >= RXKAD_TKT_TYPE_KERBEROS_V5) {
- rxrpc_abort_conn(conn, skb, RXKADUNKNOWNKEY, -EPROTO,
- rxkad_abort_resp_unknown_tkt);
- goto protocol_error;
+ ret = rxrpc_abort_conn(conn, skb, RXKADUNKNOWNKEY, -EPROTO,
+ rxkad_abort_resp_unknown_tkt);
+ goto error;
}
/* extract the kerberos ticket and decrypt and decode it */
ret = -ENOMEM;
ticket = kmalloc(ticket_len, GFP_NOFS);
if (!ticket)
- goto temporary_error_free_resp;
+ goto error;
if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header) + sizeof(*response),
ticket, ticket_len) < 0) {
- rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO,
- rxkad_abort_resp_short_tkt);
- goto protocol_error;
+ ret = rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO,
+ rxkad_abort_resp_short_tkt);
+ goto error;
}
ret = rxkad_decrypt_ticket(conn, server_key, skb, ticket, ticket_len,
&session_key, &expiry);
if (ret < 0)
- goto temporary_error_free_ticket;
+ goto error;
/* use the session key from inside the ticket to decrypt the
* response */
ret = rxkad_decrypt_response(conn, response, &session_key);
if (ret < 0)
- goto temporary_error_free_ticket;
+ goto error;
if (ntohl(response->encrypted.epoch) != conn->proto.epoch ||
ntohl(response->encrypted.cid) != conn->proto.cid ||
ntohl(response->encrypted.securityIndex) != conn->security_ix) {
- rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
- rxkad_abort_resp_bad_param);
- goto protocol_error_free;
+ ret = rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
+ rxkad_abort_resp_bad_param);
+ goto error;
}
csum = response->encrypted.checksum;
response->encrypted.checksum = 0;
rxkad_calc_response_checksum(response);
if (response->encrypted.checksum != csum) {
- rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
- rxkad_abort_resp_bad_checksum);
- goto protocol_error_free;
+ ret = rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
+ rxkad_abort_resp_bad_checksum);
+ goto error;
}
for (i = 0; i < RXRPC_MAXCALLS; i++) {
@@ -1153,38 +1158,38 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
u32 counter = READ_ONCE(conn->channels[i].call_counter);
if (call_id > INT_MAX) {
- rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
- rxkad_abort_resp_bad_callid);
- goto protocol_error_free;
+ ret = rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
+ rxkad_abort_resp_bad_callid);
+ goto error;
}
if (call_id < counter) {
- rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
- rxkad_abort_resp_call_ctr);
- goto protocol_error_free;
+ ret = rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
+ rxkad_abort_resp_call_ctr);
+ goto error;
}
if (call_id > counter) {
if (conn->channels[i].call) {
- rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
+ ret = rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
rxkad_abort_resp_call_state);
- goto protocol_error_free;
+ goto error;
}
conn->channels[i].call_counter = call_id;
}
}
if (ntohl(response->encrypted.inc_nonce) != conn->rxkad.nonce + 1) {
- rxrpc_abort_conn(conn, skb, RXKADOUTOFSEQUENCE, -EPROTO,
- rxkad_abort_resp_ooseq);
- goto protocol_error_free;
+ ret = rxrpc_abort_conn(conn, skb, RXKADOUTOFSEQUENCE, -EPROTO,
+ rxkad_abort_resp_ooseq);
+ goto error;
}
level = ntohl(response->encrypted.level);
if (level > RXRPC_SECURITY_ENCRYPT) {
- rxrpc_abort_conn(conn, skb, RXKADLEVELFAIL, -EPROTO,
- rxkad_abort_resp_level);
- goto protocol_error_free;
+ ret = rxrpc_abort_conn(conn, skb, RXKADLEVELFAIL, -EPROTO,
+ rxkad_abort_resp_level);
+ goto error;
}
conn->security_level = level;
@@ -1192,31 +1197,12 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
* this the connection security can be handled in exactly the same way
* as for a client connection */
ret = rxrpc_get_server_data_key(conn, &session_key, expiry, kvno);
- if (ret < 0)
- goto temporary_error_free_ticket;
-
- kfree(ticket);
- kfree(response);
- _leave(" = 0");
- return 0;
-protocol_error_free:
- kfree(ticket);
-protocol_error:
- kfree(response);
- key_put(server_key);
- return -EPROTO;
-
-temporary_error_free_ticket:
+error:
kfree(ticket);
-temporary_error_free_resp:
kfree(response);
-temporary_error:
- /* Ignore the response packet if we got a temporary error such as
- * ENOMEM. We just want to send the challenge again. Note that we
- * also come out this way if the ticket decryption fails.
- */
key_put(server_key);
+ _leave(" = %d", ret);
return ret;
}
diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c
index 3bcd6ee80396..e2169d1a14b5 100644
--- a/net/rxrpc/skbuff.c
+++ b/net/rxrpc/skbuff.c
@@ -46,15 +46,6 @@ void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
skb_get(skb);
}
-/*
- * Note the dropping of a ref on a socket buffer by the core.
- */
-void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
-{
- int n = atomic_inc_return(&rxrpc_n_rx_skbs);
- trace_rxrpc_skb(skb, 0, n, why);
-}
-
/*
* Note the destruction of a socket buffer.
*/
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index b3a8053d4ab4..c2b2b76a8f62 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -785,8 +785,8 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
dclc = (struct smc_clc_msg_decline *)clcm;
reason_code = SMC_CLC_DECL_PEERDECL;
smc->peer_diagnosis = ntohl(dclc->peer_diagnosis);
- if (((struct smc_clc_msg_decline *)buf)->hdr.typev2 &
- SMC_FIRST_CONTACT_MASK) {
+ if ((dclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK) &&
+ smc->conn.lgr) {
smc->conn.lgr->sync_err = 1;
smc_lgr_terminate_sched(smc->conn.lgr);
}
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index b61384b08e7c..2a805c964210 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -45,6 +45,14 @@ static void strp_abort_strp(struct strparser *strp, int err)
strp->stopped = 1;
+ if (strp->skb_head) {
+ kfree_skb(strp->skb_head);
+ strp->skb_head = NULL;
+ }
+
+ strp->skb_nextp = NULL;
+ strp->need_bytes = 0;
+
if (strp->sk) {
struct sock *sk = strp->sk;
diff --git a/rust/kernel/init/macros.rs b/rust/kernel/init/macros.rs
index e477e4de817b..d6e27c522115 100644
--- a/rust/kernel/init/macros.rs
+++ b/rust/kernel/init/macros.rs
@@ -1012,6 +1012,7 @@ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
self,
slot: &'__slot mut $p_type,
) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ // SAFETY: TODO.
unsafe { ::core::pin::Pin::new_unchecked(slot) }
}
)*
@@ -1235,11 +1236,11 @@ fn assert_zeroable<T: $crate::init::Zeroable>(_: *mut T) {}
// Unaligned fields will cause the compiler to emit E0793. We do not support
// unaligned fields since `Init::__init` requires an aligned pointer; the call to
// `ptr::write` below has the same requirement.
+ #[allow(unused_variables, unused_assignments)]
// SAFETY:
// - the project function does the correct field projection,
// - the field has been initialized,
// - the reference is only valid until the end of the initializer.
- #[allow(unused_variables, unused_assignments)]
let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
// Create the drop guard:
@@ -1278,11 +1279,11 @@ fn assert_zeroable<T: $crate::init::Zeroable>(_: *mut T) {}
// Unaligned fields will cause the compiler to emit E0793. We do not support
// unaligned fields since `Init::__init` requires an aligned pointer; the call to
// `ptr::write` below has the same requirement.
+ #[allow(unused_variables, unused_assignments)]
// SAFETY:
// - the field is not structurally pinned, since the line above must compile,
// - the field has been initialized,
// - the reference is only valid until the end of the initializer.
- #[allow(unused_variables, unused_assignments)]
let $field = unsafe { &mut (*$slot).$field };
// Create the drop guard:
@@ -1366,11 +1367,11 @@ fn assert_zeroable<T: $crate::init::Zeroable>(_: *mut T) {}
// Unaligned fields will cause the compiler to emit E0793. We do not support
// unaligned fields since `Init::__init` requires an aligned pointer; the call to
// `ptr::write` below has the same requirement.
+ #[allow(unused_variables, unused_assignments)]
// SAFETY:
// - the project function does the correct field projection,
// - the field has been initialized,
// - the reference is only valid until the end of the initializer.
- #[allow(unused_variables, unused_assignments)]
let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
// Create the drop guard:
diff --git a/scripts/check-uapi.sh b/scripts/check-uapi.sh
index 955581735cb3..9fa45cbdecc2 100755
--- a/scripts/check-uapi.sh
+++ b/scripts/check-uapi.sh
@@ -178,8 +178,11 @@ do_compile() {
local -r inc_dir="$1"
local -r header="$2"
local -r out="$3"
- printf "int main(void) { return 0; }\n" | \
- "$CC" -c \
+ printf "int f(void) { return 0; }\n" | \
+ "$CC" \
+ -shared \
+ -nostdlib \
+ -fPIC \
-o "$out" \
-x c \
-O0 \
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 8855fb4b8834..0182b09cc8a0 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -823,25 +823,23 @@ static int apparmor_getprocattr(struct task_struct *task, const char *name,
char **value)
{
int error = -ENOENT;
- /* released below */
- const struct cred *cred = get_task_cred(task);
- struct aa_task_ctx *ctx = task_ctx(current);
struct aa_label *label = NULL;
+ rcu_read_lock();
if (strcmp(name, "current") == 0)
- label = aa_get_newest_label(cred_label(cred));
- else if (strcmp(name, "prev") == 0 && ctx->previous)
- label = aa_get_newest_label(ctx->previous);
- else if (strcmp(name, "exec") == 0 && ctx->onexec)
- label = aa_get_newest_label(ctx->onexec);
+ label = aa_get_newest_cred_label(__task_cred(task));
+ else if (strcmp(name, "prev") == 0 && task_ctx(task)->previous)
+ label = aa_get_newest_label(task_ctx(task)->previous);
+ else if (strcmp(name, "exec") == 0 && task_ctx(task)->onexec)
+ label = aa_get_newest_label(task_ctx(task)->onexec);
else
error = -EINVAL;
+ rcu_read_unlock();
if (label)
error = aa_getprocattr(label, value, true);
aa_put_label(label);
- put_cred(cred);
return error;
}
diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
index ac347a14f282..7400a5aa47ca 100644
--- a/sound/aoa/codecs/onyx.c
+++ b/sound/aoa/codecs/onyx.c
@@ -122,10 +122,9 @@ static int onyx_snd_vol_get(struct snd_kcontrol *kcontrol,
struct onyx *onyx = snd_kcontrol_chip(kcontrol);
s8 l, r;
- mutex_lock(&onyx->mutex);
+ guard(mutex)(&onyx->mutex);
onyx_read_register(onyx, ONYX_REG_DAC_ATTEN_LEFT, &l);
onyx_read_register(onyx, ONYX_REG_DAC_ATTEN_RIGHT, &r);
- mutex_unlock(&onyx->mutex);
ucontrol->value.integer.value[0] = l + VOLUME_RANGE_SHIFT;
ucontrol->value.integer.value[1] = r + VOLUME_RANGE_SHIFT;
@@ -146,15 +145,13 @@ static int onyx_snd_vol_put(struct snd_kcontrol *kcontrol,
ucontrol->value.integer.value[1] > -1 + VOLUME_RANGE_SHIFT)
return -EINVAL;
- mutex_lock(&onyx->mutex);
+ guard(mutex)(&onyx->mutex);
onyx_read_register(onyx, ONYX_REG_DAC_ATTEN_LEFT, &l);
onyx_read_register(onyx, ONYX_REG_DAC_ATTEN_RIGHT, &r);
if (l + VOLUME_RANGE_SHIFT == ucontrol->value.integer.value[0] &&
- r + VOLUME_RANGE_SHIFT == ucontrol->value.integer.value[1]) {
- mutex_unlock(&onyx->mutex);
+ r + VOLUME_RANGE_SHIFT == ucontrol->value.integer.value[1])
return 0;
- }
onyx_write_register(onyx, ONYX_REG_DAC_ATTEN_LEFT,
ucontrol->value.integer.value[0]
@@ -162,7 +159,6 @@ static int onyx_snd_vol_put(struct snd_kcontrol *kcontrol,
onyx_write_register(onyx, ONYX_REG_DAC_ATTEN_RIGHT,
ucontrol->value.integer.value[1]
- VOLUME_RANGE_SHIFT);
- mutex_unlock(&onyx->mutex);
return 1;
}
@@ -198,9 +194,8 @@ static int onyx_snd_inputgain_get(struct snd_kcontrol *kcontrol,
struct onyx *onyx = snd_kcontrol_chip(kcontrol);
u8 ig;
- mutex_lock(&onyx->mutex);
+ guard(mutex)(&onyx->mutex);
onyx_read_register(onyx, ONYX_REG_ADC_CONTROL, &ig);
- mutex_unlock(&onyx->mutex);
ucontrol->value.integer.value[0] =
(ig & ONYX_ADC_PGA_GAIN_MASK) + INPUTGAIN_RANGE_SHIFT;
@@ -217,14 +212,13 @@ static int onyx_snd_inputgain_put(struct snd_kcontrol *kcontrol,
if (ucontrol->value.integer.value[0] < 3 + INPUTGAIN_RANGE_SHIFT ||
ucontrol->value.integer.value[0] > 28 + INPUTGAIN_RANGE_SHIFT)
return -EINVAL;
- mutex_lock(&onyx->mutex);
+ guard(mutex)(&onyx->mutex);
onyx_read_register(onyx, ONYX_REG_ADC_CONTROL, &v);
n = v;
n &= ~ONYX_ADC_PGA_GAIN_MASK;
n |= (ucontrol->value.integer.value[0] - INPUTGAIN_RANGE_SHIFT)
& ONYX_ADC_PGA_GAIN_MASK;
onyx_write_register(onyx, ONYX_REG_ADC_CONTROL, n);
- mutex_unlock(&onyx->mutex);
return n != v;
}
@@ -252,9 +246,8 @@ static int onyx_snd_capture_source_get(struct snd_kcontrol *kcontrol,
struct onyx *onyx = snd_kcontrol_chip(kcontrol);
s8 v;
- mutex_lock(&onyx->mutex);
+ guard(mutex)(&onyx->mutex);
onyx_read_register(onyx, ONYX_REG_ADC_CONTROL, &v);
- mutex_unlock(&onyx->mutex);
ucontrol->value.enumerated.item[0] = !!(v&ONYX_ADC_INPUT_MIC);
@@ -265,13 +258,12 @@ static void onyx_set_capture_source(struct onyx *onyx, int mic)
{
s8 v;
- mutex_lock(&onyx->mutex);
+ guard(mutex)(&onyx->mutex);
onyx_read_register(onyx, ONYX_REG_ADC_CONTROL, &v);
v &= ~ONYX_ADC_INPUT_MIC;
if (mic)
v |= ONYX_ADC_INPUT_MIC;
onyx_write_register(onyx, ONYX_REG_ADC_CONTROL, v);
- mutex_unlock(&onyx->mutex);
}
static int onyx_snd_capture_source_put(struct snd_kcontrol *kcontrol,
@@ -312,9 +304,8 @@ static int onyx_snd_mute_get(struct snd_kcontrol *kcontrol,
struct onyx *onyx = snd_kcontrol_chip(kcontrol);
u8 c;
- mutex_lock(&onyx->mutex);
+ guard(mutex)(&onyx->mutex);
onyx_read_register(onyx, ONYX_REG_DAC_CONTROL, &c);
- mutex_unlock(&onyx->mutex);
ucontrol->value.integer.value[0] = !(c & ONYX_MUTE_LEFT);
ucontrol->value.integer.value[1] = !(c & ONYX_MUTE_RIGHT);
@@ -329,9 +320,9 @@ static int onyx_snd_mute_put(struct snd_kcontrol *kcontrol,
u8 v = 0, c = 0;
int err = -EBUSY;
- mutex_lock(&onyx->mutex);
+ guard(mutex)(&onyx->mutex);
if (onyx->analog_locked)
- goto out_unlock;
+ return -EBUSY;
onyx_read_register(onyx, ONYX_REG_DAC_CONTROL, &v);
c = v;
@@ -342,9 +333,6 @@ static int onyx_snd_mute_put(struct snd_kcontrol *kcontrol,
c |= ONYX_MUTE_RIGHT;
err = onyx_write_register(onyx, ONYX_REG_DAC_CONTROL, c);
- out_unlock:
- mutex_unlock(&onyx->mutex);
-
return !err ? (v != c) : err;
}
@@ -373,9 +361,8 @@ static int onyx_snd_single_bit_get(struct snd_kcontrol *kcontrol,
u8 address = (pv >> 8) & 0xff;
u8 mask = pv & 0xff;
- mutex_lock(&onyx->mutex);
+ guard(mutex)(&onyx->mutex);
onyx_read_register(onyx, address, &c);
- mutex_unlock(&onyx->mutex);
ucontrol->value.integer.value[0] = !!(c & mask) ^ polarity;
@@ -394,11 +381,10 @@ static int onyx_snd_single_bit_put(struct snd_kcontrol *kcontrol,
u8 address = (pv >> 8) & 0xff;
u8 mask = pv & 0xff;
- mutex_lock(&onyx->mutex);
+ guard(mutex)(&onyx->mutex);
if (spdiflock && onyx->spdif_locked) {
/* even if alsamixer doesn't care.. */
- err = -EBUSY;
- goto out_unlock;
+ return -EBUSY;
}
onyx_read_register(onyx, address, &v);
c = v;
@@ -407,9 +393,6 @@ static int onyx_snd_single_bit_put(struct snd_kcontrol *kcontrol,
c |= mask;
err = onyx_write_register(onyx, address, c);
- out_unlock:
- mutex_unlock(&onyx->mutex);
-
return !err ? (v != c) : err;
}
@@ -490,7 +473,7 @@ static int onyx_spdif_get(struct snd_kcontrol *kcontrol,
struct onyx *onyx = snd_kcontrol_chip(kcontrol);
u8 v;
- mutex_lock(&onyx->mutex);
+ guard(mutex)(&onyx->mutex);
onyx_read_register(onyx, ONYX_REG_DIG_INFO1, &v);
ucontrol->value.iec958.status[0] = v & 0x3e;
@@ -502,7 +485,6 @@ static int onyx_spdif_get(struct snd_kcontrol *kcontrol,
onyx_read_register(onyx, ONYX_REG_DIG_INFO4, &v);
ucontrol->value.iec958.status[4] = v & 0x0f;
- mutex_unlock(&onyx->mutex);
return 0;
}
@@ -513,7 +495,7 @@ static int onyx_spdif_put(struct snd_kcontrol *kcontrol,
struct onyx *onyx = snd_kcontrol_chip(kcontrol);
u8 v;
- mutex_lock(&onyx->mutex);
+ guard(mutex)(&onyx->mutex);
onyx_read_register(onyx, ONYX_REG_DIG_INFO1, &v);
v = (v & ~0x3e) | (ucontrol->value.iec958.status[0] & 0x3e);
onyx_write_register(onyx, ONYX_REG_DIG_INFO1, v);
@@ -528,7 +510,6 @@ static int onyx_spdif_put(struct snd_kcontrol *kcontrol,
onyx_read_register(onyx, ONYX_REG_DIG_INFO4, &v);
v = (v & ~0x0f) | (ucontrol->value.iec958.status[4] & 0x0f);
onyx_write_register(onyx, ONYX_REG_DIG_INFO4, v);
- mutex_unlock(&onyx->mutex);
return 1;
}
@@ -673,14 +654,13 @@ static int onyx_usable(struct codec_info_item *cii,
struct onyx *onyx = cii->codec_data;
int spdif_enabled, analog_enabled;
- mutex_lock(&onyx->mutex);
+ guard(mutex)(&onyx->mutex);
onyx_read_register(onyx, ONYX_REG_DIG_INFO4, &v);
spdif_enabled = !!(v & ONYX_SPDIF_ENABLE);
onyx_read_register(onyx, ONYX_REG_DAC_CONTROL, &v);
analog_enabled =
(v & (ONYX_MUTE_RIGHT|ONYX_MUTE_LEFT))
!= (ONYX_MUTE_RIGHT|ONYX_MUTE_LEFT);
- mutex_unlock(&onyx->mutex);
switch (ti->tag) {
case 0: return 1;
@@ -696,9 +676,8 @@ static int onyx_prepare(struct codec_info_item *cii,
{
u8 v;
struct onyx *onyx = cii->codec_data;
- int err = -EBUSY;
- mutex_lock(&onyx->mutex);
+ guard(mutex)(&onyx->mutex);
#ifdef SNDRV_PCM_FMTBIT_COMPRESSED_16BE
if (substream->runtime->format == SNDRV_PCM_FMTBIT_COMPRESSED_16BE) {
@@ -707,10 +686,9 @@ static int onyx_prepare(struct codec_info_item *cii,
if (onyx_write_register(onyx,
ONYX_REG_DAC_CONTROL,
v | ONYX_MUTE_RIGHT | ONYX_MUTE_LEFT))
- goto out_unlock;
+ return -EBUSY;
onyx->analog_locked = 1;
- err = 0;
- goto out_unlock;
+ return 0;
}
#endif
switch (substream->runtime->rate) {
@@ -720,8 +698,7 @@ static int onyx_prepare(struct codec_info_item *cii,
/* these rates are ok for all outputs */
/* FIXME: program spdif channel control bits here so that
* userspace doesn't have to if it only plays pcm! */
- err = 0;
- goto out_unlock;
+ return 0;
default:
/* got some rate that the digital output can't do,
* so disable and lock it */
@@ -729,16 +706,12 @@ static int onyx_prepare(struct codec_info_item *cii,
if (onyx_write_register(onyx,
ONYX_REG_DIG_INFO4,
v & ~ONYX_SPDIF_ENABLE))
- goto out_unlock;
+ return -EBUSY;
onyx->spdif_locked = 1;
- err = 0;
- goto out_unlock;
+ return 0;
}
- out_unlock:
- mutex_unlock(&onyx->mutex);
-
- return err;
+ return -EBUSY;
}
static int onyx_open(struct codec_info_item *cii,
@@ -746,9 +719,8 @@ static int onyx_open(struct codec_info_item *cii,
{
struct onyx *onyx = cii->codec_data;
- mutex_lock(&onyx->mutex);
+ guard(mutex)(&onyx->mutex);
onyx->open_count++;
- mutex_unlock(&onyx->mutex);
return 0;
}
@@ -758,11 +730,10 @@ static int onyx_close(struct codec_info_item *cii,
{
struct onyx *onyx = cii->codec_data;
- mutex_lock(&onyx->mutex);
+ guard(mutex)(&onyx->mutex);
onyx->open_count--;
if (!onyx->open_count)
onyx->spdif_locked = onyx->analog_locked = 0;
- mutex_unlock(&onyx->mutex);
return 0;
}
@@ -772,7 +743,7 @@ static int onyx_switch_clock(struct codec_info_item *cii,
{
struct onyx *onyx = cii->codec_data;
- mutex_lock(&onyx->mutex);
+ guard(mutex)(&onyx->mutex);
/* this *MUST* be more elaborate later... */
switch (what) {
case CLOCK_SWITCH_PREPARE_SLAVE:
@@ -784,7 +755,6 @@ static int onyx_switch_clock(struct codec_info_item *cii,
default: /* silence warning */
break;
}
- mutex_unlock(&onyx->mutex);
return 0;
}
@@ -795,27 +765,21 @@ static int onyx_suspend(struct codec_info_item *cii, pm_message_t state)
{
struct onyx *onyx = cii->codec_data;
u8 v;
- int err = -ENXIO;
- mutex_lock(&onyx->mutex);
+ guard(mutex)(&onyx->mutex);
if (onyx_read_register(onyx, ONYX_REG_CONTROL, &v))
- goto out_unlock;
+ return -ENXIO;
onyx_write_register(onyx, ONYX_REG_CONTROL, v | ONYX_ADPSV | ONYX_DAPSV);
/* Apple does a sleep here but the datasheet says to do it on resume */
- err = 0;
- out_unlock:
- mutex_unlock(&onyx->mutex);
-
- return err;
+ return 0;
}
static int onyx_resume(struct codec_info_item *cii)
{
struct onyx *onyx = cii->codec_data;
u8 v;
- int err = -ENXIO;
- mutex_lock(&onyx->mutex);
+ guard(mutex)(&onyx->mutex);
/* reset codec */
onyx->codec.gpio->methods->set_hw_reset(onyx->codec.gpio, 0);
@@ -827,17 +791,13 @@ static int onyx_resume(struct codec_info_item *cii)
/* take codec out of suspend (if it still is after reset) */
if (onyx_read_register(onyx, ONYX_REG_CONTROL, &v))
- goto out_unlock;
+ return -ENXIO;
onyx_write_register(onyx, ONYX_REG_CONTROL, v & ~(ONYX_ADPSV | ONYX_DAPSV));
/* FIXME: should divide by sample rate, but 8k is the lowest we go */
msleep(2205000/8000);
/* reset all values */
onyx_register_init(onyx);
- err = 0;
- out_unlock:
- mutex_unlock(&onyx->mutex);
-
- return err;
+ return 0;
}
#endif /* CONFIG_PM */
diff --git a/sound/aoa/codecs/tas.c b/sound/aoa/codecs/tas.c
index 804b2ebbe28f..70216aa05965 100644
--- a/sound/aoa/codecs/tas.c
+++ b/sound/aoa/codecs/tas.c
@@ -235,10 +235,9 @@ static int tas_snd_vol_get(struct snd_kcontrol *kcontrol,
{
struct tas *tas = snd_kcontrol_chip(kcontrol);
- mutex_lock(&tas->mtx);
+ guard(mutex)(&tas->mtx);
ucontrol->value.integer.value[0] = tas->cached_volume_l;
ucontrol->value.integer.value[1] = tas->cached_volume_r;
- mutex_unlock(&tas->mtx);
return 0;
}
@@ -254,18 +253,15 @@ static int tas_snd_vol_put(struct snd_kcontrol *kcontrol,
ucontrol->value.integer.value[1] > 177)
return -EINVAL;
- mutex_lock(&tas->mtx);
+ guard(mutex)(&tas->mtx);
if (tas->cached_volume_l == ucontrol->value.integer.value[0]
- && tas->cached_volume_r == ucontrol->value.integer.value[1]) {
- mutex_unlock(&tas->mtx);
+ && tas->cached_volume_r == ucontrol->value.integer.value[1])
return 0;
- }
tas->cached_volume_l = ucontrol->value.integer.value[0];
tas->cached_volume_r = ucontrol->value.integer.value[1];
if (tas->hw_enabled)
tas_set_volume(tas);
- mutex_unlock(&tas->mtx);
return 1;
}
@@ -285,10 +281,9 @@ static int tas_snd_mute_get(struct snd_kcontrol *kcontrol,
{
struct tas *tas = snd_kcontrol_chip(kcontrol);
- mutex_lock(&tas->mtx);
+ guard(mutex)(&tas->mtx);
ucontrol->value.integer.value[0] = !tas->mute_l;
ucontrol->value.integer.value[1] = !tas->mute_r;
- mutex_unlock(&tas->mtx);
return 0;
}
@@ -297,18 +292,15 @@ static int tas_snd_mute_put(struct snd_kcontrol *kcontrol,
{
struct tas *tas = snd_kcontrol_chip(kcontrol);
- mutex_lock(&tas->mtx);
+ guard(mutex)(&tas->mtx);
if (tas->mute_l == !ucontrol->value.integer.value[0]
- && tas->mute_r == !ucontrol->value.integer.value[1]) {
- mutex_unlock(&tas->mtx);
+ && tas->mute_r == !ucontrol->value.integer.value[1])
return 0;
- }
tas->mute_l = !ucontrol->value.integer.value[0];
tas->mute_r = !ucontrol->value.integer.value[1];
if (tas->hw_enabled)
tas_set_volume(tas);
- mutex_unlock(&tas->mtx);
return 1;
}
@@ -337,10 +329,9 @@ static int tas_snd_mixer_get(struct snd_kcontrol *kcontrol,
struct tas *tas = snd_kcontrol_chip(kcontrol);
int idx = kcontrol->private_value;
- mutex_lock(&tas->mtx);
+ guard(mutex)(&tas->mtx);
ucontrol->value.integer.value[0] = tas->mixer_l[idx];
ucontrol->value.integer.value[1] = tas->mixer_r[idx];
- mutex_unlock(&tas->mtx);
return 0;
}
@@ -351,19 +342,16 @@ static int tas_snd_mixer_put(struct snd_kcontrol *kcontrol,
struct tas *tas = snd_kcontrol_chip(kcontrol);
int idx = kcontrol->private_value;
- mutex_lock(&tas->mtx);
+ guard(mutex)(&tas->mtx);
if (tas->mixer_l[idx] == ucontrol->value.integer.value[0]
- && tas->mixer_r[idx] == ucontrol->value.integer.value[1]) {
- mutex_unlock(&tas->mtx);
+ && tas->mixer_r[idx] == ucontrol->value.integer.value[1])
return 0;
- }
tas->mixer_l[idx] = ucontrol->value.integer.value[0];
tas->mixer_r[idx] = ucontrol->value.integer.value[1];
if (tas->hw_enabled)
tas_set_mixer(tas);
- mutex_unlock(&tas->mtx);
return 1;
}
@@ -396,9 +384,8 @@ static int tas_snd_drc_range_get(struct snd_kcontrol *kcontrol,
{
struct tas *tas = snd_kcontrol_chip(kcontrol);
- mutex_lock(&tas->mtx);
+ guard(mutex)(&tas->mtx);
ucontrol->value.integer.value[0] = tas->drc_range;
- mutex_unlock(&tas->mtx);
return 0;
}
@@ -411,16 +398,13 @@ static int tas_snd_drc_range_put(struct snd_kcontrol *kcontrol,
ucontrol->value.integer.value[0] > TAS3004_DRC_MAX)
return -EINVAL;
- mutex_lock(&tas->mtx);
- if (tas->drc_range == ucontrol->value.integer.value[0]) {
- mutex_unlock(&tas->mtx);
+ guard(mutex)(&tas->mtx);
+ if (tas->drc_range == ucontrol->value.integer.value[0])
return 0;
- }
tas->drc_range = ucontrol->value.integer.value[0];
if (tas->hw_enabled)
tas3004_set_drc(tas);
- mutex_unlock(&tas->mtx);
return 1;
}
@@ -440,9 +424,8 @@ static int tas_snd_drc_switch_get(struct snd_kcontrol *kcontrol,
{
struct tas *tas = snd_kcontrol_chip(kcontrol);
- mutex_lock(&tas->mtx);
+ guard(mutex)(&tas->mtx);
ucontrol->value.integer.value[0] = tas->drc_enabled;
- mutex_unlock(&tas->mtx);
return 0;
}
@@ -451,16 +434,13 @@ static int tas_snd_drc_switch_put(struct snd_kcontrol *kcontrol,
{
struct tas *tas = snd_kcontrol_chip(kcontrol);
- mutex_lock(&tas->mtx);
- if (tas->drc_enabled == ucontrol->value.integer.value[0]) {
- mutex_unlock(&tas->mtx);
+ guard(mutex)(&tas->mtx);
+ if (tas->drc_enabled == ucontrol->value.integer.value[0])
return 0;
- }
tas->drc_enabled = !!ucontrol->value.integer.value[0];
if (tas->hw_enabled)
tas3004_set_drc(tas);
- mutex_unlock(&tas->mtx);
return 1;
}
@@ -486,9 +466,8 @@ static int tas_snd_capture_source_get(struct snd_kcontrol *kcontrol,
{
struct tas *tas = snd_kcontrol_chip(kcontrol);
- mutex_lock(&tas->mtx);
+ guard(mutex)(&tas->mtx);
ucontrol->value.enumerated.item[0] = !!(tas->acr & TAS_ACR_INPUT_B);
- mutex_unlock(&tas->mtx);
return 0;
}
@@ -500,7 +479,7 @@ static int tas_snd_capture_source_put(struct snd_kcontrol *kcontrol,
if (ucontrol->value.enumerated.item[0] > 1)
return -EINVAL;
- mutex_lock(&tas->mtx);
+ guard(mutex)(&tas->mtx);
oldacr = tas->acr;
/*
@@ -512,13 +491,10 @@ static int tas_snd_capture_source_put(struct snd_kcontrol *kcontrol,
if (ucontrol->value.enumerated.item[0])
tas->acr |= TAS_ACR_INPUT_B | TAS_ACR_B_MONAUREAL |
TAS_ACR_B_MON_SEL_RIGHT;
- if (oldacr == tas->acr) {
- mutex_unlock(&tas->mtx);
+ if (oldacr == tas->acr)
return 0;
- }
if (tas->hw_enabled)
tas_write_reg(tas, TAS_REG_ACR, 1, &tas->acr);
- mutex_unlock(&tas->mtx);
return 1;
}
@@ -557,9 +533,8 @@ static int tas_snd_treble_get(struct snd_kcontrol *kcontrol,
{
struct tas *tas = snd_kcontrol_chip(kcontrol);
- mutex_lock(&tas->mtx);
+ guard(mutex)(&tas->mtx);
ucontrol->value.integer.value[0] = tas->treble;
- mutex_unlock(&tas->mtx);
return 0;
}
@@ -571,16 +546,13 @@ static int tas_snd_treble_put(struct snd_kcontrol *kcontrol,
if (ucontrol->value.integer.value[0] < TAS3004_TREBLE_MIN ||
ucontrol->value.integer.value[0] > TAS3004_TREBLE_MAX)
return -EINVAL;
- mutex_lock(&tas->mtx);
- if (tas->treble == ucontrol->value.integer.value[0]) {
- mutex_unlock(&tas->mtx);
+ guard(mutex)(&tas->mtx);
+ if (tas->treble == ucontrol->value.integer.value[0])
return 0;
- }
tas->treble = ucontrol->value.integer.value[0];
if (tas->hw_enabled)
tas_set_treble(tas);
- mutex_unlock(&tas->mtx);
return 1;
}
@@ -608,9 +580,8 @@ static int tas_snd_bass_get(struct snd_kcontrol *kcontrol,
{
struct tas *tas = snd_kcontrol_chip(kcontrol);
- mutex_lock(&tas->mtx);
+ guard(mutex)(&tas->mtx);
ucontrol->value.integer.value[0] = tas->bass;
- mutex_unlock(&tas->mtx);
return 0;
}
@@ -622,16 +593,13 @@ static int tas_snd_bass_put(struct snd_kcontrol *kcontrol,
if (ucontrol->value.integer.value[0] < TAS3004_BASS_MIN ||
ucontrol->value.integer.value[0] > TAS3004_BASS_MAX)
return -EINVAL;
- mutex_lock(&tas->mtx);
- if (tas->bass == ucontrol->value.integer.value[0]) {
- mutex_unlock(&tas->mtx);
+ guard(mutex)(&tas->mtx);
+ if (tas->bass == ucontrol->value.integer.value[0])
return 0;
- }
tas->bass = ucontrol->value.integer.value[0];
if (tas->hw_enabled)
tas_set_bass(tas);
- mutex_unlock(&tas->mtx);
return 1;
}
@@ -722,13 +690,13 @@ static int tas_switch_clock(struct codec_info_item *cii, enum clock_switch clock
break;
case CLOCK_SWITCH_SLAVE:
/* Clocks are back, re-init the codec */
- mutex_lock(&tas->mtx);
- tas_reset_init(tas);
- tas_set_volume(tas);
- tas_set_mixer(tas);
- tas->hw_enabled = 1;
- tas->codec.gpio->methods->all_amps_restore(tas->codec.gpio);
- mutex_unlock(&tas->mtx);
+ scoped_guard(mutex, &tas->mtx) {
+ tas_reset_init(tas);
+ tas_set_volume(tas);
+ tas_set_mixer(tas);
+ tas->hw_enabled = 1;
+ tas->codec.gpio->methods->all_amps_restore(tas->codec.gpio);
+ }
break;
default:
/* doesn't happen as of now */
@@ -743,23 +711,21 @@ static int tas_switch_clock(struct codec_info_item *cii, enum clock_switch clock
* our i2c device is suspended, and then take note of that! */
static int tas_suspend(struct tas *tas)
{
- mutex_lock(&tas->mtx);
+ guard(mutex)(&tas->mtx);
tas->hw_enabled = 0;
tas->acr |= TAS_ACR_ANALOG_PDOWN;
tas_write_reg(tas, TAS_REG_ACR, 1, &tas->acr);
- mutex_unlock(&tas->mtx);
return 0;
}
static int tas_resume(struct tas *tas)
{
/* reset codec */
- mutex_lock(&tas->mtx);
+ guard(mutex)(&tas->mtx);
tas_reset_init(tas);
tas_set_volume(tas);
tas_set_mixer(tas);
tas->hw_enabled = 1;
- mutex_unlock(&tas->mtx);
return 0;
}
@@ -802,14 +768,13 @@ static int tas_init_codec(struct aoa_codec *codec)
return -EINVAL;
}
- mutex_lock(&tas->mtx);
- if (tas_reset_init(tas)) {
- printk(KERN_ERR PFX "tas failed to initialise\n");
- mutex_unlock(&tas->mtx);
- return -ENXIO;
+ scoped_guard(mutex, &tas->mtx) {
+ if (tas_reset_init(tas)) {
+ printk(KERN_ERR PFX "tas failed to initialise\n");
+ return -ENXIO;
+ }
+ tas->hw_enabled = 1;
}
- tas->hw_enabled = 1;
- mutex_unlock(&tas->mtx);
if (tas->codec.soundbus_dev->attach_codec(tas->codec.soundbus_dev,
aoa_get_card(),
diff --git a/sound/aoa/core/gpio-feature.c b/sound/aoa/core/gpio-feature.c
index 39bb409b27f6..19ed0e6907da 100644
--- a/sound/aoa/core/gpio-feature.c
+++ b/sound/aoa/core/gpio-feature.c
@@ -212,10 +212,9 @@ static void ftr_handle_notify(struct work_struct *work)
struct gpio_notification *notif =
container_of(work, struct gpio_notification, work.work);
- mutex_lock(¬if->mutex);
+ guard(mutex)(¬if->mutex);
if (notif->notify)
notif->notify(notif->data);
- mutex_unlock(¬if->mutex);
}
static void gpio_enable_dual_edge(int gpio)
@@ -341,19 +340,17 @@ static int ftr_set_notify(struct gpio_runtime *rt,
if (!irq)
return -ENODEV;
- mutex_lock(¬if->mutex);
+ guard(mutex)(¬if->mutex);
old = notif->notify;
- if (!old && !notify) {
- err = 0;
- goto out_unlock;
- }
+ if (!old && !notify)
+ return 0;
if (old && notify) {
if (old == notify && notif->data == data)
err = 0;
- goto out_unlock;
+ return err;
}
if (old && !notify)
@@ -362,16 +359,13 @@ static int ftr_set_notify(struct gpio_runtime *rt,
if (!old && notify) {
err = request_irq(irq, ftr_handle_notify_irq, 0, name, notif);
if (err)
- goto out_unlock;
+ return err;
}
notif->notify = notify;
notif->data = data;
- err = 0;
- out_unlock:
- mutex_unlock(¬if->mutex);
- return err;
+ return 0;
}
static int ftr_get_detect(struct gpio_runtime *rt,
diff --git a/sound/aoa/core/gpio-pmf.c b/sound/aoa/core/gpio-pmf.c
index 37866039d1ea..e76bde25e41a 100644
--- a/sound/aoa/core/gpio-pmf.c
+++ b/sound/aoa/core/gpio-pmf.c
@@ -74,10 +74,9 @@ static void pmf_handle_notify(struct work_struct *work)
struct gpio_notification *notif =
container_of(work, struct gpio_notification, work.work);
- mutex_lock(¬if->mutex);
+ guard(mutex)(¬if->mutex);
if (notif->notify)
notif->notify(notif->data);
- mutex_unlock(¬if->mutex);
}
static void pmf_gpio_init(struct gpio_runtime *rt)
@@ -154,19 +153,17 @@ static int pmf_set_notify(struct gpio_runtime *rt,
return -EINVAL;
}
- mutex_lock(¬if->mutex);
+ guard(mutex)(¬if->mutex);
old = notif->notify;
- if (!old && !notify) {
- err = 0;
- goto out_unlock;
- }
+ if (!old && !notify)
+ return 0;
if (old && notify) {
if (old == notify && notif->data == data)
err = 0;
- goto out_unlock;
+ return err;
}
if (old && !notify) {
@@ -178,10 +175,8 @@ static int pmf_set_notify(struct gpio_runtime *rt,
if (!old && notify) {
irq_client = kzalloc(sizeof(struct pmf_irq_client),
GFP_KERNEL);
- if (!irq_client) {
- err = -ENOMEM;
- goto out_unlock;
- }
+ if (!irq_client)
+ return -ENOMEM;
irq_client->data = notif;
irq_client->handler = pmf_handle_notify_irq;
irq_client->owner = THIS_MODULE;
@@ -192,17 +187,14 @@ static int pmf_set_notify(struct gpio_runtime *rt,
printk(KERN_ERR "snd-aoa: gpio layer failed to"
" register %s irq (%d)\n", name, err);
kfree(irq_client);
- goto out_unlock;
+ return err;
}
notif->gpio_private = irq_client;
}
notif->notify = notify;
notif->data = data;
- err = 0;
- out_unlock:
- mutex_unlock(¬if->mutex);
- return err;
+ return 0;
}
static int pmf_get_detect(struct gpio_runtime *rt,
diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c
index ce84288168e4..a22a236983d2 100644
--- a/sound/aoa/soundbus/i2sbus/core.c
+++ b/sound/aoa/soundbus/i2sbus/core.c
@@ -84,6 +84,7 @@ static void i2sbus_release_dev(struct device *dev)
for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++)
free_irq(i2sdev->interrupts[i], i2sdev);
i2sbus_control_remove_dev(i2sdev->control, i2sdev);
+ of_node_put(i2sdev->sound.ofdev.dev.of_node);
mutex_destroy(&i2sdev->lock);
kfree(i2sdev);
}
@@ -149,7 +150,6 @@ static int i2sbus_get_and_fixup_rsrc(struct device_node *np, int index,
}
/* Returns 1 if added, 0 for otherwise; don't return a negative value! */
-/* FIXME: look at device node refcounting */
static int i2sbus_add_dev(struct macio_dev *macio,
struct i2sbus_control *control,
struct device_node *np)
@@ -180,8 +180,9 @@ static int i2sbus_add_dev(struct macio_dev *macio,
i = 0;
for_each_child_of_node(np, child) {
if (of_node_name_eq(child, "sound")) {
+ of_node_put(sound);
i++;
- sound = child;
+ sound = of_node_get(child);
}
}
if (i == 1) {
@@ -207,6 +208,7 @@ static int i2sbus_add_dev(struct macio_dev *macio,
}
}
}
+ of_node_put(sound);
/* for the time being, until we can handle non-layout-id
* things in some fabric, refuse to attach if there is no
* layout-id property or we haven't been forced to attach.
@@ -221,7 +223,7 @@ static int i2sbus_add_dev(struct macio_dev *macio,
mutex_init(&dev->lock);
spin_lock_init(&dev->low_lock);
dev->sound.ofdev.archdata.dma_mask = macio->ofdev.archdata.dma_mask;
- dev->sound.ofdev.dev.of_node = np;
+ dev->sound.ofdev.dev.of_node = of_node_get(np);
dev->sound.ofdev.dev.dma_mask = &dev->sound.ofdev.archdata.dma_mask;
dev->sound.ofdev.dev.parent = &macio->ofdev.dev;
dev->sound.ofdev.dev.release = i2sbus_release_dev;
@@ -329,6 +331,7 @@ static int i2sbus_add_dev(struct macio_dev *macio,
for (i=0;i<3;i++)
release_and_free_resource(dev->allocated_resource[i]);
mutex_destroy(&dev->lock);
+ of_node_put(dev->sound.ofdev.dev.of_node);
kfree(dev);
return 0;
}
@@ -407,6 +410,9 @@ static int i2sbus_resume(struct macio_dev* dev)
int err, ret = 0;
list_for_each_entry(i2sdev, &control->list, item) {
+ if (list_empty(&i2sdev->sound.codec_list))
+ continue;
+
/* reset i2s bus format etc. */
i2sbus_pcm_prepare_both(i2sdev);
diff --git a/sound/aoa/soundbus/i2sbus/pcm.c b/sound/aoa/soundbus/i2sbus/pcm.c
index 98b812ffbde6..3faa535b8d2f 100644
--- a/sound/aoa/soundbus/i2sbus/pcm.c
+++ b/sound/aoa/soundbus/i2sbus/pcm.c
@@ -79,11 +79,10 @@ static int i2sbus_pcm_open(struct i2sbus_dev *i2sdev, int in)
u64 formats = 0;
unsigned int rates = 0;
struct transfer_info v;
- int result = 0;
int bus_factor = 0, sysclock_factor = 0;
int found_this;
- mutex_lock(&i2sdev->lock);
+ guard(mutex)(&i2sdev->lock);
get_pcm_info(i2sdev, in, &pi, &other);
@@ -92,8 +91,7 @@ static int i2sbus_pcm_open(struct i2sbus_dev *i2sdev, int in)
if (pi->active) {
/* alsa messed up */
- result = -EBUSY;
- goto out_unlock;
+ return -EBUSY;
}
/* we now need to assign the hw */
@@ -117,10 +115,8 @@ static int i2sbus_pcm_open(struct i2sbus_dev *i2sdev, int in)
ti++;
}
}
- if (!masks_inited || !bus_factor || !sysclock_factor) {
- result = -ENODEV;
- goto out_unlock;
- }
+ if (!masks_inited || !bus_factor || !sysclock_factor)
+ return -ENODEV;
/* bus dependent stuff */
hw->info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_RESUME |
@@ -169,17 +165,16 @@ static int i2sbus_pcm_open(struct i2sbus_dev *i2sdev, int in)
* currently in use (if any). */
hw->rate_min = 5512;
hw->rate_max = 192000;
- /* if the other stream is active, then we can only
- * support what it is currently using.
- * FIXME: I lied. This comment is wrong. We can support
- * anything that works with the same serial format, ie.
- * when recording 24 bit sound we can well play 16 bit
- * sound at the same time iff using the same transfer mode.
+ /* If the other stream is already prepared, keep this stream
+ * on the same duplex format and rate.
+ *
+ * i2sbus_pcm_prepare() still programs one shared transport
+ * configuration for both directions, so mixed duplex formats
+ * are not supported here.
*/
if (other->active) {
- /* FIXME: is this guaranteed by the alsa api? */
hw->formats &= pcm_format_to_bits(i2sdev->format);
- /* see above, restrict rates to the one we already have */
+ /* Restrict rates to the one already in use. */
hw->rate_min = i2sdev->rate;
hw->rate_max = i2sdev->rate;
}
@@ -194,15 +189,12 @@ static int i2sbus_pcm_open(struct i2sbus_dev *i2sdev, int in)
hw->periods_max = MAX_DBDMA_COMMANDS;
err = snd_pcm_hw_constraint_integer(pi->substream->runtime,
SNDRV_PCM_HW_PARAM_PERIODS);
- if (err < 0) {
- result = err;
- goto out_unlock;
- }
+ if (err < 0)
+ return err;
list_for_each_entry(cii, &sdev->codec_list, list) {
if (cii->codec->open) {
err = cii->codec->open(cii, pi->substream);
if (err) {
- result = err;
/* unwind */
found_this = 0;
list_for_each_entry_reverse(rev,
@@ -214,14 +206,12 @@ static int i2sbus_pcm_open(struct i2sbus_dev *i2sdev, int in)
if (rev == cii)
found_this = 1;
}
- goto out_unlock;
+ return err;
}
}
}
- out_unlock:
- mutex_unlock(&i2sdev->lock);
- return result;
+ return 0;
}
#undef CHECK_RATE
@@ -232,7 +222,7 @@ static int i2sbus_pcm_close(struct i2sbus_dev *i2sdev, int in)
struct pcm_info *pi;
int err = 0, tmp;
- mutex_lock(&i2sdev->lock);
+ guard(mutex)(&i2sdev->lock);
get_pcm_info(i2sdev, in, &pi, NULL);
@@ -246,7 +236,6 @@ static int i2sbus_pcm_close(struct i2sbus_dev *i2sdev, int in)
pi->substream = NULL;
pi->active = 0;
- mutex_unlock(&i2sdev->lock);
return err;
}
@@ -293,6 +282,23 @@ void i2sbus_wait_for_stop_both(struct i2sbus_dev *i2sdev)
}
#endif
+static void i2sbus_pcm_clear_active(struct i2sbus_dev *i2sdev, int in)
+{
+ struct pcm_info *pi;
+
+ guard(mutex)(&i2sdev->lock);
+
+ get_pcm_info(i2sdev, in, &pi, NULL);
+ pi->active = 0;
+}
+
+static inline int i2sbus_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params, int in)
+{
+ i2sbus_pcm_clear_active(snd_pcm_substream_chip(substream), in);
+ return 0;
+}
+
static inline int i2sbus_hw_free(struct snd_pcm_substream *substream, int in)
{
struct i2sbus_dev *i2sdev = snd_pcm_substream_chip(substream);
@@ -301,14 +307,27 @@ static inline int i2sbus_hw_free(struct snd_pcm_substream *substream, int in)
get_pcm_info(i2sdev, in, &pi, NULL);
if (pi->dbdma_ring.stopping)
i2sbus_wait_for_stop(i2sdev, pi);
+ i2sbus_pcm_clear_active(i2sdev, in);
return 0;
}
+static int i2sbus_playback_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ return i2sbus_hw_params(substream, params, 0);
+}
+
static int i2sbus_playback_hw_free(struct snd_pcm_substream *substream)
{
return i2sbus_hw_free(substream, 0);
}
+static int i2sbus_record_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ return i2sbus_hw_params(substream, params, 1);
+}
+
static int i2sbus_record_hw_free(struct snd_pcm_substream *substream)
{
return i2sbus_hw_free(substream, 1);
@@ -330,33 +349,25 @@ static int i2sbus_pcm_prepare(struct i2sbus_dev *i2sdev, int in)
int input_16bit;
struct pcm_info *pi, *other;
int cnt;
- int result = 0;
unsigned int cmd, stopaddr;
- mutex_lock(&i2sdev->lock);
+ guard(mutex)(&i2sdev->lock);
get_pcm_info(i2sdev, in, &pi, &other);
- if (pi->dbdma_ring.running) {
- result = -EBUSY;
- goto out_unlock;
- }
+ if (pi->dbdma_ring.running)
+ return -EBUSY;
if (pi->dbdma_ring.stopping)
i2sbus_wait_for_stop(i2sdev, pi);
- if (!pi->substream || !pi->substream->runtime) {
- result = -EINVAL;
- goto out_unlock;
- }
+ if (!pi->substream || !pi->substream->runtime)
+ return -EINVAL;
runtime = pi->substream->runtime;
- pi->active = 1;
if (other->active &&
((i2sdev->format != runtime->format)
- || (i2sdev->rate != runtime->rate))) {
- result = -EINVAL;
- goto out_unlock;
- }
+ || (i2sdev->rate != runtime->rate)))
+ return -EINVAL;
i2sdev->format = runtime->format;
i2sdev->rate = runtime->rate;
@@ -400,6 +411,9 @@ static int i2sbus_pcm_prepare(struct i2sbus_dev *i2sdev, int in)
/* set stop command */
command->command = cpu_to_le16(DBDMA_STOP);
+ cii = list_first_entry(&i2sdev->sound.codec_list,
+ struct codec_info_item, list);
+
/* ok, let's set the serial format and stuff */
switch (runtime->format) {
/* 16 bit formats */
@@ -407,15 +421,7 @@ static int i2sbus_pcm_prepare(struct i2sbus_dev *i2sdev, int in)
case SNDRV_PCM_FORMAT_U16_BE:
/* FIXME: if we add different bus factors we need to
* do more here!! */
- bi.bus_factor = 0;
- list_for_each_entry(cii, &i2sdev->sound.codec_list, list) {
- bi.bus_factor = cii->codec->bus_factor;
- break;
- }
- if (!bi.bus_factor) {
- result = -ENODEV;
- goto out_unlock;
- }
+ bi.bus_factor = cii->codec->bus_factor;
input_16bit = 1;
break;
case SNDRV_PCM_FORMAT_S32_BE:
@@ -426,22 +432,16 @@ static int i2sbus_pcm_prepare(struct i2sbus_dev *i2sdev, int in)
input_16bit = 0;
break;
default:
- result = -EINVAL;
- goto out_unlock;
+ return -EINVAL;
}
/* we assume all sysclocks are the same! */
- list_for_each_entry(cii, &i2sdev->sound.codec_list, list) {
- bi.sysclock_factor = cii->codec->sysclock_factor;
- break;
- }
+ bi.sysclock_factor = cii->codec->sysclock_factor;
if (clock_and_divisors(bi.sysclock_factor,
bi.bus_factor,
runtime->rate,
- &sfr) < 0) {
- result = -EINVAL;
- goto out_unlock;
- }
+ &sfr) < 0)
+ return -EINVAL;
switch (bi.bus_factor) {
case 32:
sfr |= I2S_SF_SERIAL_FORMAT_I2S_32X;
@@ -457,10 +457,8 @@ static int i2sbus_pcm_prepare(struct i2sbus_dev *i2sdev, int in)
int err = 0;
if (cii->codec->prepare)
err = cii->codec->prepare(cii, &bi, pi->substream);
- if (err) {
- result = err;
- goto out_unlock;
- }
+ if (err)
+ return err;
}
/* codecs are fine with it, so set our clocks */
if (input_16bit)
@@ -474,9 +472,11 @@ static int i2sbus_pcm_prepare(struct i2sbus_dev *i2sdev, int in)
/* early exit if already programmed correctly */
/* not locking these is fine since we touch them only in this function */
- if (in_le32(&i2sdev->intfregs->serial_format) == sfr
- && in_le32(&i2sdev->intfregs->data_word_sizes) == dws)
- goto out_unlock;
+ if (in_le32(&i2sdev->intfregs->serial_format) == sfr &&
+ in_le32(&i2sdev->intfregs->data_word_sizes) == dws) {
+ pi->active = 1;
+ return 0;
+ }
/* let's notify the codecs about clocks going away.
* For now we only do mastering on the i2s cell... */
@@ -514,9 +514,8 @@ static int i2sbus_pcm_prepare(struct i2sbus_dev *i2sdev, int in)
if (cii->codec->switch_clock)
cii->codec->switch_clock(cii, CLOCK_SWITCH_SLAVE);
- out_unlock:
- mutex_unlock(&i2sdev->lock);
- return result;
+ pi->active = 1;
+ return 0;
}
#ifdef CONFIG_PM
@@ -772,6 +771,7 @@ static snd_pcm_uframes_t i2sbus_playback_pointer(struct snd_pcm_substream
static const struct snd_pcm_ops i2sbus_playback_ops = {
.open = i2sbus_playback_open,
.close = i2sbus_playback_close,
+ .hw_params = i2sbus_playback_hw_params,
.hw_free = i2sbus_playback_hw_free,
.prepare = i2sbus_playback_prepare,
.trigger = i2sbus_playback_trigger,
@@ -840,6 +840,7 @@ static snd_pcm_uframes_t i2sbus_record_pointer(struct snd_pcm_substream
static const struct snd_pcm_ops i2sbus_record_ops = {
.open = i2sbus_record_open,
.close = i2sbus_record_close,
+ .hw_params = i2sbus_record_hw_params,
.hw_free = i2sbus_record_hw_free,
.prepare = i2sbus_record_prepare,
.trigger = i2sbus_record_trigger,
diff --git a/sound/core/control.c b/sound/core/control.c
index 0ddade871b52..6ceb5f977fcd 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -1574,6 +1574,10 @@ static int snd_ctl_elem_init_enum_names(struct user_element *ue)
/* check that there are enough valid names */
p = names;
for (i = 0; i < ue->info.value.enumerated.items; ++i) {
+ if (buf_len == 0) {
+ kvfree(names);
+ return -EINVAL;
+ }
name_len = strnlen(p, buf_len);
if (name_len == 0 || name_len >= 64 || name_len == buf_len) {
kvfree(names);
diff --git a/sound/core/misc.c b/sound/core/misc.c
index c2fda3bd90a0..37110dc3f425 100644
--- a/sound/core/misc.c
+++ b/sound/core/misc.c
@@ -100,14 +100,18 @@ static LIST_HEAD(snd_fasync_list);
static void snd_fasync_work_fn(struct work_struct *work)
{
struct snd_fasync *fasync;
+ int signal, poll;
spin_lock_irq(&snd_fasync_lock);
while (!list_empty(&snd_fasync_list)) {
fasync = list_first_entry(&snd_fasync_list, struct snd_fasync, list);
list_del_init(&fasync->list);
+ if (!fasync->on)
+ continue;
+ signal = fasync->signal;
+ poll = fasync->poll;
spin_unlock_irq(&snd_fasync_lock);
- if (fasync->on)
- kill_fasync(&fasync->fasync, fasync->signal, fasync->poll);
+ kill_fasync(&fasync->fasync, signal, poll);
spin_lock_irq(&snd_fasync_lock);
}
spin_unlock_irq(&snd_fasync_lock);
@@ -163,7 +167,10 @@ void snd_fasync_free(struct snd_fasync *fasync)
{
if (!fasync)
return;
- fasync->on = 0;
+
+ scoped_guard(spinlock_irq, &snd_fasync_lock)
+ list_del_init(&fasync->list);
+
flush_work(&snd_fasync_work);
kfree(fasync);
}
diff --git a/sound/core/seq/oss/seq_oss_rw.c b/sound/core/seq/oss/seq_oss_rw.c
index 8a142fd54a19..307ef98c44c7 100644
--- a/sound/core/seq/oss/seq_oss_rw.c
+++ b/sound/core/seq/oss/seq_oss_rw.c
@@ -101,9 +101,9 @@ snd_seq_oss_write(struct seq_oss_devinfo *dp, const char __user *buf, int count,
break;
}
fmt = (*(unsigned short *)rec.c) & 0xffff;
- /* FIXME the return value isn't correct */
- return snd_seq_oss_synth_load_patch(dp, rec.s.dev,
- fmt, buf, 0, count);
+ err = snd_seq_oss_synth_load_patch(dp, rec.s.dev,
+ fmt, buf, 0, count);
+ return err < 0 ? err : count;
}
if (ev_is_long(&rec)) {
/* extended code */
diff --git a/sound/drivers/pcmtest.c b/sound/drivers/pcmtest.c
index 21cefaf5419a..5ffcaac53757 100644
--- a/sound/drivers/pcmtest.c
+++ b/sound/drivers/pcmtest.c
@@ -753,13 +753,24 @@ static int __init mod_init(void)
err = init_debug_files(buf_allocated);
if (err)
- return err;
+ goto err_free_patterns;
err = platform_device_register(&pcmtst_pdev);
- if (err)
- return err;
+ if (err) {
+ platform_device_put(&pcmtst_pdev);
+ goto err_clear_debug;
+ }
err = platform_driver_register(&pcmtst_pdrv);
- if (err)
+ if (err) {
platform_device_unregister(&pcmtst_pdev);
+ goto err_clear_debug;
+ }
+
+ return 0;
+
+err_clear_debug:
+ clear_debug_files();
+err_free_patterns:
+ free_pattern_buffers();
return err;
}
diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
index 2a3e9d8ba7db..74eecc9ebd84 100644
--- a/sound/pci/ctxfi/ctatc.c
+++ b/sound/pci/ctxfi/ctatc.c
@@ -788,7 +788,8 @@ static int spdif_passthru_playback_get_resources(struct ct_atc *atc,
struct src *src;
int err;
int n_amixer = apcm->substream->runtime->channels, i;
- unsigned int pitch, rsr = atc->pll_rate;
+ unsigned int pitch;
+ unsigned int rsr = atc->pll_rate ? atc->pll_rate : atc->rsr;
/* first release old resources */
atc_pcm_release_resources(atc, apcm);
diff --git a/sound/usb/6fire/control.c b/sound/usb/6fire/control.c
index 9bd8dcbb68e4..7c2274120c76 100644
--- a/sound/usb/6fire/control.c
+++ b/sound/usb/6fire/control.c
@@ -290,15 +290,17 @@ static int usb6fire_control_input_vol_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct control_runtime *rt = snd_kcontrol_chip(kcontrol);
+ int vol0 = ucontrol->value.integer.value[0] - 15;
+ int vol1 = ucontrol->value.integer.value[1] - 15;
int changed = 0;
- if (rt->input_vol[0] != ucontrol->value.integer.value[0]) {
- rt->input_vol[0] = ucontrol->value.integer.value[0] - 15;
+ if (rt->input_vol[0] != vol0) {
+ rt->input_vol[0] = vol0;
rt->ivol_updated &= ~(1 << 0);
changed = 1;
}
- if (rt->input_vol[1] != ucontrol->value.integer.value[1]) {
- rt->input_vol[1] = ucontrol->value.integer.value[1] - 15;
+ if (rt->input_vol[1] != vol1) {
+ rt->input_vol[1] = vol1;
rt->ivol_updated &= ~(1 << 1);
changed = 1;
}
diff --git a/sound/usb/caiaq/control.c b/sound/usb/caiaq/control.c
index af459c49baf4..4598fb7e8be0 100644
--- a/sound/usb/caiaq/control.c
+++ b/sound/usb/caiaq/control.c
@@ -87,6 +87,7 @@ static int control_put(struct snd_kcontrol *kcontrol,
struct snd_usb_caiaqdev *cdev = caiaqdev(chip->card);
int pos = kcontrol->private_value;
int v = ucontrol->value.integer.value[0];
+ int ret;
unsigned char cmd;
switch (cdev->chip.usb_id) {
@@ -103,6 +104,10 @@ static int control_put(struct snd_kcontrol *kcontrol,
if (pos & CNT_INTVAL) {
int i = pos & ~CNT_INTVAL;
+ unsigned char old = cdev->control_state[i];
+
+ if (old == v)
+ return 0;
cdev->control_state[i] = v;
@@ -113,10 +118,11 @@ static int control_put(struct snd_kcontrol *kcontrol,
cdev->ep8_out_buf[0] = i;
cdev->ep8_out_buf[1] = v;
- usb_bulk_msg(cdev->chip.dev,
- usb_sndbulkpipe(cdev->chip.dev, 8),
- cdev->ep8_out_buf, sizeof(cdev->ep8_out_buf),
- &actual_len, 200);
+ ret = usb_bulk_msg(cdev->chip.dev,
+ usb_sndbulkpipe(cdev->chip.dev, 8),
+ cdev->ep8_out_buf,
+ sizeof(cdev->ep8_out_buf),
+ &actual_len, 200);
} else if (cdev->chip.usb_id ==
USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_MASCHINECONTROLLER)) {
@@ -128,21 +134,36 @@ static int control_put(struct snd_kcontrol *kcontrol,
offset = MASCHINE_BANK_SIZE;
}
- snd_usb_caiaq_send_command_bank(cdev, cmd, bank,
- cdev->control_state + offset,
- MASCHINE_BANK_SIZE);
+ ret = snd_usb_caiaq_send_command_bank(cdev, cmd, bank,
+ cdev->control_state + offset,
+ MASCHINE_BANK_SIZE);
} else {
- snd_usb_caiaq_send_command(cdev, cmd,
- cdev->control_state, sizeof(cdev->control_state));
+ ret = snd_usb_caiaq_send_command(cdev, cmd,
+ cdev->control_state,
+ sizeof(cdev->control_state));
+ }
+
+ if (ret < 0) {
+ cdev->control_state[i] = old;
+ return ret;
}
} else {
- if (v)
- cdev->control_state[pos / 8] |= 1 << (pos % 8);
- else
- cdev->control_state[pos / 8] &= ~(1 << (pos % 8));
+ int idx = pos / 8;
+ unsigned char mask = 1 << (pos % 8);
+ unsigned char old = cdev->control_state[idx];
+ unsigned char val = v ? (old | mask) : (old & ~mask);
- snd_usb_caiaq_send_command(cdev, cmd,
- cdev->control_state, sizeof(cdev->control_state));
+ if (old == val)
+ return 0;
+
+ cdev->control_state[idx] = val;
+ ret = snd_usb_caiaq_send_command(cdev, cmd,
+ cdev->control_state,
+ sizeof(cdev->control_state));
+ if (ret < 0) {
+ cdev->control_state[idx] = old;
+ return ret;
+ }
}
return 1;
@@ -640,4 +661,3 @@ int snd_usb_caiaq_control_init(struct snd_usb_caiaqdev *cdev)
return ret;
}
-
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
index 51177ebfb8c6..b20aae0caf60 100644
--- a/sound/usb/caiaq/device.c
+++ b/sound/usb/caiaq/device.c
@@ -290,7 +290,7 @@ int snd_usb_caiaq_set_auto_msg(struct snd_usb_caiaqdev *cdev,
tmp, sizeof(tmp));
}
-static void setup_card(struct snd_usb_caiaqdev *cdev)
+static int setup_card(struct snd_usb_caiaqdev *cdev)
{
int ret;
char val[4];
@@ -325,8 +325,10 @@ static void setup_card(struct snd_usb_caiaqdev *cdev)
snd_usb_caiaq_send_command(cdev, EP1_CMD_READ_IO, NULL, 0);
if (!wait_event_timeout(cdev->ep1_wait_queue,
- cdev->control_state[0] != 0xff, HZ))
- return;
+ cdev->control_state[0] != 0xff, HZ)) {
+ dev_err(dev, "Read timeout for control state\n");
+ return -EINVAL;
+ }
/* fix up some defaults */
if ((cdev->control_state[1] != 2) ||
@@ -347,33 +349,43 @@ static void setup_card(struct snd_usb_caiaqdev *cdev)
cdev->spec.num_digital_audio_out +
cdev->spec.num_digital_audio_in > 0) {
ret = snd_usb_caiaq_audio_init(cdev);
- if (ret < 0)
+ if (ret < 0) {
dev_err(dev, "Unable to set up audio system (ret=%d)\n", ret);
+ return ret;
+ }
}
if (cdev->spec.num_midi_in +
cdev->spec.num_midi_out > 0) {
ret = snd_usb_caiaq_midi_init(cdev);
- if (ret < 0)
+ if (ret < 0) {
dev_err(dev, "Unable to set up MIDI system (ret=%d)\n", ret);
+ return ret;
+ }
}
#ifdef CONFIG_SND_USB_CAIAQ_INPUT
ret = snd_usb_caiaq_input_init(cdev);
- if (ret < 0)
+ if (ret < 0 && ret != -ENODEV) {
dev_err(dev, "Unable to set up input system (ret=%d)\n", ret);
+ return ret;
+ }
#endif
/* finally, register the card and all its sub-instances */
ret = snd_card_register(cdev->chip.card);
if (ret < 0) {
dev_err(dev, "snd_card_register() returned %d\n", ret);
- snd_card_free(cdev->chip.card);
+ return ret;
}
ret = snd_usb_caiaq_control_init(cdev);
- if (ret < 0)
+ if (ret < 0) {
dev_err(dev, "Unable to set up control system (ret=%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
}
static void card_free(struct snd_card *card)
@@ -411,6 +423,7 @@ static int create_card(struct usb_device *usb_dev,
cdev = caiaqdev(card);
cdev->chip.dev = usb_get_dev(usb_dev);
+ card->private_free = card_free;
cdev->chip.card = card;
cdev->chip.usb_id = USB_ID(le16_to_cpu(usb_dev->descriptor.idVendor),
le16_to_cpu(usb_dev->descriptor.idProduct));
@@ -499,8 +512,10 @@ static int init_card(struct snd_usb_caiaqdev *cdev)
scnprintf(card->longname, sizeof(card->longname), "%s %s (%s)",
cdev->vendor_name, cdev->product_name, usbpath);
- setup_card(cdev);
- card->private_free = card_free;
+ err = setup_card(cdev);
+ if (err < 0)
+ goto err_kill_urb;
+
return 0;
err_kill_urb:
diff --git a/sound/usb/caiaq/input.c b/sound/usb/caiaq/input.c
index a9130891bb69..5c70fdf61cc1 100644
--- a/sound/usb/caiaq/input.c
+++ b/sound/usb/caiaq/input.c
@@ -804,7 +804,7 @@ int snd_usb_caiaq_input_init(struct snd_usb_caiaqdev *cdev)
default:
/* no input methods supported on this device */
- ret = -EINVAL;
+ ret = -ENODEV;
goto exit_free_idev;
}
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 2616a7efcc21..7eb4c639761a 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -1400,9 +1400,6 @@ int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
goto unlock;
}
- ep->packsize[0] = min(ep->packsize[0], ep->maxframesize);
- ep->packsize[1] = min(ep->packsize[1], ep->maxframesize);
-
/* calculate the frequency in 16.16 format */
ep->freqm = ep->freqn;
ep->freqshift = INT_MIN;
@@ -1429,6 +1426,9 @@ int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
ep->maxframesize = ep->maxpacksize / ep->cur_frame_bytes;
ep->curframesize = ep->curpacksize / ep->cur_frame_bytes;
+ ep->packsize[0] = min(ep->packsize[0], ep->maxframesize);
+ ep->packsize[1] = min(ep->packsize[1], ep->maxframesize);
+
err = update_clock_ref_rate(chip, ep);
if (err >= 0) {
ep->need_setup = false;
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 7041633b0294..81a2088b398e 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -458,7 +458,7 @@ static int parse_uac2_sample_rate_range(struct snd_usb_audio *chip,
nr_rates++;
if (nr_rates >= MAX_NR_RATES) {
usb_audio_err(chip, "invalid uac2 rates\n");
- break;
+ return nr_rates;
}
skip_rate:
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index c8882d581637..fd7a766331c7 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -1814,10 +1814,11 @@ static void __build_feature_ctl(struct usb_mixer_interface *mixer,
range = (cval->max - cval->min) / cval->res;
/*
- * There are definitely devices with a range of ~20,000, so let's be
- * conservative and allow for a bit more.
+ * Are there devices with volume range more than 255? I use a bit more
+ * to be sure. 384 is a resolution magic number found on Logitech
+ * devices. It will definitively catch all buggy Logitech devices.
*/
- if (range > 65535) {
+ if (range > 384) {
usb_audio_warn(mixer->chip,
"Warning! Unlikely big volume range (=%u), cval->res is probably wrong.",
range);
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 6d6308ca4fa8..b67967731c85 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -1559,15 +1559,17 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
{
struct usb_mixer_interface *mixer;
struct usb_mixer_elem_info *cval;
+ int err;
int unitid = 12; /* SampleRate ExtensionUnit ID */
list_for_each_entry(mixer, &chip->mixer_list, list) {
if (mixer->id_elems[unitid]) {
cval = mixer_elem_list_to_info(mixer->id_elems[unitid]);
- snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR,
- cval->control << 8,
- samplerate_id);
- snd_usb_mixer_notify_id(mixer, unitid);
+ err = snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR,
+ cval->control << 8,
+ samplerate_id);
+ if (!err)
+ snd_usb_mixer_notify_id(mixer, unitid);
break;
}
}
@@ -2062,7 +2064,7 @@ static int snd_microii_spdif_switch_put(struct snd_kcontrol *kcontrol,
int err;
reg = ucontrol->value.integer.value[0] ? 0x28 : 0x2a;
- if (reg != list->kctl->private_value)
+ if (reg == list->kctl->private_value)
return 0;
kcontrol->private_value = reg;
diff --git a/tools/accounting/getdelays.c b/tools/accounting/getdelays.c
index 1334214546d7..3fa750535567 100644
--- a/tools/accounting/getdelays.c
+++ b/tools/accounting/getdelays.c
@@ -59,7 +59,7 @@ int print_task_context_switch_counts;
}
/* Maximum size of response requested or message sent */
-#define MAX_MSG_SIZE 1024
+#define MAX_MSG_SIZE 2048
/* Maximum number of cpus expected to be specified in a cpumask */
#define MAX_CPUS 32
@@ -114,6 +114,32 @@ static int create_nl_socket(int protocol)
return -1;
}
+static int recv_taskstats_msg(int sd, struct msgtemplate *msg)
+{
+ struct sockaddr_nl nladdr;
+ struct iovec iov = {
+ .iov_base = msg,
+ .iov_len = sizeof(*msg),
+ };
+ struct msghdr hdr = {
+ .msg_name = &nladdr,
+ .msg_namelen = sizeof(nladdr),
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
+ };
+ int ret;
+
+ ret = recvmsg(sd, &hdr, 0);
+ if (ret < 0)
+ return -1;
+ if (hdr.msg_flags & MSG_TRUNC) {
+ errno = EMSGSIZE;
+ return -1;
+ }
+
+ return ret;
+}
+
static int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
__u8 genl_cmd, __u16 nla_type,
@@ -465,12 +491,16 @@ int main(int argc, char *argv[])
}
do {
- rep_len = recv(nl_sd, &msg, sizeof(msg), 0);
+ rep_len = recv_taskstats_msg(nl_sd, &msg);
PRINTF("received %d bytes\n", rep_len);
if (rep_len < 0) {
- fprintf(stderr, "nonfatal reply error: errno %d\n",
- errno);
+ if (errno == EMSGSIZE)
+ fprintf(stderr,
+ "dropped truncated taskstats netlink message, please increase MAX_MSG_SIZE\n");
+ else
+ fprintf(stderr, "nonfatal reply error: errno %d\n",
+ errno);
continue;
}
if (msg.n.nlmsg_type == NLMSG_ERROR ||
@@ -512,6 +542,9 @@ int main(int argc, char *argv[])
printf("TGID\t%d\n", rtid);
break;
case TASKSTATS_TYPE_STATS:
+ PRINTF("version %u\n",
+ ((struct taskstats *)
+ NLA_DATA(na))->version);
if (print_delays)
print_delayacct((struct taskstats *) NLA_DATA(na));
if (print_io_accounting)
diff --git a/tools/accounting/procacct.c b/tools/accounting/procacct.c
index 90c4a37f53d9..298fb3f6d80b 100644
--- a/tools/accounting/procacct.c
+++ b/tools/accounting/procacct.c
@@ -71,7 +71,7 @@ int print_task_context_switch_counts;
}
/* Maximum size of response requested or message sent */
-#define MAX_MSG_SIZE 1024
+#define MAX_MSG_SIZE 2048
/* Maximum number of cpus expected to be specified in a cpumask */
#define MAX_CPUS 32
@@ -121,6 +121,32 @@ static int create_nl_socket(int protocol)
return -1;
}
+static int recv_taskstats_msg(int sd, struct msgtemplate *msg)
+{
+ struct sockaddr_nl nladdr;
+ struct iovec iov = {
+ .iov_base = msg,
+ .iov_len = sizeof(*msg),
+ };
+ struct msghdr hdr = {
+ .msg_name = &nladdr,
+ .msg_namelen = sizeof(nladdr),
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
+ };
+ int ret;
+
+ ret = recvmsg(sd, &hdr, 0);
+ if (ret < 0)
+ return -1;
+ if (hdr.msg_flags & MSG_TRUNC) {
+ errno = EMSGSIZE;
+ return -1;
+ }
+
+ return ret;
+}
+
static int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
__u8 genl_cmd, __u16 nla_type,
@@ -239,6 +265,8 @@ void handle_aggr(int mother, struct nlattr *na, int fd)
PRINTF("TGID\t%d\n", rtid);
break;
case TASKSTATS_TYPE_STATS:
+ PRINTF("version %u\n",
+ ((struct taskstats *)NLA_DATA(na))->version);
if (mother == TASKSTATS_TYPE_AGGR_PID)
print_procacct((struct taskstats *) NLA_DATA(na));
if (fd) {
@@ -348,12 +376,16 @@ int main(int argc, char *argv[])
}
do {
- rep_len = recv(nl_sd, &msg, sizeof(msg), 0);
+ rep_len = recv_taskstats_msg(nl_sd, &msg);
PRINTF("received %d bytes\n", rep_len);
if (rep_len < 0) {
- fprintf(stderr, "nonfatal reply error: errno %d\n",
- errno);
+ if (errno == EMSGSIZE)
+ fprintf(stderr,
+ "dropped truncated taskstats netlink message, please increase MAX_MSG_SIZE\n");
+ else
+ fprintf(stderr, "nonfatal reply error: errno %d\n",
+ errno);
continue;
}
if (msg.n.nlmsg_type == NLMSG_ERROR ||
diff --git a/tools/perf/arch/loongarch/annotate/instructions.c b/tools/perf/arch/loongarch/annotate/instructions.c
index ab43b1ab51e3..e16350155bf1 100644
--- a/tools/perf/arch/loongarch/annotate/instructions.c
+++ b/tools/perf/arch/loongarch/annotate/instructions.c
@@ -95,6 +95,7 @@ static int loongarch_jump__parse(struct arch *arch, struct ins_operands *ops, st
}
static struct ins_ops loongarch_jump_ops = {
+ .free = jump__delete,
.parse = loongarch_jump__parse,
.scnprintf = jump__scnprintf,
};
diff --git a/tools/perf/util/disasm.c b/tools/perf/util/disasm.c
index 8a6f450c6f8e..8f35232f7f22 100644
--- a/tools/perf/util/disasm.c
+++ b/tools/perf/util/disasm.c
@@ -44,6 +44,7 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops, int max_ins_name);
static int call__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops, int max_ins_name);
+static void jump__delete(struct ins_operands *ops);
static void ins__sort(struct arch *arch);
static int disasm_line__parse(char *line, const char **namep, char **rawp);
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index bad227ee1b5b..3242a216af9e 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -1790,7 +1790,7 @@ sub save_logs {
my ($result, $basedir) = @_;
my @t = localtime;
my $date = sprintf "%04d%02d%02d%02d%02d%02d",
- 1900+$t[5],$t[4],$t[3],$t[2],$t[1],$t[0];
+ 1900+$t[5],$t[4]+1,$t[3],$t[2],$t[1],$t[0];
my $type = $build_type;
if ($type =~ /useconfig/) {
diff --git a/tools/testing/selftests/landlock/net_test.c b/tools/testing/selftests/landlock/net_test.c
index c3642c17b251..897131bc8a13 100644
--- a/tools/testing/selftests/landlock/net_test.c
+++ b/tools/testing/selftests/landlock/net_test.c
@@ -1343,7 +1343,7 @@ TEST_F(mini, network_access_rights)
&net_port, 0))
{
TH_LOG("Failed to add rule with access 0x%llx: %s",
- access, strerror(errno));
+ (unsigned long long)access, strerror(errno));
}
}
EXPECT_EQ(0, close(ruleset_fd));
diff --git a/tools/testing/selftests/mqueue/setting b/tools/testing/selftests/mqueue/setting
deleted file mode 100644
index a953c96aa16e..000000000000
--- a/tools/testing/selftests/mqueue/setting
+++ /dev/null
@@ -1 +0,0 @@
-timeout=180
diff --git a/tools/testing/selftests/mqueue/settings b/tools/testing/selftests/mqueue/settings
new file mode 100644
index 000000000000..a953c96aa16e
--- /dev/null
+++ b/tools/testing/selftests/mqueue/settings
@@ -0,0 +1 @@
+timeout=180
^ permalink raw reply related [flat|nested] 2+ messages in thread
end of thread, other threads:[~2026-05-07 4:53 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz, follow: Atom feed)
-- links below jump to the message on this page --
2026-05-07 4:52 Linux 6.12.86 Greg Kroah-Hartman
2026-05-07 4:52 ` Greg Kroah-Hartman
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox