[PATCH 1/3] usb: host: export symbols for xhci hooks usage
From: Daehwan Jung @ 2022-02-03 2:57 UTC
To: Mathias Nyman, Greg Kroah-Hartman
Cc: open list:USB XHCI DRIVER, open list, Howard Yen, Jack Pham,
Puma Hsu, J . Avila, Daehwan Jung
Export symbols for xhci hooks usage:
xhci_ring_free
- Allow xhci hook to free xhci_ring.
xhci_get_slot_ctx
- Allow xhci hook to get the slot_ctx from an xhci_container_ctx,
to know which slot is being offloaded and to compare the context
in remote subsystem memory if needed.
xhci_get_ep_ctx
- Allow xhci hook to get an ep_ctx from an xhci_container_ctx,
to know which endpoint is being offloaded and to compare the
context in remote subsystem memory if needed.
xhci_handle_event
- Allow xhci hook to handle the xhci events from the USB
controller.
xhci_update_erst_dequeue
- If xhci events were handled by an xhci hook, it needs to update
the ERST dequeue pointer to let the USB controller know the
events have been handled.
xhci_ring_alloc
- Allocate a struct xhci_ring.
xhci_alloc_erst
xhci_free_erst
- Allocate and free event ring segment tables.
xhci_trb_virt_to_dma
- Used to retrieve the DMA address of a TRB
xhci_ring_cmd_db
- Notify the controller when a new command is issued
xhci_alloc_command
xhci_free_command
- Allocate and free a struct xhci_command
xhci_queue_stop_endpoint
- Issue a stop endpoint command to the controller
xhci_segment_free
- Free a segment struct.
xhci_link_segments
- Make the prev segment point to the next segment.
xhci_initialize_ring_info
- Initialize a ring struct.
xhci_check_trb_in_td_math
- Check TRB math for validation.
xhci_get_endpoint_address
- Get endpoint address from endpoint index.
xhci_address_device
- Issue an address device command
xhci_bus_suspend
xhci_bus_resume
- Suspend and resume the bus for power management scenarios
xhci_remove_stream_mapping
- Remove stream mapping in stream endpoint
xhci_remove_segment_mapping
- Remove segment mapping
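For reference, a minimal sketch of how an offload driver could combine
some of these exports to issue a stop endpoint command; the helper
below is hypothetical and not part of this patch:

	/* Hypothetical helper built only on the symbols exported above. */
	static int offload_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
					 unsigned int ep_index)
	{
		struct xhci_command *cmd;
		unsigned long flags;
		int ret;

		/* Second argument allocates cmd->completion for us. */
		cmd = xhci_alloc_command(xhci, true, GFP_KERNEL);
		if (!cmd)
			return -ENOMEM;

		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, ep_index, 0);
		if (ret) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_free_command(xhci, cmd);
			return ret;
		}
		/* Ring the command doorbell so the controller sees the TRB. */
		xhci_ring_cmd_db(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);

		wait_for_completion(cmd->completion);
		xhci_free_command(xhci, cmd);
		return 0;
	}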
Signed-off-by: Daehwan Jung <dh10.jung@samsung.com>
---
drivers/usb/host/xhci-hub.c | 2 ++
drivers/usb/host/xhci-mem.c | 29 +++++++++++++++++++++--------
drivers/usb/host/xhci-ring.c | 9 +++++++--
drivers/usb/host/xhci.c | 4 +++-
4 files changed, 33 insertions(+), 11 deletions(-)
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index df3522dab31b..6c1b8d748d0f 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1809,6 +1809,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
return 0;
}
+EXPORT_SYMBOL_GPL(xhci_bus_suspend);
/*
* Workaround for missing Cold Attach Status (CAS) if device re-plugged in S3.
@@ -1953,6 +1954,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
spin_unlock_irqrestore(&xhci->lock, flags);
return 0;
}
+EXPORT_SYMBOL_GPL(xhci_bus_resume);
unsigned long xhci_get_resuming_ports(struct usb_hcd *hcd)
{
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 0e312066c5c6..e6d56ef91ddb 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -65,7 +65,7 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
return seg;
}
-static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
+void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
if (seg->trbs) {
dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
@@ -74,8 +74,9 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
kfree(seg->bounce_buf);
kfree(seg);
}
+EXPORT_SYMBOL_GPL(xhci_segment_free);
-static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
+void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
struct xhci_segment *first)
{
struct xhci_segment *seg;
@@ -96,9 +97,9 @@ static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
* DMA address of the next segment. The caller needs to set any Link TRB
* related flags, such as End TRB, Toggle Cycle, and no snoop.
*/
-static void xhci_link_segments(struct xhci_segment *prev,
- struct xhci_segment *next,
- enum xhci_ring_type type, bool chain_links)
+void xhci_link_segments(struct xhci_segment *prev,
+ struct xhci_segment *next,
+ enum xhci_ring_type type, bool chain_links)
{
u32 val;
@@ -118,6 +119,7 @@ static void xhci_link_segments(struct xhci_segment *prev,
prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
}
}
+EXPORT_SYMBOL_GPL(xhci_link_segments);
/*
* Link the ring to the new segments.
@@ -206,7 +208,7 @@ static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
return ret;
}
-static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
+void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
struct xhci_segment *seg)
{
unsigned long key;
@@ -215,6 +217,7 @@ static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
if (radix_tree_lookup(trb_address_map, key))
radix_tree_delete(trb_address_map, key);
}
+EXPORT_SYMBOL_GPL(xhci_remove_segment_mapping);
static int xhci_update_stream_segment_mapping(
struct radix_tree_root *trb_address_map,
@@ -256,7 +259,7 @@ static int xhci_update_stream_segment_mapping(
return ret;
}
-static void xhci_remove_stream_mapping(struct xhci_ring *ring)
+void xhci_remove_stream_mapping(struct xhci_ring *ring)
{
struct xhci_segment *seg;
@@ -269,6 +272,7 @@ static void xhci_remove_stream_mapping(struct xhci_ring *ring)
seg = seg->next;
} while (seg != ring->first_seg);
}
+EXPORT_SYMBOL_GPL(xhci_remove_stream_mapping);
static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
{
@@ -292,6 +296,7 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
kfree(ring);
}
+EXPORT_SYMBOL_GPL(xhci_ring_free);
void xhci_initialize_ring_info(struct xhci_ring *ring,
unsigned int cycle_state)
@@ -316,6 +321,7 @@ void xhci_initialize_ring_info(struct xhci_ring *ring,
*/
ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
+EXPORT_SYMBOL_GPL(xhci_initialize_ring_info);
/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
@@ -407,6 +413,7 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
kfree(ring);
return NULL;
}
+EXPORT_SYMBOL_GPL(xhci_ring_alloc);
void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
@@ -519,6 +526,7 @@ struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
return (struct xhci_slot_ctx *)
(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}
+EXPORT_SYMBOL_GPL(xhci_get_slot_ctx);
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx,
@@ -1755,6 +1763,7 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
INIT_LIST_HEAD(&command->cmd_list);
return command;
}
+EXPORT_SYMBOL_GPL(xhci_alloc_command);
struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
bool allocate_completion, gfp_t mem_flags)
@@ -1788,6 +1797,7 @@ void xhci_free_command(struct xhci_hcd *xhci,
kfree(command->completion);
kfree(command);
}
+EXPORT_SYMBOL_GPL(xhci_free_command);
int xhci_alloc_erst(struct xhci_hcd *xhci,
struct xhci_ring *evt_ring,
@@ -1818,6 +1828,7 @@ int xhci_alloc_erst(struct xhci_hcd *xhci,
return 0;
}
+EXPORT_SYMBOL_GPL(xhci_alloc_erst);
void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
{
@@ -1831,6 +1842,7 @@ void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
erst->erst_dma_addr);
erst->entries = NULL;
}
+EXPORT_SYMBOL_GPL(xhci_free_erst);
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
@@ -1969,7 +1981,7 @@ static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
}
/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
-static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
+int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
{
struct {
dma_addr_t input_dma;
@@ -2089,6 +2101,7 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
xhci_dbg(xhci, "TRB math tests passed.\n");
return 0;
}
+EXPORT_SYMBOL_GPL(xhci_check_trb_in_td_math);
static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index d0b6806275e0..2e99393560e5 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -79,6 +79,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
return 0;
return seg->dma + (segment_offset * sizeof(*trb));
}
+EXPORT_SYMBOL_GPL(xhci_trb_virt_to_dma);
static bool trb_is_noop(union xhci_trb *trb)
{
@@ -311,6 +312,7 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
/* Flush PCI posted writes */
readl(&xhci->dba->doorbell[0]);
}
+EXPORT_SYMBOL_GPL(xhci_ring_cmd_db);
static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
{
@@ -2965,7 +2967,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* Returns >0 for "possibly more events to process" (caller should call again),
* otherwise 0 if done. In future, <0 returns should indicate error code.
*/
-static int xhci_handle_event(struct xhci_hcd *xhci)
+int xhci_handle_event(struct xhci_hcd *xhci)
{
union xhci_trb *event;
int update_ptrs = 1;
@@ -3034,13 +3036,14 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
*/
return 1;
}
+EXPORT_SYMBOL_GPL(xhci_handle_event);
/*
* Update Event Ring Dequeue Pointer:
* - When all events have finished
* - To avoid "Event Ring Full Error" condition
*/
-static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
+void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
union xhci_trb *event_ring_deq)
{
u64 temp_64;
@@ -3070,6 +3073,7 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
temp_64 |= ERST_EHB;
xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
}
+EXPORT_SYMBOL_GPL(xhci_update_erst_dequeue);
/*
* xHCI spec says we can get an interrupt, and if the HC has an error condition,
@@ -4420,6 +4424,7 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
return queue_command(xhci, cmd, 0, 0, 0,
trb_slot_id | trb_ep_index | type | trb_suspend, false);
}
+EXPORT_SYMBOL_GPL(xhci_queue_stop_endpoint);
int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
int slot_id, unsigned int ep_index,
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index dc357cabb265..041a65a6f175 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1449,6 +1449,7 @@ unsigned int xhci_get_endpoint_address(unsigned int ep_index)
unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
return direction | number;
}
+EXPORT_SYMBOL_GPL(xhci_get_endpoint_address);
/* Find the flag for this endpoint (for use in the control context). Use the
* endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
@@ -4306,10 +4307,11 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
return ret;
}
-static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
}
+EXPORT_SYMBOL_GPL(xhci_address_device);
static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
{
--
2.31.1
[PATCH 2/3] usb: host: add xhci hooks for USB offload
From: Daehwan Jung @ 2022-02-03 2:57 UTC
To: Mathias Nyman, Greg Kroah-Hartman
Cc: open list:USB XHCI DRIVER, open list, Howard Yen, Jack Pham,
Puma Hsu, J . Avila, Daehwan Jung
To enable support for the USB offload feature, define an "offload"
property in the USB controller node of the device tree. The "offload"
value can be used to determine which type of offload is enabled in
the SoC.
For example:

&usbdrd_dwc3 {
	...
	/* support usb offloading, 0: disabled, 1: audio */
	offload = <1>;
	...
};
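A vendor driver could read the property during probe, for example as
follows (a sketch; the probe context and the policy around the value
are assumptions, only the "offload" binding comes from this patch):

	/* Hypothetical probe-time check of the "offload" DT property. */
	u32 offload = 0;

	/* Leaves offload at 0 (disabled) when the property is absent. */
	of_property_read_u32(pdev->dev.of_node, "offload", &offload);
	if (offload == 1)
		dev_info(&pdev->dev, "USB audio offload requested\n");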
There are several vendor_ops introduced by this patch:
struct xhci_vendor_ops - function callbacks for vendor specific operations
{
@vendor_init:
- called for vendor init process during xhci-plat-hcd
probe.
@vendor_cleanup:
- called for vendor cleanup process during xhci-plat-hcd
remove.
@is_usb_offload_enabled:
- called to check if USB offload is enabled.
@queue_irq_work:
- called to queue vendor specific irq work.
@alloc_dcbaa:
- called when allocating vendor specific dcbaa during
memory initialization.
@free_dcbaa:
- called to free vendor specific dcbaa when cleaning up
the memory.
@alloc_transfer_ring:
- called when vendor specific transfer ring allocation is required
@free_transfer_ring:
- called to free vendor specific transfer ring
@sync_dev_ctx:
- called when synchronization for device context is required
@usb_offload_skip_urb:
- called to check whether an urb enqueue should be skipped
when usb offload is enabled
@alloc_container_ctx:
- called when allocating vendor specific container context
@free_container_ctx:
- called to free vendor specific container context
}
The xhci hooks are named with an "xhci_vendor_" prefix after the
corresponding ops in xhci_vendor_ops. For example, the vendor_init op
is invoked by the xhci_vendor_init() hook, the is_usb_offload_enabled
op is invoked by xhci_vendor_is_usb_offload_enabled(), and so on.
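As a usage sketch, a vendor module might register its ops before
xhci-plat probes; the driver below is hypothetical, and only
xhci_plat_register_vendor_ops() and struct xhci_vendor_ops come from
this patch:

	#include <linux/module.h>
	#include "xhci.h"
	#include "xhci-plat.h"

	static bool my_is_usb_offload_enabled(struct xhci_hcd *xhci,
					      struct xhci_virt_device *vdev,
					      unsigned int ep_index)
	{
		/* Hypothetical policy: offload is always available. */
		return true;
	}

	static struct xhci_vendor_ops my_vendor_ops = {
		.is_usb_offload_enabled	= my_is_usb_offload_enabled,
		/* Unset callbacks are skipped by the xhci_vendor_*() hooks. */
	};

	static int __init my_offload_init(void)
	{
		/* Must run before xhci-plat probe so xhci_vendor_init() sees it. */
		return xhci_plat_register_vendor_ops(&my_vendor_ops);
	}
	module_init(my_offload_init);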
Signed-off-by: Daehwan Jung <dh10.jung@samsung.com>
---
drivers/usb/host/xhci-hub.c | 5 ++
drivers/usb/host/xhci-mem.c | 131 +++++++++++++++++++++++++++++++----
drivers/usb/host/xhci-plat.c | 43 +++++++++++-
drivers/usb/host/xhci-plat.h | 8 +++
drivers/usb/host/xhci-ring.c | 13 ++++
drivers/usb/host/xhci.c | 94 ++++++++++++++++++++++++-
drivers/usb/host/xhci.h | 48 +++++++++++++
7 files changed, 325 insertions(+), 17 deletions(-)
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 6c1b8d748d0f..f7ed19cba8ad 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -535,8 +535,13 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
cmd->status == COMP_COMMAND_RING_STOPPED) {
xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
ret = -ETIME;
+ goto cmd_cleanup;
}
+ ret = xhci_vendor_sync_dev_ctx(xhci, slot_id);
+ if (ret)
+ xhci_warn(xhci, "Sync device context failed, ret=%d\n", ret);
+
cmd_cleanup:
xhci_free_command(xhci, cmd);
return ret;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index e6d56ef91ddb..5defbaf427ed 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -367,6 +367,54 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
return 0;
}
+static void xhci_vendor_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
+{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+ if (ops && ops->free_container_ctx)
+ ops->free_container_ctx(xhci, ctx);
+}
+
+static void xhci_vendor_alloc_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
+ int type, gfp_t flags)
+{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+ if (ops && ops->alloc_container_ctx)
+ ops->alloc_container_ctx(xhci, ctx, type, flags);
+}
+
+static struct xhci_ring *xhci_vendor_alloc_transfer_ring(struct xhci_hcd *xhci,
+ u32 endpoint_type, enum xhci_ring_type ring_type,
+ unsigned int max_packet, gfp_t mem_flags)
+{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+ if (ops && ops->alloc_transfer_ring)
+ return ops->alloc_transfer_ring(xhci, endpoint_type, ring_type,
+ max_packet, mem_flags);
+ return NULL;
+}
+
+void xhci_vendor_free_transfer_ring(struct xhci_hcd *xhci,
+ struct xhci_ring *ring, unsigned int ep_index)
+{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+ if (ops && ops->free_transfer_ring)
+ ops->free_transfer_ring(xhci, ring, ep_index);
+}
+
+bool xhci_vendor_is_usb_offload_enabled(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev, unsigned int ep_index)
+{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+ if (ops && ops->is_usb_offload_enabled)
+ return ops->is_usb_offload_enabled(xhci, virt_dev, ep_index);
+ return false;
+}
+
/*
* Create a new ring with zero or more segments.
*
@@ -419,7 +467,11 @@ void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
unsigned int ep_index)
{
- xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
+ if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, ep_index))
+ xhci_vendor_free_transfer_ring(xhci, virt_dev->eps[ep_index].ring, ep_index);
+ else
+ xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
+
virt_dev->eps[ep_index].ring = NULL;
}
@@ -478,6 +530,7 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
{
struct xhci_container_ctx *ctx;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
return NULL;
@@ -491,7 +544,12 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
if (type == XHCI_CTX_TYPE_INPUT)
ctx->size += CTX_SIZE(xhci->hcc_params);
- ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
+ if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0) &&
+ (ops && ops->alloc_container_ctx))
+ xhci_vendor_alloc_container_ctx(xhci, ctx, type, flags);
+ else
+ ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
+
if (!ctx->bytes) {
kfree(ctx);
return NULL;
@@ -502,9 +560,16 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
void xhci_free_container_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
if (!ctx)
return;
- dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
+ if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0) &&
+ (ops && ops->free_container_ctx))
+ xhci_vendor_free_container_ctx(xhci, ctx);
+ else
+ dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
+
kfree(ctx);
}
@@ -897,7 +962,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
for (i = 0; i < 31; i++) {
if (dev->eps[i].ring)
- xhci_ring_free(xhci, dev->eps[i].ring);
+ xhci_free_endpoint_ring(xhci, dev, i);
if (dev->eps[i].stream_info)
xhci_free_stream_info(xhci,
dev->eps[i].stream_info);
@@ -1495,8 +1560,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
mult = 0;
/* Set up the endpoint ring */
- virt_dev->eps[ep_index].new_ring =
- xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
+ if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, ep_index) &&
+ usb_endpoint_xfer_isoc(&ep->desc)) {
+ virt_dev->eps[ep_index].new_ring =
+ xhci_vendor_alloc_transfer_ring(xhci, endpoint_type, ring_type,
+ max_packet, mem_flags);
+ } else {
+ virt_dev->eps[ep_index].new_ring =
+ xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
+ }
+
if (!virt_dev->eps[ep_index].new_ring)
return -ENOMEM;
@@ -1844,6 +1917,24 @@ void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
}
EXPORT_SYMBOL_GPL(xhci_free_erst);
+static struct xhci_device_context_array *xhci_vendor_alloc_dcbaa(
+ struct xhci_hcd *xhci, gfp_t flags)
+{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+ if (ops && ops->alloc_dcbaa)
+ return ops->alloc_dcbaa(xhci, flags);
+ return NULL;
+}
+
+static void xhci_vendor_free_dcbaa(struct xhci_hcd *xhci)
+{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+ if (ops && ops->free_dcbaa)
+ ops->free_dcbaa(xhci);
+}
+
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
@@ -1898,9 +1989,13 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Freed medium stream array pool");
- if (xhci->dcbaa)
- dma_free_coherent(dev, sizeof(*xhci->dcbaa),
- xhci->dcbaa, xhci->dcbaa->dma);
+ if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0)) {
+ xhci_vendor_free_dcbaa(xhci);
+ } else {
+ if (xhci->dcbaa)
+ dma_free_coherent(dev, sizeof(*xhci->dcbaa),
+ xhci->dcbaa, xhci->dcbaa->dma);
+ }
xhci->dcbaa = NULL;
scratchpad_free(xhci);
@@ -2441,15 +2536,21 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* xHCI section 5.4.6 - doorbell array must be
* "physically contiguous and 64-byte (cache line) aligned".
*/
- xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
- flags);
- if (!xhci->dcbaa)
- goto fail;
- xhci->dcbaa->dma = dma;
+ if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0)) {
+ xhci->dcbaa = xhci_vendor_alloc_dcbaa(xhci, flags);
+ if (!xhci->dcbaa)
+ goto fail;
+ } else {
+ xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
+ flags);
+ if (!xhci->dcbaa)
+ goto fail;
+ xhci->dcbaa->dma = dma;
+ }
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Device context base array address = 0x%llx (DMA), %p (virt)",
(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
- xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
+ xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr);
/*
* Initialize the ring segment pool. The ring must be a contiguous
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index c1edcc9b13ce..21280a6d589e 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -184,6 +184,41 @@ static const struct of_device_id usb_xhci_of_match[] = {
MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
#endif
+static struct xhci_plat_priv_overwrite xhci_plat_vendor_overwrite;
+
+int xhci_plat_register_vendor_ops(struct xhci_vendor_ops *vendor_ops)
+{
+ if (vendor_ops == NULL)
+ return -EINVAL;
+
+ xhci_plat_vendor_overwrite.vendor_ops = vendor_ops;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xhci_plat_register_vendor_ops);
+
+static int xhci_vendor_init(struct xhci_hcd *xhci)
+{
+ struct xhci_vendor_ops *ops = NULL;
+
+ if (xhci_plat_vendor_overwrite.vendor_ops)
+ ops = xhci->vendor_ops = xhci_plat_vendor_overwrite.vendor_ops;
+
+ if (ops && ops->vendor_init)
+ return ops->vendor_init(xhci);
+ return 0;
+}
+
+static void xhci_vendor_cleanup(struct xhci_hcd *xhci)
+{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+ if (ops && ops->vendor_cleanup)
+ ops->vendor_cleanup(xhci);
+
+ xhci->vendor_ops = NULL;
+}
+
static int xhci_plat_probe(struct platform_device *pdev)
{
const struct xhci_plat_priv *priv_match;
@@ -339,6 +374,10 @@ static int xhci_plat_probe(struct platform_device *pdev)
goto put_usb3_hcd;
}
+ ret = xhci_vendor_init(xhci);
+ if (ret)
+ goto disable_usb_phy;
+
hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
xhci->shared_hcd->tpl_support = hcd->tpl_support;
@@ -418,8 +457,10 @@ static int xhci_plat_remove(struct platform_device *dev)
usb_phy_shutdown(hcd->usb_phy);
usb_remove_hcd(hcd);
- usb_put_hcd(shared_hcd);
+ xhci_vendor_cleanup(xhci);
+
+ usb_put_hcd(shared_hcd);
clk_disable_unprepare(clk);
clk_disable_unprepare(reg_clk);
usb_put_hcd(hcd);
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 561d0b7bce09..e726a572321d 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -13,6 +13,7 @@
struct xhci_plat_priv {
const char *firmware_name;
unsigned long long quirks;
+ struct xhci_vendor_data *vendor_data;
int (*plat_setup)(struct usb_hcd *);
void (*plat_start)(struct usb_hcd *);
int (*init_quirk)(struct usb_hcd *);
@@ -22,4 +23,11 @@ struct xhci_plat_priv {
#define hcd_to_xhci_priv(h) ((struct xhci_plat_priv *)hcd_to_xhci(h)->priv)
#define xhci_to_priv(x) ((struct xhci_plat_priv *)(x)->priv)
+
+struct xhci_plat_priv_overwrite {
+ struct xhci_vendor_ops *vendor_ops;
+};
+
+int xhci_plat_register_vendor_ops(struct xhci_vendor_ops *vendor_ops);
+
#endif /* _XHCI_PLAT_H */
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 2e99393560e5..e611f8d7d587 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3075,6 +3075,15 @@ void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
}
EXPORT_SYMBOL_GPL(xhci_update_erst_dequeue);
+static irqreturn_t xhci_vendor_queue_irq_work(struct xhci_hcd *xhci)
+{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+ if (ops && ops->queue_irq_work)
+ return ops->queue_irq_work(xhci);
+ return IRQ_NONE;
+}
+
/*
* xHCI spec says we can get an interrupt, and if the HC has an error condition,
* we might get bad data out of the event ring. Section 4.10.2.7 has a list of
@@ -3108,6 +3117,10 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
goto out;
}
+ ret = xhci_vendor_queue_irq_work(xhci);
+ if (ret == IRQ_HANDLED)
+ goto out;
+
/*
* Clear the op reg interrupt status first,
* so we can receive interrupts from other MSI-X interrupters.
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 041a65a6f175..10d5e20b710d 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1621,6 +1621,11 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
return -ENODEV;
}
+ if (xhci_vendor_usb_offload_skip_urb(xhci, urb)) {
+ xhci_dbg(xhci, "skip urb for usb offload\n");
+ return -EOPNOTSUPP;
+ }
+
if (usb_endpoint_xfer_isoc(&urb->ep->desc))
num_tds = urb->number_of_packets;
else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
@@ -2964,6 +2969,14 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
xhci_finish_resource_reservation(xhci, ctrl_ctx);
spin_unlock_irqrestore(&xhci->lock, flags);
}
+ if (ret)
+ goto failed;
+
+ ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
+ if (ret)
+ xhci_warn(xhci, "sync device context failed, ret=%d", ret);
+
+failed:
return ret;
}
@@ -3107,7 +3120,11 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
for (i = 0; i < 31; i++) {
if (virt_dev->eps[i].new_ring) {
xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
- xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
+ if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, i))
+ xhci_vendor_free_transfer_ring(xhci, virt_dev->eps[i].new_ring, i);
+ else
+ xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
+
virt_dev->eps[i].new_ring = NULL;
}
}
@@ -3272,6 +3289,13 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
wait_for_completion(stop_cmd->completion);
+ err = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
+ if (err) {
+ xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
+ __func__, err);
+ goto cleanup;
+ }
+
spin_lock_irqsave(&xhci->lock, flags);
/* config ep command clears toggle if add and drop ep flags are set */
@@ -3303,6 +3327,11 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
wait_for_completion(cfg_cmd->completion);
+ err = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
+ if (err)
+ xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
+ __func__, err);
+
xhci_free_command(xhci, cfg_cmd);
cleanup:
xhci_free_command(xhci, stop_cmd);
@@ -3848,6 +3877,13 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
/* Wait for the Reset Device command to finish */
wait_for_completion(reset_device_cmd->completion);
+ ret = xhci_vendor_sync_dev_ctx(xhci, slot_id);
+ if (ret) {
+ xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
+ __func__, ret);
+ goto command_cleanup;
+ }
+
/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
* unless we tried to reset a slot ID that wasn't enabled,
* or the device wasn't in the addressed or configured state.
@@ -4093,6 +4129,14 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
goto disable_slot;
}
+
+ ret = xhci_vendor_sync_dev_ctx(xhci, slot_id);
+ if (ret) {
+ xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
+ __func__, ret);
+ goto disable_slot;
+ }
+
vdev = xhci->devs[slot_id];
slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
trace_xhci_alloc_dev(slot_ctx);
@@ -4223,6 +4267,13 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
wait_for_completion(command->completion);
+ ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
+ if (ret) {
+ xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
+ __func__, ret);
+ goto out;
+ }
+
/* FIXME: From section 4.3.4: "Software shall be responsible for timing
* the SetAddress() "recovery interval" required by USB and aborting the
* command on a timeout.
@@ -4371,6 +4422,14 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
return -ENOMEM;
}
+ ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
+ __func__, ret);
+ return ret;
+ }
+
xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -4395,6 +4454,30 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
return ret;
}
+struct xhci_vendor_ops *xhci_vendor_get_ops(struct xhci_hcd *xhci)
+{
+ return xhci->vendor_ops;
+}
+EXPORT_SYMBOL_GPL(xhci_vendor_get_ops);
+
+int xhci_vendor_sync_dev_ctx(struct xhci_hcd *xhci, unsigned int slot_id)
+{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+ if (ops && ops->sync_dev_ctx)
+ return ops->sync_dev_ctx(xhci, slot_id);
+ return 0;
+}
+
+bool xhci_vendor_usb_offload_skip_urb(struct xhci_hcd *xhci, struct urb *urb)
+{
+ struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
+
+ if (ops && ops->usb_offload_skip_urb)
+ return ops->usb_offload_skip_urb(xhci, urb);
+ return false;
+}
+
#ifdef CONFIG_PM
/* BESL to HIRD Encoding array for USB2 LPM */
@@ -5131,6 +5214,15 @@ static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
return -ENOMEM;
}
+ ret = xhci_vendor_sync_dev_ctx(xhci, hdev->slot_id);
+ if (ret) {
+ xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
+ __func__, ret);
+ xhci_free_command(xhci, config_cmd);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return ret;
+ }
+
xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 5a75fe563123..2efe8ddaab5f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1928,6 +1928,7 @@ struct xhci_hcd {
struct list_head regset_list;
void *dbc;
+
/* platform-specific data -- must come last */
unsigned long priv[] __aligned(sizeof(s64));
};
@@ -2206,6 +2207,53 @@ static inline struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
urb->stream_id);
}
+/**
+ * struct xhci_vendor_ops - function callbacks for vendor specific operations
+ * @vendor_init: called for vendor init process
+ * @vendor_cleanup: called for vendor cleanup process
+ * @is_usb_offload_enabled: called to check if usb offload enabled
+ * @queue_irq_work: called to queue vendor specific irq work
+ * @alloc_dcbaa: called when allocating vendor specific dcbaa
+ * @free_dcbaa: called to free vendor specific dcbaa
+ * @alloc_transfer_ring: called when remote transfer ring allocation is required
+ * @free_transfer_ring: called to free vendor specific transfer ring
+ * @sync_dev_ctx: called when synchronization for device context is required
+ * @alloc_container_ctx: called when allocating vendor specific container context
+ * @free_container_ctx: called to free vendor specific container context
+ */
+struct xhci_vendor_ops {
+ int (*vendor_init)(struct xhci_hcd *xhci);
+ void (*vendor_cleanup)(struct xhci_hcd *xhci);
+ bool (*is_usb_offload_enabled)(struct xhci_hcd *xhci,
+ struct xhci_virt_device *vdev,
+ unsigned int ep_index);
+ irqreturn_t (*queue_irq_work)(struct xhci_hcd *xhci);
+
+ struct xhci_device_context_array *(*alloc_dcbaa)(struct xhci_hcd *xhci,
+ gfp_t flags);
+ void (*free_dcbaa)(struct xhci_hcd *xhci);
+
+ struct xhci_ring *(*alloc_transfer_ring)(struct xhci_hcd *xhci,
+ u32 endpoint_type, enum xhci_ring_type ring_type,
+ unsigned int max_packet, gfp_t mem_flags);
+ void (*free_transfer_ring)(struct xhci_hcd *xhci,
+ struct xhci_ring *ring, unsigned int ep_index);
+ int (*sync_dev_ctx)(struct xhci_hcd *xhci, unsigned int slot_id);
+ bool (*usb_offload_skip_urb)(struct xhci_hcd *xhci, struct urb *urb);
+ void (*alloc_container_ctx)(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
+ int type, gfp_t flags);
+ void (*free_container_ctx)(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
+};
+
+struct xhci_vendor_ops *xhci_vendor_get_ops(struct xhci_hcd *xhci);
+
+int xhci_vendor_sync_dev_ctx(struct xhci_hcd *xhci, unsigned int slot_id);
+bool xhci_vendor_usb_offload_skip_urb(struct xhci_hcd *xhci, struct urb *urb);
+void xhci_vendor_free_transfer_ring(struct xhci_hcd *xhci,
+ struct xhci_ring *ring, unsigned int ep_index);
+bool xhci_vendor_is_usb_offload_enabled(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev, unsigned int ep_index);
+
/*
* TODO: As per spec Isochronous IDT transmissions are supported. We bypass
* them anyways as we where unable to find a device that matches the
--
2.31.1