* [PATCH 01/12] thunderbolt: Add initial cactus ridge NHI support
2013-11-29 1:35 [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed) Andreas Noever
@ 2013-11-29 1:35 ` Andreas Noever
2013-11-29 1:35 ` [PATCH 02/12] thunderbolt: Add configuration channel interface Andreas Noever
` (11 subsequent siblings)
12 siblings, 0 replies; 23+ messages in thread
From: Andreas Noever @ 2013-11-29 1:35 UTC (permalink / raw)
To: linux-kernel; +Cc: Andreas Noever
Thunderbolt hotplug is supposed to be handled by the firmware. But Apple
decided to implement thunderbolt at the operating system level. The
firmware only initializes thunderbolt devices that are present at boot
time. This driver enables hotplug of non-chained thunderbolt
devices on Apple systems.
This first patch adds the Kconfig file as well as the parts of the driver
which talk directly to the hardware (that is pci device setup, interrupt
handling and RX/TX ring management).
Signed-off-by: Andreas Noever <andreas.noever@gmail.com>
---
drivers/Kconfig | 2 +
drivers/Makefile | 1 +
drivers/thunderbolt/Kconfig | 12 +
drivers/thunderbolt/Makefile | 3 +
drivers/thunderbolt/dsl3510.c | 591 +++++++++++++++++++++++++++++++++++++
drivers/thunderbolt/dsl3510.h | 116 ++++++++
drivers/thunderbolt/dsl3510_regs.h | 102 +++++++
7 files changed, 827 insertions(+)
create mode 100644 drivers/thunderbolt/Kconfig
create mode 100644 drivers/thunderbolt/Makefile
create mode 100644 drivers/thunderbolt/dsl3510.c
create mode 100644 drivers/thunderbolt/dsl3510.h
create mode 100644 drivers/thunderbolt/dsl3510_regs.h
diff --git a/drivers/Kconfig b/drivers/Kconfig
index b3138fb..c242b6c 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -170,4 +170,6 @@ source "drivers/phy/Kconfig"
source "drivers/powercap/Kconfig"
+source "drivers/thunderbolt/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 3cc8214..0062353 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -155,3 +155,4 @@ obj-$(CONFIG_IPACK_BUS) += ipack/
obj-$(CONFIG_NTB) += ntb/
obj-$(CONFIG_FMC) += fmc/
obj-$(CONFIG_POWERCAP) += powercap/
+obj-$(CONFIG_THUNDERBOLT) += thunderbolt/
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
new file mode 100644
index 0000000..281787a
--- /dev/null
+++ b/drivers/thunderbolt/Kconfig
@@ -0,0 +1,12 @@
+menuconfig THUNDERBOLT
+ tristate "Apple Thunderbolt hotplug support"
+ default no
+ help
+ Cactus Ridge Thunderbolt Controller driver
+ This driver is required if you want to hotplug Thunderbolt devices on
+ Apple hardware.
+
+ Device chaining is currently not supported.
+
+ To compile this driver as a module, choose M here. The module will be
+ called thunderbolt.
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
new file mode 100644
index 0000000..32b0504
--- /dev/null
+++ b/drivers/thunderbolt/Makefile
@@ -0,0 +1,3 @@
+obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
+thunderbolt-objs := dsl3510.o
+
diff --git a/drivers/thunderbolt/dsl3510.c b/drivers/thunderbolt/dsl3510.c
new file mode 100644
index 0000000..2a326f6
--- /dev/null
+++ b/drivers/thunderbolt/dsl3510.c
@@ -0,0 +1,591 @@
+/*
+ * Cactus Ridge NHI driver
+ *
+ * Copyright (c) 2013 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
+#include "dsl3510.h"
+#include "dsl3510_regs.h"
+
+#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
+
+static int get_interrupt_index(struct tb_ring *ring)
+{
+ int bit = ring->hop;
+ if (!ring->is_tx)
+ bit += ring->nhi->hop_count;
+ return bit;
+}
+
+/**
+ * interrupt_active() - activate/deactivate interrupts for a single ring
+ *
+ * ring->nhi->lock must be held.
+ */
+static void interrupt_active(struct tb_ring *ring, bool active)
+{
+ int reg = get_interrupt_index(ring) / 32 + REG_RING_INTERRUPT_BASE;
+ int bit = get_interrupt_index(ring) & 31;
+ u32 value;
+ value = ioread32(ring->nhi->iobase + reg);
+ if (active) {
+ if (value & (1 << bit)) {
+ dev_WARN(&ring->nhi->pdev->dev,
+ "interrupt for %s %d is already enabled\n",
+ RING_TYPE(ring),
+ ring->hop);
+ }
+ value |= 1 << bit;
+ } else {
+ if (!(value & (1 << bit))) {
+ dev_WARN(&ring->nhi->pdev->dev,
+ "interrupt for %s %d is already disabled\n",
+ RING_TYPE(ring),
+ ring->hop);
+ }
+ value &= ~(1 << bit);
+ }
+ dev_info(&ring->nhi->pdev->dev,
+ "%s interrupt at register %#x bit %d (new value: %#x)\n",
+ active ? "enabling" : "disabling",
+ reg,
+ bit,
+ value);
+ iowrite32(value, ring->nhi->iobase + reg);
+}
+
+/**
+ * dsl3510_disable_interrupts() - disable interrupts for all rings
+ */
+static void dsl3510_disable_interrupts(struct tb_nhi *nhi)
+{
+ int i = 0;
+ /* disable interrupts */
+ for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
+ iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);
+
+ /* clear interrupt status bits */
+ for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
+ ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
+}
+
+static void __iomem *ring_desc_base(struct tb_ring *ring)
+{
+ void __iomem *io = ring->nhi->iobase;
+ io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
+ io += ring->hop * 16;
+ return io;
+}
+
+static void __iomem *ring_options_base(struct tb_ring *ring)
+{
+ void __iomem *io = ring->nhi->iobase;
+ io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
+ io += ring->hop * 32;
+ return io;
+}
+
+static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
+{
+ iowrite16(value, ring_desc_base(ring) + offset);
+}
+
+static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
+{
+ iowrite32(value, ring_desc_base(ring) + offset);
+}
+
+static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
+{
+ iowrite32(value, ring_desc_base(ring) + offset);
+ iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
+}
+
+static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
+{
+ iowrite32(value, ring_options_base(ring) + offset);
+}
+
+static bool ring_full(struct tb_ring *ring)
+{
+ return ((ring->head + 1) % ring->size) == ring->tail;
+}
+
+static bool ring_empty(struct tb_ring *ring)
+{
+ return ring->head == ring->tail;
+}
+
+/* ring->lock is held. */
+static void ring_write_descriptors(struct tb_ring *ring)
+{
+ struct ring_packet *pkg, *n;
+ struct ring_desc *descriptor;
+ list_for_each_entry_safe(pkg, n, &ring->queue, list) {
+ if (ring_full(ring))
+ break;
+ list_move_tail(&pkg->list, &ring->in_flight);
+ descriptor = &ring->descriptors[ring->head];
+ descriptor->phys = pkg->buffer_phy;
+ descriptor->time = 0;
+ descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
+ if (ring->is_tx) {
+ descriptor->length = pkg->size;
+ descriptor->eof = pkg->eof;
+ descriptor->sof = pkg->sof;
+ }
+ ring->head = (ring->head + 1) % ring->size;
+ ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
+ }
+}
+
+static void ring_handle_interrupt(struct work_struct *work)
+{
+ struct tb_ring *ring = container_of(work, typeof(*ring), work);
+ struct ring_packet *pkg, *n;
+ bool invoke_callback = false;
+ mutex_lock(&ring->lock);
+ if (ring->in_shutdown)
+ goto out;
+ list_for_each_entry_safe(pkg, n, &ring->in_flight, list) {
+ if (ring_empty(ring))
+ break;
+ if (!(ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED))
+ break;
+ list_move_tail(&pkg->list, &ring->done);
+ invoke_callback = true;
+ if (!ring->is_tx) {
+ pkg->size = ring->descriptors[ring->tail].length;
+ pkg->eof = ring->descriptors[ring->tail].eof;
+ pkg->sof = ring->descriptors[ring->tail].sof;
+ pkg->flags = ring->descriptors[ring->tail].flags;
+ if (pkg->sof != 0)
+ dev_WARN(&ring->nhi->pdev->dev,
+ "%s %d got unexpected SOF: %#x\n",
+ RING_TYPE(ring),
+ ring->hop,
+ pkg->sof);
+
+ /*
+ * known flags:
+ * raw enabled: 0xa
+ * raw not enabled: 0xb
+ * partial packet (>MAX_FRAME_SIZE): 0xe
+ */
+ if (pkg->flags != 0xa && pkg->flags != 0xb)
+ dev_WARN(&ring->nhi->pdev->dev,
+ "%s %d got unexpected flags: %#x\n",
+ RING_TYPE(ring),
+ ring->hop,
+ pkg->flags);
+ }
+ ring->tail = (ring->tail + 1) % ring->size;
+ }
+
+ ring_write_descriptors(ring);
+out:
+ mutex_unlock(&ring->lock); /* allow the callback to queue new work */
+ if (invoke_callback)
+ ring->callback(ring, ring->callback_data);
+}
+
+void __ring_enqueue(struct tb_ring *ring, struct ring_packet *pkg)
+{
+ mutex_lock(&ring->lock);
+ if (ring->in_shutdown) {
+ pkg->canceled = true;
+ list_add_tail(&pkg->list, &ring->done);
+ } else {
+ pkg->canceled = false;
+ list_add_tail(&pkg->list, &ring->queue);
+ ring_write_descriptors(ring);
+ }
+ mutex_unlock(&ring->lock);
+}
+
+/**
+ * ring_poll() - return completed packet if any
+ *
+ * Return: Returns the first completed packet from the done queue. Returns NULL
+ * if the queue is empty.
+ */
+struct ring_packet *ring_poll(struct tb_ring *ring)
+{
+ struct ring_packet *pkg = NULL;
+ mutex_lock(&ring->lock);
+ if (!list_empty(&ring->done)) {
+ pkg = list_entry(ring->done.next, typeof(*pkg), list);
+ list_del(&pkg->list);
+ }
+ mutex_unlock(&ring->lock);
+ return pkg;
+}
+
+static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
+ ring_cb callback, void *callback_data,
+ bool transmit)
+{
+ struct tb_ring *ring = NULL;
+ dev_info(&nhi->pdev->dev,
+ "allocating %s ring %d\n",
+ transmit ? "TX" : "RX",
+ hop);
+ mutex_lock(&nhi->lock);
+ if (hop >= nhi->hop_count) {
+ dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
+ goto err;
+ }
+ if (transmit && nhi->tx_rings[hop]) {
+ dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
+ goto err;
+ } else if (!transmit && nhi->rx_rings[hop]) {
+ dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
+ goto err;
+ }
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ goto err;
+
+ mutex_init(&ring->lock);
+ ring->nhi = nhi;
+ ring->size = size;
+ ring->hop = hop;
+ ring->head = 0;
+ ring->tail = 0;
+ ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
+ size * sizeof(*ring->descriptors),
+ &ring->descriptors_dma,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!ring->descriptors)
+ goto err;
+
+ ring->callback = callback;
+ ring->callback_data = callback_data;
+ ring->is_tx = transmit;
+ ring->in_shutdown = false;
+ INIT_LIST_HEAD(&ring->queue);
+ INIT_LIST_HEAD(&ring->in_flight);
+ INIT_LIST_HEAD(&ring->done);
+ INIT_WORK(&ring->work, ring_handle_interrupt);
+
+ if (transmit)
+ nhi->tx_rings[hop] = ring;
+ else
+ nhi->rx_rings[hop] = ring;
+
+ ring_iowrite64desc(ring, ring->descriptors_dma, 0);
+ if (ring->is_tx) {
+ ring_iowrite32desc(ring, size, 12);
+ ring_iowrite32options(ring, 0, 4); /* time related? */
+ ring_iowrite32options(ring,
+ RING_FLAG_ENABLE | RING_FLAG_RAW,
+ 0);
+ } else {
+ ring_iowrite32desc(ring, (TB_FRAME_SIZE << 16) | size, 12);
+ ring_iowrite32options(ring, 0xffffffff, 4); /* SOF EOF mask */
+ ring_iowrite32options(ring,
+ RING_FLAG_ENABLE | RING_FLAG_RAW,
+ 0);
+ }
+ interrupt_active(ring, true);
+ mutex_unlock(&nhi->lock);
+ return ring;
+err:
+ mutex_unlock(&nhi->lock);
+ if (ring)
+ mutex_destroy(&ring->lock);
+ kfree(ring);
+ return NULL;
+}
+
+struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
+ ring_cb callback, void *callback_data)
+{
+ return ring_alloc(nhi, hop, size, callback, callback_data, true);
+}
+
+struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
+ ring_cb callback, void *callback_data)
+{
+ return ring_alloc(nhi, hop, size, callback, callback_data, false);
+}
+
+/**
+ * ring_drain_and_free() - shutdown a ring
+ *
+ * This method will disable the ring, cancel all packets and repeatedly
+ * call the callback function until all packets have been drained through
+ * ring_poll.
+ *
+ * When this method returns all invocations of ring->callback will have
+ * finished.
+ *
+ * Must NOT be called from ring->callback!
+ */
+void ring_drain_and_free(struct tb_ring *ring)
+{
+ struct ring_packet *pkg;
+ dev_info(&ring->nhi->pdev->dev,
+ "stopping %s %d\n",
+ RING_TYPE(ring),
+ ring->hop);
+
+ mutex_lock(&ring->nhi->lock);
+ mutex_lock(&ring->lock);
+ if (ring->in_shutdown) {
+ dev_WARN(&ring->nhi->pdev->dev,
+ "%s %d already in_shutdown!\n",
+ RING_TYPE(ring),
+ ring->is_tx);
+ mutex_unlock(&ring->nhi->lock);
+ mutex_unlock(&ring->lock);
+ return;
+ }
+ ring->in_shutdown = true;
+
+ interrupt_active(ring, false);
+ if (ring->is_tx)
+ ring->nhi->tx_rings[ring->hop] = NULL;
+ else
+ ring->nhi->rx_rings[ring->hop] = NULL;
+
+ ring_iowrite32options(ring, 0, 0);
+ ring_iowrite64desc(ring, 0, 0);
+ ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
+ ring_iowrite32desc(ring, 0, 12);
+ dma_free_coherent(&ring->nhi->pdev->dev,
+ ring->size * sizeof(*ring->descriptors),
+ ring->descriptors,
+ ring->descriptors_dma);
+ ring->descriptors = 0;
+ ring->descriptors_dma = 0;
+ ring->head = 0;
+ ring->tail = 0;
+ ring->size = 0;
+
+ /* Move all packets to the done queue and mark them as canceled. */
+ list_for_each_entry(pkg, &ring->in_flight, list)
+ pkg->canceled = true;
+ list_for_each_entry(pkg, &ring->queue, list)
+ pkg->canceled = true;
+ list_splice_tail(&ring->in_flight, &ring->done);
+ list_splice_tail(&ring->queue, &ring->done);
+
+ mutex_unlock(&ring->lock);
+ mutex_unlock(&ring->nhi->lock);
+ /*
+ * From this point on ring->work will not get scheduled again. Any new
+ * packets added to the ring will be directly canceled and moved to the
+ * done queue. The in_flight and queue lists will remain empty.
+ */
+ cancel_work_sync(&ring->work);
+
+ mutex_lock(&ring->lock);
+ while (!list_empty(&ring->done)) {
+ mutex_unlock(&ring->lock);
+ ring->callback(ring, ring->callback_data);
+ mutex_lock(&ring->lock);
+ }
+ mutex_unlock(&ring->lock);
+ mutex_destroy(&ring->lock);
+ kfree(ring);
+}
+
+static void dsl3510_handle_interrupt(struct work_struct *work)
+{
+ struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_task);
+ int value = 0; /* Suppress uninitialized usage warning. */
+ int bit;
+ int hop = -1;
+ int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
+ struct tb_ring *ring;
+
+ mutex_lock(&nhi->lock);
+
+ /*
+ * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
+ * (TX, RX, RX overflow). We iterate over the bits and read a new
+ * dword as required. The registers are cleared on read.
+ */
+ for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
+ if (bit % 32 == 0)
+ value = ioread32(nhi->iobase
+ + REG_RING_NOTIFY_BASE
+ + 4 * (bit / 32));
+ if (++hop == nhi->hop_count) {
+ hop = 0;
+ type++;
+ }
+ if ((value & (1 << (bit % 32))) == 0)
+ continue;
+ if (type == 2) {
+ dev_warn(&nhi->pdev->dev,
+ "RX overflow for ring %d\n",
+ hop);
+ continue;
+ }
+ if (type == 0)
+ ring = nhi->tx_rings[hop];
+ else
+ ring = nhi->rx_rings[hop];
+ if (ring == NULL) {
+ dev_warn(&nhi->pdev->dev,
+ "got interrupt for inactive %s ring %d\n",
+ type ? "RX" : "TX",
+ hop);
+ continue;
+ }
+ schedule_work(&ring->work);
+ }
+ mutex_unlock(&nhi->lock);
+}
+
+static irqreturn_t dsl3510_msi(int irq, void *data)
+{
+ struct tb_nhi *nhi = data;
+ schedule_work(&nhi->interrupt_task);
+ return IRQ_HANDLED;
+}
+
+static void dsl3510_shutdown(struct tb_nhi *nhi)
+{
+ int i;
+ dev_info(&nhi->pdev->dev, "shutdown\n");
+
+ for (i = 0; i < nhi->hop_count; i++) {
+ if (nhi->tx_rings[i])
+ dev_WARN(&nhi->pdev->dev,
+ "TX ring %d is still active\n",
+ i);
+ if (nhi->rx_rings[i])
+ dev_WARN(&nhi->pdev->dev,
+ "RX ring %d is still active\n",
+ i);
+ }
+ dsl3510_disable_interrupts(nhi);
+ /*
+ * We have to release the irq before calling flush_work. Otherwise an
+ * already executing IRQ handler could call schedule_work again.
+ */
+ devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
+ flush_work(&nhi->interrupt_task);
+ mutex_destroy(&nhi->lock);
+}
+
+static int dsl3510_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct tb_nhi *nhi;
+ int res;
+
+ res = pcim_enable_device(pdev);
+ if (res) {
+ dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
+ return res;
+ }
+
+ res = pci_enable_msi(pdev);
+ if (res) {
+ dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
+ return res;
+ }
+
+ res = pcim_iomap_regions(pdev, 1 << 0, "DSL3510");
+ if (res) {
+ dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
+ return res;
+ }
+
+ nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
+ if (!nhi)
+ return -ENOMEM;
+
+ nhi->pdev = pdev;
+ /* cannot fail - table is allocated by pcim_iomap_regions */
+ nhi->iobase = pcim_iomap_table(pdev)[0];
+ nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
+ if (nhi->hop_count != 12)
+ dev_warn(&pdev->dev,
+ "unexpected hop count: %d\n",
+ nhi->hop_count);
+ INIT_WORK(&nhi->interrupt_task, dsl3510_handle_interrupt);
+
+ nhi->tx_rings = devm_kzalloc(&pdev->dev,
+ nhi->hop_count * sizeof(struct tb_ring),
+ GFP_KERNEL);
+ nhi->rx_rings = devm_kzalloc(&pdev->dev,
+ nhi->hop_count * sizeof(struct tb_ring),
+ GFP_KERNEL);
+ if (!nhi->tx_rings || !nhi->rx_rings)
+ return -ENOMEM;
+
+ dsl3510_disable_interrupts(nhi); /* In case someone left them on. */
+ res = devm_request_irq(&pdev->dev,
+ pdev->irq,
+ dsl3510_msi,
+ 0,
+ "dsl3510",
+ nhi);
+ if (res) {
+ dev_err(&pdev->dev, "request_irq failed, aborting\n");
+ return res;
+ }
+
+ mutex_init(&nhi->lock);
+
+ pci_set_master(pdev);
+
+ /* magic value - clock related? */
+ iowrite32(3906250 / 10000, nhi->iobase + 0x38c00);
+
+ pci_set_drvdata(pdev, nhi); /* for dsl3510_remove only */
+
+ return 0;
+}
+
+static void dsl3510_remove(struct pci_dev *pdev)
+{
+ struct tb_nhi *nhi = pci_get_drvdata(pdev);
+ dsl3510_shutdown(nhi);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(dsl3510_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1547)},
+ { 0,}
+};
+
+MODULE_DEVICE_TABLE(pci, dsl3510_ids);
+
+static struct pci_driver dsl3510_driver = {
+ .name = "dsl3510",
+ .id_table = dsl3510_ids,
+ .probe = dsl3510_probe,
+ .remove = dsl3510_remove,
+};
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andreas Noever <andreas.noever@gmail.com>");
+
+static int __init dsl3510_init(void)
+{
+ struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1547, NULL);
+ if (dev)
+ pci_dev_put(dev);
+ else
+ printk(KERN_INFO "thunderbolt controller not found, try booting with acpi_osi=Darwin");
+
+ return pci_register_driver(&dsl3510_driver);
+}
+
+static void __exit dsl3510_unload(void)
+{
+ pci_unregister_driver(&dsl3510_driver);
+}
+
+module_init(dsl3510_init);
+module_exit(dsl3510_unload);
diff --git a/drivers/thunderbolt/dsl3510.h b/drivers/thunderbolt/dsl3510.h
new file mode 100644
index 0000000..b4b946d
--- /dev/null
+++ b/drivers/thunderbolt/dsl3510.h
@@ -0,0 +1,116 @@
+/*
+ * Cactus Ridge NHI driver
+ *
+ * Copyright (c) 2013 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#ifndef DSL3510_H_
+#define DSL3510_H_
+
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+/**
+ * struct tb_nhi - thunderbolt native host interface
+ */
+struct tb_nhi {
+ struct mutex lock; /*
+ * Must be held during ring creation/destruction.
* Is acquired by interrupt_task when dispatching
+ * interrupts to individual rings.
+ **/
+ struct pci_dev *pdev;
+ void __iomem *iobase;
+ struct tb_ring **tx_rings;
+ struct tb_ring **rx_rings;
+ struct work_struct interrupt_task;
+ u32 hop_count; /* Number of rings (end point hops) supported by NHI. */
+};
+
+typedef void (*ring_cb)(struct tb_ring*, void*);
+
+/**
+ * struct tb_ring - thunderbolt TX or RX ring associated with a NHI
+ *
+ * Will invoke callback once data is available. You should then call ring_poll
+ * repeatedly to retrieve all completed (or canceled) packets.
+ */
+struct tb_ring {
+ struct mutex lock; /* must be acquired after nhi->lock */
+ struct tb_nhi *nhi;
+ int size;
+ int hop;
+ int head; /* write next descriptor here */
+ int tail; /* complete next descriptor here */
+ struct ring_desc *descriptors;
+ dma_addr_t descriptors_dma;
+ ring_cb callback;
+ void *callback_data;
+ struct list_head queue;
+ struct list_head in_flight;
+ struct list_head done;
+ struct work_struct work;
+ bool is_tx;
+ bool in_shutdown;
+};
+
+/**
+ * struct ring_packet - packet for use with ring_rx/ring_tx
+ */
+struct ring_packet {
+ void *buffer;
+ dma_addr_t buffer_phy;
+ struct list_head list;
+ u32 size:12;
+ u16 flags:12;
+ u32 eof:4;
+ u32 sof:4;
+ u8 canceled:1;
+};
+
+#define TB_FRAME_SIZE 0x100 /* minimum size for ring_rx */
+
+struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
+ ring_cb callback, void *callback_data);
+struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
+ ring_cb callback, void *callback_data);
+void ring_drain_and_free(struct tb_ring *ring);
+
+void __ring_enqueue(struct tb_ring *ring, struct ring_packet *pkg);
+
+/**
+ * ring_rx() - enqueue a packet on an RX ring
+ *
+ * If the ring is about to shut down the packet will be marked as canceled and
+ * put on the done queue.
+ *
+ * pkg->buffer and pkg->buffer_phy have to be set. The buffer must contain at
+ * least TB_FRAME_SIZE bytes. After pkg has been handled and retrieved through
+ * ring_poll pkg->canceled, pkg->size, pkg->eof, pkg->sof and pkg->flags will
+ * be set.
+ */
+static inline void ring_rx(struct tb_ring *ring, struct ring_packet *pkg)
+{
+ WARN_ON(ring->is_tx);
+ __ring_enqueue(ring, pkg);
+}
+
+/**
+ * ring_tx() - enqueue a packet on a TX ring
+ *
+ * If the ring is about to shut down the packet will be marked as canceled and
+ * put on the done queue.
+ *
+ * pkg->buffer, pkg->buffer_phy, pkg->size, pkg->eof, pkg->sof have to be set.
+ * After pkg has been handled and retrieved through ring_poll pkg->canceled
+ * will be set.
+ */
+static inline void ring_tx(struct tb_ring *ring, struct ring_packet *pkg)
+{
+ WARN_ON(!ring->is_tx);
+ __ring_enqueue(ring, pkg);
+}
+
+struct ring_packet *ring_poll(struct tb_ring *ring);
+
+#endif
diff --git a/drivers/thunderbolt/dsl3510_regs.h b/drivers/thunderbolt/dsl3510_regs.h
new file mode 100644
index 0000000..fdfc758
--- /dev/null
+++ b/drivers/thunderbolt/dsl3510_regs.h
@@ -0,0 +1,102 @@
+/*
+ * Cactus Ridge registers
+ *
+ * Copyright (c) 2013 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#ifndef DSL3510_REGS_H_
+#define DSL3510_REGS_H_
+
+#include <linux/types.h>
+
+enum ring_flags {
+ RING_FLAG_ISOCH_ENABLE = 1 << 27, /* TX only ? */
+ RING_FLAG_E2E_FLOW_CONTROL = 1 << 28,
+ RING_FLAG_PCI_NO_SNOOP = 1 << 29,
+ RING_FLAG_RAW = 1 << 30, /* ignore EOF/SOF mask, include checksum */
+ RING_FLAG_ENABLE = 1 << 31,
+};
+
+enum ring_desc_flags {
+ RING_DESC_ISOCH = 0x1, /* TX only ? */
+ RING_DESC_COMPLETED = 0x2, /* set by NHI */
+ RING_DESC_POSTED = 0x4, /* always set this */
+ RING_DESC_INTERRUPT = 0x8, /* request an interrupt on completion */
+};
+
+/**
+ * struct ring_desc - TX/RX ring entry
+ *
+ * For TX set length/eof/sof.
+ * For RX length/eof/sof are set by the NHI.
+ */
+struct ring_desc {
+ u64 phys;
+ u32 length:12;
+ u32 eof:4;
+ u32 sof:4;
+ enum ring_desc_flags flags:12;
+ u32 time; /* write zero */
+} __packed;
+
+/* NHI registers in bar 0 */
+
+/*
+ * 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
+ * 00: physical pointer to an array of struct ring_desc
+ * 08: unknown
+ * 10: ring head (index of first non posted descriptor)
+ * 12: descriptor count
+ */
+#define REG_TX_RING_BASE 0x00000
+
+/*
+ * 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
+ * 00: physical pointer to an array of struct ring_desc
+ * 08: ring head (index of first non-posted descriptor)
+ * 10: unknown
+ * 12: descriptor count
+ * 14: max frame sizes (anything larger than 0x100 has no effect)
+ */
+#define REG_RX_RING_BASE 0x08000
+
+/*
+ * 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
+ * 00: enum_ring_flags
+ * 04: isoch time stamp ?? (write 0)
+ * ..: unknown
+ */
+#define REG_TX_OPTIONS_BASE 0x19800
+
+/*
+ * 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
+ * 00: enum ring_flags
+ * If RING_FLAG_E2E_FLOW_CONTROL is set then bits 13-23 must be set to
+ * the corresponding TX hop id.
+ * 04: EOF/SOF mask (ignored for RING_FLAG_RAW rings)
+ * 08: EOF/SOF mask (ignored for RING_FLAG_RAW rings)
+ * ..: unknown
+ */
+#define REG_RX_OPTIONS_BASE 0x29800
+
+/*
+ * three bitfields: tx, rx, rx overflow
+ * Every bitfield contains one bit for every hop (REG_HOP_COUNT). Registers are
+ * cleared on read. New interrupts are fired only after ALL registers have been
+ * read (even those containing only disabled rings).
+ */
+#define REG_RING_NOTIFY_BASE 0x37800
+#define RING_NOTIFY_REG_COUNT(nhi) ((31 + 3 * nhi->hop_count) / 32)
+
+/*
+ * two bitfields: rx, tx
+ * Both bitfields contains one bit for every hop (REG_HOP_COUNT). To
+ * enable/disable interrupts set/clear the corresponding bits.
+ */
+#define REG_RING_INTERRUPT_BASE 0x38200
+#define RING_INTERRUPT_REG_COUNT(nhi) ((31 + 2 * nhi->hop_count) / 32)
+
+/* The last 10 bits (mask 0x3ff) contain the number of hops supported by the NHI port. */
+#define REG_HOP_COUNT 0x39640
+
+#endif
--
1.8.4.2
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 02/12] thunderbolt: Add configuration channel interface
2013-11-29 1:35 [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed) Andreas Noever
2013-11-29 1:35 ` [PATCH 01/12] thunderbolt: Add initial cactus ridge NHI support Andreas Noever
@ 2013-11-29 1:35 ` Andreas Noever
2013-11-29 1:35 ` [PATCH 03/12] thunderbolt: Setup configuration channel Andreas Noever
` (10 subsequent siblings)
12 siblings, 0 replies; 23+ messages in thread
From: Andreas Noever @ 2013-11-29 1:35 UTC (permalink / raw)
To: linux-kernel; +Cc: Andreas Noever
Thunderbolt devices are configured by reading/writing into their
configuration space (similar to pci). This is done by sending packets
through the NHI (native host interface) onto the configuration channel.
This patch handles the low level packet based protocol and exposes
higher level operations like tb_cfg_read/tb_cfg_write.
Signed-off-by: Andreas Noever <andreas.noever@gmail.com>
---
drivers/thunderbolt/Makefile | 2 +-
drivers/thunderbolt/tb_cfg.c | 649 +++++++++++++++++++++++++++++++++++++++++++
drivers/thunderbolt/tb_cfg.h | 67 +++++
3 files changed, 717 insertions(+), 1 deletion(-)
create mode 100644 drivers/thunderbolt/tb_cfg.c
create mode 100644 drivers/thunderbolt/tb_cfg.h
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index 32b0504..f486295 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,3 +1,3 @@
obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
-thunderbolt-objs := dsl3510.o
+thunderbolt-objs := dsl3510.o tb_cfg.o
diff --git a/drivers/thunderbolt/tb_cfg.c b/drivers/thunderbolt/tb_cfg.c
new file mode 100644
index 0000000..bb958d5
--- /dev/null
+++ b/drivers/thunderbolt/tb_cfg.c
@@ -0,0 +1,649 @@
+/*
+ * Thunderbolt configuration channel
+ *
+ * Copyright (c) 2013 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/dmapool.h>
+#include <linux/workqueue.h>
+
+#include "tb_cfg.h"
+#include "dsl3510.h"
+
+
+/* config packet definitions */
+
+enum tb_cfg_pkg_type {
+ TB_CFG_PKG_READ = 1,
+ TB_CFG_PKG_WRITE = 2,
+ TB_CFG_PKG_ERROR = 3,
+ TB_CFG_PKG_NOTIFY_ACK = 4,
+ TB_CFG_PKG_EVENT = 5,
+ TB_CFG_PKG_XDOMAIN_REQ = 6,
+ TB_CFG_PKG_XDOMAIN_RESP = 7,
+ TB_CFG_PKG_OVERRIDE = 8,
+ TB_CFG_PKG_RESET = 9,
+ TB_CFG_PKG_PREPARE_TO_SLEEP = 0xd,
+};
+
+enum tb_cfg_error {
+ TB_CFG_ERROR_PORT_NOT_CONNECTED = 0,
+ TB_CFG_ERROR_INVALID_CONFIG_SPACE = 2,
+ TB_CFG_ERROR_NO_SUCH_PORT = 4,
+ TB_CFG_ERROR_ACK_PLUG_EVENT = 7,
+ TB_CFG_ERROR_LOOP = 8,
+};
+
+struct tb_cfg_header {
+ u32 route_hi:18;
+ u32 unknown:14; /* highest order bit is set on replies */
+ u32 route_lo;
+} __packed;
+
+struct tb_cfg_address {
+ u32 offset:13; /* in dwords */
+ u32 length:6; /* in dwords */
+ u32 port:6;
+ enum tb_cfg_space space:2;
+ u32 zero:5;
+} __packed;
+
+struct cfg_reset_pkg {
+ struct tb_cfg_header header;
+} __packed;
+
+struct cfg_error_pkg {
+ struct tb_cfg_header header;
+ enum tb_cfg_error error:4;
+ u32 zero1:4;
+ u32 port:6;
+ u32 zero2:2; /* Both should be zero, still they are different fields. */
+ u32 zero3:16;
+} __packed;
+
+struct cfg_plug_pkg {
+ struct tb_cfg_header header;
+ u32 port:6;
+ u32 zero:25;
+ bool unplug:1;
+} __packed;
+
+struct cfg_read_pkg {
+ struct tb_cfg_header header;
+ struct tb_cfg_address addr;
+} __packed;
+
+struct cfg_write_pkg {
+ struct tb_cfg_header header;
+ struct tb_cfg_address addr;
+ u32 data[64]; /* tb_cfg_address.length has 6 bits */
+} __packed;
+
+/* helper methods */
+
+static u64 get_route(struct tb_cfg_header header)
+{
+ return (u64) header.route_hi << 32 | header.route_lo;
+}
+
+static struct tb_cfg_header make_header(u64 route)
+{
+ struct tb_cfg_header header = {
+ .route_hi = route >> 32,
+ .route_lo = route,
+ };
+ /* check for overflow */
+ WARN_ON(get_route(header) != route);
+ return header;
+}
+
+static int decode_error(struct ring_packet *response)
+{
+ struct cfg_error_pkg *pkg = response->buffer;
+ u32 raw;
+ WARN_ON(response->eof != TB_CFG_PKG_ERROR);
+ if (WARN_ON(response->size != sizeof(*pkg)))
+ return -EIO;
+ raw = *(u32 *) (response->buffer + 8);
+ WARN_ON(pkg->zero1);
+ WARN_ON(pkg->zero2);
+ WARN_ON(pkg->zero3);
+ WARN_ON(pkg->header.unknown != 1 << 13);
+ switch (pkg->error) {
+ case TB_CFG_ERROR_PORT_NOT_CONNECTED:
+ /* Port is not connected. This can happen during surprise
+ * removal. Do not warn. */
+ return -ENODEV;
+ case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
+ /*
+ * Invalid cfg_space/offset/length combination in
+ * cfg_read/cfg_write.
+ */
+ WARN(1,
+ "CFG_ERROR(raw: %#x route: %#llx port: %d): Invalid config space of offset\n",
+ raw,
+ get_route(pkg->header),
+ pkg->port);
+ return -ENXIO;
+ case TB_CFG_ERROR_NO_SUCH_PORT:
+ /*
+ * - The route contains a non-existent port.
+ * - The route contains a non-PHY port (e.g. PCIe).
+ * - The port in cfg_read/cfg_write does not exist.
+ */
+ WARN(1,
+ "CFG_ERROR(raw: %#x route: %#llx port: %d): Invalid port\n",
+ raw,
+ get_route(pkg->header),
+ pkg->port);
+ return -ENXIO;
+ case TB_CFG_ERROR_LOOP:
+ WARN(1,
+ "CFG_ERROR(raw: %#x route: %#llx port: %d): Route contains a loop\n",
+ raw,
+ get_route(pkg->header),
+ pkg->port);
+ return -EIO;
+ default:
+ /* 5,6,7,9 and 11 are also valid error codes */
+ WARN(1,
+ "CFG_ERROR(raw: %#x route: %#llx port: %d): Unknown error\n",
+ raw,
+ get_route(pkg->header),
+ pkg->port);
+ return -EIO;
+ }
+}
+
+static int check_header(struct ring_packet *pkg, size_t expected_len,
+ u64 expected_route, enum tb_cfg_pkg_type expected_type)
+{
+ struct tb_cfg_header *header = pkg->buffer;
+ if (pkg->eof == TB_CFG_PKG_ERROR)
+ return decode_error(pkg);
+ if (WARN_ON(pkg->size != expected_len))
+ return -EIO;
+ if (WARN_ON(header->unknown != 1 << 13))
+ return -EIO;
+ if (WARN_ON(get_route(*header) != expected_route))
+ return -EIO;
+ if (WARN_ON(pkg->eof != expected_type))
+ return -EIO;
+ if (WARN_ON(pkg->sof != 0))
+ return -EIO;
+ return 0;
+}
+
+static bool check_config_address(const struct tb_cfg_address *addr,
+ enum tb_cfg_space space, uint32_t offset,
+ uint32_t length)
+{
+ if (WARN_ON(addr->zero))
+ return -EIO;
+ if (WARN_ON(addr->space != space))
+ return -EIO;
+ if (WARN_ON(addr->offset != offset))
+ return -EIO;
+ if (WARN_ON(addr->length != length))
+ return -EIO;
+ /*
+ * We cannot check addr->port as it is set to the upstream port of the
+ * sender.
+ */
+ return 0;
+}
+
+static void cpu_to_be32_array(__be32 *dst, u32 *src, size_t len)
+{
+ int i;
+ for (i = 0; i < len; i++)
+ dst[i] = cpu_to_be32(src[i]);
+}
+
+static void be32_to_cpu_array(u32 *dst, __be32 *src, size_t len)
+{
+ int i;
+ for (i = 0; i < len; i++)
+ dst[i] = be32_to_cpu(src[i]);
+}
+
+static __be32 tb_crc(void *data, size_t len)
+{
+ return cpu_to_be32(~__crc32c_le(~0, data, len));
+}
+
+/* RX/TX handling */
+
+/**
+ * tb_ctl_handle_plug_event() - acknowledge a plug event and invoke cfg->callback
+ *
+ * Runs in the RX callback path. A malformed event packet is silently
+ * dropped; otherwise the event is acknowledged with
+ * TB_CFG_ERROR_ACK_PLUG_EVENT (via tb_cfg_error) and forwarded to the
+ * registered hotplug callback.
+ */
+static void tb_ctl_handle_plug_event(struct tb_cfg *cfg,
+				     struct ring_packet *resp)
+{
+	struct cfg_plug_pkg *pkg = resp->buffer;
+	u64 route = get_route(pkg->header);
+	if (check_header(resp, sizeof(*pkg), route, TB_CFG_PKG_EVENT))
+		return;
+	if (tb_cfg_error(cfg, route, pkg->port))
+		tb_cfg_warn(cfg,
+			    "could not reset plug event on %llx:%x\n",
+			    route,
+			    pkg->port);
+	WARN_ON(pkg->zero);
+	cfg->callback(cfg->callback_data, route, pkg->port, pkg->unplug);
+}
+
+/* tb_ctl_free_packet() - return a packet's DMA buffer to the pool and free it */
+static void tb_ctl_free_packet(struct tb_cfg *cfg, struct ring_packet *pkg)
+{
+	dma_pool_free(cfg->packet_pool, pkg->buffer, pkg->buffer_phy);
+	kfree(pkg);
+}
+
+/**
+ * tb_ctl_alloc_packets() - allocate multiple packets safely
+ *
+ * On failure all partial allocations are rolled back and the corresponding
+ * entries of @packets are reset to NULL.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+static int tb_ctl_alloc_packets(struct tb_cfg *cfg,
+				struct ring_packet **packets, int num)
+{
+	int i;
+	for (i = 0; i < num; i++) {
+		packets[i] = kzalloc(sizeof(*packets[i]), GFP_KERNEL);
+		if (!packets[i])
+			goto err;
+		packets[i]->buffer = dma_pool_alloc(cfg->packet_pool,
+						    GFP_KERNEL,
+						    &packets[i]->buffer_phy);
+		if (!packets[i]->buffer) {
+			/* release the half-constructed entry right away */
+			kfree(packets[i]);
+			packets[i] = NULL;
+			goto err;
+		}
+	}
+	return 0;
+err:
+	/*
+	 * Free entries 0..i-1, which are fully allocated at this point.
+	 * (The previous "while (--i > 0)" loop leaked both packets[0] and,
+	 * on dma_pool_alloc failure, the partially allocated packets[i].)
+	 */
+	while (--i >= 0) {
+		dma_pool_free(cfg->packet_pool,
+			      packets[i]->buffer,
+			      packets[i]->buffer_phy);
+		kfree(packets[i]);
+		packets[i] = NULL;
+	}
+	return -ENOMEM;
+}
+
+/**
+ * tb_cfg_tx() - transmit a packet on the config channel
+ *
+ * len must be a multiple of four (data is converted to big endian dword by
+ * dword) and must leave room for the 4 byte checksum within TB_FRAME_SIZE.
+ *
+ * The packet is freed by tb_cfg_tx_callback once the hardware has sent it.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+static int tb_cfg_tx(struct tb_cfg *cfg, void *data, size_t len,
+		     enum tb_cfg_pkg_type type)
+{
+	struct ring_packet *pkg;
+	if (len % 4 != 0) { /* required for le->be conversion */
+		tb_cfg_WARN(cfg, "TX: invalid size: %zu\n", len);
+		return -EINVAL;
+	}
+	if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
+		tb_cfg_WARN(cfg,
+			    "TX: packet too large: %zu/%d\n",
+			    len,
+			    TB_FRAME_SIZE - 4);
+		return -EINVAL;
+	}
+	if (tb_ctl_alloc_packets(cfg, &pkg, 1))
+		return -ENOMEM;
+	cpu_to_be32_array(pkg->buffer, data, len / 4);
+	/* checksum goes directly after the payload */
+	*(u32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);
+	pkg->size = len + 4;
+	pkg->sof = type;
+	pkg->eof = type;
+	ring_tx(cfg->tx, pkg);
+	return 0;
+}
+
+/* Reclaim packets that the hardware has finished transmitting. */
+static void tb_cfg_tx_callback(struct tb_ring *ring, void *priv)
+{
+	struct tb_cfg *cfg = priv;
+
+	for (;;) {
+		struct ring_packet *sent = ring_poll(ring);
+		if (!sent)
+			break;
+		tb_ctl_free_packet(cfg, sent);
+	}
+}
+
+/**
+ * tb_cfg_rx() - receive a packet from the config channel
+ *
+ * Will timeout after a few seconds. The caller should requeue the packet as
+ * soon as possible.
+ *
+ * Return: Returns a received ring_packet or NULL.
+ */
+static struct ring_packet *tb_cfg_rx(struct tb_cfg *cfg)
+{
+	struct ring_packet *pkg;
+	/*
+	 * Most operations finish within a few milliseconds. But pci up/down
+	 * ports will block during activation/deactivation. In the future we might
+	 * have to implement variable timeouts or find a way to handle received
+	 * packets out of order. Until then we just use a high timeout.
+	 */
+	if (!wait_for_completion_timeout(&cfg->response_ready,
+					 msecs_to_jiffies(30000))) {
+		tb_cfg_WARN(cfg, "RX: timeout\n");
+		return NULL;
+	}
+
+	/* tb_cfg_rx_callback signals response_ready after filling the fifo */
+	if (!kfifo_get(&cfg->response_fifo, &pkg)) {
+		tb_cfg_err(cfg,
+			   "RX: completion was signaled, but fifo was empty\n");
+		return NULL;
+	}
+	return pkg;
+}
+
+/*
+ * tb_cfg_rx_callback() - validate received packets and dispatch them
+ *
+ * Invalid packets (bad size or checksum) are dropped and requeued. Plug
+ * events are handled inline; everything else is handed to tb_cfg_rx via
+ * the response fifo. Packets that go through the "rx" label are requeued
+ * on the RX ring immediately; fifo'd packets are requeued by the consumer.
+ */
+static void tb_cfg_rx_callback(struct tb_ring *ring, void *priv)
+{
+	struct tb_cfg *cfg = priv;
+	struct ring_packet *pkg;
+	while ((pkg = ring_poll(ring))) {
+		if (pkg->canceled) {
+			/* ring is shutting down; do not requeue */
+			tb_ctl_free_packet(cfg, pkg);
+			continue;
+		}
+		if (pkg->size < 4 || pkg->size % 4 != 0) {
+			tb_cfg_err(cfg,
+				   "RX: invalid size %d, dropping packet\n",
+				   pkg->size);
+			goto rx;
+		}
+
+		pkg->size -= 4; /* remove checksum */
+		if (*(u32 *) (pkg->buffer + pkg->size)
+				!= tb_crc(pkg->buffer, pkg->size)) {
+			tb_cfg_err(cfg,
+				   "RX: checksum mismatch, dropping packet\n");
+			goto rx;
+		}
+		be32_to_cpu_array(pkg->buffer, pkg->buffer, pkg->size / 4);
+
+		if (pkg->eof == TB_CFG_PKG_EVENT) {
+			tb_ctl_handle_plug_event(cfg, pkg);
+			goto rx;
+		}
+		if (!kfifo_put(&cfg->response_fifo, pkg)) {
+			tb_cfg_err(cfg, "RX: fifo is full\n");
+			goto rx;
+		}
+		complete(&cfg->response_ready);
+		continue;
+rx:
+		ring_rx(cfg->rx, pkg);
+	}
+}
+
+
+/* public interface */
+
+/**
+ * tb_cfg_alloc() - allocate a config channel
+ *
+ * cb will be invoked once for every hot plug event.
+ *
+ * On any failure tb_cfg_free() is used for cleanup, so it must cope with a
+ * partially initialized cfg.
+ *
+ * Return: Returns a pointer on success or NULL on failure.
+ */
+struct tb_cfg *tb_cfg_alloc(struct tb_nhi *nhi, hotplug_cb cb, void *cb_data)
+{
+	int i;
+	struct tb_cfg *cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+	struct ring_packet *rx_packets[10] = { };
+	if (!cfg)
+		return NULL;
+	cfg->nhi = nhi;
+	cfg->callback = cb;
+	cfg->callback_data = cb_data;
+
+	init_completion(&cfg->response_ready);
+	INIT_KFIFO(cfg->response_fifo);
+	/* NOTE(review): "dls3510" looks like a typo for "dsl3510" — confirm */
+	cfg->packet_pool = dma_pool_create("dls3510 cfg", &nhi->pdev->dev,
+					   TB_FRAME_SIZE, 4, 0);
+	if (!cfg->packet_pool)
+		goto err;
+
+	/* tb_ctl_handle_plug_event uses cfg->tx. Allocate cfg->tx first. */
+	cfg->tx = ring_alloc_tx(nhi, 0, 10, tb_cfg_tx_callback, cfg);
+	if (!cfg->tx)
+		goto err;
+
+	cfg->rx = ring_alloc_rx(nhi, 0, 10, tb_cfg_rx_callback, cfg);
+	if (!cfg->rx)
+		goto err;
+
+	/*
+	 * After the first call to ring_rx we will start to receive plug
+	 * notifications and call cfg->callback. After this we can no longer
+	 * bail out cleanly. To avoid this situation we allocate all packets
+	 * BEFORE calling ring_rx.
+	 */
+	if (tb_ctl_alloc_packets(cfg, rx_packets, 10))
+		goto err;
+	for (i = 0; i < 10; i++)
+		ring_rx(cfg->rx, rx_packets[i]);
+
+	tb_cfg_info(cfg, "config channel created\n");
+	return cfg;
+err:
+	tb_cfg_free(cfg);
+	return NULL;
+}
+
+/**
+ * tb_cfg_free() - shutdown and free a config channel
+ *
+ * All calls to cfg->callback will have finished on return.
+ *
+ * Must NOT be called from cfg->callback.
+ *
+ * Safe to call on a partially initialized cfg (see tb_cfg_alloc).
+ */
+void tb_cfg_free(struct tb_cfg *cfg)
+{
+	struct ring_packet *pkg;
+	/*
+	 * cfg->tx is used in tb_ctl_handle_plug_event. Free cfg->tx
+	 * after cfg->rx.
+	 */
+	if (cfg->rx)
+		ring_drain_and_free(cfg->rx);
+	if (cfg->tx)
+		ring_drain_and_free(cfg->tx);
+	/* release any responses that were never picked up by tb_cfg_rx */
+	while (kfifo_get(&cfg->response_fifo, &pkg))
+		tb_ctl_free_packet(cfg, pkg);
+	if (cfg->packet_pool)
+		dma_pool_destroy(cfg->packet_pool);
+	kfree(cfg);
+}
+
+/**
+ * tb_cfg_error() - send error packet
+ *
+ * Currently the error code is hardcoded to TB_CFG_ERROR_ACK_PLUG_EVENT.
+ *
+ * Fire and forget: no response is awaited.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_cfg_error(struct tb_cfg *cfg, u64 route, u32 port)
+{
+	struct cfg_error_pkg pkg = {
+		.header = make_header(route),
+		.port = port,
+		.error = TB_CFG_ERROR_ACK_PLUG_EVENT,
+	};
+	tb_cfg_info(cfg, "resetting error on %llx:%x.\n", route, port);
+	return tb_cfg_tx(cfg, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
+}
+
+/**
+ * tb_cfg_reset() - send a reset packet and wait for the response
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_cfg_reset(struct tb_cfg *cfg, u64 route)
+{
+	struct cfg_reset_pkg pkg = { .header = make_header(route) };
+	struct ring_packet *response;
+	int res = tb_cfg_tx(cfg, &pkg, sizeof(pkg), TB_CFG_PKG_RESET);
+	if (res)
+		return res;
+
+	response = tb_cfg_rx(cfg);
+	if (!response)
+		return -EIO;
+
+	/* the reset response consists of the 8 byte header only */
+	res = check_header(response, 8, route, TB_CFG_PKG_RESET);
+	ring_rx(cfg->rx, response); /* requeue */
+	return res;
+}
+
+/**
+ * tb_cfg_read() - read from config space into buffer
+ *
+ * offset and length are in dwords.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_cfg_read(struct tb_cfg *cfg, void *buffer, uint64_t route, uint32_t port,
+		enum tb_cfg_space space, uint32_t offset, uint32_t length)
+{
+	struct cfg_read_pkg pkg = {
+		.header = make_header(route),
+		.addr = {
+			.port = port,
+			.space = space,
+			.offset = offset,
+			.length = length,
+		},
+	};
+	struct ring_packet *response;
+	/*
+	 * A read response carries data and therefore has the same layout as
+	 * a write request — hence cfg_write_pkg here.
+	 */
+	struct cfg_write_pkg *reply;
+	int res;
+
+	res = tb_cfg_tx(cfg, &pkg, sizeof(pkg), TB_CFG_PKG_READ);
+	if (res)
+		return res;
+
+	response = tb_cfg_rx(cfg);
+	if (!response)
+		return -EIO;
+
+	/* 12 bytes header/address + length dwords of payload */
+	res = check_header(response, 12 + 4 * length, route, TB_CFG_PKG_READ);
+	if (res)
+		goto out;
+
+	reply = response->buffer;
+	res = check_config_address(&reply->addr, space, offset, length);
+	if (res)
+		goto out;
+
+	memcpy(buffer, &reply->data, 4 * length);
+out:
+	ring_rx(cfg->rx, response); /* requeue */
+	return res;
+}
+
+/**
+ * tb_cfg_write() - write from buffer into config space
+ *
+ * offset and length are in dwords.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_cfg_write(struct tb_cfg *cfg, void *buffer, uint64_t route,
+		 uint32_t port, enum tb_cfg_space space, uint32_t offset,
+		 uint32_t length)
+{
+	struct cfg_write_pkg pkg = {
+		.header = make_header(route),
+		.addr = {
+			.port = port,
+			.space = space,
+			.offset = offset,
+			.length = length,
+		},
+	};
+	struct ring_packet *response;
+	/*
+	 * A write response carries no data and therefore has the same layout
+	 * as a read request — hence cfg_read_pkg here.
+	 */
+	struct cfg_read_pkg *reply;
+	int res;
+
+	memcpy(&pkg.data, buffer, length * 4);
+
+	/* only send 12 bytes header/address + the actual payload */
+	res = tb_cfg_tx(cfg, &pkg, 12 + 4 * length, TB_CFG_PKG_WRITE);
+	if (res)
+		return res;
+
+	response = tb_cfg_rx(cfg);
+	if (!response)
+		return -EIO;
+
+	res = check_header(response, sizeof(*reply), route, TB_CFG_PKG_WRITE);
+	if (res)
+		goto out;
+
+	reply = response->buffer;
+	res = check_config_address(&reply->addr, space, offset, length);
+out:
+	ring_rx(cfg->rx, response); /* requeue */
+	return res;
+}
+
+/**
+ * tb_cfg_get_upstream_port() - get upstream port number of switch at route
+ *
+ * Reads the first dword from the switches TB_CFG_SWITCH config area and
+ * returns the port number from which the reply originated.
+ *
+ * This information is in principle present in every read/write response. We
+ * expose it only here in order to keep the normal API streamlined.
+ *
+ * Return: Returns the upstream port number on success or an error code on
+ * failure.
+ */
+int tb_cfg_get_upstream_port(struct tb_cfg *cfg, u64 route)
+{
+	struct cfg_read_pkg pkg = {
+		.header = make_header(route),
+		.addr = {
+			.port = 0,
+			.space = TB_CFG_SWITCH,
+			.offset = 0,
+			.length = 1,
+		},
+	};
+	struct ring_packet *response;
+	struct cfg_write_pkg *reply; /* read response == write request layout */
+	int res;
+
+	res = tb_cfg_tx(cfg, &pkg, sizeof(pkg), TB_CFG_PKG_READ);
+	if (res)
+		return res;
+
+	response = tb_cfg_rx(cfg);
+	if (!response)
+		return -EIO;
+
+	res = check_header(response, 12 + 4, route, TB_CFG_PKG_READ);
+	if (res)
+		goto out;
+
+	reply = response->buffer;
+	res = check_config_address(&reply->addr, TB_CFG_SWITCH, 0, 1);
+	if (res)
+		goto out;
+	/* addr->port is set by the responding switch to its upstream port */
+	res = reply->addr.port;
+out:
+	ring_rx(cfg->rx, response); /* requeue */
+	return res;
+}
diff --git a/drivers/thunderbolt/tb_cfg.h b/drivers/thunderbolt/tb_cfg.h
new file mode 100644
index 0000000..fa3a950
--- /dev/null
+++ b/drivers/thunderbolt/tb_cfg.h
@@ -0,0 +1,67 @@
+/*
+ * Thunderbolt configuration channel
+ *
+ * Copyright (c) 2013 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#ifndef _TB_CFG
+#define _TB_CFG
+
+#include <linux/types.h>
+#include <linux/kfifo.h>
+#include <linux/dmapool.h>
+
+#include "dsl3510.h"
+
+typedef void (*hotplug_cb)(void *data, u64 route, u8 port, bool unplug);
+
+/**
+ * struct tb_cfg - thunderbolt configuration channel
+ *
+ * Uses ring 0 of the NHI in both directions. Responses are passed from the
+ * RX callback to waiting readers through response_fifo/response_ready.
+ */
+struct tb_cfg {
+	struct tb_nhi *nhi;
+	struct tb_ring *tx;
+	struct tb_ring *rx;
+
+	struct dma_pool *packet_pool; /* TB_FRAME_SIZE packet buffers */
+
+	/* responses queued by the RX callback, consumed by tb_cfg_rx */
+	DECLARE_KFIFO(response_fifo, struct ring_packet*, 16);
+
+	struct completion response_ready;
+
+	hotplug_cb callback; /* invoked once per plug event */
+	void *callback_data;
+};
+
+/* Logging helpers: all messages are attributed to the NHI PCI device. */
+#define tb_cfg_WARN(cfg, format, arg...) \
+	dev_WARN(&(cfg)->nhi->pdev->dev, format, ## arg)
+
+#define tb_cfg_err(cfg, format, arg...) \
+	dev_err(&(cfg)->nhi->pdev->dev, format, ## arg)
+
+#define tb_cfg_warn(cfg, format, arg...) \
+	dev_warn(&(cfg)->nhi->pdev->dev, format, ## arg)
+
+#define tb_cfg_info(cfg, format, arg...) \
+	dev_info(&(cfg)->nhi->pdev->dev, format, ## arg)
+
+struct tb_cfg *tb_cfg_alloc(struct tb_nhi *nhi, hotplug_cb cb, void *cb_data);
+void tb_cfg_free(struct tb_cfg *cfg);
+
+/* Config spaces addressable through read/write requests on each port. */
+enum tb_cfg_space {
+	TB_CFG_HOPS = 0,
+	TB_CFG_PORT = 1,
+	TB_CFG_SWITCH = 2,	/* only valid on port 0 */
+	TB_CFG_COUNTERS = 3,
+};
+
+int tb_cfg_error(struct tb_cfg *cfg, u64 route, u32 port);
+int tb_cfg_reset(struct tb_cfg *cfg, u64 route);
+int tb_cfg_read(struct tb_cfg *cfg, void *buffer, uint64_t route, uint32_t port,
+ enum tb_cfg_space space, uint32_t offset, uint32_t length);
+int tb_cfg_write(struct tb_cfg *cfg, void *buffer, uint64_t route,
+ uint32_t port, enum tb_cfg_space space, uint32_t offset,
+ uint32_t length);
+int tb_cfg_get_upstream_port(struct tb_cfg *cfg, u64 route);
+
+#endif
--
1.8.4.2
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 03/12] thunderbolt: Setup configuration channel
2013-11-29 1:35 [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed) Andreas Noever
2013-11-29 1:35 ` [PATCH 01/12] thunderbolt: Add initial cactus ridge NHI support Andreas Noever
2013-11-29 1:35 ` [PATCH 02/12] thunderbolt: Add configuration channel interface Andreas Noever
@ 2013-11-29 1:35 ` Andreas Noever
2013-11-29 1:35 ` [PATCH 04/12] thunderbolt: Add tb_regs.h Andreas Noever
` (9 subsequent siblings)
12 siblings, 0 replies; 23+ messages in thread
From: Andreas Noever @ 2013-11-29 1:35 UTC (permalink / raw)
To: linux-kernel; +Cc: Andreas Noever
Add struct tb which will contain our view of the thunderbolt bus. For
now it just contains a pointer to the configuration channel and a
workqueue for hotplug events.
Add thunderbolt_alloc_and_start() and thunderbolt_shutdown_and_free()
which are responsible for setup and teardown of struct tb.
Signed-off-by: Andreas Noever <andreas.noever@gmail.com>
---
drivers/thunderbolt/Makefile | 2 +-
drivers/thunderbolt/dsl3510.c | 18 +++++-
drivers/thunderbolt/tb.c | 124 ++++++++++++++++++++++++++++++++++++++++++
drivers/thunderbolt/tb.h | 34 ++++++++++++
4 files changed, 175 insertions(+), 3 deletions(-)
create mode 100644 drivers/thunderbolt/tb.c
create mode 100644 drivers/thunderbolt/tb.h
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index f486295..7c5b811 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,3 +1,3 @@
obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
-thunderbolt-objs := dsl3510.o tb_cfg.o
+thunderbolt-objs := dsl3510.o tb_cfg.o tb.o
diff --git a/drivers/thunderbolt/dsl3510.c b/drivers/thunderbolt/dsl3510.c
index 2a326f6..434812b 100644
--- a/drivers/thunderbolt/dsl3510.c
+++ b/drivers/thunderbolt/dsl3510.c
@@ -12,6 +12,7 @@
#include "dsl3510.h"
#include "dsl3510_regs.h"
+#include "tb.h"
#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
@@ -481,6 +482,7 @@ static void dsl3510_shutdown(struct tb_nhi *nhi)
static int dsl3510_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct tb_nhi *nhi;
+ struct tb *tb;
int res;
res = pcim_enable_device(pdev);
@@ -543,14 +545,26 @@ static int dsl3510_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* magic value - clock related? */
iowrite32(3906250 / 10000, nhi->iobase + 0x38c00);
- pci_set_drvdata(pdev, nhi); /* for dsl3510_remove only */
+ dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
+ tb = thunderbolt_alloc_and_start(nhi);
+ if (!tb) {
+ /*
+ * At this point the RX/TX rings might already have been
+ * activated. Do a proper shutdown.
+ */
+ dsl3510_shutdown(nhi);
+ return -EIO;
+ }
+ pci_set_drvdata(pdev, tb); /* for dsl3510_remove only */
return 0;
}
static void dsl3510_remove(struct pci_dev *pdev)
{
- struct tb_nhi *nhi = pci_get_drvdata(pdev);
+ struct tb *tb = pci_get_drvdata(pdev);
+ struct tb_nhi *nhi = tb->nhi;
+ thunderbolt_shutdown_and_free(tb);
dsl3510_shutdown(nhi);
}
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
new file mode 100644
index 0000000..64deb7b
--- /dev/null
+++ b/drivers/thunderbolt/tb.c
@@ -0,0 +1,124 @@
+/*
+ * Device independent Thunderbolt bus logic
+ *
+ * Copyright (c) 2013 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+
+#include "tb.h"
+
+/* One queued plug/unplug event; freed by tb_handle_hotplug. */
+struct tb_hotplug_event {
+	struct work_struct work;
+	struct tb *tb;
+	u64 route;   /* route string of the switch the event came from */
+	u8 port;     /* port number on that switch */
+	bool unplug; /* true for unplug, false for plug */
+};
+
+/**
+ * tb_handle_hotplug() - handle hotplug event
+ *
+ * Executes on the tb->wq. Bails out early (while still freeing the event)
+ * once tb->shutdown is set, which lets thunderbolt_shutdown_and_free drain
+ * the workqueue.
+ */
+static void tb_handle_hotplug(struct work_struct *work)
+{
+	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
+	struct tb *tb = ev->tb;
+	mutex_lock(&tb->lock);
+	if (tb->shutdown)
+		goto out;
+	/* do nothing for now */
+out:
+	mutex_unlock(&tb->lock);
+	kfree(ev);
+}
+
+/**
+ * tb_schedule_hotplug_handler() - callback function for the config channel
+ *
+ * Delegates to tb_handle_hotplug.
+ *
+ * NOTE(review): on kmalloc failure the plug event is silently dropped;
+ * consider at least logging this.
+ */
+static void tb_schedule_hotplug_handler(void *data, u64 route, u8 port,
+					bool unplug)
+{
+	struct tb *tb = data;
+	struct tb_hotplug_event *ev = kmalloc(sizeof(*ev), GFP_KERNEL);
+	if (!ev)
+		return;
+	INIT_WORK(&ev->work, tb_handle_hotplug);
+	ev->tb = tb;
+	ev->route = route;
+	ev->port = port;
+	ev->unplug = unplug;
+	queue_work(tb->wq, &ev->work);
+}
+
+/**
+ * thunderbolt_shutdown_and_free() - shutdown everything
+ *
+ * Free the config channel.
+ *
+ * Order matters: shutdown is set under the lock, then the config channel is
+ * freed (no new events after this), then the workqueue is drained.
+ */
+void thunderbolt_shutdown_and_free(struct tb *tb)
+{
+	mutex_lock(&tb->lock);
+	tb->shutdown = true; /* signal tb_handle_hotplug to quit */
+
+	if (tb->cfg)
+		tb_cfg_free(tb->cfg);
+	tb->cfg = NULL;
+
+	/* allow tb_handle_hotplug to acquire the lock */
+	mutex_unlock(&tb->lock);
+	if (tb->wq) {
+		flush_workqueue(tb->wq);
+		destroy_workqueue(tb->wq);
+		tb->wq = NULL;
+	}
+	mutex_destroy(&tb->lock);
+	kfree(tb);
+}
+
+/**
+ * thunderbolt_alloc_and_start() - setup the thunderbolt bus
+ *
+ * Allocates a tb_cfg control channel.
+ *
+ * Return: Returns NULL on error.
+ */
+struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi)
+{
+	struct tb *tb;
+
+	tb = kzalloc(sizeof(*tb), GFP_KERNEL);
+	if (!tb)
+		return NULL;
+
+	tb->nhi = nhi;
+	mutex_init(&tb->lock);
+	mutex_lock(&tb->lock);
+
+	tb->wq = alloc_ordered_workqueue("thunderbolt", 0);
+	if (!tb->wq)
+		goto err_locked;
+
+	/*
+	 * tb_schedule_hotplug_handler may be called as soon as the config
+	 * channel is allocated. That's why we have to hold the lock here.
+	 */
+	tb->cfg = tb_cfg_alloc(tb->nhi, tb_schedule_hotplug_handler, tb);
+	if (!tb->cfg)
+		goto err_locked;
+
+	mutex_unlock(&tb->lock);
+	return tb;
+
+err_locked:
+	/* In case tb_handle_hotplug is already executing. */
+	tb->shutdown = true;
+	mutex_unlock(&tb->lock);
+	thunderbolt_shutdown_and_free(tb);
+	return NULL;
+}
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
new file mode 100644
index 0000000..a3378dc
--- /dev/null
+++ b/drivers/thunderbolt/tb.h
@@ -0,0 +1,34 @@
+/*
+ * Device independent Thunderbolt bus logic
+ *
+ * Copyright (c) 2013 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#ifndef TB_H_
+#define TB_H_
+
+#include <linux/pci.h>
+
+#include "tb_cfg.h"
+
+/**
+ * struct tb - main thunderbolt bus structure
+ */
+struct tb {
+	struct mutex lock;	/*
+				 * Big lock. Must be held when accessing cfg or
+				 * any struct tb_switch / struct tb_port.
+				 */
+	struct tb_nhi *nhi;
+	struct tb_cfg *cfg;
+	struct workqueue_struct *wq; /* ordered workqueue for plug events */
+	bool shutdown;	/*
+			 * Once this is set tb_handle_hotplug will exit (once it
+			 * can acquire lock at least once). Used to drain wq.
+			 */
+};
+
+struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi);
+void thunderbolt_shutdown_and_free(struct tb *tb);
+
+#endif
--
1.8.4.2
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 04/12] thunderbolt: Add tb_regs.h
2013-11-29 1:35 [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed) Andreas Noever
` (2 preceding siblings ...)
2013-11-29 1:35 ` [PATCH 03/12] thunderbolt: Setup configuration channel Andreas Noever
@ 2013-11-29 1:35 ` Andreas Noever
2013-11-29 1:35 ` [PATCH 05/12] thunderbolt: Initialize root switch and ports Andreas Noever
` (8 subsequent siblings)
12 siblings, 0 replies; 23+ messages in thread
From: Andreas Noever @ 2013-11-29 1:35 UTC (permalink / raw)
To: linux-kernel; +Cc: Andreas Noever
Every thunderbolt device consists (logically) of a switch with multiple
ports. Every port contains multiple config regions (HOPS, PORT, SWITCH,
COUNTERS) which are used to configure the device.
The tb_regs.h file contains all known registers and capabilities from these
config regions.
Signed-off-by: Andreas Noever <andreas.noever@gmail.com>
---
drivers/thunderbolt/tb_regs.h | 187 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 187 insertions(+)
create mode 100644 drivers/thunderbolt/tb_regs.h
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
new file mode 100644
index 0000000..bd3ad67
--- /dev/null
+++ b/drivers/thunderbolt/tb_regs.h
@@ -0,0 +1,187 @@
+/*
+ * Thunderbolt Port/Switch config area registers
+ *
+ * Copyright (c) 2013 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#ifndef _TB_REGS
+#define _TB_REGS
+
+#include <linux/types.h>
+
+/*
+ * TODO: should be 63? But we do not know how to receive frames larger than 256
+ * bytes. (header + checksum = 16, 60*4 = 240)
+ */
+#define TB_MAX_CONFIG_RW_LENGTH 60
+
+/* Known capability ids found in the port/switch config areas. */
+enum tb_cap {
+	TB_CAP_PHY		= 0x0001,
+	TB_CAP_TIME1		= 0x0003,
+	TB_CAP_PCIE		= 0x0004,
+	TB_CAP_I2C		= 0x0005,
+	TB_CAP_PLUG_EVENTS	= 0x0105, /* also EEPROM */
+	TB_CAP_TIME2		= 0x0305,
+	TB_CAL_IECS		= 0x0405, /* NOTE(review): likely a typo for TB_CAP_IECS */
+	TB_CAP_LINK_CONTROLLER	= 0x0605, /* also IECS */
+};
+
+/* Physical link state as reported in tb_cap_phy.state. */
+enum tb_port_state {
+	TB_PORT_DISABLED	= 0, /* tb_cap_phy.disable == 1 */
+	TB_PORT_CONNECTING	= 1, /* retry */
+	TB_PORT_UP		= 2,
+	TB_PORT_UNPLUGGED	= 7,
+};
+
+/* capability headers */
+
+/* Basic (one dword) capability header. */
+struct tb_cap_basic {
+	u8 next;
+	/* enum tb_cap cap:8; prevent "narrower than values of its type" */
+	u8 cap; /* if cap == 0x05 then we have a extended capability */
+} __packed;
+
+/* Extended capability header, short form. */
+struct tb_cap_extended_short {
+	u8 next; /* if next and length are zero then we have a long cap */
+	enum tb_cap cap:16;
+	u8 length;
+} __packed;
+
+/* Extended capability header, long form (two dwords). */
+struct tb_cap_extended_long {
+	u8 zero1;
+	enum tb_cap cap:16;
+	u8 zero2;
+	u16 next;
+	u16 length;
+} __packed;
+
+/* capabilities */
+
+/* TB_CAP_LINK_CONTROLLER capability layout. */
+struct tb_cap_link_controller {
+	struct tb_cap_extended_long cap_header;
+	u32 count:4; /* number of link controllers */
+	u32 unknown1:4;
+	u32 base_offset:8; /*
+			    * offset (into this capability) of the configuration
+			    * area of the first link controller
+			    */
+	u32 length:12; /* link controller configuration area length */
+	u32 unknown2:4; /* TODO check that length is correct */
+} __packed;
+
+/* TB_CAP_PHY capability layout: per-port link state and disable bit. */
+struct tb_cap_phy {
+	struct tb_cap_basic cap_header;
+	u32 unknown1:16;
+	u32 unknown2:14;
+	bool disable:1;
+	u32 unknown3:11;
+	enum tb_port_state state:4;
+	u32 unknown4:2;
+} __packed;
+
+/* TB_CAP_PLUG_EVENTS capability layout. */
+struct tb_cap_plug_events {
+	struct tb_cap_extended_short cap_header;
+	u32 __unknown1:2;
+	u32 plug_events:5;
+	u32 __unknown2:25;
+} __packed;
+
+/* device headers */
+
+/* Present on port 0 in TB_CFG_SWITCH at address zero. 5 dwords. */
+struct tb_regs_switch_header {
+	/* DWORD 0 */
+	u16 vendor_id;
+	u16 device_id;
+	/* DWORD 1 */
+	u32 first_cap_offset:8;
+	u32 upstream_port_number:6;
+	u32 max_port_number:6;
+	u32 depth:3;
+	u32 __unknown1:1;
+	u32 revision:8;
+	/* DWORD 2 */
+	u32 route_lo;
+	/* DWORD 3 */
+	u32 route_hi:31;
+	bool enabled:1;
+	/* DWORD 4 */
+	u32 plug_events_delay:8; /*
+				  * RW, pause between plug events in
+				  * milliseconds. Writing 0x00 is interpreted
+				  * as 255ms.
+				  */
+	u32 __unknown4:16;
+	u32 thunderbolt_version:8;
+} __packed;
+
+/* Values of tb_regs_port_header.type; high byte selects the device class. */
+enum tb_port_type {
+	TB_TYPE_INACTIVE	= 0x000000,
+	TB_TYPE_PORT		= 0x000001,
+	TB_TYPE_NHI		= 0x000002,
+	/* TB_TYPE_ETHERNET	= 0x020000, lower order bits are not known */
+	/* TB_TYPE_SATA		= 0x080000, lower order bits are not known */
+	TB_TYPE_DP_HDMI_IN	= 0x0e0101,
+	TB_TYPE_DP_HDMI_OUT	= 0x0e0102,
+	TB_TYPE_PCIE_DOWN	= 0x100101,
+	TB_TYPE_PCIE_UP		= 0x100102,
+	/* TB_TYPE_USB		= 0x200000, lower order bits are not known */
+};
+
+/* Present on every port in TB_CFG_PORT at address zero. 8 dwords. */
+struct tb_regs_port_header {
+	/* DWORD 0 */
+	u16 vendor_id;
+	u16 device_id;
+	/* DWORD 1 */
+	u32 first_cap_offset:8;
+	u32 __unknown1:16;
+	u32 revision:8;
+	/* DWORD 2 */
+	enum tb_port_type type:24;
+	u32 thunderbolt_version:8;
+	/* DWORD 3 */
+	u32 __unknown2:20;
+	u32 port_number:6;
+	u32 __unknown3:6;
+	/* DWORD 4 */
+	u32 nfc_credits;
+	/* DWORD 5 */
+	u32 max_in_hop_id:11;
+	u32 max_out_hop_id:11;
+	u32 __unkown4:10;
+	/* DWORD 6 */
+	u32 __unknown5;
+	/* DWORD 7 */
+	u32 __unknown6;
+
+} __packed;
+
+/* Hop register from TB_CFG_HOPS. 8 byte (2 dwords) per entry. */
+struct tb_regs_hop {
+	/* DWORD 0 */
+	u32 next_hop:11; /*
+			  * hop to take after sending the packet through
+			  * out_port (on the incoming port of the next switch)
+			  */
+	u32 out_port:6; /* next port of the path (on the same switch) */
+	u32 initial_credits:8;
+	u32 unknown1:6; /* set to zero */
+	bool enable:1;
+
+	/* DWORD 1 */
+	u32 weight:4;
+	u32 unknown2:4; /* set to zero */
+	u32 priority:3;
+	bool drop_packages:1;
+	u32 counter:11; /* index into TB_CFG_COUNTERS on this port */
+	bool counter_enable:1;
+	bool ingress_fc:1;
+	bool egress_fc:1;
+	bool ingress_shared_buffer:1;
+	bool egress_shared_buffer:1;
+	u32 unknown3:4; /* set to zero */
+} __packed;
+
+
+#endif
--
1.8.4.2
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 05/12] thunderbolt: Initialize root switch and ports
2013-11-29 1:35 [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed) Andreas Noever
` (3 preceding siblings ...)
2013-11-29 1:35 ` [PATCH 04/12] thunderbolt: Add tb_regs.h Andreas Noever
@ 2013-11-29 1:35 ` Andreas Noever
2013-11-29 1:35 ` [PATCH 06/12] thunderbolt: Add thunderbolt capability handling Andreas Noever
` (7 subsequent siblings)
12 siblings, 0 replies; 23+ messages in thread
From: Andreas Noever @ 2013-11-29 1:35 UTC (permalink / raw)
To: linux-kernel; +Cc: Andreas Noever
This patch adds the structures tb_switch and tb_port as well as code to
reset and configure the root switch.
Signed-off-by: Andreas Noever <andreas.noever@gmail.com>
---
drivers/thunderbolt/tb.c | 229 ++++++++++++++++++++++++++++++++++++++++++++++-
drivers/thunderbolt/tb.h | 113 +++++++++++++++++++++++
2 files changed, 340 insertions(+), 2 deletions(-)
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 64deb7b..d2df3be 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -8,7 +8,217 @@
#include <linux/errno.h>
#include <linux/delay.h>
+#include "dsl3510.h"
#include "tb.h"
+#include "tb_regs.h"
+
+/* utility functions */
+
+/* Pretty-print a switch config header for debugging. */
+static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
+{
+	tb_info(tb,
+		" Switch: %x:%x (Revision: %d, TB Version: %d)\n",
+		sw->vendor_id,
+		sw->device_id,
+		sw->revision,
+		sw->thunderbolt_version);
+	tb_info(tb, "  Max Port Number: %d\n", sw->max_port_number);
+	tb_info(tb, "  Config:\n");
+	tb_info(tb,
+		"   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
+		sw->upstream_port_number,
+		sw->depth,
+		(((u64) sw->route_hi) << 32) | sw->route_lo,
+		sw->enabled,
+		sw->plug_events_delay);
+	tb_info(tb,
+		"   unknown1: %#x unknown4: %#x\n",
+		sw->__unknown1,
+		sw->__unknown4);
+}
+
+/* Map a port's type register to a human readable name (for dumps). */
+static char *tb_port_type(struct tb_regs_port_header *port)
+{
+	u32 class = port->type >> 16;
+	u8 subtype = port->type; /* low byte distinguishes plain ports */
+
+	if (class == 0) {
+		if (subtype == 0)
+			return "Inactive";
+		if (subtype == 1)
+			return "Port";
+		if (subtype == 2)
+			return "NHI";
+		return "unknown";
+	}
+	if (class == 0x2)
+		return "Ethernet";
+	if (class == 0x8)
+		return "SATA";
+	if (class == 0xe)
+		return "DP/HDMI";
+	if (class == 0x10)
+		return "PCIe";
+	if (class == 0x20)
+		return "USB";
+	return "unknown";
+}
+
+/* Pretty-print a port config header for debugging. */
+static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
+{
+	tb_info(tb,
+		" Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
+		port->port_number,
+		port->vendor_id,
+		port->device_id,
+		port->revision,
+		port->thunderbolt_version,
+		tb_port_type(port),
+		port->type);
+	tb_info(tb,
+		"  Max hop id (in/out): %d/%d, NFC Credits: %#x\n",
+		port->max_in_hop_id,
+		port->max_out_hop_id,
+		port->nfc_credits);
+}
+
+/* Number of 8 bit hop fields needed to encode this route string. */
+static int tb_route_length(u64 route)
+{
+	int significant_bits = fls64(route);
+
+	/* round up to whole TB_ROUTE_SHIFT sized hop fields */
+	return (significant_bits + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT;
+}
+
+/* switch/port allocation & initialization */
+
+/**
+ * tb_init_port() - initialize a port
+ *
+ * This is a helper method for tb_switch_alloc. Does not check or initialize
+ * any downstream switches.
+ *
+ * Reads the first 8 dwords of the port's TB_CFG_PORT area into
+ * port->config.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+static int tb_init_port(struct tb_switch *sw, u8 port_nr)
+{
+	int res;
+	struct tb_port *port = &sw->ports[port_nr];
+	port->sw = sw;
+	port->port = port_nr;
+	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
+	if (res)
+		return res;
+
+	tb_dump_port(sw->tb, &port->config);
+
+	/* TODO: Read dual link port, DP port and more from EEPROM. */
+	return 0;
+
+}
+
+/**
+ * tb_switch_free() - free a tb_switch and all downstream switches
+ *
+ * (No downstream switches are tracked yet; currently frees the port array
+ * and the switch itself.)
+ */
+static void tb_switch_free(struct tb_switch *sw)
+{
+	kfree(sw->ports);
+	kfree(sw);
+}
+
+/**
+ * tb_switch_alloc() - allocate and initialize a switch
+ *
+ * Reads the switch config, uploads the new route string and enables the
+ * switch, then initializes all of its ports.
+ *
+ * Return: Returns NULL on failure.
+ */
+static struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
+{
+	int i;
+	struct tb_switch *sw;
+	int upstream_port = tb_cfg_get_upstream_port(tb->cfg, route);
+	if (upstream_port < 0)
+		return NULL;
+
+	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
+	if (!sw)
+		return NULL;
+
+	sw->tb = tb;
+	/* read all 5 dwords of the switch header (space 2 == TB_CFG_SWITCH) */
+	if (tb_cfg_read(tb->cfg, &sw->config, route, 0, 2, 0, 5))
+		goto err;
+	tb_info(tb,
+		"initializing Switch at %#llx (depth: %d, up port: %d)\n",
+		route,
+		tb_route_length(route),
+		upstream_port);
+	tb_info(tb, "old switch config:\n");
+	tb_dump_switch(tb, &sw->config);
+
+	/* configure switch */
+	sw->config.upstream_port_number = upstream_port;
+	sw->config.depth = tb_route_length(route);
+	sw->config.route_lo = route;
+	sw->config.route_hi = route >> 32;
+	sw->config.enabled = 1;
+	/* from here on we may use the tb_sw_* functions & macros */
+
+	if (sw->config.vendor_id != 0x8086) { /* Intel */
+		tb_sw_WARN(sw,
+			   "unsupported switch vendor id %#x, aborting\n",
+			   sw->config.vendor_id);
+		goto err;
+	}
+	/* 0x1547/0x1549: the only device ids observed so far */
+	if (sw->config.device_id != 0x1547 && sw->config.device_id != 0x1549) {
+
+		tb_sw_WARN(sw,
+			   "unsupported switch device id %#x, aborting\n",
+			   sw->config.device_id);
+		goto err;
+	}
+
+	/* upload configuration (dwords 1-3: route, enabled, port config) */
+	if (tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3))
+		goto err;
+
+	/* initialize ports */
+	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
+			    GFP_KERNEL);
+	if (!sw->ports)
+		goto err;
+
+	for (i = 0; i <= sw->config.max_port_number; i++) {
+		if (tb_init_port(sw, i))
+			goto err;
+		/* TODO: check if port is disabled (EEPROM) */
+	}
+
+	/* TODO: I2C, IECS, EEPROM, link controller */
+
+	return sw;
+err:
+	kfree(sw->ports);
+	kfree(sw);
+	return NULL;
+}
+
+/**
+ * tb_switch_reset() - send TB_CFG_PKG_RESET and enable switch
+ *
+ * After the reset, dwords 2 and 3 of the switch header (route string and
+ * enabled bit) are written back so the switch stays addressable.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+static int tb_switch_reset(struct tb *tb, u64 route)
+{
+	int res;
+	/*
+	 * Use designated initializers. The previous code placed assignment
+	 * expressions ("header.route_hi = route >> 32", ...) inside the
+	 * braces: those assign to the still-uninitialized object and their
+	 * values then positionally initialize the *first* members
+	 * (vendor_id, device_id, ...), leaving route_lo/route_hi/enabled
+	 * unset and the evaluation order unsequenced.
+	 */
+	struct tb_regs_switch_header header = {
+		.route_hi = route >> 32,
+		.route_lo = route,
+		.enabled = true,
+	};
+	tb_info(tb, "resetting switch at %llx\n", route);
+	res = tb_cfg_reset(tb->cfg, route);
+	if (res)
+		return res;
+	/* write dwords 2 and 3: route string + enabled bit */
+	return tb_cfg_write(tb->cfg, ((u32 *) &header) + 2, route, 0,
+			    TB_CFG_SWITCH, 2, 2);
+}
struct tb_hotplug_event {
struct work_struct work;
@@ -59,13 +269,17 @@ static void tb_schedule_hotplug_handler(void *data, u64 route, u8 port,
/**
* thunderbolt_shutdown_and_free() - shutdown everything
*
- * Free the config channel.
+ * Free all switches and the config channel.
*/
void thunderbolt_shutdown_and_free(struct tb *tb)
{
mutex_lock(&tb->lock);
tb->shutdown = true; /* signal tb_handle_hotplug to quit */
+ if (tb->root_switch)
+ tb_switch_free(tb->root_switch);
+ tb->root_switch = NULL;
+
if (tb->cfg)
tb_cfg_free(tb->cfg);
tb->cfg = NULL;
@@ -84,7 +298,7 @@ void thunderbolt_shutdown_and_free(struct tb *tb)
/**
* thunderbolt_alloc_and_start() - setup the thunderbolt bus
*
- * Allocates a tb_cfg control channel.
+ * Allocates a tb_cfg control channel and initializes the root switch.
*
* Return: Returns NULL on error.
*/
@@ -92,6 +306,10 @@ struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi)
{
struct tb *tb;
+ BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
+ BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
+ BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);
+
tb = kzalloc(sizeof(*tb), GFP_KERNEL);
if (!tb)
return NULL;
@@ -112,6 +330,13 @@ struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi)
if (!tb->cfg)
goto err_locked;
+ if (tb_switch_reset(tb, 0))
+ goto err_locked;
+
+ tb->root_switch = tb_switch_alloc(tb, 0);
+ if (!tb->root_switch)
+ goto err_locked;
+
mutex_unlock(&tb->lock);
return tb;
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index a3378dc..4000f2b 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -9,8 +9,32 @@
#include <linux/pci.h>
+#include "tb_regs.h"
#include "tb_cfg.h"
+#define TB_PORT_MASK 0x3f
+#define TB_ROUTE_SHIFT 8
+
+/**
+ * struct tb_switch - a thunderbolt switch
+ */
+struct tb_switch {
+ struct tb_regs_switch_header config;
+ struct tb_port *ports;
+ struct tb *tb;
+ bool invalid; /* unplugged, will go away */
+};
+
+/**
+ * struct tb_port - a thunderbolt port, part of a tb_switch
+ */
+struct tb_port {
+ struct tb_regs_port_header config;
+ struct tb_switch *sw;
+ u8 port; /* port number on switch */
+ bool invalid; /* unplugged, will go away */
+};
+
/**
* struct tb - main thunderbolt bus structure
*/
@@ -22,12 +46,101 @@ struct tb {
struct tb_nhi *nhi;
struct tb_cfg *cfg;
struct workqueue_struct *wq; /* ordered workqueue for plug events */
+ struct tb_switch *root_switch;
bool shutdown; /*
* Once this is set tb_handle_hotplug will exit (once it
* can aquire lock at least once). Used to drain wq.
*/
};
+static inline u64 tb_route(struct tb_switch *sw)
+{
+ return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo;
+}
+
+/* helper functions & macros */
+
+static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
+ enum tb_cfg_space space, uint32_t offset,
+ uint32_t length)
+{
+ return tb_cfg_read(sw->tb->cfg,
+ buffer,
+ tb_route(sw),
+ 0,
+ space,
+ offset,
+ length);
+}
+
+static inline int tb_sw_write(struct tb_switch *sw, void *buffer,
+ enum tb_cfg_space space, uint32_t offset,
+ uint32_t length)
+{
+ return tb_cfg_write(sw->tb->cfg,
+ buffer,
+ tb_route(sw),
+ 0,
+ space,
+ offset,
+ length);
+}
+
+static inline int tb_port_read(struct tb_port *port, void *buffer,
+ enum tb_cfg_space space, uint32_t offset,
+ uint32_t length)
+{
+ return tb_cfg_read(port->sw->tb->cfg,
+ buffer,
+ tb_route(port->sw),
+ port->port,
+ space,
+ offset,
+ length);
+}
+
+static inline int tb_port_write(struct tb_port *port, void *buffer,
+ enum tb_cfg_space space, uint32_t offset,
+ uint32_t length)
+{
+ return tb_cfg_write(port->sw->tb->cfg,
+ buffer,
+ tb_route(port->sw),
+ port->port,
+ space,
+ offset,
+ length);
+}
+
+#define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg)
+#define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg)
+#define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg)
+
+
+#define __TB_SW_PRINT(level, sw, fmt, arg...) \
+ do { \
+ struct tb_switch *__sw = (sw); \
+ level(__sw->tb, "%llx: " fmt, \
+ tb_route(__sw), ## arg); \
+ } while (0)
+#define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg)
+#define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg)
+#define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg)
+
+
+#define __TB_PORT_PRINT(level, _port, fmt, arg...) \
+ do { \
+ struct tb_port *__port = (_port); \
+ level(__port->sw->tb, "%llx:%x: " fmt, \
+ tb_route(__port->sw), __port->port, ## arg); \
+ } while (0)
+#define tb_port_WARN(port, fmt, arg...) \
+ __TB_PORT_PRINT(tb_WARN, port, fmt, ##arg)
+#define tb_port_warn(port, fmt, arg...) \
+ __TB_PORT_PRINT(tb_warn, port, fmt, ##arg)
+#define tb_port_info(port, fmt, arg...) \
+ __TB_PORT_PRINT(tb_info, port, fmt, ##arg)
+
struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi);
void thunderbolt_shutdown_and_free(struct tb *tb);
--
1.8.4.2
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 06/12] thunderbolt: Add thunderbolt capability handling
2013-11-29 1:35 [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed) Andreas Noever
` (4 preceding siblings ...)
2013-11-29 1:35 ` [PATCH 05/12] thunderbolt: Initialize root switch and ports Andreas Noever
@ 2013-11-29 1:35 ` Andreas Noever
2013-11-29 1:35 ` [PATCH 07/12] thunderbolt: Enable plug events Andreas Noever
` (6 subsequent siblings)
12 siblings, 0 replies; 23+ messages in thread
From: Andreas Noever @ 2013-11-29 1:35 UTC (permalink / raw)
To: linux-kernel; +Cc: Andreas Noever
Thunderbolt config areas contain capability lists similar to those found
on pci devices. This patch introduces a tb_find_cap utility method to search
for capabilities.
Signed-off-by: Andreas Noever <andreas.noever@gmail.com>
---
drivers/thunderbolt/tb.c | 103 +++++++++++++++++++++++++++++++++++++++++++++++
drivers/thunderbolt/tb.h | 2 +
2 files changed, 105 insertions(+)
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index d2df3be..943842b 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -89,6 +89,109 @@ static int tb_route_length(u64 route)
return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT;
}
+
+/* thunderbolt capability lookup */
+
+struct tb_cap_any {
+ union {
+ struct tb_cap_basic basic;
+ struct tb_cap_extended_short extended_short;
+ struct tb_cap_extended_long extended_long;
+ };
+} __packed;
+
+static bool tb_cap_is_basic(struct tb_cap_any *cap)
+{
+ /* basic.cap is u8. This checks only the lower 8 bit of cap. */
+ return cap->basic.cap != 5;
+}
+
+static bool tb_cap_is_long(struct tb_cap_any *cap)
+{
+ return !tb_cap_is_basic(cap)
+ && cap->extended_short.next == 0
+ && cap->extended_short.length == 0;
+}
+
+static enum tb_cap tb_cap(struct tb_cap_any *cap)
+{
+ if (tb_cap_is_basic(cap))
+ return cap->basic.cap;
+ else
+ /* extended_short/long have cap at the same position. */
+ return cap->extended_short.cap;
+}
+
+static u32 tb_cap_next(struct tb_cap_any *cap, u32 offset)
+{
+ int next;
+ if (offset == 1) {
+ /*
+ * The first pointer is part of the switch header and always
+ * a simple pointer.
+ */
+ next = cap->basic.next;
+ } else {
+ if (tb_cap_is_basic(cap))
+ next = cap->basic.next;
+ /* "255 byte config areas should be enough for anybody." */
+ else if (!tb_cap_is_long(cap))
+ next = cap->extended_short.next;
+ /*
+ * "Also we should have at least three types of capability
+ * headers in version 1."
+ */
+ else
+ next = cap->extended_long.next;
+ }
+ /*
+ * "Hey, we could terminate some capability lists with a null offset
+ * and others with a pointer to the last element." - "Great idea!"
+ */
+ if (next == offset)
+ return 0;
+ return next;
+}
+
+/**
+ * tb_find_cap() - find a capability
+ *
+ * Return: Returns a positive offset if the capability was found and 0 if not.
+ * Returns an error code on failure.
+ */
+int tb_find_cap(struct tb_port *port, enum tb_cfg_space space, enum tb_cap cap)
+{
+ u32 offset = 1;
+ struct tb_cap_any header;
+ int res;
+ int retries = 10;
+ while (retries--) {
+ res = tb_port_read(port, &header, space, offset, 1);
+ if (res) {
+ /* Intel needs some help with linked lists. */
+ if (space == TB_CFG_PORT
+ && offset == 0xa
+ && port->config.type == TB_TYPE_DP_HDMI_OUT) {
+ offset = 0x39;
+ continue;
+ }
+ return res;
+ }
+ if (offset != 1 && tb_cap(&header) == cap)
+ return offset;
+ offset = tb_cap_next(&header, offset);
+ if (!offset)
+ return 0;
+ continue;
+ }
+ tb_port_WARN(port,
+ "could not find cap %#x in config space %d, last offset: %#x\n",
+ cap,
+ space,
+ offset);
+ return -EIO;
+}
+
/* switch/port allocation & initialization */
/**
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 4000f2b..e81c63a 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -144,4 +144,6 @@ static inline int tb_port_write(struct tb_port *port, void *buffer,
struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi);
void thunderbolt_shutdown_and_free(struct tb *tb);
+int tb_find_cap(struct tb_port *port, enum tb_cfg_space space, u32 value);
+
#endif
--
1.8.4.2
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 07/12] thunderbolt: Enable plug events
2013-11-29 1:35 [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed) Andreas Noever
` (5 preceding siblings ...)
2013-11-29 1:35 ` [PATCH 06/12] thunderbolt: Add thunderbolt capability handling Andreas Noever
@ 2013-11-29 1:35 ` Andreas Noever
2013-11-29 1:35 ` [PATCH 08/12] thunderbolt: Scan for downstream switches Andreas Noever
` (5 subsequent siblings)
12 siblings, 0 replies; 23+ messages in thread
From: Andreas Noever @ 2013-11-29 1:35 UTC (permalink / raw)
To: linux-kernel; +Cc: Andreas Noever
Thunderbolt switches have a plug events capability. This patch adds the
tb_plug_events_active method and uses it to activate plug events on
switch allocation.
Signed-off-by: Andreas Noever <andreas.noever@gmail.com>
---
drivers/thunderbolt/tb.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++-
drivers/thunderbolt/tb.h | 1 +
2 files changed, 49 insertions(+), 1 deletion(-)
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 943842b..3837f1a 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -192,6 +192,41 @@ int tb_find_cap(struct tb_port *port, enum tb_cfg_space space, enum tb_cap cap)
return -EIO;
}
+
+/* thunderbolt switch utility functions */
+
+/**
+ * tb_plug_events_active() - enable/disable plug events on a switch
+ *
+ * Also configures a sane plug_events_delay of 255ms.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+static int tb_plug_events_active(struct tb_switch *sw, bool active)
+{
+ u32 data;
+ int res;
+
+ sw->config.plug_events_delay = 0xff;
+ res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
+ if (res)
+ return res;
+
+ res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
+ if (res)
+ return res;
+
+ if (active) {
+ data = data & 0xFFFFFF83;
+ if (sw->config.device_id == 0x1547)
+ data |= 4;
+ } else {
+ data = data | 0x7c;
+ }
+ return tb_sw_write(sw, &data, TB_CFG_SWITCH,
+ sw->cap_plug_events + 1, 1);
+}
+
/* switch/port allocation & initialization */
/**
@@ -236,6 +271,7 @@ static void tb_switch_free(struct tb_switch *sw)
static struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
{
int i;
+ int cap;
struct tb_switch *sw;
int upstream_port = tb_cfg_get_upstream_port(tb->cfg, route);
if (upstream_port < 0)
@@ -296,6 +332,16 @@ static struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
/* TODO: I2C, IECS, EEPROM, link controller */
+ cap = tb_find_cap(&sw->ports[0], TB_CFG_SWITCH, TB_CAP_PLUG_EVENTS);
+ if (cap < 0) {
+ tb_sw_WARN(sw, "cannot find TB_CAP_PLUG_EVENTS aborting\n");
+ goto err;
+ }
+ sw->cap_plug_events = cap;
+
+ if (tb_plug_events_active(sw, true))
+ goto err;
+
return sw;
err:
kfree(sw->ports);
@@ -401,7 +447,8 @@ void thunderbolt_shutdown_and_free(struct tb *tb)
/**
* thunderbolt_alloc_and_start() - setup the thunderbolt bus
*
- * Allocates a tb_cfg control channel and initializes the root switch.
+ * Allocates a tb_cfg control channel, initializes the root switch and enables
+ * plug events.
*
* Return: Returns NULL on error.
*/
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index e81c63a..2ada42a 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -22,6 +22,7 @@ struct tb_switch {
struct tb_regs_switch_header config;
struct tb_port *ports;
struct tb *tb;
+ int cap_plug_events; /* offset, zero if not found */
bool invalid; /* unplugged, will go away */
};
--
1.8.4.2
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 08/12] thunderbolt: Scan for downstream switches
2013-11-29 1:35 [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed) Andreas Noever
` (6 preceding siblings ...)
2013-11-29 1:35 ` [PATCH 07/12] thunderbolt: Enable plug events Andreas Noever
@ 2013-11-29 1:35 ` Andreas Noever
2013-11-29 1:35 ` [PATCH 09/12] thunderbolt: Handle hotplug events Andreas Noever
` (4 subsequent siblings)
12 siblings, 0 replies; 23+ messages in thread
From: Andreas Noever @ 2013-11-29 1:35 UTC (permalink / raw)
To: linux-kernel; +Cc: Andreas Noever
Add utility methods tb_port_state and tb_wait_for_port. Add tb_scan_port
which checks whether a port is connected and if so allocates a
downstream switch.
Signed-off-by: Andreas Noever <andreas.noever@gmail.com>
---
drivers/thunderbolt/tb.c | 160 +++++++++++++++++++++++++++++++++++++++++++++++
drivers/thunderbolt/tb.h | 17 +++++
2 files changed, 177 insertions(+)
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 3837f1a..9daff7b 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -84,6 +84,25 @@ static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
port->nfc_credits);
}
+
+/**
+ * tb_downstream_route() - get route to downstream switch
+ *
+ * Port must not be the upstream port (otherwise a loop is created).
+ *
+ * Return: Returns a route to the switch behind @port.
+ */
+static u64 tb_downstream_route(struct tb_port *port)
+{
+ return tb_route(port->sw)
+ | ((u64) port->port << (port->sw->config.depth * 8));
+}
+
+static bool tb_is_upstream_port(struct tb_port *port)
+{
+ return port == tb_upstream_port(port->sw);
+}
+
static int tb_route_length(u64 route)
{
return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT;
@@ -192,6 +211,84 @@ int tb_find_cap(struct tb_port *port, enum tb_cfg_space space, enum tb_cap cap)
return -EIO;
}
+/* thunderbolt port utility functions */
+
+/**
+ * tb_port_state() - get connectedness state of a port
+ *
+ * The port must have a TB_CAP_PHY (i.e. it should be a real port).
+ *
+ * Return: Returns a tb_port_state on success or an error code on failure.
+ */
+static int tb_port_state(struct tb_port *port)
+{
+ struct tb_cap_phy phy;
+ int res;
+ if (port->cap_phy == 0) {
+ tb_port_WARN(port, "does not have a PHY\n");
+ return -EINVAL;
+ }
+ res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
+ if (res)
+ return res;
+ return phy.state;
+}
+
+/**
+ * tb_wait_for_port() - wait for a port to become ready
+ *
+ * Check if the port is connected but not up. If so we wait for some
+ * time to see whether it comes up.
+ *
+ * Return: Returns an error code on failure. Returns 0 if the port is not
+ * connected or failed to reach state TB_PORT_UP within one second. Returns 1
+ * if the port is connected and in state TB_PORT_UP.
+ */
+static int tb_wait_for_port(struct tb_port *port)
+{
+ int retries = 10;
+ int state;
+ if (!port->cap_phy) {
+ tb_port_WARN(port, "does not have PHY\n");
+ return -EINVAL;
+ }
+ if (tb_is_upstream_port(port)) {
+ tb_port_WARN(port, "is the upstream port\n");
+ return -EINVAL;
+ }
+
+ while (retries--) {
+ state = tb_port_state(port);
+ if (state < 0)
+ return state;
+ if (state == TB_PORT_DISABLED) {
+ tb_port_info(port, "is disabled (state: 0)\n");
+ return 0;
+ }
+ if (state == TB_PORT_UNPLUGGED) {
+ tb_port_info(port, "is unplugged (state: 7)\n");
+ return 0;
+ }
+ if (state == TB_PORT_UP) {
+ tb_port_info(port,
+ "is connected, link is up (state: 2)\n");
+ return 1;
+ }
+
+ /*
+ * After plug-in the state is TB_PORT_CONNECTING. Give it some
+ * time.
+ */
+ tb_port_info(port,
+ "is connected, link is not up (state: %d), retrying...\n",
+ state);
+ msleep(100);
+ }
+ tb_port_WARN(port,
+ "failed to reach state TB_PORT_UP. Ignoring port...\n");
+ return 0;
+}
+
/* thunderbolt switch utility functions */
@@ -240,13 +337,25 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active)
static int tb_init_port(struct tb_switch *sw, u8 port_nr)
{
int res;
+ int cap;
struct tb_port *port = &sw->ports[port_nr];
port->sw = sw;
port->port = port_nr;
+ port->remote = NULL;
res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
if (res)
return res;
+ /* Port 0 is the switch itself and has no PHY. */
+ if (port->config.type == TB_TYPE_PORT && port_nr != 0) {
+ cap = tb_find_cap(port, TB_CFG_PORT, TB_CAP_PHY);
+
+ if (cap > 0)
+ port->cap_phy = cap;
+ else
+ tb_port_WARN(port, "non switch port without a PHY\n");
+ }
+
tb_dump_port(sw->tb, &port->config);
/* TODO: Read dual link port, DP port and more from EEPROM. */
@@ -259,6 +368,14 @@ static int tb_init_port(struct tb_switch *sw, u8 port_nr)
*/
static void tb_switch_free(struct tb_switch *sw)
{
+ int i;
+ /* port 0 is the switch itself and never has a remote */
+ for (i = 1; i <= sw->config.max_port_number; i++) {
+ if (tb_is_upstream_port(&sw->ports[i]))
+ continue;
+ if (sw->ports[i].remote)
+ tb_switch_free(sw->ports[i].remote->sw);
+ }
kfree(sw->ports);
kfree(sw);
}
@@ -349,6 +466,9 @@ err:
return NULL;
}
+
+/* startup, enumeration, hot plug handling and shutdown */
+
/**
* tb_switch_reset() - send TB_CFG_PKG_RESET and enable switch
*
@@ -369,6 +489,44 @@ static int tb_switch_reset(struct tb *tb, u64 route)
return tb_cfg_write(tb->cfg, ((u32 *) &header) + 2, route, 0, 2, 2, 2);
}
+static void tb_link_ports(struct tb_port *down, struct tb_port *up)
+{
+ down->remote = up;
+ up->remote = down;
+}
+
+static void tb_scan_port(struct tb_port *port);
+
+static void tb_scan_ports(struct tb_switch *sw)
+{
+ int i;
+ for (i = 1; i <= sw->config.max_port_number; i++)
+ tb_scan_port(&sw->ports[i]);
+}
+
+/**
+ * tb_scan_port() - check for switches below port
+ *
+ * Checks whether port is connected. If so then we try to create a downstream
+ * switch and recursively scan its ports
+ */
+static void tb_scan_port(struct tb_port *port)
+{
+ struct tb_switch *sw;
+ if (tb_is_upstream_port(port))
+ return;
+ if (port->config.type != TB_TYPE_PORT)
+ return;
+ if (tb_wait_for_port(port) <= 0)
+ return;
+
+ sw = tb_switch_alloc(port->sw->tb, tb_downstream_route(port));
+ if (!sw)
+ return;
+ tb_link_ports(port, tb_upstream_port(sw));
+ tb_scan_ports(sw);
+}
+
struct tb_hotplug_event {
struct work_struct work;
struct tb *tb;
@@ -487,6 +645,8 @@ struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi)
if (!tb->root_switch)
goto err_locked;
+ tb_scan_ports(tb->root_switch);
+
mutex_unlock(&tb->lock);
return tb;
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 2ada42a..fe35d4d 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -32,6 +32,8 @@ struct tb_switch {
struct tb_port {
struct tb_regs_port_header config;
struct tb_switch *sw;
+ struct tb_port *remote; /* remote port, NULL if not connected */
+ int cap_phy; /* offset, zero if not found */
u8 port; /* port number on switch */
bool invalid; /* unplugged, will go away */
};
@@ -54,6 +56,21 @@ struct tb {
*/
};
+/**
+ * tb_upstream_port() - return the upstream port of a switch
+ *
+ * Every switch has an upstream port (for the root switch it is the NHI).
+ *
+ * During switch alloc/init tb_upstream_port()->remote may be NULL, even for
+ * non root switches (on the NHI port remote is always NULL).
+ *
+ * Return: Returns the upstream port of the switch.
+ */
+static inline struct tb_port *tb_upstream_port(struct tb_switch *sw)
+{
+ return &sw->ports[sw->config.upstream_port_number];
+}
+
static inline u64 tb_route(struct tb_switch *sw)
{
return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo;
--
1.8.4.2
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 09/12] thunderbolt: Handle hotplug events
2013-11-29 1:35 [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed) Andreas Noever
` (7 preceding siblings ...)
2013-11-29 1:35 ` [PATCH 08/12] thunderbolt: Scan for downstream switches Andreas Noever
@ 2013-11-29 1:35 ` Andreas Noever
2013-11-29 1:35 ` [PATCH 10/12] thunderbolt: Add path setup code Andreas Noever
` (3 subsequent siblings)
12 siblings, 0 replies; 23+ messages in thread
From: Andreas Noever @ 2013-11-29 1:35 UTC (permalink / raw)
To: linux-kernel; +Cc: Andreas Noever
We receive a plug event callback whenever a thunderbolt device is added
or removed. This patch fills in the tb_handle_hotplug method and starts
reacting to these events by adding/removing switches from the hierarchy.
Signed-off-by: Andreas Noever <andreas.noever@gmail.com>
---
drivers/thunderbolt/tb.c | 83 +++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 82 insertions(+), 1 deletion(-)
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 9daff7b..d9bce38 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -108,6 +108,22 @@ static int tb_route_length(u64 route)
return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT;
}
+static struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route)
+{
+ u8 next_port = route & TB_PORT_MASK;
+ if (route == 0)
+ return sw;
+ if (next_port > sw->config.max_port_number)
+ return NULL;
+ if (tb_is_upstream_port(&sw->ports[next_port]))
+ return NULL;
+ if (!sw->ports[next_port].remote)
+ return NULL;
+ return get_switch_at_route(sw->ports[next_port].remote->sw,
+ route >> TB_ROUTE_SHIFT);
+}
+
+
/* thunderbolt capability lookup */
@@ -527,6 +543,27 @@ static void tb_scan_port(struct tb_port *port)
tb_scan_ports(sw);
}
+/**
+ * tb_invalidate_below() - recursively invalidate all ports below a given port
+ */
+static void tb_invalidate_below(struct tb_port *port)
+{
+ struct tb_switch *sw;
+ int i;
+ if (tb_is_upstream_port(port)) {
+ tb_port_WARN(port, "trying to invalidate an upstream port.\n");
+ return;
+ }
+ if (!port->remote)
+ return;
+ sw = port->remote->sw;
+ for (i = 0; i <= sw->config.max_port_number; i++) {
+ sw->ports[i].invalid = true;
+ if (!tb_is_upstream_port(&sw->ports[i]))
+ tb_invalidate_below(&sw->ports[i]);
+ }
+}
+
struct tb_hotplug_event {
struct work_struct work;
struct tb *tb;
@@ -544,10 +581,54 @@ static void tb_handle_hotplug(struct work_struct *work)
{
struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
struct tb *tb = ev->tb;
+ struct tb_switch *sw;
+ struct tb_port *port;
mutex_lock(&tb->lock);
if (tb->shutdown)
goto out;
- /* do nothing for now */
+
+ sw = get_switch_at_route(tb->root_switch, ev->route);
+ if (!sw) {
+ tb_WARN(tb,
+ "hotplug event for non existent switch %llx:%x (unplug: %d)\n",
+ ev->route,
+ ev->port,
+ ev->unplug);
+ goto out;
+ }
+ if (sw->config.max_port_number < ev->port) {
+ tb_WARN(tb,
+ "hotplug event for non existent port %llx:%x (unplug: %d)\n",
+ ev->route,
+ ev->port,
+ ev->unplug);
+ goto out;
+ }
+ port = &sw->ports[ev->port];
+ if (tb_is_upstream_port(port)) {
+ tb_WARN(tb,
+ "hotplug event for upstream port %llx:%x (unplug: %d)\n",
+ ev->route,
+ ev->port,
+ ev->unplug);
+ goto out;
+ }
+ if (ev->unplug) {
+ if (port->remote) {
+ tb_port_info(port, "unplugged\n");
+ tb_invalidate_below(port);
+ tb_switch_free(port->remote->sw);
+ port->remote = NULL;
+ } else {
+ tb_port_info(port,
+ "got unplug event for disconnected port\n");
+ }
+ } else {
+ tb_port_info(port, "hotplug: scanning\n");
+ tb_scan_port(port);
+ if (!port->remote)
+ tb_port_info(port, "hotplug: no switch found\n");
+ }
out:
mutex_unlock(&tb->lock);
kfree(ev);
--
1.8.4.2
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 10/12] thunderbolt: Add path setup code.
2013-11-29 1:35 [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed) Andreas Noever
` (8 preceding siblings ...)
2013-11-29 1:35 ` [PATCH 09/12] thunderbolt: Handle hotplug events Andreas Noever
@ 2013-11-29 1:35 ` Andreas Noever
2013-11-29 1:35 ` [PATCH 11/12] thunderbolt: Add support for simple pci tunnels Andreas Noever
` (2 subsequent siblings)
12 siblings, 0 replies; 23+ messages in thread
From: Andreas Noever @ 2013-11-29 1:35 UTC (permalink / raw)
To: linux-kernel; +Cc: Andreas Noever
A thunderbolt path is a unidirectional channel between two thunderbolt
ports. Two such paths are needed to establish a pci tunnel.
This patch introduces struct tb_path as well as a set of tb_path_*
methods which are used to activate & deactivate paths.
Signed-off-by: Andreas Noever <andreas.noever@gmail.com>
---
drivers/thunderbolt/tb.c | 260 +++++++++++++++++++++++++++++++++++++++++++++++
drivers/thunderbolt/tb.h | 60 +++++++++++
2 files changed, 320 insertions(+)
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index d9bce38..0fea997 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -84,6 +84,35 @@ static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
port->nfc_credits);
}
+static void tb_dump_hop(struct tb_port *port, struct tb_regs_hop *hop)
+{
+ tb_port_info(port,
+ " Hop through port %d to hop %d (%s)\n",
+ hop->out_port,
+ hop->next_hop,
+ hop->enable ? "enabled" : "disabled");
+ tb_port_info(port,
+ " Weight: %d Priority: %d Credits: %d Drop: %d\n",
+ hop->weight,
+ hop->priority,
+ hop->initial_credits,
+ hop->drop_packages);
+ tb_port_info(port,
+ " Counter enabled: %d Counter index: %d\n",
+ hop->counter_enable,
+ hop->counter);
+ tb_port_info(port,
+ " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
+ hop->ingress_fc,
+ hop->egress_fc,
+ hop->ingress_shared_buffer,
+ hop->egress_shared_buffer);
+ tb_port_info(port,
+ " Unknown1: %#x Unknown2: %#x Unknown3: %#x\n",
+ hop->unknown1,
+ hop->unknown2,
+ hop->unknown3);
+}
/**
* tb_downstream_route() - get route to downstream switch
@@ -305,6 +334,40 @@ static int tb_wait_for_port(struct tb_port *port)
return 0;
}
+/**
+ * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
+ *
+ * Change the number of NFC credits allocated to @port by @credits. To remove
+ * NFC credits pass a negative amount of credits.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+static int tb_port_add_nfc_credits(struct tb_port *port, int credits)
+{
+ if (credits == 0)
+ return 0;
+ tb_port_info(port,
+ "adding %#x NFC credits (%#x -> %#x)",
+ credits,
+ port->config.nfc_credits,
+ port->config.nfc_credits + credits);
+ port->config.nfc_credits += credits;
+ return tb_port_write(port, &port->config.nfc_credits,
+ TB_CFG_PORT, 4, 1);
+}
+
+/**
+ * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+static int tb_port_clear_counter(struct tb_port *port, int counter)
+{
+ u32 zero[3] = { 0, 0, 0 };
+ tb_port_info(port, "clearing counter %d\n", counter);
+ return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
+}
+
/* thunderbolt switch utility functions */
@@ -482,6 +545,203 @@ err:
return NULL;
}
+/* thunderbolt path handling */
+
+/**
+ * tb_path_alloc() - allocate a thunderbolt path
+ *
+ * Return: Returns a tb_path on success or an error code on failure.
+ */
+struct tb_path *tb_path_alloc(struct tb *tb, int num_hops)
+{
+ struct tb_path *path = kzalloc(sizeof(*path), GFP_KERNEL);
+ if (!path)
+ return NULL;
+ path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
+ if (!path->hops) {
+ kfree(path);
+ return NULL;
+ }
+ path->tb = tb;
+ path->path_length = num_hops;
+ return path;
+}
+
+/**
+ * tb_path_free() - free a deactivated path
+ */
+void tb_path_free(struct tb_path *path)
+{
+ if (path->activated) {
+ tb_WARN(path->tb, "trying to free an activated path\n");
+ return;
+ }
+ kfree(path->hops);
+ kfree(path);
+}
+
+static void __tb_path_deallocate_nfc(struct tb_path *path, int first_hop)
+{
+ int i, res;
+ for (i = first_hop; i < path->path_length; i++) {
+ res = tb_port_add_nfc_credits(path->hops[i].in_port,
+ -path->nfc_credits);
+ if (res)
+ tb_port_warn(path->hops[i].in_port,
+ "nfc credits deallocation failed for hop %d\n",
+ i);
+ }
+}
+
+static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
+{
+ int i, res;
+ struct tb_regs_hop hop = { };
+ for (i = first_hop; i < path->path_length; i++) {
+ res = tb_port_write(path->hops[i].in_port,
+ &hop,
+ TB_CFG_HOPS,
+ 2 * path->hops[i].in_hop_index,
+ 2);
+ if (res)
+ tb_port_warn(path->hops[i].in_port,
+ "hop deactivation failed for hop %d, index %d\n",
+ i,
+ path->hops[i].in_hop_index);
+ }
+}
+
+void tb_path_deactivate(struct tb_path *path)
+{
+ if (!path->activated) {
+ tb_WARN(path->tb, "trying to deactivate an inactive path\n");
+ return;
+ }
+ tb_info(path->tb,
+ "deactivating path from %llx:%x to %llx:%x\n",
+ tb_route(path->hops[0].in_port->sw),
+ path->hops[0].in_port->port,
+ tb_route(path->hops[path->path_length - 1].out_port->sw),
+ path->hops[path->path_length - 1].out_port->port);
+ __tb_path_deactivate_hops(path, 0);
+ __tb_path_deallocate_nfc(path, 0);
+ path->activated = false;
+}
+
+/**
+ * tb_path_activate() - activate a path
+ *
+ * Activate a path starting with the last hop and iterating backwards. The
+ * caller must fill path->hops before calling path_activate().
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_path_activate(struct tb_path *path)
+{
+ int i, res;
+ enum tb_path_port out_mask, in_mask;
+ if (path->activated) {
+ tb_WARN(path->tb, "trying to activate already activated path\n");
+ return -EINVAL;
+ }
+
+ tb_info(path->tb,
+ "activating path from %llx:%x to %llx:%x\n",
+ tb_route(path->hops[0].in_port->sw),
+ path->hops[0].in_port->port,
+ tb_route(path->hops[path->path_length - 1].out_port->sw),
+ path->hops[path->path_length - 1].out_port->port);
+
+ /* Clear counters. */
+ for (i = path->path_length - 1; i >= 0; i--) {
+ if (path->hops[i].in_counter_index == -1)
+ continue;
+ res = tb_port_clear_counter(path->hops[i].in_port,
+ path->hops[i].in_counter_index);
+ if (res)
+ goto err;
+ }
+
+ /* Add non flow controlled credits. */
+ for (i = path->path_length - 1; i >= 0; i--) {
+ res = tb_port_add_nfc_credits(path->hops[i].in_port,
+ path->nfc_credits);
+ if (res) {
+ __tb_path_deallocate_nfc(path, i);
+ goto err;
+ }
+ }
+
+ /* Activate hops. */
+ for (i = path->path_length - 1; i >= 0; i--) {
+ struct tb_regs_hop hop;
+
+ /* dword 0 */
+ hop.next_hop = path->hops[i].next_hop_index;
+ hop.out_port = path->hops[i].out_port->port;
+ /* TODO: figure out why these are good values */
+ hop.initial_credits = (i == path->path_length - 1) ? 16 : 7;
+ hop.unknown1 = 0;
+ hop.enable = 1;
+
+ /* dword 1 */
+ out_mask = (i == path->path_length - 1) ?
+ TB_PATH_DESTINATION : TB_PATH_INTERNAL;
+ in_mask = (i == 0) ? TB_PATH_SOURCE : TB_PATH_INTERNAL;
+ hop.weight = path->weight;
+ hop.unknown2 = 0;
+ hop.priority = path->priority;
+ hop.drop_packages = path->drop_packages;
+ hop.counter = path->hops[i].in_counter_index;
+ hop.counter_enable = path->hops[i].in_counter_index != -1;
+ hop.ingress_fc = path->ingress_fc_enable & in_mask;
+ hop.egress_fc = path->egress_fc_enable & out_mask;
+ hop.ingress_shared_buffer = path->ingress_shared_buffer
+ & in_mask;
+ hop.egress_shared_buffer = path->egress_shared_buffer
+ & out_mask;
+ hop.unknown3 = 0;
+
+ tb_port_info(path->hops[i].in_port,
+ "Writing hop %d, index %d",
+ i,
+ path->hops[i].in_hop_index);
+ tb_dump_hop(path->hops[i].in_port, &hop);
+ res = tb_port_write(path->hops[i].in_port,
+ &hop,
+ TB_CFG_HOPS,
+ 2 * path->hops[i].in_hop_index,
+ 2);
+ if (res) {
+ __tb_path_deactivate_hops(path, i);
+ __tb_path_deallocate_nfc(path, 0);
+ goto err;
+ }
+ }
+ path->activated = true;
+ tb_info(path->tb, "path activation complete\n");
+ return 0;
+err:
+ tb_WARN(path->tb, "path activation failed\n");
+ return res;
+}
+
+/**
+ * tb_path_is_invalid() - check whether any ports on the path are invalid
+ *
+ * Return: Returns true if the path is invalid, false otherwise.
+ */
+bool tb_path_is_invalid(struct tb_path *path)
+{
+ int i = 0;
+ for (i = 0; i < path->path_length; i++) {
+ if (path->hops[i].in_port->invalid)
+ return true;
+ if (path->hops[i].out_port->invalid)
+ return true;
+ }
+ return false;
+}
/* startup, enumeration, hot plug handling and shutdown */
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index fe35d4d..cc1f079 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -39,6 +39,60 @@ struct tb_port {
};
/**
+ * struct tb_path_hop - routing information for a tb_path
+ *
+ * Hop configuration is always done on the IN port of a switch.
+ * in_port and out_port have to be on the same switch. Packets arriving on
+ * in_port with "hop" = in_hop_index will get routed to through out_port. The
+ * next hop to take (on out_port->remote) is determined by next_hop_index.
+ *
+ * in_counter_index is the index of a counter (in TB_CFG_COUNTERS) on the in
+ * port.
+ */
+struct tb_path_hop {
+ struct tb_port *in_port;
+ struct tb_port *out_port;
+ int in_hop_index;
+ int in_counter_index; /* write -1 to disable counters for this hop. */
+ int next_hop_index;
+};
+
+/**
+ * enum tb_path_port - path options mask
+ */
+enum tb_path_port {
+ TB_PATH_NONE = 0,
+ TB_PATH_SOURCE = 1, /* activate on the first hop (out of src) */
+ TB_PATH_INTERNAL = 2, /* activate on other hops (not the first/last) */
+ TB_PATH_DESTINATION = 4, /* activate on the last hop (into dst) */
+ TB_PATH_ALL = 7,
+};
+
+/**
+ * struct tb_path - a unidirectional path between two ports
+ *
+ * A path consists of a number of hops (see tb_path_hop). To establish a PCIe
+ * tunnel two paths have to be created between the two PCIe ports.
+ *
+ */
+struct tb_path {
+ struct tb *tb;
+ int nfc_credits; /* non flow controlled credits */
+ enum tb_path_port ingress_shared_buffer;
+ enum tb_path_port egress_shared_buffer;
+ enum tb_path_port ingress_fc_enable;
+ enum tb_path_port egress_fc_enable;
+
+ int priority:3;
+ int weight:4;
+ bool drop_packages;
+ bool activated;
+ struct tb_path_hop *hops;
+ int path_length; /* number of hops */
+};
+
+
+/**
* struct tb - main thunderbolt bus structure
*/
struct tb {
@@ -164,4 +218,10 @@ void thunderbolt_shutdown_and_free(struct tb *tb);
int tb_find_cap(struct tb_port *port, enum tb_cfg_space space, u32 value);
+struct tb_path *tb_path_alloc(struct tb *tb, int num_hops);
+void tb_path_free(struct tb_path *path);
+int tb_path_activate(struct tb_path *path);
+void tb_path_deactivate(struct tb_path *path);
+bool tb_path_is_invalid(struct tb_path *path);
+
#endif
--
1.8.4.2
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 11/12] thunderbolt: Add support for simple pci tunnels.
2013-11-29 1:35 [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed) Andreas Noever
` (9 preceding siblings ...)
2013-11-29 1:35 ` [PATCH 10/12] thunderbolt: Add path setup code Andreas Noever
@ 2013-11-29 1:35 ` Andreas Noever
2013-11-29 1:35 ` [PATCH 12/12] thunderbolt: Scan and activate one PCI device Andreas Noever
2013-12-02 16:29 ` [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed) Matthew Garrett
12 siblings, 0 replies; 23+ messages in thread
From: Andreas Noever @ 2013-11-29 1:35 UTC (permalink / raw)
To: linux-kernel; +Cc: Andreas Noever
A pci downstream and pci upstream port can be connected through a
tunnel. To establish the tunnel we have to set up two unidirectional
paths between the two ports.
Right now we only support paths with two hops (i.e. no chaining) and at
most one pci device per thunderbolt device.
Signed-off-by: Andreas Noever <andreas.noever@gmail.com>
---
drivers/thunderbolt/Makefile | 2 +-
drivers/thunderbolt/tb.c | 27 ++++++
drivers/thunderbolt/tb.h | 2 +
drivers/thunderbolt/tb_pci.c | 215 +++++++++++++++++++++++++++++++++++++++++++
drivers/thunderbolt/tb_pci.h | 29 ++++++
5 files changed, 274 insertions(+), 1 deletion(-)
create mode 100644 drivers/thunderbolt/tb_pci.c
create mode 100644 drivers/thunderbolt/tb_pci.h
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index 7c5b811..35f2147 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,3 +1,3 @@
obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
-thunderbolt-objs := dsl3510.o tb_cfg.o tb.o
+thunderbolt-objs := dsl3510.o tb_cfg.o tb.o tb_pci.o
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 0fea997..9f25935 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -11,6 +11,7 @@
#include "dsl3510.h"
#include "tb.h"
#include "tb_regs.h"
+#include "tb_pci.h"
/* utility functions */
@@ -824,6 +825,19 @@ static void tb_invalidate_below(struct tb_port *port)
}
}
+static void destroy_invalid_tunnels(struct tb *tb)
+{
+ struct tb_pci_tunnel *tunnel;
+ struct tb_pci_tunnel *n;
+ list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list)
+ {
+ if (tb_pci_is_invalid(tunnel)) {
+ tb_pci_deactivate(tunnel);
+ tb_pci_free(tunnel);
+ }
+ }
+}
+
struct tb_hotplug_event {
struct work_struct work;
struct tb *tb;
@@ -877,6 +891,7 @@ static void tb_handle_hotplug(struct work_struct *work)
if (port->remote) {
tb_port_info(port, "unplugged\n");
tb_invalidate_below(port);
+ destroy_invalid_tunnels(tb);
tb_switch_free(port->remote->sw);
port->remote = NULL;
} else {
@@ -918,12 +933,23 @@ static void tb_schedule_hotplug_handler(void *data, u64 route, u8 port,
* thunderbolt_shutdown_and_free() - shutdown everything
*
* Free all switches and the config channel.
+ *
+ * Only tb->lock and tb->tunnel_list must be initialized. If tb->tunnel_list
+ * is populated then we assume that tb->cfg is setup.
*/
void thunderbolt_shutdown_and_free(struct tb *tb)
{
+ struct tb_pci_tunnel *tunnel;
+ struct tb_pci_tunnel *n;
+
mutex_lock(&tb->lock);
tb->shutdown = true; /* signal tb_handle_hotplug to quit */
+ list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list) {
+ tb_pci_deactivate(tunnel);
+ tb_pci_free(tunnel);
+ }
+
if (tb->root_switch)
tb_switch_free(tb->root_switch);
tb->root_switch = NULL;
@@ -966,6 +992,7 @@ struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi)
tb->nhi = nhi;
mutex_init(&tb->lock);
mutex_lock(&tb->lock);
+ INIT_LIST_HEAD(&tb->tunnel_list);
tb->wq = alloc_ordered_workqueue("thunderbolt", 0);
if (!tb->wq)
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index cc1f079..bb9a9de 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -104,10 +104,12 @@ struct tb {
struct tb_cfg *cfg;
struct workqueue_struct *wq; /* ordered workqueue for plug events */
struct tb_switch *root_switch;
+ struct list_head tunnel_list; /* list of active PCIe tunnels */
bool shutdown; /*
* Once this is set tb_handle_hotplug will exit (once it
* can aquire lock at least once). Used to drain wq.
*/
+
};
/**
diff --git a/drivers/thunderbolt/tb_pci.c b/drivers/thunderbolt/tb_pci.c
new file mode 100644
index 0000000..42a173b
--- /dev/null
+++ b/drivers/thunderbolt/tb_pci.c
@@ -0,0 +1,215 @@
+/*
+ * Thunderbolt PCIe tunnel
+ *
+ * Copyright (c) 2013 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/list.h>
+
+#include "tb_pci.h"
+#include "tb.h"
+
+#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
+ do { \
+ struct tb_pci_tunnel *__tunnel = (tunnel); \
+ level(__tunnel->tb, "%llx:%x <-> %llx:%x (PCI): " fmt, \
+ tb_route(__tunnel->down_port->sw), \
+ __tunnel->down_port->port, \
+ tb_route(__tunnel->up_port->sw), \
+ __tunnel->up_port->port, \
+ ## arg); \
+ } while (0)
+
+#define tb_tunnel_WARN(tunnel, fmt, arg...) \
+ __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
+#define tb_tunnel_warn(tunnel, fmt, arg...) \
+ __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
+#define tb_tunnel_info(tunnel, fmt, arg...) \
+ __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
+
+static void tb_pci_init_path(struct tb_path *path)
+{
+ path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
+ path->egress_shared_buffer = TB_PATH_NONE;
+ path->ingress_fc_enable = TB_PATH_ALL;
+ path->ingress_shared_buffer = TB_PATH_NONE;
+ path->priority = 3;
+ path->weight = 1;
+ path->drop_packages = 0;
+ path->nfc_credits = 0;
+}
+
+/**
+ * tb_pci_alloc() - allocate a pci tunnel
+ *
+ * Allocates a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
+ * TB_TYPE_PCIE_DOWN.
+ *
+ * Currently only paths of consisting of two hops are supported (that is the
+ * ports must be on "adjacent" switches).
+ *
+ * The paths are hard-coded to use hop 8 (the only working hop id available on
+ * my thunderbolt devices). Therefore at most ONE path per device may be
+ * activated.
+ *
+ * Return: Returns a tb_pci_tunnel on success or NULL on failure.
+ */
+struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up,
+ struct tb_port *down)
+{
+ struct tb_pci_tunnel *tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
+ if (!tunnel)
+ goto err;
+ tunnel->tb = tb;
+ tunnel->down_port = down;
+ tunnel->up_port = up;
+ INIT_LIST_HEAD(&tunnel->list);
+ tunnel->path_to_up = tb_path_alloc(up->sw->tb, 2);
+ if (!tunnel->path_to_up)
+ goto err;
+ tunnel->path_to_down = tb_path_alloc(up->sw->tb, 2);
+ if (!tunnel->path_to_down)
+ goto err;
+ tb_pci_init_path(tunnel->path_to_up);
+ tb_pci_init_path(tunnel->path_to_down);
+
+ tunnel->path_to_up->hops[0].in_port = down;
+ tunnel->path_to_up->hops[0].in_hop_index = 8;
+ tunnel->path_to_up->hops[0].in_counter_index = -1;
+ tunnel->path_to_up->hops[0].out_port = tb_upstream_port(up->sw)->remote;
+ tunnel->path_to_up->hops[0].next_hop_index = 8;
+
+ tunnel->path_to_up->hops[1].in_port = tb_upstream_port(up->sw);
+ tunnel->path_to_up->hops[1].in_hop_index = 8;
+ tunnel->path_to_up->hops[1].in_counter_index = -1;
+ tunnel->path_to_up->hops[1].out_port = up;
+ tunnel->path_to_up->hops[1].next_hop_index = 8;
+
+ tunnel->path_to_down->hops[0].in_port = up;
+ tunnel->path_to_down->hops[0].in_hop_index = 8;
+ tunnel->path_to_down->hops[0].in_counter_index = -1;
+ tunnel->path_to_down->hops[0].out_port = tb_upstream_port(up->sw);
+ tunnel->path_to_down->hops[0].next_hop_index = 8;
+
+ tunnel->path_to_down->hops[1].in_port =
+ tb_upstream_port(up->sw)->remote;
+ tunnel->path_to_down->hops[1].in_hop_index = 8;
+ tunnel->path_to_down->hops[1].in_counter_index = -1;
+ tunnel->path_to_down->hops[1].out_port = down;
+ tunnel->path_to_down->hops[1].next_hop_index = 8;
+ return tunnel;
+
+err:
+ if (tunnel) {
+ if (tunnel->path_to_down)
+ tb_path_free(tunnel->path_to_down);
+ if (tunnel->path_to_up)
+ tb_path_free(tunnel->path_to_up);
+ kfree(tunnel);
+ }
+ return NULL;
+}
+
+/**
+ * tb_pci_free() - free a tunnel
+ *
+ * The tunnel must have been deactivated.
+ */
+void tb_pci_free(struct tb_pci_tunnel *tunnel)
+{
+ if (tunnel->path_to_up->activated || tunnel->path_to_down->activated) {
+ tb_tunnel_WARN(tunnel, "trying to free an activated tunnel\n");
+ return;
+ }
+ tb_path_free(tunnel->path_to_up);
+ tb_path_free(tunnel->path_to_down);
+ kfree(tunnel);
+}
+
+/**
+ * tb_pci_is_invalid - check whether an activated path is still valid
+ */
+bool tb_pci_is_invalid(struct tb_pci_tunnel *tunnel)
+{
+ WARN_ON(!tunnel->path_to_up->activated);
+ WARN_ON(!tunnel->path_to_down->activated);
+
+ return tb_path_is_invalid(tunnel->path_to_up)
+ || tb_path_is_invalid(tunnel->path_to_down);
+}
+
+/**
+ * tb_pci_port_active() - activate/deactivate PCI capability
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+static int tb_pci_port_active(struct tb_port *port, bool active)
+{
+ u32 word = active ? 0x80000000 : 0x0;
+ int cap = tb_find_cap(port, TB_CFG_PORT, TB_CAP_PCIE);
+ if (cap <= 0) {
+ tb_port_warn(port, "TB_CAP_PCIE not found: %d\n", cap);
+ return cap ? cap : -ENXIO;
+ }
+ return tb_port_write(port, &word, TB_CFG_PORT, cap, 1);
+}
+
+/**
+ * tb_pci_activate() - activate a tunnel
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_pci_activate(struct tb_pci_tunnel *tunnel)
+{
+ int res;
+ if (tunnel->path_to_up->activated || tunnel->path_to_down->activated) {
+ tb_tunnel_WARN(tunnel,
+ "trying to activate an already activated tunnel\n");
+ return -EINVAL;
+ }
+ tb_tunnel_info(tunnel, "activating\n");
+
+ res = tb_path_activate(tunnel->path_to_up);
+ if (res)
+ goto err;
+ res = tb_path_activate(tunnel->path_to_down);
+ if (res)
+ goto err;
+
+ res = tb_pci_port_active(tunnel->down_port, true);
+ if (res)
+ goto err;
+
+ res = tb_pci_port_active(tunnel->up_port, true);
+ if (res)
+ goto err;
+
+ list_add(&tunnel->list, &tunnel->tb->tunnel_list);
+ return 0;
+err:
+ tb_tunnel_warn(tunnel, "activation failed\n");
+ tb_pci_deactivate(tunnel);
+ return res;
+}
+
+/**
+ * tb_pci_deactivate() - deactivate a tunnel
+ */
+void tb_pci_deactivate(struct tb_pci_tunnel *tunnel)
+{
+ tb_tunnel_info(tunnel, "deactivating\n");
+ /*
+ * TODO: enable reset by writing 0x04000000 to TB_CAP_PCIE + 1 on up
+ * port. Seems to have no effect?
+ */
+ tb_pci_port_active(tunnel->up_port, false);
+ tb_pci_port_active(tunnel->down_port, false);
+ if (tunnel->path_to_down->activated)
+ tb_path_deactivate(tunnel->path_to_down);
+ if (tunnel->path_to_up->activated)
+ tb_path_deactivate(tunnel->path_to_up);
+
+ list_del_init(&tunnel->list);
+}
+
diff --git a/drivers/thunderbolt/tb_pci.h b/drivers/thunderbolt/tb_pci.h
new file mode 100644
index 0000000..5ffa6bc
--- /dev/null
+++ b/drivers/thunderbolt/tb_pci.h
@@ -0,0 +1,29 @@
+/*
+ * Thunderbolt PCIe tunnel
+ *
+ * Copyright (c) 2013 Andreas Noever <andreas.noever@gmail.com>
+ */
+
+#ifndef TB_PCI_H_
+#define TB_PCI_H_
+
+#include "tb.h"
+
+struct tb_pci_tunnel {
+ struct tb *tb;
+ struct tb_port *up_port;
+ struct tb_port *down_port;
+ struct tb_path *path_to_up;
+ struct tb_path *path_to_down;
+ struct list_head list;
+};
+
+struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up,
+ struct tb_port *down);
+void tb_pci_free(struct tb_pci_tunnel *tunnel);
+int tb_pci_activate(struct tb_pci_tunnel *tunnel);
+void tb_pci_deactivate(struct tb_pci_tunnel *tunnel);
+bool tb_pci_is_invalid(struct tb_pci_tunnel *tunnel);
+
+#endif
+
--
1.8.4.2
^ permalink raw reply related [flat|nested] 23+ messages in thread* [PATCH 12/12] thunderbolt: Scan and activate one PCI device
2013-11-29 1:35 [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed) Andreas Noever
` (10 preceding siblings ...)
2013-11-29 1:35 ` [PATCH 11/12] thunderbolt: Add support for simple pci tunnels Andreas Noever
@ 2013-11-29 1:35 ` Andreas Noever
2013-12-02 16:29 ` [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed) Matthew Garrett
12 siblings, 0 replies; 23+ messages in thread
From: Andreas Noever @ 2013-11-29 1:35 UTC (permalink / raw)
To: linux-kernel; +Cc: Andreas Noever
Use the tb_pci_* methods introduced in the last patch to activate
the first PCI device on every plugged-in thunderbolt device.
Signed-off-by: Andreas Noever <andreas.noever@gmail.com>
---
drivers/thunderbolt/tb.c | 118 +++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 115 insertions(+), 3 deletions(-)
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 9f25935..0e82afd 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -838,6 +838,109 @@ static void destroy_invalid_tunnels(struct tb *tb)
}
}
+/**
+ * find_pci_up_port() - return the first PCIe up port on @sw
+ */
+static struct tb_port *find_pci_up_port(struct tb_switch *sw)
+{
+ int i;
+ for (i = 1; i <= sw->config.max_port_number; i++)
+ if (sw->ports[i].config.type == TB_TYPE_PCIE_UP)
+ return &sw->ports[i];
+ return NULL;
+}
+
+/**
+ * find_unused_down_port() - return the first inactive PCIe down port on @sw
+ */
+static struct tb_port *find_unused_down_port(struct tb_switch *sw)
+{
+ int i;
+ int cap;
+ int res;
+ int data;
+ for (i = 1; i <= sw->config.max_port_number; i++) {
+ if (tb_is_upstream_port(&sw->ports[i]))
+ continue;
+ if (sw->ports[i].config.type != TB_TYPE_PCIE_DOWN)
+ continue;
+ cap = tb_find_cap(&sw->ports[i], TB_CFG_PORT, TB_CAP_PCIE);
+ if (cap <= 0)
+ continue;
+ res = tb_port_read(&sw->ports[i], &data, TB_CFG_PORT, cap, 1);
+ if (res < 0)
+ continue;
+ if (data & 0x80000000)
+ continue;
+ return &sw->ports[i];
+ }
+ return NULL;
+}
+
+/**
+ * tb_activate_pcie_devices() - scan for and activate PCIe devices
+ *
+ * This method is somewhat ad hoc. For now it only supports one device
+ * per port and only devices at depth 1.
+ */
+static void tb_activate_pcie_devices(struct tb *tb)
+{
+ int i;
+ int cap;
+ u32 data;
+ struct tb_switch *sw;
+ struct tb_port *up_port;
+ struct tb_port *down_port;
+ struct tb_pci_tunnel *tunnel;
+ /* scan for pcie devices at depth 1*/
+ for (i = 1; i <= tb->root_switch->config.max_port_number; i++) {
+ if (tb_is_upstream_port(&tb->root_switch->ports[i]))
+ continue;
+ if (tb->root_switch->ports[i].config.type != TB_TYPE_PORT)
+ continue;
+ if (!tb->root_switch->ports[i].remote)
+ continue;
+ sw = tb->root_switch->ports[i].remote->sw;
+ up_port = find_pci_up_port(sw);
+ if (!up_port) {
+ tb_sw_info(sw, "no PCIe devices found, aborting\n");
+ continue;
+ }
+
+ /* check whether port is already activated */
+ cap = tb_find_cap(up_port, TB_CFG_PORT, TB_CAP_PCIE);
+ if (cap <= 0)
+ continue;
+ if (tb_port_read(up_port, &data, TB_CFG_PORT, cap, 1))
+ continue;
+ if (data & 0x80000000) {
+ tb_port_info(up_port,
+ "PCIe port already activated, aborting\n");
+ continue;
+ }
+
+ down_port = find_unused_down_port(tb->root_switch);
+ if (!down_port) {
+ tb_port_info(up_port,
+ "All PCIe down ports are occupied, aborting\n");
+ continue;
+ }
+ tunnel = tb_pci_alloc(tb, up_port, down_port);
+ if (!tunnel) {
+ tb_port_info(up_port,
+ "PCIe tunnel allocation failed, aborting\n");
+ continue;
+ }
+
+ if (tb_pci_activate(tunnel)) {
+ tb_port_info(up_port,
+ "PCIe tunnel activation failed, aborting\n");
+ tb_pci_free(tunnel);
+ }
+
+ }
+}
+
struct tb_hotplug_event {
struct work_struct work;
struct tb *tb;
@@ -901,8 +1004,16 @@ static void tb_handle_hotplug(struct work_struct *work)
} else {
tb_port_info(port, "hotplug: scanning\n");
tb_scan_port(port);
- if (!port->remote)
+ if (!port->remote) {
tb_port_info(port, "hotplug: no switch found\n");
+ } else if (port->remote->sw->config.depth > 1) {
+ tb_sw_warn(port->remote->sw,
+ "hotplug: chaining not supported\n");
+ } else {
+ tb_sw_info(port->remote->sw,
+ "hotplug: activating pcie devices\n");
+ tb_activate_pcie_devices(tb);
+ }
}
out:
mutex_unlock(&tb->lock);
@@ -972,8 +1083,8 @@ void thunderbolt_shutdown_and_free(struct tb *tb)
/**
* thunderbolt_alloc_and_start() - setup the thunderbolt bus
*
- * Allocates a tb_cfg control channel, initializes the root switch and enables
- * plug events.
+ * Allocates a tb_cfg control channel, initializes the root switch, enables
+ * plug events and activates pci devices.
*
* Return: Returns NULL on error.
*/
@@ -1014,6 +1125,7 @@ struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi)
goto err_locked;
tb_scan_ports(tb->root_switch);
+ tb_activate_pcie_devices(tb);
mutex_unlock(&tb->lock);
return tb;
--
1.8.4.2
^ permalink raw reply related [flat|nested] 23+ messages in thread* Re: [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed)
2013-11-29 1:35 [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed) Andreas Noever
` (11 preceding siblings ...)
2013-11-29 1:35 ` [PATCH 12/12] thunderbolt: Scan and activate one PCI device Andreas Noever
@ 2013-12-02 16:29 ` Matthew Garrett
2014-03-04 0:09 ` Matthew Garrett
12 siblings, 1 reply; 23+ messages in thread
From: Matthew Garrett @ 2013-12-02 16:29 UTC (permalink / raw)
To: Andreas Noever; +Cc: linux-kernel
On Fri, Nov 29, 2013 at 02:35:37AM +0100, Andreas Noever wrote:
> There are still a number of limitations:
> (1) The system must be booted with acpi_osi=Darwin. Otherwise ACPI will cut
> power to the controller.
> (2) After suspend the controller is gone. I think that ACPI thinks that we are
> Windows and cuts power, even with acpi_osi=Darwin.
There's a few ACPI methods involved here. The first is DTLK, which is
called by the firmware on resume. It'll cut power to the chip. So, you
need to be able to power it back up. However, you only want to power it
up if there's a device connected. The XRIL method will return 1 if
there's a device connected and 0 otherwise. If there's a device
connected, call XRPE with an argument of 1 to power it back up.
That works fine if there's no hotplugging involved, but there is so
things get a little more complicated. Apple provide an out of band
mechanism for receiving notifications on the device. Call the _GPE
method and install a gpe handler for the value that you get back. This
will be called on every device plug or unplug (possibly just plug? It's
been a while since I tested). Call XRIL to confirm that there's a device
attached and then call XRPE to power it up.
At least, that's my recollection. There's also the SXIO, SXIL, SXLV and
SXFP methods that provide some kind of power management, but I'm not
sure how those fit in.
If you want to confirm that you're supposed to be using these methods,
call _DSM on the device with a UUID of
C6B7B5A0-1813-1C44-B0C9-FE695EAF949B (see DTGP in the DSDT), arg1 of 1,
arg2 of 0. If it's an Apple you'll get back return value of 1 and a
buffer containing the string "power-save". If it's not an Apple, or if
the device doesn't support power saving, you'll get back a zero or an
error that the method wasn't found.
--
Matthew Garrett | mjg59@srcf.ucam.org
^ permalink raw reply [flat|nested] 23+ messages in thread* Re: [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed)
2013-12-02 16:29 ` [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed) Matthew Garrett
@ 2014-03-04 0:09 ` Matthew Garrett
2014-03-04 23:59 ` Andreas Noever
0 siblings, 1 reply; 23+ messages in thread
From: Matthew Garrett @ 2014-03-04 0:09 UTC (permalink / raw)
To: Andreas Noever; +Cc: linux-kernel
Actually, turns out there's a much easier way. Can you try this patch?
I see the Thunderbolt controller after resume, although it doesn't seem
to be in a working state.
commit 102547d63e2cbbda42a25f650df9a33cf929a385
Author: Matthew Garrett <matthew.garrett@nebula.com>
Date: Mon Mar 3 18:49:28 2014 -0500
ACPI: Support _OSI("Darwin") correctly
Apple hardware queries _OSI("Darwin") in order to determine whether the
system is running OS X, and changes firmware behaviour based on the answer.
The most obvious difference in behaviour is that Thunderbolt hardware is
forcibly powered down unless the system is running OS X. The obvious solution
would be to simply add Darwin to the list of supported _OSI strings, but this
causes problems.
Recent Apple hardware includes two separate methods for checking _OSI
strings. The first will check whether Darwin is supported, and if so will
exit. The second will check whether Darwin is supported, but will then
continue to check for further operating systems. If a further operating
system is found then later firmware code will assume that the OS is not OS X.
This results in the unfortunate situation where the Thunderbolt controller is
available at boot time but remains powered down after suspend.
The easiest way to handle this is to special-case it in the Linux-specific
OSI handling code. If we see Darwin, we should answer true and then disable
all other _OSI vendor strings.
Signed-off-by: Matthew Garrett <matthew.garrett@nebula.com>
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 54a20ff..ef8656c 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -156,6 +156,16 @@ static u32 acpi_osi_handler(acpi_string interface, u32 supported)
osi_linux.dmi ? " via DMI" : "");
}
+ if (!strcmp("Darwin", interface)) {
+ /*
+ * Apple firmware will behave poorly if it receives positive
+ * answers to "Darwin" and any other OS. Respond positively
+ * to Darwin and then disable all other vendor strings.
+ */
+ acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
+ supported = ACPI_UINT32_MAX;
+ }
+
return supported;
}
--
Matthew Garrett | mjg59@srcf.ucam.org
^ permalink raw reply related [flat|nested] 23+ messages in thread* Re: [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed)
2014-03-04 0:09 ` Matthew Garrett
@ 2014-03-04 23:59 ` Andreas Noever
2014-03-05 0:26 ` Matthew Garrett
2014-03-08 2:40 ` Matthew Garrett
0 siblings, 2 replies; 23+ messages in thread
From: Andreas Noever @ 2014-03-04 23:59 UTC (permalink / raw)
To: Matthew Garrett; +Cc: linux-kernel@vger.kernel.org
On Tue, Mar 4, 2014 at 1:09 AM, Matthew Garrett <mjg59@srcf.ucam.org> wrote:
>
> Actually, turns out there's a much easier way. Can you try this patch?
> I see the Thunderbolt controller after resume, although it doesn't seem
> to be in a working state.
>
> commit 102547d63e2cbbda42a25f650df9a33cf929a385
> Author: Matthew Garrett <matthew.garrett@nebula.com>
> Date: Mon Mar 3 18:49:28 2014 -0500
>
> ACPI: Support _OSI("Darwin") correctly
>
> Apple hardware queries _OSI("Darwin") in order to determine whether the
> system is running OS X, and changes firmware behaviour based on the answer.
> The most obvious difference in behaviour is that Thunderbolt hardware is
> forcibly powered down unless the system is running OS X. The obvious solution
> would be to simply add Darwin to the list of supported _OSI strings, but this
> causes problems.
>
> Recent Apple hardware includes two separate methods for checking _OSI
> strings. The first will check whether Darwin is supported, and if so will
> exit. The second will check whether Darwin is supported, but will then
> continue to check for further operating systems. If a further operating
> system is found then later firmware code will assume that the OS is not OS X.
> This results in the unfortunate situation where the Thunderbolt controller is
> available at boot time but remains powered down after suspend.
>
> The easiest way to handle this is to special-case it in the Linux-specific
> OSI handling code. If we see Darwin, we should answer true and then disable
> all other _OSI vendor strings.
>
> Signed-off-by: Matthew Garrett <matthew.garrett@nebula.com>
>
> diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
> index 54a20ff..ef8656c 100644
> --- a/drivers/acpi/osl.c
> +++ b/drivers/acpi/osl.c
> @@ -156,6 +156,16 @@ static u32 acpi_osi_handler(acpi_string interface, u32 supported)
> osi_linux.dmi ? " via DMI" : "");
> }
>
> + if (!strcmp("Darwin", interface)) {
> + /*
> + * Apple firmware will behave poorly if it receives positive
> + * answers to "Darwin" and any other OS. Respond positively
> + * to Darwin and then disable all other vendor strings.
> + */
> + acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
> + supported = ACPI_UINT32_MAX;
> + }
> +
> return supported;
> }
>
>
> --
> Matthew Garrett | mjg59@srcf.ucam.org
I believe that the patch has the same effect as passing
acpi_osi=! acpi_osi=Darwin
to the kernel. The problem with that approach is that it changes the
firmware behaviour quite a lot. In particular it prevents Linux from
taking over pci hotplug control:
acpi PNP0A08:00: _OSC: OS supports [ExtendedConfig ASPM ClockPM Segments MSI]
\_SB_.PCI0:_OSC invalid UUID
_OSC request data:1 1f 0
acpi PNP0A08:00: _OSC failed (AE_ERROR); disabling ASPM
Booting with just acpi_osi=Darwin (or without acpi_osi) gives:
acpi PNP0A08:00: _OSC: OS supports [ExtendedConfig ASPM ClockPM Segments MSI]
acpi PNP0A08:00: _OSC: platform does not support [PME]
acpi PNP0A08:00: _OSC: OS now controls [PCIeHotplug AER PCIeCapability]
The end result is that pci hotplug events are not handled. If I
unplug the Thunderbolt Ethernet adapter the device is not removed from
lspci.
I would prefer to find a solution that boots without acpi_osi=Darwin
as it seems to trigger quite a lot of ACPI code. My current approach is
to inject a custom OSDW method somewhere into the NHI namespace and to
replace _PTS and _WAK from my driver. I can then wake the controller
with the XRPE method. The last problem is that the PCI code does not
allocate enough (or any) bus numbers below the hotplug ports. I'm
trying to add some quirks to it but the code is not really made for
that...
Andreas
^ permalink raw reply [flat|nested] 23+ messages in thread* Re: [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed)
2014-03-04 23:59 ` Andreas Noever
@ 2014-03-05 0:26 ` Matthew Garrett
2014-03-08 2:40 ` Matthew Garrett
1 sibling, 0 replies; 23+ messages in thread
From: Matthew Garrett @ 2014-03-05 0:26 UTC (permalink / raw)
To: Andreas Noever; +Cc: linux-kernel@vger.kernel.org
On Wed, Mar 05, 2014 at 12:59:54AM +0100, Andreas Noever wrote:
>
> I believe that the patch has the same effect as passing
> acpi_osi=! acpi_osi=Darwin
> to the kernel. The problem with that approach is that it changes the
> firmware behaviour quite a lot. In particular it prevents Linux from
> taking over pci hotplug control:
There's not really any way around this. The method to power up the chip
will refuse to run unless the system claims Darwin and nothing else.
> I would prefer to find a solution that boots without acpi_osi=Darwin
> as seems to trigger quite a lot of ACPI code. My current approach is
> to inject a custom OSDW method somewhere into the NHI namespace and to
> replace _PTS and _WAK from my driver. I can then wake the controller
> with the XRPE method. The last problem is that the PCI code does not
> allocate enough (or any) bus numbers below the hotplug ports. I'm
> trying to add some quirks to it but the code is not really made for
> that...
I don't think that's a workable approach - any change in the firmware
implementation could break it. If we're going to insert quirks then I
think it makes more sense to do it in the PCI layer rather than
injecting things into ACPI.
--
Matthew Garrett | mjg59@srcf.ucam.org
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed)
2014-03-04 23:59 ` Andreas Noever
2014-03-05 0:26 ` Matthew Garrett
@ 2014-03-08 2:40 ` Matthew Garrett
2014-03-11 13:08 ` Andreas Noever
1 sibling, 1 reply; 23+ messages in thread
From: Matthew Garrett @ 2014-03-08 2:40 UTC (permalink / raw)
To: Andreas Noever; +Cc: linux-kernel@vger.kernel.org
Ok, can you try this one?
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 8856bd3..202b4da 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -62,6 +62,7 @@ ACPI_MODULE_NAME("utosi")
static struct acpi_interface_info acpi_default_supported_interfaces[] = {
/* Operating System Vendor Strings */
+ {"Darwin", NULL, 0, ACPI_OSI_DARWIN}, /* OS X */
{"Windows 2000", NULL, 0, ACPI_OSI_WIN_2000}, /* Windows 2000 */
{"Windows 2001", NULL, 0, ACPI_OSI_WIN_XP}, /* Windows XP */
{"Windows 2001 SP1", NULL, 0, ACPI_OSI_WIN_XP_SP1}, /* Windows XP SP1 */
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index fc1aa79..5bf45c06 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -152,6 +152,16 @@ static u32 acpi_osi_handler(acpi_string interface, u32 supported)
osi_linux.dmi ? " via DMI" : "");
}
+ if (!strcmp("Darwin", interface)) {
+ /*
+ * Apple firmware will behave poorly if it receives positive
+ * answers to "Darwin" and any other OS. Respond positively
+ * to Darwin and then disable all other vendor strings.
+ */
+ acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
+ supported = ACPI_UINT32_MAX;
+ }
+
return supported;
}
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index c1c4102..8d3178c 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -432,6 +432,17 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
acpi_handle handle = device->handle;
/*
+ * Apple always return failure on _OSC calls when _OSI("Darwin") has
+ * been called successfully. We know the feature set supported by the
+ * platform, so avoid calling _OSC at all
+ */
+
+ if (acpi_gbl_osi_data == ACPI_OSI_DARWIN) {
+ root->osc_control_set = ~OSC_PCI_EXPRESS_PME_CONTROL;
+ return;
+ }
+
+ /*
* All supported architectures that use ACPI have support for
* PCI domains, so we indicate this in _OSC support capabilities.
*/
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 68a3ada..4580c67 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -1210,17 +1210,18 @@ struct acpi_memory_list {
#define ACPI_ENABLE_ALL_FEATURE_STRINGS (ACPI_ENABLE_INTERFACES | ACPI_FEATURE_STRINGS)
#define ACPI_ENABLE_ALL_STRINGS (ACPI_ENABLE_INTERFACES | ACPI_VENDOR_STRINGS | ACPI_FEATURE_STRINGS)
-#define ACPI_OSI_WIN_2000 0x01
-#define ACPI_OSI_WIN_XP 0x02
-#define ACPI_OSI_WIN_XP_SP1 0x03
-#define ACPI_OSI_WINSRV_2003 0x04
-#define ACPI_OSI_WIN_XP_SP2 0x05
-#define ACPI_OSI_WINSRV_2003_SP1 0x06
-#define ACPI_OSI_WIN_VISTA 0x07
-#define ACPI_OSI_WINSRV_2008 0x08
-#define ACPI_OSI_WIN_VISTA_SP1 0x09
-#define ACPI_OSI_WIN_VISTA_SP2 0x0A
-#define ACPI_OSI_WIN_7 0x0B
-#define ACPI_OSI_WIN_8 0x0C
+#define ACPI_OSI_DARWIN 0x01
+#define ACPI_OSI_WIN_2000 0x02
+#define ACPI_OSI_WIN_XP 0x03
+#define ACPI_OSI_WIN_XP_SP1 0x04
+#define ACPI_OSI_WINSRV_2003 0x05
+#define ACPI_OSI_WIN_XP_SP2 0x06
+#define ACPI_OSI_WINSRV_2003_SP1 0x07
+#define ACPI_OSI_WIN_VISTA 0x08
+#define ACPI_OSI_WINSRV_2008 0x09
+#define ACPI_OSI_WIN_VISTA_SP1 0x0A
+#define ACPI_OSI_WIN_VISTA_SP2 0x0B
+#define ACPI_OSI_WIN_7 0x0C
+#define ACPI_OSI_WIN_8 0x0D
#endif /* __ACTYPES_H__ */
--
Matthew Garrett | mjg59@srcf.ucam.org
^ permalink raw reply related [flat|nested] 23+ messages in thread* Re: [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed)
2014-03-08 2:40 ` Matthew Garrett
@ 2014-03-11 13:08 ` Andreas Noever
2014-03-11 14:00 ` Matthew Garrett
0 siblings, 1 reply; 23+ messages in thread
From: Andreas Noever @ 2014-03-11 13:08 UTC (permalink / raw)
To: Matthew Garrett; +Cc: linux-kernel@vger.kernel.org
On Sat, Mar 8, 2014 at 3:40 AM, Matthew Garrett <mjg59@srcf.ucam.org> wrote:
> Ok, can you try this one?
>
That seems to do it. I was afraid that setting OSDW globally would
affect other parts (USB?), but that does not seem to be the case.
What is the reason for clearing the PME flag?
Andreas
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [PATCH 00/12] Thunderbolt hotplug support for Apple hardware (testers needed)
2014-03-11 13:08 ` Andreas Noever
@ 2014-03-11 14:00 ` Matthew Garrett
0 siblings, 0 replies; 23+ messages in thread
From: Matthew Garrett @ 2014-03-11 14:00 UTC (permalink / raw)
To: Andreas Noever; +Cc: linux-kernel@vger.kernel.org
[-- Attachment #1: Type: text/plain, Size: 729 bytes --]
On Tue, Mar 11, 2014 at 02:08:55PM +0100, Andreas Noever wrote:
> That seems to do it. I was afraid that setting OSDW globally would
> affect other parts (USB?), but that does not seem to be the case.
I think it results in the USB ports coming up in XHCI mode by default,
but we can handle that. There's one other thing - the battery switches
from being a control method battery to being an SBS one, and Apple's
implementation seems to break our driver. I've got a trivial patch that
makes it work (attached).
> What is the reason for clearing the PME flag?
It looks like the PME flag gets cleared on the _OSC route in non-Darwin
mode? I'll go back and check that to make sure.
--
Matthew Garrett | mjg59@srcf.ucam.org
[-- Attachment #2: 0021-ACPI-Don-t-re-select-SBS-battery-if-it-s-already-sel.patch --]
[-- Type: text/plain, Size: 1808 bytes --]
>From 2a8ffff11f91aad146916ca14c768ebc407865aa Mon Sep 17 00:00:00 2001
From: Matthew Garrett <matthew.garrett@nebula.com>
Date: Mon, 10 Mar 2014 14:44:55 -0400
Subject: [PATCH 21/28] ACPI: Don't re-select SBS battery if it's already
selected
The existing SBS code explicitly sets the selected battery in the SBS
manager regardless of whether the battery in question is already selected.
This causes bus timeouts on Apple hardware. Check for this case and avoid
it.
Signed-off-by: Matthew Garrett <matthew.garrett@nebula.com>
---
drivers/acpi/sbs.c | 18 +++++++++++++-----
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index dbd4849..c386505 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -470,17 +470,25 @@ static struct device_attribute alarm_attr = {
static int acpi_battery_read(struct acpi_battery *battery)
{
int result = 0, saved_present = battery->present;
- u16 state;
+ u16 state, selected, desired;
if (battery->sbs->manager_present) {
result = acpi_smbus_read(battery->sbs->hc, SMBUS_READ_WORD,
ACPI_SBS_MANAGER, 0x01, (u8 *)&state);
if (!result)
battery->present = state & (1 << battery->id);
- state &= 0x0fff;
- state |= 1 << (battery->id + 12);
- acpi_smbus_write(battery->sbs->hc, SMBUS_WRITE_WORD,
- ACPI_SBS_MANAGER, 0x01, (u8 *)&state, 2);
+ /*
+ * Don't switch battery if the correct one is already selected
+ */
+ selected = state & 0xf000;
+ desired = 1 << (battery->id + 12);
+ if (selected != desired) {
+ state &= 0x0fff;
+ state |= desired;
+ acpi_smbus_write(battery->sbs->hc, SMBUS_WRITE_WORD,
+ ACPI_SBS_MANAGER, 0x01,
+ (u8 *)&state, 2);
+ }
} else if (battery->id == 0)
battery->present = 1;
if (result || !battery->present)
--
1.8.5.3
^ permalink raw reply related [flat|nested] 23+ messages in thread