* [PATCH] cleanup swiotlb.c a bit
@ 2005-01-06 17:45 Jesse Barnes
2005-01-06 19:38 ` David Mosberger
` (12 more replies)
0 siblings, 13 replies; 14+ messages in thread
From: Jesse Barnes @ 2005-01-06 17:45 UTC (permalink / raw)
To: linux-ia64
[-- Attachment #1: Type: text/plain, Size: 427 bytes --]
This patch mostly cleans up trailing whitespace and long lines, but also
changes a few printks and gets rid of some unnecessary {} blocks. Does it
look ok to you David? I was thinking it might be nice to abstract it
slightly more to make the swiotlb functions callable from a platform's
regular PCI mapping routines as needed, since swiotlb assumes that physical
addresses and bus addresses are the same.
Thanks,
Jesse
[-- Attachment #2: swiotlb-cleanup.patch --]
[-- Type: text/plain, Size: 22690 bytes --]
===== arch/ia64/lib/swiotlb.c 1.20 vs edited =====
--- 1.20/arch/ia64/lib/swiotlb.c 2004-10-19 23:41:20 -07:00
+++ edited/arch/ia64/lib/swiotlb.c 2005-01-05 13:59:06 -08:00
@@ -44,41 +44,44 @@
#define IO_TLB_SEGSIZE 128
/*
- * log of the size of each IO TLB slab. The number of slabs is command line controllable.
+ * log of the size of each IO TLB slab. The number of slabs is command line
+ * controllable.
*/
#define IO_TLB_SHIFT 11
int swiotlb_force;
/*
- * Used to do a quick range check in swiotlb_unmap_single and swiotlb_sync_single_*, to see
- * if the memory was in fact allocated by this API.
+ * Used to do a quick range check in swiotlb_unmap_single and
+ * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
+ * API.
*/
static char *io_tlb_start, *io_tlb_end;
/*
- * The number of IO TLB blocks (in groups of 64) betweeen io_tlb_start and io_tlb_end.
- * This is command line adjustable via setup_io_tlb_npages.
+ * The number of IO TLB blocks (in groups of 64) betweeen io_tlb_start and
+ * io_tlb_end. This is command line adjustable via setup_io_tlb_npages.
* Default to 64MB.
*/
static unsigned long io_tlb_nslabs = 32768;
-/*
+/*
* When the IOMMU overflows we return a fallback buffer. This sets the size.
*/
static unsigned long io_tlb_overflow = 32*1024;
-void *io_tlb_overflow_buffer;
+void *io_tlb_overflow_buffer;
/*
- * This is a free list describing the number of free entries available from each index
+ * This is a free list describing the number of free entries available from
+ * each index
*/
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;
/*
- * We need to save away the original address corresponding to a mapped entry for the sync
- * operations.
+ * We need to save away the original address corresponding to a mapped entry
+ * for the sync operations.
*/
static unsigned char **io_tlb_orig_addr;
@@ -88,10 +91,11 @@
static spinlock_t io_tlb_lock = SPIN_LOCK_UNLOCKED;
static int __init
-setup_io_tlb_npages (char *str)
+setup_io_tlb_npages(char *str)
{
- if (isdigit(*str)) {
- io_tlb_nslabs = simple_strtoul(str, &str, 0) << (PAGE_SHIFT - IO_TLB_SHIFT);
+ if (isdigit(*str)) {
+ io_tlb_nslabs = simple_strtoul(str, &str, 0) <<
+ (PAGE_SHIFT - IO_TLB_SHIFT);
/* avoid tail segment of size < IO_TLB_SEGSIZE */
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
@@ -105,18 +109,19 @@
/* make io_tlb_overflow tunable too? */
/*
- * Statically reserve bounce buffer space and initialize bounce buffer data structures for
- * the software IO TLB used to implement the PCI DMA API.
+ * Statically reserve bounce buffer space and initialize bounce buffer data
+ * structures for the software IO TLB used to implement the PCI DMA API.
*/
void
-swiotlb_init (void)
+swiotlb_init(void)
{
unsigned long i;
/*
* Get IO TLB memory from the low pages
*/
- io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+ io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs *
+ (1 << IO_TLB_SHIFT));
if (!io_tlb_start)
panic("Cannot allocate SWIOTLB buffer");
io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
@@ -131,28 +136,30 @@
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
io_tlb_index = 0;
io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
-
- /*
- * Get the overflow emergency buffer
+
+ /*
+ * Get the overflow emergency buffer
*/
- io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
+ io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
}
-static inline int address_needs_mapping(struct device *hwdev, dma_addr_t addr)
-{
- dma_addr_t mask = 0xffffffff;
- if (hwdev && hwdev->dma_mask)
- mask = *hwdev->dma_mask;
- return (addr & ~mask) != 0;
-}
+static inline int
+address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+{
+ dma_addr_t mask = 0xffffffff;
+ /* If the device has a mask, use it, otherwise default to 32 bits */
+ if (hwdev && hwdev->dma_mask)
+ mask = *hwdev->dma_mask;
+ return (addr & ~mask) != 0;
+}
/*
* Allocates bounce buffer and returns its kernel virtual address.
*/
static void *
-map_single (struct device *hwdev, char *buffer, size_t size, int dir)
+map_single(struct device *hwdev, char *buffer, size_t size, int dir)
{
unsigned long flags;
char *dma_addr;
@@ -160,11 +167,11 @@
int i;
/*
- * For mappings greater than a page size, we limit the stride (and hence alignment)
- * to a page size.
+ * For mappings greater than a page, we limit the stride (and
+ * hence alignment) to a page size.
*/
nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
- if (size > (1 << PAGE_SHIFT))
+ if (size > PAGE_SIZE)
stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
else
stride = 1;
@@ -173,54 +180,55 @@
BUG();
/*
- * Find suitable number of IO TLB entries size that will fit this request and
- * allocate a buffer from that IO TLB pool.
+ * Find suitable number of IO TLB entries size that will fit this
+ * request and allocate a buffer from that IO TLB pool.
*/
spin_lock_irqsave(&io_tlb_lock, flags);
- {
- wrap = index = ALIGN(io_tlb_index, stride);
+ wrap = index = ALIGN(io_tlb_index, stride);
- if (index >= io_tlb_nslabs)
- wrap = index = 0;
+ if (index >= io_tlb_nslabs)
+ wrap = index = 0;
+
+ do {
+ /*
+ * If we find a slot that indicates we have 'nslots'
+ * number of contiguous buffers, we allocate the
+ * buffers from that slot and mark the entries as '0'
+ * indicating unavailable.
+ */
+ if (io_tlb_list[index] >= nslots) {
+ int count = 0;
+
+ for (i = index; i < (int) (index + nslots); i++)
+ io_tlb_list[i] = 0;
+ for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
+ io_tlb_list[i] = ++count;
+ dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
- do {
/*
- * If we find a slot that indicates we have 'nslots' number of
- * contiguous buffers, we allocate the buffers from that slot and
- * mark the entries as '0' indicating unavailable.
+ * Update the indices to avoid searching in the next
+ * round.
*/
- if (io_tlb_list[index] >= nslots) {
- int count = 0;
+ io_tlb_index = ((index + nslots) < io_tlb_nslabs
+ ? (index + nslots) : 0);
- for (i = index; i < (int) (index + nslots); i++)
- io_tlb_list[i] = 0;
- for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1)
- && io_tlb_list[i]; i--)
- io_tlb_list[i] = ++count;
- dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
-
- /*
- * Update the indices to avoid searching in the next round.
- */
- io_tlb_index = ((index + nslots) < io_tlb_nslabs
- ? (index + nslots) : 0);
+ goto found;
+ }
+ index += stride;
+ if (index >= io_tlb_nslabs)
+ index = 0;
+ } while (index != wrap);
- goto found;
- }
- index += stride;
- if (index >= io_tlb_nslabs)
- index = 0;
- } while (index != wrap);
+ spin_unlock_irqrestore(&io_tlb_lock, flags);
+ return NULL;
- spin_unlock_irqrestore(&io_tlb_lock, flags);
- return NULL;
- }
found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
/*
- * Save away the mapping from the original address to the DMA address. This is
- * needed when we sync the memory. Then we sync the buffer if needed.
+ * Save away the mapping from the original address to the DMA address.
+ * This is needed when we sync the memory. Then we sync the buffer if
+ * needed.
*/
io_tlb_orig_addr[index] = buffer;
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
@@ -233,10 +241,10 @@
* dma_addr is the kernel virtual address of the bounce buffer to unmap.
*/
static void
-unmap_single (struct device *hwdev, char *dma_addr, size_t size, int dir)
+unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
unsigned long flags;
- int i, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+ int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
char *buffer = io_tlb_orig_addr[index];
@@ -245,40 +253,37 @@
*/
if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
/*
- * bounce... copy the data back into the original buffer * and delete the
- * bounce buffer.
+ * bounce... copy the data back into the original buffer * and
+ * delete the bounce buffer.
*/
memcpy(buffer, dma_addr, size);
/*
- * Return the buffer to the free list by setting the corresponding entries to
- * indicate the number of contigous entries available. While returning the
- * entries to the free list, we merge the entries with slots below and above the
- * pool being returned.
+ * Return the buffer to the free list by setting the corresponding
+ * entries to indicate the number of contigous entries available.
+ * While returning the entries to the free list, we merge the entries
+ * with slots below and above the pool being returned.
*/
spin_lock_irqsave(&io_tlb_lock, flags);
- {
- int count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
- io_tlb_list[index + nslots] : 0);
- /*
- * Step 1: return the slots to the free list, merging the slots with
- * superceeding slots
- */
- for (i = index + nslots - 1; i >= index; i--)
- io_tlb_list[i] = ++count;
- /*
- * Step 2: merge the returned slots with the preceding slots, if
- * available (non zero)
- */
- for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) &&
- io_tlb_list[i]; i--)
- io_tlb_list[i] = ++count;
- }
+ count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
+ io_tlb_list[index + nslots] : 0);
+ /*
+ * Step 1: return the slots to the free list, merging the
+ * slots with superceeding slots
+ */
+ for (i = index + nslots - 1; i >= index; i--)
+ io_tlb_list[i] = ++count;
+ /*
+ * Step 2: merge the returned slots with the preceding slots,
+ * if available (non zero)
+ */
+ for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
+ io_tlb_list[i] = ++count;
spin_unlock_irqrestore(&io_tlb_lock, flags);
}
static void
-sync_single (struct device *hwdev, char *dma_addr, size_t size, int dir)
+sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
char *buffer = io_tlb_orig_addr[index];
@@ -296,17 +301,22 @@
}
void *
-swiotlb_alloc_coherent (struct device *hwdev, size_t size, dma_addr_t *dma_handle, int flags)
+swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, int flags)
{
unsigned long dev_addr;
void *ret;
- /* XXX fix me: the DMA API should pass us an explicit DMA mask instead: */
+ /*
+ * XXX fix me: the DMA API should pass us an explicit DMA mask
+ * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
+ * bit range instead of a 16MB one).
+ */
flags |= GFP_DMA;
ret = (void *)__get_free_pages(flags, get_order(size));
if (!ret) {
- /* DMA_FROM_DEVICE is to avoid the memcpy in map_single */
+ /* DMA_FROM_DEVICE is to avoid the memcpy in map_single */
dma_addr_t handle;
handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
if (dma_mapping_error(handle))
@@ -317,14 +327,21 @@
memset(ret, 0, size);
dev_addr = virt_to_phys(ret);
- if (address_needs_mapping(hwdev,dev_addr))
- panic("swiotlb_alloc_consistent: allocated memory is out of range for device");
+
+ /* Confirm address can be DMA'd by device */
+ if (address_needs_mapping(hwdev, dev_addr)) {
+ printk("hwdev DMA mask = 0x%016lx, dev_addr = 0x%016lx\n",
+ *hwdev->dma_mask, dev_addr);
+ panic("swiotlb_alloc_coherent: allocated memory is out of "
+ "range for device");
+ }
*dma_handle = dev_addr;
return ret;
}
void
-swiotlb_free_coherent (struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
+swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+ dma_addr_t dma_handle)
{
if (!(vaddr >= (void *)io_tlb_start
&& vaddr < (void *)io_tlb_end))
@@ -334,66 +351,63 @@
swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
}
-static void swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
+static void
+swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
- /*
+ /*
* Ran out of IOMMU space for this operation. This is very bad.
* Unfortunately the drivers cannot handle this operation properly.
* unless they check for pci_dma_mapping_error (most don't)
* When the mapping is small enough return a static buffer to limit
- * the damage, or panic when the transfer is too big.
- */
-
- printk(KERN_ERR
- "PCI-DMA: Out of SW-IOMMU space for %lu bytes at device %s\n",
- size, dev ? dev->bus_id : "?");
+ * the damage, or panic when the transfer is too big.
+ */
+ printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
+ "device %s\n", size, dev ? dev->bus_id : "?");
if (size > io_tlb_overflow && do_panic) {
if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
panic("PCI-DMA: Memory would be corrupted\n");
- if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
- panic("PCI-DMA: Random memory would be DMAed\n");
- }
-}
+ if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
+ panic("PCI-DMA: Random memory would be DMAed\n");
+ }
+}
/*
- * Map a single buffer of the indicated size for DMA in streaming mode. The PCI address
- * to use is returned.
+ * Map a single buffer of the indicated size for DMA in streaming mode. The
+ * PCI address to use is returned.
*
- * Once the device is given the dma address, the device owns this memory until either
- * swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
+ * Once the device is given the dma address, the device owns this memory until
+ * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
*/
dma_addr_t
-swiotlb_map_single (struct device *hwdev, void *ptr, size_t size, int dir)
+swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
{
unsigned long dev_addr = virt_to_phys(ptr);
- void *map;
+ void *map;
if (dir == DMA_NONE)
BUG();
/*
- * Check if the PCI device can DMA to ptr... if so, just return ptr
+ * If the pointer passed in happens to be in the device's DMA window,
+ * we can safely return the device addr and not worry about bounce
+ * buffering it.
*/
- if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
- /*
- * Device is bit capable of DMA'ing to the buffer... just return the PCI
- * address of ptr
- */
+ if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
return dev_addr;
/*
- * get a bounce buffer:
+ * Oh well, have to allocate and map a bounce buffer.
*/
map = map_single(hwdev, ptr, size, dir);
- if (!map) {
- swiotlb_full(hwdev, size, dir, 1);
- map = io_tlb_overflow_buffer;
+ if (!map) {
+ swiotlb_full(hwdev, size, dir, 1);
+ map = io_tlb_overflow_buffer;
}
dev_addr = virt_to_phys(map);
/*
- * Ensure that the address returned is DMA'ble:
+ * Ensure that the address returned is DMA'ble
*/
if (address_needs_mapping(hwdev, dev_addr))
panic("map_single: bounce buffer is not DMA'ble");
@@ -407,7 +421,7 @@
* flush them when they get mapped into an executable vm-area.
*/
static void
-mark_clean (void *addr, size_t size)
+mark_clean(void *addr, size_t size)
{
unsigned long pg_addr, end;
@@ -421,15 +435,16 @@
}
/*
- * Unmap a single streaming mode DMA translation. The dma_addr and size must match what
- * was provided for in a previous swiotlb_map_single call. All other usages are
- * undefined.
+ * Unmap a single streaming mode DMA translation. The dma_addr and size must
+ * match what was provided for in a previous swiotlb_map_single call. All
+ * other usages are undefined.
*
- * After this call, reads by the cpu to the buffer are guaranteed to see whatever the
- * device wrote there.
+ * After this call, reads by the cpu to the buffer are guaranteed to see
+ * whatever the device wrote there.
*/
void
-swiotlb_unmap_single (struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir)
+swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
+ int dir)
{
char *dma_addr = phys_to_virt(dev_addr);
@@ -442,16 +457,18 @@
}
/*
- * Make physical memory consistent for a single streaming mode DMA translation after a
- * transfer.
+ * Make physical memory consistent for a single streaming mode DMA translation
+ * after a transfer.
*
- * If you perform a swiotlb_map_single() but wish to interrogate the buffer using the cpu,
- * yet do not wish to teardown the PCI dma mapping, you must call this function before
- * doing so. At the next point you give the PCI dma address back to the card, you must
- * first perform a swiotlb_dma_sync_for_device, and then the device again owns the buffer
+ * If you perform a swiotlb_map_single() but wish to interrogate the buffer
+ * using the cpu, yet do not wish to teardown the PCI dma mapping, you must
+ * call this function before doing so. At the next point you give the PCI dma
+ * address back to the card, you must first perform a
+ * swiotlb_dma_sync_for_device, and then the device again owns the buffer
*/
void
-swiotlb_sync_single_for_cpu (struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir)
+swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, int dir)
{
char *dma_addr = phys_to_virt(dev_addr);
@@ -464,7 +481,8 @@
}
void
-swiotlb_sync_single_for_device (struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir)
+swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, int dir)
{
char *dma_addr = phys_to_virt(dev_addr);
@@ -477,10 +495,11 @@
}
/*
- * Map a set of buffers described by scatterlist in streaming mode for DMA. This is the
- * scatter-gather version of the above swiotlb_map_single interface. Here the scatter
- * gather list elements are each tagged with the appropriate dma address and length. They
- * are obtained via sg_dma_{address,length}(SG).
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * This is the scatter-gather version of the above swiotlb_map_single
+ * interface. Here the scatter gather list elements are each tagged with the
+ * appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}(SG).
*
* NOTE: An implementation may be able to use a smaller number of
* DMA address/length pairs than there are SG table elements.
@@ -488,10 +507,12 @@
* The routine returns the number of addr/length pairs actually
* used, at most nents.
*
- * Device ownership issues as mentioned above for swiotlb_map_single are the same here.
+ * Device ownership issues as mentioned above for swiotlb_map_single are the
+ * same here.
*/
int
-swiotlb_map_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int dir)
+swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
+ int dir)
{
void *addr;
unsigned long dev_addr;
@@ -506,9 +527,9 @@
if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
sg->dma_address = (dma_addr_t) virt_to_phys(map_single(hwdev, addr, sg->length, dir));
if (!sg->dma_address) {
- /* Don't panic here, we expect pci_map_sg users
+ /* Don't panic here, we expect map_sg users
to do proper error handling. */
- swiotlb_full(hwdev, sg->length, dir, 0);
+ swiotlb_full(hwdev, sg->length, dir, 0);
swiotlb_unmap_sg(hwdev, sg - i, i, dir);
sg[0].dma_length = 0;
return 0;
@@ -521,11 +542,12 @@
}
/*
- * Unmap a set of streaming mode DMA translations. Again, cpu read rules concerning calls
- * here are the same as for swiotlb_unmap_single() above.
+ * Unmap a set of streaming mode DMA translations. Again, cpu read rules
+ * concerning calls here are the same as for swiotlb_unmap_single() above.
*/
void
-swiotlb_unmap_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int dir)
+swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
+ int dir)
{
int i;
@@ -540,14 +562,15 @@
}
/*
- * Make physical memory consistent for a set of streaming mode DMA translations after a
- * transfer.
+ * Make physical memory consistent for a set of streaming mode DMA translations
+ * after a transfer.
*
- * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules and
- * usage.
+ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
+ * and usage.
*/
void
-swiotlb_sync_sg_for_cpu (struct device *hwdev, struct scatterlist *sg, int nelems, int dir)
+swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
+ int nelems, int dir)
{
int i;
@@ -556,11 +579,13 @@
for (i = 0; i < nelems; i++, sg++)
if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
- sync_single(hwdev, (void *) sg->dma_address, sg->dma_length, dir);
+ sync_single(hwdev, (void *) sg->dma_address,
+ sg->dma_length, dir);
}
void
-swiotlb_sync_sg_for_device (struct device *hwdev, struct scatterlist *sg, int nelems, int dir)
+swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+ int nelems, int dir)
{
int i;
@@ -569,19 +594,21 @@
for (i = 0; i < nelems; i++, sg++)
if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
- sync_single(hwdev, (void *) sg->dma_address, sg->dma_length, dir);
+ sync_single(hwdev, (void *) sg->dma_address,
+ sg->dma_length, dir);
}
int
-swiotlb_dma_mapping_error (dma_addr_t dma_addr)
+swiotlb_dma_mapping_error(dma_addr_t dma_addr)
{
return (dma_addr == virt_to_phys(io_tlb_overflow_buffer));
}
/*
- * Return whether the given PCI device DMA address mask can be supported properly. For
- * example, if your device can only drive the low 24-bits during PCI bus mastering, then
- * you would pass 0x00ffffff as the mask to this function.
+ * Return whether the given PCI device DMA address mask can be supported
+ * properly. For example, if your device can only drive the low 24-bits
+ * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
+ * this function.
*/
int
swiotlb_dma_supported (struct device *hwdev, u64 mask)
^ permalink raw reply [flat|nested] 14+ messages in thread* Re: [PATCH] cleanup swiotlb.c a bit
2005-01-06 17:45 [PATCH] cleanup swiotlb.c a bit Jesse Barnes
@ 2005-01-06 19:38 ` David Mosberger
2005-01-06 19:42 ` Jesse Barnes
` (11 subsequent siblings)
12 siblings, 0 replies; 14+ messages in thread
From: David Mosberger @ 2005-01-06 19:38 UTC (permalink / raw)
To: linux-ia64
>>>>> On Thu, 6 Jan 2005 09:45:12 -0800, Jesse Barnes <jbarnes@engr.sgi.com> said:
Jesse> This patch mostly cleans up trailing whitespace
That's OK, and really needed. I was tempted to do that with my last
swiotlb fix, but didn't want to mix formatting cleanups with real
fixes.
Jesse> and long lines
Well, I don't like how you "fixed" some of them and I continue to be
of the opinion that 100 cols is OK (yes, I know that somebody managed
to get the 80 cols into the kernel formatting document, but that
doesn't change reality...).
For example, something like this:
+ io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs *
+ (1 << IO_TLB_SHIFT));
I find more readable if it's formatted as:
io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs
* (1 << IO_TLB_SHIFT));
Jesse> gets rid of some unnecessary {} blocks.
The blocks were there to indicate locking. I find that useful, even
if Andrew (and perhaps others) disagree.
Jesse> Does it look ok to you David? I was thinking it might be
Jesse> nice to abstract it slightly more to make the swiotlb
Jesse> functions callable from a platform's regular PCI mapping
Jesse> routines as needed, since swiotlb assumes that physical
Jesse> addresses and bus addresses are the same.
Well, it probably should move outside of the ia64 tree anyhow. The
way x86_64 includes swiotlb.c at the moment is just absolutely gross.
--david
^ permalink raw reply [flat|nested] 14+ messages in thread* Re: [PATCH] cleanup swiotlb.c a bit
2005-01-06 17:45 [PATCH] cleanup swiotlb.c a bit Jesse Barnes
2005-01-06 19:38 ` David Mosberger
@ 2005-01-06 19:42 ` Jesse Barnes
2005-01-06 19:47 ` Matthew Wilcox
` (10 subsequent siblings)
12 siblings, 0 replies; 14+ messages in thread
From: Jesse Barnes @ 2005-01-06 19:42 UTC (permalink / raw)
To: linux-ia64
On Thursday, January 6, 2005 11:38 am, David Mosberger wrote:
> >>>>> On Thu, 6 Jan 2005 09:45:12 -0800, Jesse Barnes
> >>>>> <jbarnes@engr.sgi.com> said:
>
> Jesse> This patch mostly cleans up trailing whitespace
>
> That's OK, and really needed. I was tempted to do that with my last
> swiotlb fix, but didn't want to mix formatting cleanups with real
> fixes.
Usually a good idea, I figured that a cleanup patch should stand by itself.
> Jesse> and long lines
>
> Well, I don't like how you "fixed" some of them and I continue to be
> of the opinion that 100 cols is OK (yes, I know that somebody managed
> to get the 80 cols into the kernel formatting document, but that
> doesn't change reality...).
Yeah, I know, but your opinion is wrong :) I often find myself editing files
on VTs or other 80 col terminals, and long lines are a pain...
> For example, something like this:
>
> + io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs *
> + (1 << IO_TLB_SHIFT));
>
> I find more readable if it's formatted as:
>
> io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs
> * (1 << IO_TLB_SHIFT));
I can change that, though I think the former is more common.
> Jesse> gets rid of some unnecessary {} blocks.
>
> The blocks where there to indicate locking. I find that useful, even
> if Andrew (and perhaps others) disagree.
I thought that might be controversial, I can change it back.
> Jesse> Does it look ok to you David? I was thinking it might be
> Jesse> nice to abstract it slightly more to make the swiotlb
> Jesse> functions callable from a platform's regular PCI mapping
> Jesse> routines as needed, since swiotlb assumes that physical
> Jesse> addresses and bus addresses are the same.
>
> Well, it probably should move outside of the ia64 tree anyhow. The
> way x86_64 includes swiotlb.c at the moment is just absolutely gross.
That was going to be step two. include/linux/swiotlb.h and lib/swiotlb.c seem
like the right way to go in the long run, along with a few abstractions.
Thanks,
Jesse
^ permalink raw reply [flat|nested] 14+ messages in thread* Re: [PATCH] cleanup swiotlb.c a bit
2005-01-06 17:45 [PATCH] cleanup swiotlb.c a bit Jesse Barnes
2005-01-06 19:38 ` David Mosberger
2005-01-06 19:42 ` Jesse Barnes
@ 2005-01-06 19:47 ` Matthew Wilcox
2005-01-06 19:48 ` David Mosberger
` (9 subsequent siblings)
12 siblings, 0 replies; 14+ messages in thread
From: Matthew Wilcox @ 2005-01-06 19:47 UTC (permalink / raw)
To: linux-ia64
On Thu, Jan 06, 2005 at 11:38:07AM -0800, David Mosberger wrote:
> For example, something like this:
>
> + io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs *
> + (1 << IO_TLB_SHIFT));
>
> I find more readable if it's formatted as:
>
> io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs
> * (1 << IO_TLB_SHIFT));
I find the former more readable. It lets you know there's something
coming on the next line.
> Well, it probably should move outside of the ia64 tree anyhow. The
> way x86_64 includes swiotlb.c at the moment is just absolutely gross.
kernel/swiotlb.c would make most sense, I guess?
--
"Next the statesmen will invent cheap lies, putting the blame upon
the nation that is attacked, and every man will be glad of those
conscience-soothing falsities, and will diligently study them, and refuse
to examine any refutations of them; and thus he will by and by convince
himself that the war is just, and will thank God for the better sleep
he enjoys after this process of grotesque self-deception." -- Mark Twain
^ permalink raw reply [flat|nested] 14+ messages in thread* Re: [PATCH] cleanup swiotlb.c a bit
2005-01-06 17:45 [PATCH] cleanup swiotlb.c a bit Jesse Barnes
` (2 preceding siblings ...)
2005-01-06 19:47 ` Matthew Wilcox
@ 2005-01-06 19:48 ` David Mosberger
2005-01-06 19:50 ` Jesse Barnes
` (8 subsequent siblings)
12 siblings, 0 replies; 14+ messages in thread
From: David Mosberger @ 2005-01-06 19:48 UTC (permalink / raw)
To: linux-ia64
>>>>> On Thu, 6 Jan 2005 11:42:52 -0800, Jesse Barnes <jbarnes@engr.sgi.com> said:
>> Well, I don't like how you "fixed" some of them and I continue
>> to be of the opinion that 100 cols is OK (yes, I know that
>> somebody managed to get the 80 cols into the kernel formatting
>> document, but that doesn't change reality...).
Jesse> Yeah, I know, but your opinion is wrong :) I often find
Jesse> myself editing files on VTs or other 80 col terminals, and
Jesse> long lines are a pain...
It's not opinion, it's reality. Unless someone goes through all the
files in the top-level directories and formats them to 80 cols, this
won't change.
--david
^ permalink raw reply [flat|nested] 14+ messages in thread* Re: [PATCH] cleanup swiotlb.c a bit
2005-01-06 17:45 [PATCH] cleanup swiotlb.c a bit Jesse Barnes
` (3 preceding siblings ...)
2005-01-06 19:48 ` David Mosberger
@ 2005-01-06 19:50 ` Jesse Barnes
2005-01-06 20:00 ` David Mosberger
` (7 subsequent siblings)
12 siblings, 0 replies; 14+ messages in thread
From: Jesse Barnes @ 2005-01-06 19:50 UTC (permalink / raw)
To: linux-ia64
On Thursday, January 6, 2005 11:48 am, David Mosberger wrote:
> >>>>> On Thu, 6 Jan 2005 11:42:52 -0800, Jesse Barnes
> >>>>> <jbarnes@engr.sgi.com> said:
> >>
> >> Well, I don't like how you "fixed" some of them and I continue
> >> to be of the opinion that 100 cols is OK (yes, I know that
> >> somebody managed to get the 80 cols into the kernel formatting
> >> document, but that doesn't change reality...).
>
> Jesse> Yeah, I know, but your opinion is wrong :) I often find
> Jesse> myself editing files on VTs or other 80 col terminals, and
> Jesse> long lines are a pain...
>
> It's not opinion, it's reality. Unless someone goes through all the
> files in the top-level directories and formats them to 80 cols, this
> won't change.
I'd be happy to do that if you're ok with it. The long lines have always
irritated me a little. I think I understand why you like them though--with
long lines you can fit slightly more code on the screen vertically, which can
be a help, but I don't think they're common enough for that to be very
compelling...
Jesse
^ permalink raw reply [flat|nested] 14+ messages in thread* Re: [PATCH] cleanup swiotlb.c a bit
2005-01-06 17:45 [PATCH] cleanup swiotlb.c a bit Jesse Barnes
` (4 preceding siblings ...)
2005-01-06 19:50 ` Jesse Barnes
@ 2005-01-06 20:00 ` David Mosberger
2005-01-06 20:00 ` David Mosberger
` (6 subsequent siblings)
12 siblings, 0 replies; 14+ messages in thread
From: David Mosberger @ 2005-01-06 20:00 UTC (permalink / raw)
To: linux-ia64
>>>>> On Thu, 6 Jan 2005 11:50:56 -0800, Jesse Barnes <jbarnes@engr.sgi.com> said:
>> It's not opinion, it's reality. Unless someone goes through all
>> the files in the top-level directories and formats them to 80
>> cols, this won't change.
Jesse> I'd be happy to do that if you're ok with it.
Eh, I'm talking about linux/kernel/* etc, so it's really Andrew and
Linus who'd have to be OK with it.
If you can fix those, I'd be happy to adjust the ia64 files to fit
within 80 cols.
Jesse> The long lines have always irritated me a little. I think I
Jesse> understand why you like them though--with long lines you can
Jesse> fit slightly more code on the screen vertically, which can be
Jesse> a help, but I don't think they're common enough for that to
Jesse> be very compelling...
I don't particularly like 100 cols. I'd be happy if all (major) Linux
kernel files fit into 80 cols, it's just that in reality, you need 100
cols to look at most important kernel files.
--david
^ permalink raw reply [flat|nested] 14+ messages in thread* Re: [PATCH] cleanup swiotlb.c a bit
2005-01-06 17:45 [PATCH] cleanup swiotlb.c a bit Jesse Barnes
` (5 preceding siblings ...)
2005-01-06 20:00 ` David Mosberger
@ 2005-01-06 20:00 ` David Mosberger
2005-01-06 20:01 ` Matthew Wilcox
` (5 subsequent siblings)
12 siblings, 0 replies; 14+ messages in thread
From: David Mosberger @ 2005-01-06 20:00 UTC (permalink / raw)
To: linux-ia64
>>>>> On Thu, 6 Jan 2005 19:47:33 +0000, Matthew Wilcox <matthew@wil.cx> said:
>> Well, it probably should move outside of the ia64 tree anyhow.
>> The way x86_64 includes swiotlb.c at the moment is just
>> absolutely gross.
Matthew> kernel/swiotlb.c would make most sense, I guess?
lib/swiotlb.c, actually.
--david
^ permalink raw reply [flat|nested] 14+ messages in thread* Re: [PATCH] cleanup swiotlb.c a bit
2005-01-06 17:45 [PATCH] cleanup swiotlb.c a bit Jesse Barnes
` (6 preceding siblings ...)
2005-01-06 20:00 ` David Mosberger
@ 2005-01-06 20:01 ` Matthew Wilcox
2005-01-06 20:06 ` Andi Kleen
` (4 subsequent siblings)
12 siblings, 0 replies; 14+ messages in thread
From: Matthew Wilcox @ 2005-01-06 20:01 UTC (permalink / raw)
To: linux-ia64
On Thu, Jan 06, 2005 at 11:48:19AM -0800, David Mosberger wrote:
> >>>>> On Thu, 6 Jan 2005 11:42:52 -0800, Jesse Barnes <jbarnes@engr.sgi.com> said:
>
> >> Well, I don't like how you "fixed" some of them and I continue
> >> to be of the opinion that 100 cols is OK (yes, I know that
> >> somebody managed to get the 80 cols into the kernel formatting
> >> document, but that doesn't change reality...).
>
> Jesse> Yeah, I know, but your opinion is wrong :) I often find
> Jesse> myself editing files on VTs or other 80 col terminals, and
> Jesse> long lines are a pain...
>
> It's not opinion, it's reality. Unless someone goes through all the
> files in the top-level directories and formats them to 80 cols, this
> won't change.
Personally, I don't mind it too much when code wraps a little bit.
But the comments wrapping after 80 columns *really* bugs me. Probably
because code has to be read more slowly than prose anyway.
--
"Next the statesmen will invent cheap lies, putting the blame upon
the nation that is attacked, and every man will be glad of those
conscience-soothing falsities, and will diligently study them, and refuse
to examine any refutations of them; and thus he will by and by convince
himself that the war is just, and will thank God for the better sleep
he enjoys after this process of grotesque self-deception." -- Mark Twain
^ permalink raw reply [flat|nested] 14+ messages in thread* Re: [PATCH] cleanup swiotlb.c a bit
2005-01-06 17:45 [PATCH] cleanup swiotlb.c a bit Jesse Barnes
` (7 preceding siblings ...)
2005-01-06 20:01 ` Matthew Wilcox
@ 2005-01-06 20:06 ` Andi Kleen
2005-01-06 20:07 ` Jesse Barnes
` (3 subsequent siblings)
12 siblings, 0 replies; 14+ messages in thread
From: Andi Kleen @ 2005-01-06 20:06 UTC (permalink / raw)
To: linux-ia64
> > Well, it probably should move outside of the ia64 tree anyhow. The
> > way x86_64 includes swiotlb.c at the moment is just absolutely gross.
>
> kernel/swiotlb.c would make most sense, I guess?
No problem from my side, as long as whoever moves it to some other
place fixes up the x86-64 Makefile at the same time :)
-Andi
^ permalink raw reply [flat|nested] 14+ messages in thread* Re: [PATCH] cleanup swiotlb.c a bit
2005-01-06 17:45 [PATCH] cleanup swiotlb.c a bit Jesse Barnes
` (8 preceding siblings ...)
2005-01-06 20:06 ` Andi Kleen
@ 2005-01-06 20:07 ` Jesse Barnes
2005-01-06 20:10 ` Matthew Wilcox
` (2 subsequent siblings)
12 siblings, 0 replies; 14+ messages in thread
From: Jesse Barnes @ 2005-01-06 20:07 UTC (permalink / raw)
To: linux-ia64
On Thursday, January 6, 2005 12:00 pm, David Mosberger wrote:
> >>>>> On Thu, 6 Jan 2005 11:50:56 -0800, Jesse Barnes
> >>>>> <jbarnes@engr.sgi.com> said:
> >>
> >> It's not opinion, it's reality. Unless someone goes through all
> >> the files in the top-level directories and formats them to 80
> >> cols, this won't change.
>
> Jesse> I'd be happy to do that if you're ok with it.
>
> Eh, I'm talking about linux/kernel/* etc, so it's really Andrew and
> Linus who'd have to be OK with it.
>
> If you can fix those, I'd be happy to adjust the ia64 files to fit
> within 80 cols.
Really? In my experience the core files are generally pretty good (every now
and then a long line creeps in, but there aren't that many that I can see).
Any big offenders that really bug you?
> I don't particularly like 100 cols. I'd be happy if all (major) Linux
> kernel files fit into 80 cols, it's just that in reality, you need 100
> cols to look at most important kernel files.
Well if you don't like them either (that makes none that I know of), shouldn't
we at least fixup the ia64 bits, regardless of what happens with the core
kernel files?
Jesse
^ permalink raw reply [flat|nested] 14+ messages in thread* Re: [PATCH] cleanup swiotlb.c a bit
2005-01-06 17:45 [PATCH] cleanup swiotlb.c a bit Jesse Barnes
` (9 preceding siblings ...)
2005-01-06 20:07 ` Jesse Barnes
@ 2005-01-06 20:10 ` Matthew Wilcox
2005-01-06 20:11 ` Luck, Tony
2005-01-06 21:19 ` David Mosberger
12 siblings, 0 replies; 14+ messages in thread
From: Matthew Wilcox @ 2005-01-06 20:10 UTC (permalink / raw)
To: linux-ia64
On Thu, Jan 06, 2005 at 12:00:11PM -0800, David Mosberger wrote:
> >> It's not opinion, it's reality. Unless someone goes through all
> >> the files in the top-level directories and formats them to 80
> >> cols, this won't change.
>
> Jesse> I'd be happy to do that if you're ok with it.
>
> Eh, I'm talking about linux/kernel/* etc, so it's really Andrew and
> Linus who'd have to be OK with it.
>
> If you can fix those, I'd be happy to adjust the ia64 files to fit
> within 80 cols.
$ echo `for i in kernel/*.c; do expand $i |grep -c -E '^.{81,}$'; done`
0 0 0 3 7 5 0 0 0 0 15 0 12 0 4 1 6 0 0 0 0 0 3 1 2 0 4 13 1 4 0 11 19 11 2
0 0 22 0 9 4 12 4 2 1 1
$ echo `for i in arch/ia64/kernel/*.c; do expand $i |grep -c -E '^.{81,}$'; done`
4 31 62 6 6 0 191 3 2 52 15 7 0 3 79 19 108 53 16 398 15 47 183 1 27 0 43 87
18 36 19 22 44 110 176 0
Very few exceptions to the 80-column rule in kernel/*.c. A ridiculous
number of exceptions in arch/ia64/kernel/*.c.
--
"Next the statesmen will invent cheap lies, putting the blame upon
the nation that is attacked, and every man will be glad of those
conscience-soothing falsities, and will diligently study them, and refuse
to examine any refutations of them; and thus he will by and by convince
himself that the war is just, and will thank God for the better sleep
he enjoys after this process of grotesque self-deception." -- Mark Twain
^ permalink raw reply [flat|nested] 14+ messages in thread* RE: [PATCH] cleanup swiotlb.c a bit
2005-01-06 17:45 [PATCH] cleanup swiotlb.c a bit Jesse Barnes
` (10 preceding siblings ...)
2005-01-06 20:10 ` Matthew Wilcox
@ 2005-01-06 20:11 ` Luck, Tony
2005-01-06 21:19 ` David Mosberger
12 siblings, 0 replies; 14+ messages in thread
From: Luck, Tony @ 2005-01-06 20:11 UTC (permalink / raw)
To: linux-ia64
>I find the former more readable. It lets you know there's something
>coming on the next line.
I too prefer to leave the binary operator trailing on the first line,
scripts/Lindent seems to do this too for the cases that I tried, though
it makes poor choices on which binary operator to leave dangling in a
long expression ... it just makes the first line as long as possible
without going over the 80 char limit ... which often isn't the best
place to split the line.
>> Well, it probably should move outside of the ia64 tree anyhow. The
>> way x86_64 includes swiotlb.c at the moment is just absolutely gross.
>
>kernel/swiotlb.c would make most sense, I guess?
That would be fine for me ... one less file for me to manage :-)
-Tony
^ permalink raw reply [flat|nested] 14+ messages in thread* Re: [PATCH] cleanup swiotlb.c a bit
2005-01-06 17:45 [PATCH] cleanup swiotlb.c a bit Jesse Barnes
` (11 preceding siblings ...)
2005-01-06 20:11 ` Luck, Tony
@ 2005-01-06 21:19 ` David Mosberger
12 siblings, 0 replies; 14+ messages in thread
From: David Mosberger @ 2005-01-06 21:19 UTC (permalink / raw)
To: linux-ia64
>>>>> On Thu, 6 Jan 2005 12:07:57 -0800, Jesse Barnes <jbarnes@engr.sgi.com> said:
Jesse> Really? In my experience the core files are generally pretty
Jesse> good (every now and then a long line creeps in, but there
Jesse> aren't that many that I can see). Any big offenders that
Jesse> really bug you?
It certainly has improved recently, but there are still enough declarations
etc. that are too wide for 80 cols.
Jesse> Well if you don't like them either (that makes none that I
Jesse> know of), shouldn't we at least fixup the ia64 bits,
Jesse> regardless of what happens with the core kernel files?
I'm not sure that's much of an improvement, but if you think
otherwise, go for it.
--david
^ permalink raw reply [flat|nested] 14+ messages in thread
end of thread, other threads:[~2005-01-06 21:19 UTC | newest]
Thread overview: 14+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2005-01-06 17:45 [PATCH] cleanup swiotlb.c a bit Jesse Barnes
2005-01-06 19:38 ` David Mosberger
2005-01-06 19:42 ` Jesse Barnes
2005-01-06 19:47 ` Matthew Wilcox
2005-01-06 19:48 ` David Mosberger
2005-01-06 19:50 ` Jesse Barnes
2005-01-06 20:00 ` David Mosberger
2005-01-06 20:00 ` David Mosberger
2005-01-06 20:01 ` Matthew Wilcox
2005-01-06 20:06 ` Andi Kleen
2005-01-06 20:07 ` Jesse Barnes
2005-01-06 20:10 ` Matthew Wilcox
2005-01-06 20:11 ` Luck, Tony
2005-01-06 21:19 ` David Mosberger
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox