author		FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2008-03-04 14:28:57 -0800
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-03-04 16:35:14 -0800
commit		cf5401454863df8e6dc3ebe8faad09141cbec187 (patch)
tree		be78c56b34ba8995348fdafa9b6e3985a9e24289
parent		23d7e0390ab57cf15a5cfe8d6806192f0997e5a8 (diff)
alpha: make IOMMU respect the segment boundary limits
This patch makes the IOMMU code not allocate a memory area spanning the low-level driver's (LLD's) segment boundary. is_span_boundary() judges whether a memory area spans the LLD's segment boundary; if iommu_arena_find_pages() finds such an area, it tries to find the next available memory area.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
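
As a quick illustration of the check this patch introduces, here is a minimal user-space sketch of the is_span_boundary() logic (not the kernel code path; the zero arena base and the 16-page boundary window are assumed example values):

/* Sketch of the is_span_boundary() check: an allocation of nr pages
 * starting at arena index `index` crosses a segment boundary when it
 * does not fit in the remainder of its boundary_size-aligned window.
 * `shift` is the arena base in pages, so (shift + index) is the
 * absolute page number of the first page. */
#include <stdio.h>

static inline int is_span_boundary(unsigned int index, unsigned int nr,
				   unsigned long shift,
				   unsigned long boundary_size)
{
	/* Offset of the first page within its boundary window. */
	shift = (shift + index) & (boundary_size - 1);
	/* Crosses iff the run spills past the end of the window. */
	return shift + nr > boundary_size;
}

int main(void)
{
	/* Assume a 16-page boundary window and a zero arena base. */
	unsigned long boundary_size = 16;

	/* 4 pages at index 10: 10 + 4 = 14 <= 16, stays inside. */
	printf("%d\n", is_span_boundary(10, 4, 0, boundary_size)); /* 0 */

	/* 4 pages at index 14: 14 + 4 = 18 > 16, spans the boundary. */
	printf("%d\n", is_span_boundary(14, 4, 0, boundary_size)); /* 1 */
	return 0;
}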
-rw-r--r--	arch/alpha/kernel/pci_iommu.c	40
1 file changed, 34 insertions(+), 6 deletions(-)
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index e54f829528c..54540c369c9 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -126,13 +126,34 @@ iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
 	return iommu_arena_new_node(0, hose, base, window_size, align);
 }
 
+static inline int is_span_boundary(unsigned int index, unsigned int nr,
+				   unsigned long shift,
+				   unsigned long boundary_size)
+{
+	shift = (shift + index) & (boundary_size - 1);
+	return shift + nr > boundary_size;
+}
+
 /* Must be called with the arena lock held */
 static long
-iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
+iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
+		       long n, long mask)
 {
 	unsigned long *ptes;
 	long i, p, nent;
 	int pass = 0;
+	unsigned long base;
+	unsigned long boundary_size;
+
+	BUG_ON(arena->dma_base & ~PAGE_MASK);
+	base = arena->dma_base >> PAGE_SHIFT;
+	if (dev)
+		boundary_size = ALIGN(dma_get_max_seg_size(dev) + 1, PAGE_SIZE)
+			>> PAGE_SHIFT;
+	else
+		boundary_size = ALIGN(1UL << 32, PAGE_SIZE) >> PAGE_SHIFT;
+
+	BUG_ON(!is_power_of_2(boundary_size));
 
 	/* Search forward for the first mask-aligned sequence of N free ptes */
 	ptes = arena->ptes;
@@ -142,6 +163,11 @@ iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
 
 again:
 	while (i < n && p+i < nent) {
+		if (!i && is_span_boundary(p, n, base, boundary_size)) {
+			p = ALIGN(p + 1, mask + 1);
+			goto again;
+		}
+
 		if (ptes[p+i])
 			p = ALIGN(p + i + 1, mask + 1), i = 0;
 		else
@@ -170,7 +196,8 @@ again:
 }
 
 static long
-iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
+iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
+		  unsigned int align)
 {
 	unsigned long flags;
 	unsigned long *ptes;
@@ -181,7 +208,7 @@ iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
 	/* Search for N empty ptes */
 	ptes = arena->ptes;
 	mask = max(align, arena->align_entry) - 1;
-	p = iommu_arena_find_pages(arena, n, mask);
+	p = iommu_arena_find_pages(dev, arena, n, mask);
 	if (p < 0) {
 		spin_unlock_irqrestore(&arena->lock, flags);
 		return -1;
@@ -231,6 +258,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 	unsigned long paddr;
 	dma_addr_t ret;
 	unsigned int align = 0;
+	struct device *dev = pdev ? &pdev->dev : NULL;
 
 	paddr = __pa(cpu_addr);
@@ -278,7 +306,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 	/* Force allocation to 64KB boundary for ISA bridges. */
 	if (pdev && pdev == isa_bridge)
 		align = 8;
-	dma_ofs = iommu_arena_alloc(arena, npages, align);
+	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
 	if (dma_ofs < 0) {
 		printk(KERN_WARNING "pci_map_single failed: "
 		       "could not allocate dma page tables\n");
@@ -565,7 +593,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
 
 	paddr &= ~PAGE_MASK;
 	npages = calc_npages(paddr + size);
-	dma_ofs = iommu_arena_alloc(arena, npages, 0);
+	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
 	if (dma_ofs < 0) {
 		/* If we attempted a direct map above but failed, die. */
 		if (leader->dma_address == 0)
@@ -832,7 +860,7 @@ iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
 
 	/* Search for N empty ptes. */
 	ptes = arena->ptes;
-	p = iommu_arena_find_pages(arena, pg_count, align_mask);
+	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
 	if (p < 0) {
 		spin_unlock_irqrestore(&arena->lock, flags);
 		return -1;
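
For a feel of the boundary_size arithmetic in the first hunk above, here is a standalone sketch with assumed values (Alpha's 8KB pages give PAGE_SHIFT = 13; the 0xffff maximum segment size is a hypothetical stand-in for what dma_get_max_seg_size() would return):

/* Worked sketch of the boundary_size derivation from the first hunk.
 * Assumed values: 8KB pages (PAGE_SHIFT = 13, as on Alpha) and a
 * hypothetical maximum segment size of 0xffff bytes (64KB - 1). */
#include <stdio.h>

#define PAGE_SHIFT	13
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
/* Same rounding the kernel's ALIGN() performs for a power-of-2 a. */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long max_seg_size = 0xffffUL;	/* assumed, 64KB - 1 */
	unsigned long boundary_size;

	/* As in the patch: round the byte limit up to whole pages. */
	boundary_size = ALIGN(max_seg_size + 1, PAGE_SIZE) >> PAGE_SHIFT;

	/* 64KB window / 8KB pages = 8 pages, a power of two, so the
	 * patch's BUG_ON(!is_power_of_2(boundary_size)) is satisfied. */
	printf("boundary_size = %lu pages\n", boundary_size);
	return 0;
}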