
Commit d24f9c6

Milton Miller authored and ozbenh committed on Sep 19, 2011
powerpc: Use the newly added get_required_mask dma_map_ops hook
Now that the generic code has dma_map_ops set, instead of having a messy ifdef & if block in the base dma_get_required_mask hook, push the computation into the dma ops. If the ops fails to set the get_required_mask hook, default to the width of dma_addr_t.

This also corrects ibmebus's ibmebus_dma_supported to require a 64 bit mask. I doubt anything is checking or setting the dma mask on that bus.

Signed-off-by: Milton Miller <[email protected]>
Signed-off-by: Nishanth Aravamudan <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Benjamin Herrenschmidt <[email protected]>
1 parent 3a8f755 commit d24f9c6
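
For reference, the "default to the width of dma_addr_t" fallback described above is the DMA_BIT_MASK(8 * sizeof(dma_addr_t)) expression in the arch/powerpc/kernel/dma.c hunk below. A minimal user-space sketch of what that expression evaluates to, using a stand-in for the kernel's DMA_BIT_MASK() macro and assuming a 64-bit dma_addr_t (both are assumptions made here, not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    /* stand-in for the kernel macro; dma_addr_t assumed 64-bit for this sketch */
    typedef uint64_t dma_addr_t;
    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    int main(void)
    {
            /* the new fallback: a mask as wide as dma_addr_t itself */
            printf("%#llx\n",
                   (unsigned long long)DMA_BIT_MASK(8 * sizeof(dma_addr_t)));
            /* prints 0xffffffffffffffff; a 32-bit dma_addr_t would give 0xffffffff */
            return 0;
    }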

File tree

10 files changed (+68, -34 lines)
 
arch/powerpc/include/asm/device.h  (+2)

@@ -37,4 +37,6 @@ struct pdev_archdata {
 	u64 dma_mask;
 };
 
+#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+
 #endif /* _ASM_POWERPC_DEVICE_H */

arch/powerpc/include/asm/dma-mapping.h  (-3)

@@ -20,8 +20,6 @@
 
 #define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
 
-#define ARCH_HAS_DMA_GET_REQUIRED_MASK
-
 /* Some dma direct funcs must be visible for use in other dma_ops */
 extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 				       dma_addr_t *dma_handle, gfp_t flag);
@@ -71,7 +69,6 @@ static inline unsigned long device_to_mask(struct device *dev)
  */
 #ifdef CONFIG_PPC64
 extern struct dma_map_ops dma_iommu_ops;
-extern u64 dma_iommu_get_required_mask(struct device *dev);
 #endif
 extern struct dma_map_ops dma_direct_ops;
 

arch/powerpc/kernel/dma-iommu.c  (+2, -1)

@@ -90,7 +90,7 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
 		return 1;
 }
 
-u64 dma_iommu_get_required_mask(struct device *dev)
+static u64 dma_iommu_get_required_mask(struct device *dev)
 {
 	struct iommu_table *tbl = get_iommu_table_base(dev);
 	u64 mask;
@@ -111,5 +111,6 @@ struct dma_map_ops dma_iommu_ops = {
 	.dma_supported = dma_iommu_dma_supported,
 	.map_page = dma_iommu_map_page,
 	.unmap_page = dma_iommu_unmap_page,
+	.get_required_mask = dma_iommu_get_required_mask,
 };
 EXPORT_SYMBOL(dma_iommu_ops);

arch/powerpc/kernel/dma-swiotlb.c  (+16)

@@ -24,6 +24,21 @@
 
 unsigned int ppc_swiotlb_enable;
 
+static u64 swiotlb_powerpc_get_required(struct device *dev)
+{
+	u64 end, mask, max_direct_dma_addr = dev->archdata.max_direct_dma_addr;
+
+	end = memblock_end_of_DRAM();
+	if (max_direct_dma_addr && end > max_direct_dma_addr)
+		end = max_direct_dma_addr;
+	end += get_dma_offset(dev);
+
+	mask = 1ULL << (fls64(end) - 1);
+	mask += mask - 1;
+
+	return mask;
+}
+
 /*
  * At the moment, all platforms that use this code only require
  * swiotlb to be used if we're operating on HIGHMEM.  Since
@@ -44,6 +59,7 @@ struct dma_map_ops swiotlb_dma_ops = {
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
 	.sync_sg_for_device = swiotlb_sync_sg_for_device,
 	.mapping_error = swiotlb_dma_mapping_error,
+	.get_required_mask = swiotlb_powerpc_get_required,
 };
 
 void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
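
The end-to-mask arithmetic in swiotlb_powerpc_get_required() above (and again in dma_direct_get_required_mask() in the next file) turns the highest address a device has to reach into an all-ones mask: take the highest set bit of the end address, then fill in every bit below it. A small user-space sketch of those two lines, using a hypothetical fls64() stand-in built on a GCC builtin (an assumption for the sketch, not kernel code), for 2 GiB of DRAM and a zero DMA offset:

    #include <stdio.h>
    #include <stdint.h>

    /* stand-in for the kernel's fls64(): 1-based index of the highest set bit */
    static int fls64_stub(uint64_t x)
    {
            return x ? 64 - __builtin_clzll(x) : 0;
    }

    int main(void)
    {
            uint64_t end = 0x80000000ULL;   /* DRAM ends at 2 GiB, dma offset 0 */
            uint64_t mask;

            mask = 1ULL << (fls64_stub(end) - 1);   /* highest power of two <= end */
            mask += mask - 1;                       /* fill in all lower bits */

            printf("%#llx\n", (unsigned long long)mask);    /* 0xffffffff: a 32-bit mask */
            return 0;
    }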

arch/powerpc/kernel/dma.c  (+16, -25)

@@ -96,6 +96,18 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask)
 #endif
 }
 
+static u64 dma_direct_get_required_mask(struct device *dev)
+{
+	u64 end, mask;
+
+	end = memblock_end_of_DRAM() + get_dma_offset(dev);
+
+	mask = 1ULL << (fls64(end) - 1);
+	mask += mask - 1;
+
+	return mask;
+}
+
 static inline dma_addr_t dma_direct_map_page(struct device *dev,
 					     struct page *page,
 					     unsigned long offset,
@@ -144,6 +156,7 @@ struct dma_map_ops dma_direct_ops = {
 	.dma_supported = dma_direct_dma_supported,
 	.map_page = dma_direct_map_page,
 	.unmap_page = dma_direct_unmap_page,
+	.get_required_mask = dma_direct_get_required_mask,
 #ifdef CONFIG_NOT_COHERENT_CACHE
 	.sync_single_for_cpu = dma_direct_sync_single,
 	.sync_single_for_device = dma_direct_sync_single,
@@ -173,39 +186,17 @@ EXPORT_SYMBOL(dma_set_mask);
 u64 dma_get_required_mask(struct device *dev)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
-	u64 mask, end = 0;
 
 	if (ppc_md.dma_get_required_mask)
 		return ppc_md.dma_get_required_mask(dev);
 
 	if (unlikely(dma_ops == NULL))
 		return 0;
 
-#ifdef CONFIG_PPC64
-	else if (dma_ops == &dma_iommu_ops)
-		return dma_iommu_get_required_mask(dev);
-#endif
-#ifdef CONFIG_SWIOTLB
-	else if (dma_ops == &swiotlb_dma_ops) {
-		u64 max_direct_dma_addr = dev->archdata.max_direct_dma_addr;
-
-		end = memblock_end_of_DRAM();
-		if (max_direct_dma_addr && end > max_direct_dma_addr)
-			end = max_direct_dma_addr;
-		end += get_dma_offset(dev);
-	}
-#endif
-	else if (dma_ops == &dma_direct_ops)
-		end = memblock_end_of_DRAM() + get_dma_offset(dev);
-	else {
-		WARN_ONCE(1, "%s: unknown ops %p\n", __func__, dma_ops);
-		end = memblock_end_of_DRAM();
-	}
+	if (dma_ops->get_required_mask)
+		return dma_ops->get_required_mask(dev);
 
-	mask = 1ULL << (fls64(end) - 1);
-	mask += mask - 1;
-
-	return mask;
+	return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
 }
 EXPORT_SYMBOL_GPL(dma_get_required_mask);

‎arch/powerpc/kernel/ibmebus.c

+7-1
Original file line numberDiff line numberDiff line change
@@ -125,7 +125,12 @@ static void ibmebus_unmap_sg(struct device *dev,
125125

126126
static int ibmebus_dma_supported(struct device *dev, u64 mask)
127127
{
128-
return 1;
128+
return mask == DMA_BIT_MASK(64);
129+
}
130+
131+
static u64 ibmebus_dma_get_required_mask(struct device *dev)
132+
{
133+
return DMA_BIT_MASK(64);
129134
}
130135

131136
static struct dma_map_ops ibmebus_dma_ops = {
@@ -134,6 +139,7 @@ static struct dma_map_ops ibmebus_dma_ops = {
134139
.map_sg = ibmebus_map_sg,
135140
.unmap_sg = ibmebus_unmap_sg,
136141
.dma_supported = ibmebus_dma_supported,
142+
.get_required_mask = ibmebus_dma_get_required_mask,
137143
.map_page = ibmebus_map_page,
138144
.unmap_page = ibmebus_unmap_page,
139145
};
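
As a quick illustration of the ibmebus change called out in the commit message, ibmebus_dma_supported() now accepts only a full 64-bit mask. A user-space model of just that comparison (the _model suffix and the DMA_BIT_MASK stand-in are illustrative, not the kernel code itself):

    #include <stdio.h>
    #include <stdint.h>

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    /* models the new check: only the full 64-bit mask is reported as supported */
    static int ibmebus_dma_supported_model(uint64_t mask)
    {
            return mask == DMA_BIT_MASK(64);
    }

    int main(void)
    {
            printf("%d\n", ibmebus_dma_supported_model(DMA_BIT_MASK(32)));  /* 0 */
            printf("%d\n", ibmebus_dma_supported_model(DMA_BIT_MASK(64)));  /* 1 */
            return 0;
    }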

arch/powerpc/kernel/vio.c  (+6, -1)

@@ -605,6 +605,11 @@ static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
 	return dma_iommu_ops.dma_supported(dev, mask);
 }
 
+static u64 vio_dma_get_required_mask(struct device *dev)
+{
+	return dma_iommu_ops.get_required_mask(dev);
+}
+
 struct dma_map_ops vio_dma_mapping_ops = {
 	.alloc_coherent = vio_dma_iommu_alloc_coherent,
 	.free_coherent = vio_dma_iommu_free_coherent,
@@ -613,7 +618,7 @@ struct dma_map_ops vio_dma_mapping_ops = {
 	.map_page = vio_dma_iommu_map_page,
 	.unmap_page = vio_dma_iommu_unmap_page,
 	.dma_supported = vio_dma_iommu_dma_supported,
-
+	.get_required_mask = vio_dma_get_required_mask,
 };
 
 /**

arch/powerpc/platforms/cell/iommu.c  (+11, -2)

@@ -1161,11 +1161,20 @@ __setup("iommu_fixed=", setup_iommu_fixed);
 
 static u64 cell_dma_get_required_mask(struct device *dev)
 {
+	struct dma_map_ops *dma_ops;
+
 	if (!dev->dma_mask)
 		return 0;
 
-	if (iommu_fixed_disabled && get_dma_ops(dev) == &dma_iommu_ops)
-		return dma_iommu_get_required_mask(dev);
+	if (!iommu_fixed_disabled &&
+	    cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR)
+		return DMA_BIT_MASK(64);
+
+	dma_ops = get_dma_ops(dev);
+	if (dma_ops->get_required_mask)
+		return dma_ops->get_required_mask(dev);
+
+	WARN_ONCE(1, "no get_required_mask in %p ops", dma_ops);
 
 	return DMA_BIT_MASK(64);
 }

arch/powerpc/platforms/ps3/system-bus.c  (+7)

@@ -695,12 +695,18 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
 	return mask >= DMA_BIT_MASK(32);
 }
 
+static u64 ps3_dma_get_required_mask(struct device *_dev)
+{
+	return DMA_BIT_MASK(32);
+}
+
 static struct dma_map_ops ps3_sb_dma_ops = {
 	.alloc_coherent = ps3_alloc_coherent,
 	.free_coherent = ps3_free_coherent,
 	.map_sg = ps3_sb_map_sg,
 	.unmap_sg = ps3_sb_unmap_sg,
 	.dma_supported = ps3_dma_supported,
+	.get_required_mask = ps3_dma_get_required_mask,
 	.map_page = ps3_sb_map_page,
 	.unmap_page = ps3_unmap_page,
 };
@@ -711,6 +717,7 @@ static struct dma_map_ops ps3_ioc0_dma_ops = {
 	.map_sg = ps3_ioc0_map_sg,
 	.unmap_sg = ps3_ioc0_unmap_sg,
 	.dma_supported = ps3_dma_supported,
+	.get_required_mask = ps3_dma_get_required_mask,
 	.map_page = ps3_ioc0_map_page,
 	.unmap_page = ps3_unmap_page,
 };

arch/powerpc/platforms/pseries/iommu.c  (+1, -1)

@@ -1099,7 +1099,7 @@ static u64 dma_get_required_mask_pSeriesLP(struct device *dev)
 		return DMA_BIT_MASK(64);
 	}
 
-	return dma_iommu_get_required_mask(dev);
+	return dma_iommu_ops.get_required_mask(dev);
 }
 
 #else /* CONFIG_PCI */
