mirror of https://github.com/FEX-Emu/linux.git
3fd94c6b1a
The 32-bit PCI code now uses the generic code for assigning unassigned resources, and an algorithm similar to x86's for claiming existing ones. This works far better than the 64-bit code, which basically can only claim existing resources (pci_probe_only=1) or falls apart completely. This merges the two so that the new 32-bit implementation is used for both. 64-bit now gets the new PCI flags for controlling the behaviour, though the old pci_probe_only global is still there for now, to be cleared if you want to.

I kept a pcibios_claim_one_bus() function, mostly based on the old 64-bit code, for use by DLPAR hotplug. It will have to be cleaned up, though I hope it works in the meantime.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
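In the merged header below, pcibios_assign_all_busses() keys off ppc_pci_flags, so a platform opts into bus renumbering by setting the corresponding flag before PCI probing. A minimal sketch of that, assuming ppc_pci_flags and PPC_PCI_REASSIGN_ALL_BUS are declared by <asm/pci-bridge.h> (which this header includes); the setup-hook name is hypothetical:

	#include <linux/init.h>
	#include <asm/pci-bridge.h>	/* ppc_pci_flags, PPC_PCI_REASSIGN_ALL_BUS (assumed here) */

	/* Hypothetical platform setup hook: have the kernel renumber all
	 * PCI buses rather than keeping the firmware's numbering. */
	static void __init example_platform_setup(void)
	{
		ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
	}

Per the commit message, 64-bit platforms that want the old claim-only behaviour can simply leave the pci_probe_only global set for now.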
229 lines
6.2 KiB
C
#ifndef __ASM_POWERPC_PCI_H
#define __ASM_POWERPC_PCI_H
#ifdef __KERNEL__

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>

#include <asm/machdep.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>

#include <asm-generic/pci-dma-compat.h>

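/* Lowest addresses the kernel will hand out when assigning new PCI
 * I/O port and memory ranges. */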
#define PCIBIOS_MIN_IO		0x1000
#define PCIBIOS_MIN_MEM		0x10000000

struct pci_dev;

/* Values for the `which' argument to sys_pciconfig_iobase syscall.  */
#define IOBASE_BRIDGE_NUMBER	0
#define IOBASE_MEMORY		1
#define IOBASE_IO		2
#define IOBASE_ISA_IO		3
#define IOBASE_ISA_MEM		4

/*
 * Set this to 1 if you want the kernel to re-assign all PCI
 * bus numbers (don't do that on ppc64 yet !)
 */
#define pcibios_assign_all_busses()	(ppc_pci_flags & \
					 PPC_PCI_REASSIGN_ALL_BUS)
#define pcibios_scan_all_fns(a, b)	0

static inline void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

static inline void pcibios_penalize_isa_irq(int irq, int active)
{
	/* We don't do dynamic PCI IRQ allocation */
}
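
/*
 * Legacy IDE interrupt lookup: a platform may override this through
 * ppc_md.pci_get_legacy_ide_irq(); otherwise fall back to the
 * traditional ISA IRQs (14 for the primary channel, 15 for the
 * secondary one).
 */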
#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
	if (ppc_md.pci_get_legacy_ide_irq)
		return ppc_md.pci_get_legacy_ide_irq(dev, channel);
	return channel ? 15 : 14;
}

#ifdef CONFIG_PPC64

/*
 * We want to avoid touching the cacheline size or MWI bit.
 * pSeries firmware sets the cacheline size (which is not the cpu cacheline
 * size in all cases) and hardware treats MWI the same as memory write.
 */
#define PCI_DISABLE_MWI

#ifdef CONFIG_PCI
extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops);
extern struct dma_mapping_ops *get_pci_dma_ops(void);

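/*
 * Reports PCI_DMA_BURST_MULTIPLE sized by the device's programmed cache
 * line size: PCI_CACHE_LINE_SIZE is in 32-bit words (hence the multiply
 * by 4), and a value of 0 means "not set", so assume 1024 bytes.
 */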
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	unsigned long cacheline_size;
	u8 byte;

	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	*strat = PCI_DMA_BURST_MULTIPLE;
	*strategy_parameter = cacheline_size;
}
#else	/* CONFIG_PCI */
#define set_pci_dma_ops(d)
#define get_pci_dma_ops()	NULL
#endif

#else /* 32-bit */

#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	*strat = PCI_DMA_BURST_INFINITY;
	*strategy_parameter = ~0UL;
}
#endif
#endif /* CONFIG_PPC64 */

extern int pci_domain_nr(struct pci_bus *bus);

/* Decide whether to display the domain number in /proc */
extern int pci_proc_domain(struct pci_bus *bus);

struct vm_area_struct;
/* Map a range of PCI memory or I/O space for a device into user space */
int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine);

/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
#define HAVE_PCI_MMAP	1

#if defined(CONFIG_PPC64) || defined(CONFIG_NOT_COHERENT_CACHE)
/*
 * For 64-bit kernels, pci_unmap_{single,page} is not a nop.
 * For 32-bit non-coherent kernels, pci_dma_sync_single_for_cpu() and
 * so on are not nops.
 * and thus...
 */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
	dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
	__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME)		\
	((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	\
	(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME)		\
	((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
	(((PTR)->LEN_NAME) = (VAL))

#else /* 32-bit && coherent */

/* pci_unmap_{page,single} is a nop so... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define pci_unmap_len(PTR, LEN_NAME)		(0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)

#endif /* CONFIG_PPC64 || CONFIG_NOT_COHERENT_CACHE */
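
/*
 * Illustrative usage (not from this file), following the pattern in the
 * kernel's DMA mapping documentation: a driver keeps unmap state next to
 * each buffer so the fields compile away on 32-bit cache-coherent kernels.
 *
 *	struct ring_info {
 *		struct sk_buff *skb;
 *		DECLARE_PCI_UNMAP_ADDR(mapping)
 *		DECLARE_PCI_UNMAP_LEN(len)
 *	};
 *
 *	pci_unmap_addr_set(ri, mapping, dma_handle);
 *	pci_unmap_len_set(ri, len, size);
 *	...
 *	pci_unmap_single(pdev, pci_unmap_addr(ri, mapping),
 *			 pci_unmap_len(ri, len), PCI_DMA_FROMDEVICE);
 */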

#ifdef CONFIG_PPC64

/* The PCI address space does not equal the physical memory address
 * space (we have an IOMMU).  The IDE and SCSI device layers use
 * this boolean for bounce buffer decisions.
 */
#define PCI_DMA_BUS_IS_PHYS	(0)

#else /* 32-bit */

/* The PCI address space does equal the physical memory
 * address space (no IOMMU).  The IDE and SCSI device layers use
 * this boolean for bounce buffer decisions.
 */
#define PCI_DMA_BUS_IS_PHYS	(1)

#endif /* CONFIG_PPC64 */

extern void pcibios_resource_to_bus(struct pci_dev *dev,
			struct pci_bus_region *region,
			struct resource *res);

extern void pcibios_bus_to_resource(struct pci_dev *dev,
			struct resource *res,
			struct pci_bus_region *region);

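/*
 * Pick the root resource (I/O port or memory space) a device BAR
 * should be claimed under.
 */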
static inline struct resource *pcibios_select_root(struct pci_dev *pdev,
			struct resource *res)
{
	struct resource *root = NULL;

	if (res->flags & IORESOURCE_IO)
		root = &ioport_resource;
	if (res->flags & IORESOURCE_MEM)
		root = &iomem_resource;

	return root;
}

extern void pcibios_setup_new_device(struct pci_dev *dev);

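/*
 * Resource claiming and assignment hooks.  pcibios_claim_one_bus() claims
 * the resources firmware has already set up (kept around for DLPAR hotplug,
 * see the commit message above); pcibios_resource_survey() handles assigning
 * whatever is still unassigned.
 */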
extern void pcibios_claim_one_bus(struct pci_bus *b);

extern void pcibios_resource_survey(void);

extern struct pci_controller *init_phb_dynamic(struct device_node *dn);

extern struct pci_dev *of_create_pci_dev(struct device_node *node,
					 struct pci_bus *bus, int devfn);

extern void of_scan_pci_bridge(struct device_node *node,
			       struct pci_dev *dev);

extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);

extern int pci_read_irq_line(struct pci_dev *dev);

struct file;
extern pgprot_t	pci_phys_mem_access_prot(struct file *file,
					 unsigned long pfn,
					 unsigned long size,
					 pgprot_t prot);

#define HAVE_ARCH_PCI_RESOURCE_TO_USER
extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
				 const struct resource *rsrc,
				 resource_size_t *start, resource_size_t *end);

extern void pcibios_do_bus_setup(struct pci_bus *bus);
extern void pcibios_fixup_of_probed_bus(struct pci_bus *bus);

#endif	/* __KERNEL__ */
#endif /* __ASM_POWERPC_PCI_H */