mirror of
https://github.com/FEX-Emu/linux.git
synced 2024-12-14 12:49:08 +00:00
Merge branch 'for-4.12/dax' into libnvdimm-for-next
This commit is contained in:
commit
736163671b
@ -128,9 +128,6 @@
|
||||
</sect1>
|
||||
<sect1 id="Device_model_support"><title>Device model support</title>
|
||||
!Idrivers/rapidio/rio-driver.c
|
||||
</sect1>
|
||||
<sect1 id="Sysfs_support"><title>Sysfs support</title>
|
||||
!Idrivers/rapidio/rio-sysfs.c
|
||||
</sect1>
|
||||
<sect1 id="PPC32_support"><title>PPC32 support</title>
|
||||
!Iarch/powerpc/sysdev/fsl_rio.c
|
||||
|
44
Documentation/devicetree/bindings/auxdisplay/hit,hd44780.txt
Normal file
44
Documentation/devicetree/bindings/auxdisplay/hit,hd44780.txt
Normal file
@ -0,0 +1,44 @@
|
||||
DT bindings for the Hitachi HD44780 Character LCD Controller
|
||||
|
||||
The Hitachi HD44780 Character LCD Controller is commonly used on character LCDs
|
||||
that can display one or more lines of text. It exposes an M6800 bus interface,
|
||||
which can be used in either 4-bit or 8-bit mode.
|
||||
|
||||
Required properties:
|
||||
- compatible: Must contain "hit,hd44780",
|
||||
- data-gpios: Must contain an array of either 4 or 8 GPIO specifiers,
|
||||
referring to the GPIO pins connected to the data signal lines DB0-DB7
|
||||
(8-bit mode) or DB4-DB7 (4-bit mode) of the LCD Controller's bus interface,
|
||||
- enable-gpios: Must contain a GPIO specifier, referring to the GPIO pin
|
||||
connected to the "E" (Enable) signal line of the LCD Controller's bus
|
||||
interface,
|
||||
- rs-gpios: Must contain a GPIO specifier, referring to the GPIO pin
|
||||
connected to the "RS" (Register Select) signal line of the LCD Controller's
|
||||
bus interface,
|
||||
- display-height: Height of the display, in character cells,
|
||||
- display-width: Width of the display, in character cells.
|
||||
|
||||
Optional properties:
|
||||
- rw-gpios: Must contain a GPIO specifier, referring to the GPIO pin
|
||||
connected to the "RW" (Read/Write) signal line of the LCD Controller's bus
|
||||
interface,
|
||||
- backlight-gpios: Must contain a GPIO specifier, referring to the GPIO pin
|
||||
used for enabling the LCD's backlight,
|
||||
- internal-buffer-width: Internal buffer width (default is 40 for displays
|
||||
with 1 or 2 lines, and display-width for displays with more than 2 lines).
|
||||
|
||||
Example:
|
||||
|
||||
auxdisplay {
|
||||
compatible = "hit,hd44780";
|
||||
|
||||
data-gpios = <&hc595 0 GPIO_ACTIVE_HIGH>,
|
||||
<&hc595 1 GPIO_ACTIVE_HIGH>,
|
||||
<&hc595 2 GPIO_ACTIVE_HIGH>,
|
||||
<&hc595 3 GPIO_ACTIVE_HIGH>;
|
||||
enable-gpios = <&hc595 4 GPIO_ACTIVE_HIGH>;
|
||||
rs-gpios = <&hc595 5 GPIO_ACTIVE_HIGH>;
|
||||
|
||||
display-height = <2>;
|
||||
display-width = <16>;
|
||||
};
|
@ -186,6 +186,7 @@ Optional properties:
|
||||
otherwise full reconfiguration is done.
|
||||
- external-fpga-config : boolean, set if the FPGA has already been configured
|
||||
prior to OS boot up.
|
||||
- encrypted-fpga-config : boolean, set if the bitstream is encrypted
|
||||
- region-unfreeze-timeout-us : The maximum time in microseconds to wait for
|
||||
bridges to successfully become enabled after the region has been
|
||||
programmed.
|
||||
|
@ -0,0 +1,21 @@
|
||||
Lattice iCE40 FPGA Manager
|
||||
|
||||
Required properties:
|
||||
- compatible: Should contain "lattice,ice40-fpga-mgr"
|
||||
- reg: SPI chip select
|
||||
- spi-max-frequency: Maximum SPI frequency (>=1000000, <=25000000)
|
||||
- cdone-gpios: GPIO input connected to CDONE pin
|
||||
- reset-gpios: Active-low GPIO output connected to CRESET_B pin. Note
|
||||
that unless the GPIO is held low during startup, the
|
||||
FPGA will enter Master SPI mode and drive SCK with a
|
||||
clock signal potentially jamming other devices on the
|
||||
bus until the firmware is loaded.
|
||||
|
||||
Example:
|
||||
fpga: fpga@0 {
|
||||
compatible = "lattice,ice40-fpga-mgr";
|
||||
reg = <0>;
|
||||
spi-max-frequency = <1000000>;
|
||||
cdone-gpios = <&gpio 24 GPIO_ACTIVE_HIGH>;
|
||||
reset-gpios = <&gpio 22 GPIO_ACTIVE_LOW>;
|
||||
};
|
@ -6,36 +6,15 @@ Driver registration
|
||||
|
||||
As with other subsystems within the Linux kernel, VME device drivers register
|
||||
with the VME subsystem, typically called from the devices init routine. This is
|
||||
achieved via a call to the following function:
|
||||
achieved via a call to :c:func:`vme_register_driver`.
|
||||
|
||||
.. code-block:: c
|
||||
A pointer to a structure of type :c:type:`struct vme_driver <vme_driver>` must
|
||||
be provided to the registration function. Along with the maximum number of
|
||||
devices your driver is able to support.
|
||||
|
||||
int vme_register_driver (struct vme_driver *driver, unsigned int ndevs);
|
||||
|
||||
If driver registration is successful this function returns zero, if an error
|
||||
occurred a negative error code will be returned.
|
||||
|
||||
A pointer to a structure of type 'vme_driver' must be provided to the
|
||||
registration function. Along with ndevs, which is the number of devices your
|
||||
driver is able to support. The structure is as follows:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
struct vme_driver {
|
||||
struct list_head node;
|
||||
const char *name;
|
||||
int (*match)(struct vme_dev *);
|
||||
int (*probe)(struct vme_dev *);
|
||||
int (*remove)(struct vme_dev *);
|
||||
void (*shutdown)(void);
|
||||
struct device_driver driver;
|
||||
struct list_head devices;
|
||||
unsigned int ndev;
|
||||
};
|
||||
|
||||
At the minimum, the '.name', '.match' and '.probe' elements of this structure
|
||||
should be correctly set. The '.name' element is a pointer to a string holding
|
||||
the device driver's name.
|
||||
At the minimum, the '.name', '.match' and '.probe' elements of
|
||||
:c:type:`struct vme_driver <vme_driver>` should be correctly set. The '.name'
|
||||
element is a pointer to a string holding the device driver's name.
|
||||
|
||||
The '.match' function allows control over which VME devices should be registered
|
||||
with the driver. The match function should return 1 if a device should be
|
||||
@ -54,29 +33,16 @@ the number of devices probed to one:
|
||||
}
|
||||
|
||||
The '.probe' element should contain a pointer to the probe routine. The
|
||||
probe routine is passed a 'struct vme_dev' pointer as an argument. The
|
||||
'struct vme_dev' structure looks like the following:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
struct vme_dev {
|
||||
int num;
|
||||
struct vme_bridge *bridge;
|
||||
struct device dev;
|
||||
struct list_head drv_list;
|
||||
struct list_head bridge_list;
|
||||
};
|
||||
probe routine is passed a :c:type:`struct vme_dev <vme_dev>` pointer as an
|
||||
argument.
|
||||
|
||||
Here, the 'num' field refers to the sequential device ID for this specific
|
||||
driver. The bridge number (or bus number) can be accessed using
|
||||
dev->bridge->num.
|
||||
|
||||
A function is also provided to unregister the driver from the VME core and is
|
||||
usually called from the device driver's exit routine:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
void vme_unregister_driver (struct vme_driver *driver);
|
||||
A function is also provided to unregister the driver from the VME core called
|
||||
:c:func:`vme_unregister_driver` and should usually be called from the device
|
||||
driver's exit routine.
|
||||
|
||||
|
||||
Resource management
|
||||
@ -90,47 +56,29 @@ driver is called. The probe routine is passed a pointer to the devices
|
||||
device structure. This pointer should be saved, it will be required for
|
||||
requesting VME resources.
|
||||
|
||||
The driver can request ownership of one or more master windows, slave windows
|
||||
and/or dma channels. Rather than allowing the device driver to request a
|
||||
specific window or DMA channel (which may be used by a different driver) this
|
||||
driver allows a resource to be assigned based on the required attributes of the
|
||||
driver in question:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
struct vme_resource * vme_master_request(struct vme_dev *dev,
|
||||
u32 aspace, u32 cycle, u32 width);
|
||||
|
||||
struct vme_resource * vme_slave_request(struct vme_dev *dev, u32 aspace,
|
||||
u32 cycle);
|
||||
|
||||
struct vme_resource *vme_dma_request(struct vme_dev *dev, u32 route);
|
||||
|
||||
For slave windows these attributes are split into the VME address spaces that
|
||||
need to be accessed in 'aspace' and VME bus cycle types required in 'cycle'.
|
||||
Master windows add a further set of attributes in 'width' specifying the
|
||||
required data transfer widths. These attributes are defined as bitmasks and as
|
||||
such any combination of the attributes can be requested for a single window,
|
||||
the core will assign a window that meets the requirements, returning a pointer
|
||||
of type vme_resource that should be used to identify the allocated resource
|
||||
when it is used. For DMA controllers, the request function requires the
|
||||
potential direction of any transfers to be provided in the route attributes.
|
||||
This is typically VME-to-MEM and/or MEM-to-VME, though some hardware can
|
||||
support VME-to-VME and MEM-to-MEM transfers as well as test pattern generation.
|
||||
If an unallocated window fitting the requirements can not be found a NULL
|
||||
pointer will be returned.
|
||||
The driver can request ownership of one or more master windows
|
||||
(:c:func:`vme_master_request`), slave windows (:c:func:`vme_slave_request`)
|
||||
and/or dma channels (:c:func:`vme_dma_request`). Rather than allowing the device
|
||||
driver to request a specific window or DMA channel (which may be used by a
|
||||
different driver) the API allows a resource to be assigned based on the required
|
||||
attributes of the driver in question. For slave windows these attributes are
|
||||
split into the VME address spaces that need to be accessed in 'aspace' and VME
|
||||
bus cycle types required in 'cycle'. Master windows add a further set of
|
||||
attributes in 'width' specifying the required data transfer widths. These
|
||||
attributes are defined as bitmasks and as such any combination of the
|
||||
attributes can be requested for a single window, the core will assign a window
|
||||
that meets the requirements, returning a pointer of type vme_resource that
|
||||
should be used to identify the allocated resource when it is used. For DMA
|
||||
controllers, the request function requires the potential direction of any
|
||||
transfers to be provided in the route attributes. This is typically VME-to-MEM
|
||||
and/or MEM-to-VME, though some hardware can support VME-to-VME and MEM-to-MEM
|
||||
transfers as well as test pattern generation. If an unallocated window fitting
|
||||
the requirements can not be found a NULL pointer will be returned.
|
||||
|
||||
Functions are also provided to free window allocations once they are no longer
|
||||
required. These functions should be passed the pointer to the resource provided
|
||||
during resource allocation:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
void vme_master_free(struct vme_resource *res);
|
||||
|
||||
void vme_slave_free(struct vme_resource *res);
|
||||
|
||||
void vme_dma_free(struct vme_resource *res);
|
||||
required. These functions (:c:func:`vme_master_free`, :c:func:`vme_slave_free`
|
||||
and :c:func:`vme_dma_free`) should be passed the pointer to the resource
|
||||
provided during resource allocation.
|
||||
|
||||
|
||||
Master windows
|
||||
@ -144,61 +92,22 @@ the underlying chipset. A window must be configured before it can be used.
|
||||
Master window configuration
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Once a master window has been assigned the following functions can be used to
|
||||
configure it and retrieve the current settings:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
int vme_master_set (struct vme_resource *res, int enabled,
|
||||
unsigned long long base, unsigned long long size, u32 aspace,
|
||||
u32 cycle, u32 width);
|
||||
|
||||
int vme_master_get (struct vme_resource *res, int *enabled,
|
||||
unsigned long long *base, unsigned long long *size, u32 *aspace,
|
||||
u32 *cycle, u32 *width);
|
||||
|
||||
The address spaces, transfer widths and cycle types are the same as described
|
||||
Once a master window has been assigned :c:func:`vme_master_set` can be used to
|
||||
configure it and :c:func:`vme_master_get` to retrieve the current settings. The
|
||||
address spaces, transfer widths and cycle types are the same as described
|
||||
under resource management, however some of the options are mutually exclusive.
|
||||
For example, only one address space may be specified.
|
||||
|
||||
These functions return 0 on success or an error code should the call fail.
|
||||
|
||||
|
||||
Master window access
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The following functions can be used to read from and write to configured master
|
||||
windows. These functions return the number of bytes copied:
|
||||
The function :c:func:`vme_master_read` can be used to read from and
|
||||
:c:func:`vme_master_write` used to write to configured master windows.
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
ssize_t vme_master_read(struct vme_resource *res, void *buf,
|
||||
size_t count, loff_t offset);
|
||||
|
||||
ssize_t vme_master_write(struct vme_resource *res, void *buf,
|
||||
size_t count, loff_t offset);
|
||||
|
||||
In addition to simple reads and writes, a function is provided to do a
|
||||
read-modify-write transaction. This function returns the original value of the
|
||||
VME bus location :
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
unsigned int vme_master_rmw (struct vme_resource *res,
|
||||
unsigned int mask, unsigned int compare, unsigned int swap,
|
||||
loff_t offset);
|
||||
|
||||
This function works by reading the offset and applying the mask. If the bits selected in
|
||||
the mask match with the values of the corresponding bits in the compare field,
|
||||
the value of swap is written to the specified offset.
|
||||
|
||||
Parts of a VME window can be mapped into user space memory using the following
|
||||
function:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
int vme_master_mmap(struct vme_resource *resource,
|
||||
struct vm_area_struct *vma)
|
||||
In addition to simple reads and writes, :c:func:`vme_master_rmw` is provided to
|
||||
do a read-modify-write transaction. Parts of a VME window can also be mapped
|
||||
into user space memory using :c:func:`vme_master_mmap`.
|
||||
|
||||
|
||||
Slave windows
|
||||
@ -213,41 +122,23 @@ it can be used.
|
||||
Slave window configuration
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Once a slave window has been assigned the following functions can be used to
|
||||
configure it and retrieve the current settings:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
int vme_slave_set (struct vme_resource *res, int enabled,
|
||||
unsigned long long base, unsigned long long size,
|
||||
dma_addr_t mem, u32 aspace, u32 cycle);
|
||||
|
||||
int vme_slave_get (struct vme_resource *res, int *enabled,
|
||||
unsigned long long *base, unsigned long long *size,
|
||||
dma_addr_t *mem, u32 *aspace, u32 *cycle);
|
||||
Once a slave window has been assigned :c:func:`vme_slave_set` can be used to
|
||||
configure it and :c:func:`vme_slave_get` to retrieve the current settings.
|
||||
|
||||
The address spaces, transfer widths and cycle types are the same as described
|
||||
under resource management, however some of the options are mutually exclusive.
|
||||
For example, only one address space may be specified.
|
||||
|
||||
These functions return 0 on success or an error code should the call fail.
|
||||
|
||||
|
||||
Slave window buffer allocation
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Functions are provided to allow the user to allocate and free a contiguous
|
||||
buffers which will be accessible by the VME bridge. These functions do not have
|
||||
to be used, other methods can be used to allocate a buffer, though care must be
|
||||
taken to ensure that they are contiguous and accessible by the VME bridge:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
void * vme_alloc_consistent(struct vme_resource *res, size_t size,
|
||||
dma_addr_t *mem);
|
||||
|
||||
void vme_free_consistent(struct vme_resource *res, size_t size,
|
||||
void *virt, dma_addr_t mem);
|
||||
Functions are provided to allow the user to allocate
|
||||
(:c:func:`vme_alloc_consistent`) and free (:c:func:`vme_free_consistent`)
|
||||
contiguous buffers which will be accessible by the VME bridge. These functions
|
||||
do not have to be used, other methods can be used to allocate a buffer, though
|
||||
care must be taken to ensure that they are contiguous and accessible by the VME
|
||||
bridge.
|
||||
|
||||
|
||||
Slave window access
|
||||
@ -269,29 +160,18 @@ executed, reused and destroyed.
|
||||
List Management
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
The following functions are provided to create and destroy DMA lists. Execution
|
||||
of a list will not automatically destroy the list, thus enabling a list to be
|
||||
reused for repetitive tasks:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
struct vme_dma_list *vme_new_dma_list(struct vme_resource *res);
|
||||
|
||||
int vme_dma_list_free(struct vme_dma_list *list);
|
||||
The function :c:func:`vme_new_dma_list` is provided to create and
|
||||
:c:func:`vme_dma_list_free` to destroy DMA lists. Execution of a list will not
|
||||
automatically destroy the list, thus enabling a list to be reused for repetitive
|
||||
tasks.
|
||||
|
||||
|
||||
List Population
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
An item can be added to a list using the following function ( the source and
|
||||
An item can be added to a list using :c:func:`vme_dma_list_add` (the source and
|
||||
destination attributes need to be created before calling this function, this is
|
||||
covered under "Transfer Attributes"):
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
int vme_dma_list_add(struct vme_dma_list *list,
|
||||
struct vme_dma_attr *src, struct vme_dma_attr *dest,
|
||||
size_t count);
|
||||
covered under "Transfer Attributes").
|
||||
|
||||
.. note::
|
||||
|
||||
@ -310,41 +190,19 @@ an item to a list. This is due to the diverse attributes required for each type
|
||||
of source and destination. There are functions to create attributes for PCI, VME
|
||||
and pattern sources and destinations (where appropriate):
|
||||
|
||||
Pattern source:
|
||||
- PCI source or destination: :c:func:`vme_dma_pci_attribute`
|
||||
- VME source or destination: :c:func:`vme_dma_vme_attribute`
|
||||
- Pattern source: :c:func:`vme_dma_pattern_attribute`
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type);
|
||||
|
||||
PCI source or destination:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t mem);
|
||||
|
||||
VME source or destination:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long base,
|
||||
u32 aspace, u32 cycle, u32 width);
|
||||
|
||||
The following function should be used to free an attribute:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
void vme_dma_free_attribute(struct vme_dma_attr *attr);
|
||||
The function :c:func:`vme_dma_free_attribute` should be used to free an
|
||||
attribute.
|
||||
|
||||
|
||||
List Execution
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
The following function queues a list for execution. The function will return
|
||||
once the list has been executed:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
int vme_dma_list_exec(struct vme_dma_list *list);
|
||||
The function :c:func:`vme_dma_list_exec` queues a list for execution and will
|
||||
return once the list has been executed.
|
||||
|
||||
|
||||
Interrupts
|
||||
@ -358,20 +216,13 @@ specific VME level and status IDs.
|
||||
Attaching Interrupt Handlers
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The following functions can be used to attach and free a specific VME level and
|
||||
status ID combination. Any given combination can only be assigned a single
|
||||
callback function. A void pointer parameter is provided, the value of which is
|
||||
passed to the callback function, the use of this pointer is user undefined:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
int vme_irq_request(struct vme_dev *dev, int level, int statid,
|
||||
void (*callback)(int, int, void *), void *priv);
|
||||
|
||||
void vme_irq_free(struct vme_dev *dev, int level, int statid);
|
||||
|
||||
The callback parameters are as follows. Care must be taken in writing a callback
|
||||
function, callback functions run in interrupt context:
|
||||
The function :c:func:`vme_irq_request` can be used to attach and
|
||||
:c:func:`vme_irq_free` to free a specific VME level and status ID combination.
|
||||
Any given combination can only be assigned a single callback function. A void
|
||||
pointer parameter is provided, the value of which is passed to the callback
|
||||
function, the use of this pointer is user undefined. The callback parameters are
|
||||
as follows. Care must be taken in writing a callback function, callback
|
||||
functions run in interrupt context:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
@ -381,12 +232,8 @@ function, callback functions run in interrupt context:
|
||||
Interrupt Generation
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The following function can be used to generate a VME interrupt at a given VME
|
||||
level and VME status ID:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
int vme_irq_generate(struct vme_dev *dev, int level, int statid);
|
||||
The function :c:func:`vme_irq_generate` can be used to generate a VME interrupt
|
||||
at a given VME level and VME status ID.
|
||||
|
||||
|
||||
Location monitors
|
||||
@ -399,54 +246,29 @@ monitor.
|
||||
Location Monitor Management
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The following functions are provided to request the use of a block of location
|
||||
monitors and to free them after they are no longer required:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
struct vme_resource * vme_lm_request(struct vme_dev *dev);
|
||||
|
||||
void vme_lm_free(struct vme_resource * res);
|
||||
|
||||
Each block may provide a number of location monitors, monitoring adjacent
|
||||
locations. The following function can be used to determine how many locations
|
||||
are provided:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
int vme_lm_count(struct vme_resource * res);
|
||||
The function :c:func:`vme_lm_request` is provided to request the use of a block
|
||||
of location monitors and :c:func:`vme_lm_free` to free them after they are no
|
||||
longer required. Each block may provide a number of location monitors,
|
||||
monitoring adjacent locations. The function :c:func:`vme_lm_count` can be used
|
||||
to determine how many locations are provided.
|
||||
|
||||
|
||||
Location Monitor Configuration
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Once a bank of location monitors has been allocated, the following functions
|
||||
are provided to configure the location and mode of the location monitor:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
int vme_lm_set(struct vme_resource *res, unsigned long long base,
|
||||
u32 aspace, u32 cycle);
|
||||
|
||||
int vme_lm_get(struct vme_resource *res, unsigned long long *base,
|
||||
u32 *aspace, u32 *cycle);
|
||||
Once a bank of location monitors has been allocated, the function
|
||||
:c:func:`vme_lm_set` is provided to configure the location and mode of the
|
||||
location monitor. The function :c:func:`vme_lm_get` can be used to retrieve
|
||||
existing settings.
|
||||
|
||||
|
||||
Location Monitor Use
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The following functions allow a callback to be attached and detached from each
|
||||
location monitor location. Each location monitor can monitor a number of
|
||||
adjacent locations:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
int vme_lm_attach(struct vme_resource *res, int num,
|
||||
void (*callback)(void *));
|
||||
|
||||
int vme_lm_detach(struct vme_resource *res, int num);
|
||||
|
||||
The callback function is declared as follows.
|
||||
The function :c:func:`vme_lm_attach` enables a callback to be attached and
|
||||
:c:func:`vme_lm_detach` allows one to be detached from each location monitor
|
||||
location. Each location monitor can monitor a number of adjacent locations. The
|
||||
callback function is declared as follows.
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
@ -456,19 +278,20 @@ The callback function is declared as follows.
|
||||
Slot Detection
|
||||
--------------
|
||||
|
||||
This function returns the slot ID of the provided bridge.
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
int vme_slot_num(struct vme_dev *dev);
|
||||
The function :c:func:`vme_slot_num` returns the slot ID of the provided bridge.
|
||||
|
||||
|
||||
Bus Detection
|
||||
-------------
|
||||
|
||||
This function returns the bus ID of the provided bridge.
|
||||
The function :c:func:`vme_bus_num` returns the bus ID of the provided bridge.
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
int vme_bus_num(struct vme_dev *dev);
|
||||
VME API
|
||||
-------
|
||||
|
||||
.. kernel-doc:: include/linux/vme.h
|
||||
:internal:
|
||||
|
||||
.. kernel-doc:: drivers/vme/vme.c
|
||||
:export:
|
||||
|
@ -2,7 +2,11 @@
|
||||
- This file
|
||||
w1_therm
|
||||
- The Maxim/Dallas Semiconductor ds18*20 temperature sensor.
|
||||
w1_ds2413
|
||||
- The Maxim/Dallas Semiconductor ds2413 dual channel addressable switch.
|
||||
w1_ds2423
|
||||
- The Maxim/Dallas Semiconductor ds2423 counter device.
|
||||
w1_ds2438
|
||||
- The Maxim/Dallas Semiconductor ds2438 smart battery monitor.
|
||||
w1_ds28e04
|
||||
- The Maxim/Dallas Semiconductor ds28e04 eeprom.
|
||||
|
50
Documentation/w1/slaves/w1_ds2413
Normal file
50
Documentation/w1/slaves/w1_ds2413
Normal file
@ -0,0 +1,50 @@
|
||||
Kernel driver w1_ds2413
|
||||
=======================
|
||||
|
||||
Supported chips:
|
||||
* Maxim DS2413 1-Wire Dual Channel Addressable Switch
|
||||
|
||||
supported family codes:
|
||||
W1_FAMILY_DS2413 0x3A
|
||||
|
||||
Author: Mariusz Bialonczyk <manio@skyboo.net>
|
||||
|
||||
Description
|
||||
-----------
|
||||
|
||||
The DS2413 chip has two open-drain outputs (PIO A and PIO B).
|
||||
Support is provided through the sysfs files "output" and "state".
|
||||
|
||||
Reading state
|
||||
-------------
|
||||
The "state" file provides one-byte value which is in the same format as for
|
||||
the chip PIO_ACCESS_READ command (refer to the datasheet for details):
|
||||
|
||||
Bit 0: PIOA Pin State
|
||||
Bit 1: PIOA Output Latch State
|
||||
Bit 2: PIOB Pin State
|
||||
Bit 3: PIOB Output Latch State
|
||||
Bit 4-7: Complement of Bit 3 to Bit 0 (verified by the kernel module)
|
||||
|
||||
This file is readonly.
|
||||
|
||||
Writing output
|
||||
--------------
|
||||
You can set the PIO pins using the "output" file.
|
||||
It is writable, you can write one-byte value to this sysfs file.
|
||||
Similarly the byte format is the same as for the PIO_ACCESS_WRITE command:
|
||||
|
||||
Bit 0: PIOA
|
||||
Bit 1: PIOB
|
||||
Bit 2-7: No matter (driver will set it to "1"s)
|
||||
|
||||
|
||||
The chip has some kind of basic protection against transmission errors.
|
||||
When reading the state, there are four complement bits.
|
||||
The driver checks this complement, and when it is wrong it
|
||||
returns an I/O error.
|
||||
|
||||
When writing output, the master must repeat the PIO Output Data byte in
|
||||
its inverted form and it is waiting for a confirmation.
|
||||
If the write is unsuccessful for three times, the write also returns
|
||||
I/O error.
|
63
Documentation/w1/slaves/w1_ds2438
Normal file
63
Documentation/w1/slaves/w1_ds2438
Normal file
@ -0,0 +1,63 @@
|
||||
Kernel driver w1_ds2438
|
||||
=======================
|
||||
|
||||
Supported chips:
|
||||
* Maxim DS2438 Smart Battery Monitor
|
||||
|
||||
supported family codes:
|
||||
W1_FAMILY_DS2438 0x26
|
||||
|
||||
Author: Mariusz Bialonczyk <manio@skyboo.net>
|
||||
|
||||
Description
|
||||
-----------
|
||||
|
||||
The DS2438 chip provides several functions that are desirable to carry in
|
||||
a battery pack. It also has 40 bytes of nonvolatile EEPROM.
|
||||
Because of its ability to measure temperature, current and voltage, the chip
|
||||
is also often used in weather stations and applications such as: rain gauge,
|
||||
wind speed/direction measuring, humidity sensing, etc.
|
||||
|
||||
Current support is provided through the following sysfs files (all files
|
||||
except "iad" are readonly):
|
||||
|
||||
"iad"
|
||||
-----
|
||||
This file controls the 'Current A/D Control Bit' (IAD) in the
|
||||
Status/Configuration Register.
|
||||
Writing a zero value will clear the IAD bit and disable the current
|
||||
measurements.
|
||||
Writing value "1" is setting the IAD bit (enables the measurements).
|
||||
The IAD bit is enabled by default in the DS2438.
|
||||
|
||||
When writing to sysfs file bits 2-7 are ignored, so it's safe to write ASCII.
|
||||
An I/O error is returned when there is a problem setting the new value.
|
||||
|
||||
"page0"
|
||||
-------
|
||||
This file provides full 8 bytes of the chip Page 0 (00h).
|
||||
This page contains the most frequently accessed information of the DS2438.
|
||||
Internally when this file is read, the additional CRC byte is also obtained
|
||||
from the slave device. If it is correct, the 8 bytes page data are passed
|
||||
to userspace, otherwise an I/O error is returned.
|
||||
|
||||
"temperature"
|
||||
-------------
|
||||
Opening and reading this file initiates the CONVERT_T (temperature conversion)
|
||||
command of the chip, afterwards the temperature is read from the device
|
||||
registers and provided as an ASCII decimal value.
|
||||
|
||||
Important: The returned value has to be divided by 256 to get a real
|
||||
temperature in degrees Celsius.
|
||||
|
||||
"vad", "vdd"
|
||||
------------
|
||||
Opening and reading this file initiates the CONVERT_V (voltage conversion)
|
||||
command of the chip.
|
||||
|
||||
Depending on a sysfs filename a different input for the A/D will be selected:
|
||||
vad: general purpose A/D input (VAD)
|
||||
vdd: battery input (VDD)
|
||||
|
||||
After the voltage conversion the value is returned as decimal ASCII.
|
||||
Note: The value is in mV, so to get volts the value has to be divided by 10.
|
@ -5109,7 +5109,6 @@ F: include/uapi/linux/firewire*.h
|
||||
F: tools/firewire/
|
||||
|
||||
FIRMWARE LOADER (request_firmware)
|
||||
M: Ming Lei <ming.lei@canonical.com>
|
||||
M: Luis R. Rodriguez <mcgrof@kernel.org>
|
||||
L: linux-kernel@vger.kernel.org
|
||||
S: Maintained
|
||||
|
@ -210,6 +210,28 @@ static struct ep93xx_eth_data __initdata ts72xx_eth_data = {
|
||||
.phy_id = 1,
|
||||
};
|
||||
|
||||
#if IS_ENABLED(CONFIG_FPGA_MGR_TS73XX)
|
||||
|
||||
/* Relative to EP93XX_CS1_PHYS_BASE */
|
||||
#define TS73XX_FPGA_LOADER_BASE 0x03c00000
|
||||
|
||||
static struct resource ts73xx_fpga_resources[] = {
|
||||
{
|
||||
.start = EP93XX_CS1_PHYS_BASE + TS73XX_FPGA_LOADER_BASE,
|
||||
.end = EP93XX_CS1_PHYS_BASE + TS73XX_FPGA_LOADER_BASE + 1,
|
||||
.flags = IORESOURCE_MEM,
|
||||
},
|
||||
};
|
||||
|
||||
static struct platform_device ts73xx_fpga_device = {
|
||||
.name = "ts73xx-fpga-mgr",
|
||||
.id = -1,
|
||||
.resource = ts73xx_fpga_resources,
|
||||
.num_resources = ARRAY_SIZE(ts73xx_fpga_resources),
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
static void __init ts72xx_init_machine(void)
|
||||
{
|
||||
ep93xx_init_devices();
|
||||
@ -218,6 +240,10 @@ static void __init ts72xx_init_machine(void)
|
||||
platform_device_register(&ts72xx_wdt_device);
|
||||
|
||||
ep93xx_register_eth(&ts72xx_eth_data, 1);
|
||||
#if IS_ENABLED(CONFIG_FPGA_MGR_TS73XX)
|
||||
if (board_is_ts7300())
|
||||
platform_device_register(&ts73xx_fpga_device);
|
||||
#endif
|
||||
}
|
||||
|
||||
MACHINE_START(TS72XX, "Technologic Systems TS-72xx SBC")
|
||||
|
@ -284,6 +284,7 @@ config CPM2
|
||||
config AXON_RAM
|
||||
tristate "Axon DDR2 memory device driver"
|
||||
depends on PPC_IBM_CELL_BLADE && BLOCK
|
||||
select DAX
|
||||
default m
|
||||
help
|
||||
It registers one block device per Axon's DDR2 memory bank found
|
||||
|
@ -25,6 +25,7 @@
|
||||
|
||||
#include <linux/bio.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/dax.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/fs.h>
|
||||
@ -62,6 +63,7 @@ static int azfs_major, azfs_minor;
|
||||
struct axon_ram_bank {
|
||||
struct platform_device *device;
|
||||
struct gendisk *disk;
|
||||
struct dax_device *dax_dev;
|
||||
unsigned int irq_id;
|
||||
unsigned long ph_addr;
|
||||
unsigned long io_addr;
|
||||
@ -137,25 +139,32 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
|
||||
return BLK_QC_T_NONE;
|
||||
}
|
||||
|
||||
/**
|
||||
* axon_ram_direct_access - direct_access() method for block device
|
||||
* @device, @sector, @data: see block_device_operations method
|
||||
*/
|
||||
static const struct block_device_operations axon_ram_devops = {
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static long
|
||||
axon_ram_direct_access(struct block_device *device, sector_t sector,
|
||||
void **kaddr, pfn_t *pfn, long size)
|
||||
__axon_ram_direct_access(struct axon_ram_bank *bank, pgoff_t pgoff, long nr_pages,
|
||||
void **kaddr, pfn_t *pfn)
|
||||
{
|
||||
struct axon_ram_bank *bank = device->bd_disk->private_data;
|
||||
loff_t offset = (loff_t)sector << AXON_RAM_SECTOR_SHIFT;
|
||||
resource_size_t offset = pgoff * PAGE_SIZE;
|
||||
|
||||
*kaddr = (void *) bank->io_addr + offset;
|
||||
*pfn = phys_to_pfn_t(bank->ph_addr + offset, PFN_DEV);
|
||||
return bank->size - offset;
|
||||
return (bank->size - offset) / PAGE_SIZE;
|
||||
}
|
||||
|
||||
static const struct block_device_operations axon_ram_devops = {
|
||||
.owner = THIS_MODULE,
|
||||
.direct_access = axon_ram_direct_access
|
||||
static long
|
||||
axon_ram_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
|
||||
void **kaddr, pfn_t *pfn)
|
||||
{
|
||||
struct axon_ram_bank *bank = dax_get_private(dax_dev);
|
||||
|
||||
return __axon_ram_direct_access(bank, pgoff, nr_pages, kaddr, pfn);
|
||||
}
|
||||
|
||||
static const struct dax_operations axon_ram_dax_ops = {
|
||||
.direct_access = axon_ram_dax_direct_access,
|
||||
};
|
||||
|
||||
/**
|
||||
@ -219,6 +228,7 @@ static int axon_ram_probe(struct platform_device *device)
|
||||
goto failed;
|
||||
}
|
||||
|
||||
|
||||
bank->disk->major = azfs_major;
|
||||
bank->disk->first_minor = azfs_minor;
|
||||
bank->disk->fops = &axon_ram_devops;
|
||||
@ -227,6 +237,13 @@ static int axon_ram_probe(struct platform_device *device)
|
||||
sprintf(bank->disk->disk_name, "%s%d",
|
||||
AXON_RAM_DEVICE_NAME, axon_ram_bank_id);
|
||||
|
||||
bank->dax_dev = alloc_dax(bank, bank->disk->disk_name,
|
||||
&axon_ram_dax_ops);
|
||||
if (!bank->dax_dev) {
|
||||
rc = -ENOMEM;
|
||||
goto failed;
|
||||
}
|
||||
|
||||
bank->disk->queue = blk_alloc_queue(GFP_KERNEL);
|
||||
if (bank->disk->queue == NULL) {
|
||||
dev_err(&device->dev, "Cannot register disk queue\n");
|
||||
@ -278,6 +295,8 @@ failed:
|
||||
del_gendisk(bank->disk);
|
||||
put_disk(bank->disk);
|
||||
}
|
||||
kill_dax(bank->dax_dev);
|
||||
put_dax(bank->dax_dev);
|
||||
device->dev.platform_data = NULL;
|
||||
if (bank->io_addr != 0)
|
||||
iounmap((void __iomem *) bank->io_addr);
|
||||
@ -300,6 +319,8 @@ axon_ram_remove(struct platform_device *device)
|
||||
|
||||
device_remove_file(&device->dev, &dev_attr_ecc);
|
||||
free_irq(bank->irq_id, device);
|
||||
kill_dax(bank->dax_dev);
|
||||
put_dax(bank->dax_dev);
|
||||
del_gendisk(bank->disk);
|
||||
put_disk(bank->disk);
|
||||
iounmap((void __iomem *) bank->io_addr);
|
||||
|
@ -25,7 +25,7 @@
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/clockchips.h>
|
||||
|
||||
#include <linux/hyperv.h>
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
|
||||
|
@ -44,11 +44,6 @@ static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
|
||||
BUG();
|
||||
}
|
||||
|
||||
static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
|
||||
{
|
||||
return memcpy_mcsafe(dst, src, n);
|
||||
}
|
||||
|
||||
/**
|
||||
* arch_wb_cache_pmem - write back a cache range with CLWB
|
||||
* @vaddr: virtual start address
|
||||
|
@ -79,6 +79,7 @@ int strcmp(const char *cs, const char *ct);
|
||||
#define memset(s, c, n) __memset(s, c, n)
|
||||
#endif
|
||||
|
||||
#define __HAVE_ARCH_MEMCPY_MCSAFE 1
|
||||
__must_check int memcpy_mcsafe_unrolled(void *dst, const void *src, size_t cnt);
|
||||
DECLARE_STATIC_KEY_FALSE(mcsafe_key);
|
||||
|
||||
|
@ -124,7 +124,7 @@
|
||||
* Recommend using hypercall for address space switches rather
|
||||
* than MOV to CR3 instruction
|
||||
*/
|
||||
#define HV_X64_MWAIT_RECOMMENDED (1 << 0)
|
||||
#define HV_X64_AS_SWITCH_RECOMMENDED (1 << 0)
|
||||
/* Recommend using hypercall for local TLB flushes rather
|
||||
* than INVLPG or MOV to CR3 instructions */
|
||||
#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED (1 << 1)
|
||||
@ -147,6 +147,11 @@
|
||||
*/
|
||||
#define HV_X64_RELAXED_TIMING_RECOMMENDED (1 << 5)
|
||||
|
||||
/*
|
||||
* Virtual APIC support
|
||||
*/
|
||||
#define HV_X64_DEPRECATING_AEOI_RECOMMENDED (1 << 9)
|
||||
|
||||
/*
|
||||
* Crash notification flag.
|
||||
*/
|
||||
|
@ -6,6 +6,7 @@ menuconfig BLOCK
|
||||
default y
|
||||
select SBITMAP
|
||||
select SRCU
|
||||
select DAX
|
||||
help
|
||||
Provide block layer support for the kernel.
|
||||
|
||||
|
@ -16,7 +16,6 @@
|
||||
#include <linux/kmod.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/genhd.h>
|
||||
#include <linux/dax.h>
|
||||
#include <linux/blktrace_api.h>
|
||||
|
||||
#include "partitions/check.h"
|
||||
@ -631,24 +630,12 @@ int invalidate_partitions(struct gendisk *disk, struct block_device *bdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct page *read_pagecache_sector(struct block_device *bdev, sector_t n)
|
||||
{
|
||||
struct address_space *mapping = bdev->bd_inode->i_mapping;
|
||||
|
||||
return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)),
|
||||
NULL);
|
||||
}
|
||||
|
||||
unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
|
||||
{
|
||||
struct address_space *mapping = bdev->bd_inode->i_mapping;
|
||||
struct page *page;
|
||||
|
||||
/* don't populate page cache for dax capable devices */
|
||||
if (IS_DAX(bdev->bd_inode))
|
||||
page = read_dax_sector(bdev, n);
|
||||
else
|
||||
page = read_pagecache_sector(bdev, n);
|
||||
|
||||
page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)), NULL);
|
||||
if (!IS_ERR(page)) {
|
||||
if (PageError(page))
|
||||
goto fail;
|
||||
|
@ -71,7 +71,7 @@ obj-$(CONFIG_PARPORT) += parport/
|
||||
obj-$(CONFIG_NVM) += lightnvm/
|
||||
obj-y += base/ block/ misc/ mfd/ nfc/
|
||||
obj-$(CONFIG_LIBNVDIMM) += nvdimm/
|
||||
obj-$(CONFIG_DEV_DAX) += dax/
|
||||
obj-$(CONFIG_DAX) += dax/
|
||||
obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
|
||||
obj-$(CONFIG_NUBUS) += nubus/
|
||||
obj-y += macintosh/
|
||||
|
@ -1849,8 +1849,7 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
|
||||
mmio_flush_range((void __force *)
|
||||
mmio->addr.aperture + offset, c);
|
||||
|
||||
memcpy_from_pmem(iobuf + copied,
|
||||
mmio->addr.aperture + offset, c);
|
||||
memcpy(iobuf + copied, mmio->addr.aperture + offset, c);
|
||||
}
|
||||
|
||||
copied += c;
|
||||
|
@ -22,7 +22,7 @@ config ANDROID_BINDER_IPC
|
||||
config ANDROID_BINDER_DEVICES
|
||||
string "Android Binder devices"
|
||||
depends on ANDROID_BINDER_IPC
|
||||
default "binder"
|
||||
default "binder,hwbinder"
|
||||
---help---
|
||||
Default value for the binder.devices parameter.
|
||||
|
||||
|
@ -13,8 +13,22 @@ menuconfig AUXDISPLAY
|
||||
|
||||
If you say N, all options in this submenu will be skipped and disabled.
|
||||
|
||||
config CHARLCD
|
||||
tristate "Character LCD core support" if COMPILE_TEST
|
||||
|
||||
if AUXDISPLAY
|
||||
|
||||
config HD44780
|
||||
tristate "HD44780 Character LCD support"
|
||||
depends on GPIOLIB || COMPILE_TEST
|
||||
select CHARLCD
|
||||
---help---
|
||||
Enable support for Character LCDs using a HD44780 controller.
|
||||
The LCD is accessible through the /dev/lcd char device (10, 156).
|
||||
This code can either be compiled as a module, or linked into the
|
||||
kernel and started at boot.
|
||||
If you don't understand what all this is about, say N.
|
||||
|
||||
config KS0108
|
||||
tristate "KS0108 LCD Controller"
|
||||
depends on PARPORT_PC
|
||||
|
@ -2,7 +2,9 @@
|
||||
# Makefile for the kernel auxiliary displays device drivers.
|
||||
#
|
||||
|
||||
obj-$(CONFIG_CHARLCD) += charlcd.o
|
||||
obj-$(CONFIG_KS0108) += ks0108.o
|
||||
obj-$(CONFIG_CFAG12864B) += cfag12864b.o cfag12864bfb.o
|
||||
obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o
|
||||
obj-$(CONFIG_HD44780) += hd44780.o
|
||||
obj-$(CONFIG_HT16K33) += ht16k33.o
|
||||
|
818
drivers/auxdisplay/charlcd.c
Normal file
818
drivers/auxdisplay/charlcd.c
Normal file
@ -0,0 +1,818 @@
|
||||
/*
|
||||
* Character LCD driver for Linux
|
||||
*
|
||||
* Copyright (C) 2000-2008, Willy Tarreau <w@1wt.eu>
|
||||
* Copyright (C) 2016-2017 Glider bvba
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/miscdevice.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/reboot.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
#include <generated/utsrelease.h>
|
||||
|
||||
#include <misc/charlcd.h>
|
||||
|
||||
#define LCD_MINOR 156
|
||||
|
||||
#define DEFAULT_LCD_BWIDTH 40
|
||||
#define DEFAULT_LCD_HWIDTH 64
|
||||
|
||||
/* Keep the backlight on this many seconds for each flash */
|
||||
#define LCD_BL_TEMPO_PERIOD 4
|
||||
|
||||
#define LCD_FLAG_B 0x0004 /* Blink on */
|
||||
#define LCD_FLAG_C 0x0008 /* Cursor on */
|
||||
#define LCD_FLAG_D 0x0010 /* Display on */
|
||||
#define LCD_FLAG_F 0x0020 /* Large font mode */
|
||||
#define LCD_FLAG_N 0x0040 /* 2-rows mode */
|
||||
#define LCD_FLAG_L 0x0080 /* Backlight enabled */
|
||||
|
||||
/* LCD commands */
|
||||
#define LCD_CMD_DISPLAY_CLEAR 0x01 /* Clear entire display */
|
||||
|
||||
#define LCD_CMD_ENTRY_MODE 0x04 /* Set entry mode */
|
||||
#define LCD_CMD_CURSOR_INC 0x02 /* Increment cursor */
|
||||
|
||||
#define LCD_CMD_DISPLAY_CTRL 0x08 /* Display control */
|
||||
#define LCD_CMD_DISPLAY_ON 0x04 /* Set display on */
|
||||
#define LCD_CMD_CURSOR_ON 0x02 /* Set cursor on */
|
||||
#define LCD_CMD_BLINK_ON 0x01 /* Set blink on */
|
||||
|
||||
#define LCD_CMD_SHIFT 0x10 /* Shift cursor/display */
|
||||
#define LCD_CMD_DISPLAY_SHIFT 0x08 /* Shift display instead of cursor */
|
||||
#define LCD_CMD_SHIFT_RIGHT 0x04 /* Shift display/cursor to the right */
|
||||
|
||||
#define LCD_CMD_FUNCTION_SET 0x20 /* Set function */
|
||||
#define LCD_CMD_DATA_LEN_8BITS 0x10 /* Set data length to 8 bits */
|
||||
#define LCD_CMD_TWO_LINES 0x08 /* Set to two display lines */
|
||||
#define LCD_CMD_FONT_5X10_DOTS 0x04 /* Set char font to 5x10 dots */
|
||||
|
||||
#define LCD_CMD_SET_CGRAM_ADDR 0x40 /* Set char generator RAM address */
|
||||
|
||||
#define LCD_CMD_SET_DDRAM_ADDR 0x80 /* Set display data RAM address */
|
||||
|
||||
#define LCD_ESCAPE_LEN 24 /* Max chars for LCD escape command */
|
||||
#define LCD_ESCAPE_CHAR 27 /* Use char 27 for escape command */
|
||||
|
||||
struct charlcd_priv {
|
||||
struct charlcd lcd;
|
||||
|
||||
struct delayed_work bl_work;
|
||||
struct mutex bl_tempo_lock; /* Protects access to bl_tempo */
|
||||
bool bl_tempo;
|
||||
|
||||
bool must_clear;
|
||||
|
||||
/* contains the LCD config state */
|
||||
unsigned long int flags;
|
||||
|
||||
/* Contains the LCD X and Y offset */
|
||||
struct {
|
||||
unsigned long int x;
|
||||
unsigned long int y;
|
||||
} addr;
|
||||
|
||||
/* Current escape sequence and it's length or -1 if outside */
|
||||
struct {
|
||||
char buf[LCD_ESCAPE_LEN + 1];
|
||||
int len;
|
||||
} esc_seq;
|
||||
|
||||
unsigned long long drvdata[0];
|
||||
};
|
||||
|
||||
#define to_priv(p) container_of(p, struct charlcd_priv, lcd)
|
||||
|
||||
/* Device single-open policy control */
|
||||
static atomic_t charlcd_available = ATOMIC_INIT(1);
|
||||
|
||||
/* sleeps that many milliseconds with a reschedule */
|
||||
static void long_sleep(int ms)
|
||||
{
|
||||
if (in_interrupt())
|
||||
mdelay(ms);
|
||||
else
|
||||
schedule_timeout_interruptible(msecs_to_jiffies(ms));
|
||||
}
|
||||
|
||||
/* turn the backlight on or off */
|
||||
static void charlcd_backlight(struct charlcd *lcd, int on)
|
||||
{
|
||||
struct charlcd_priv *priv = to_priv(lcd);
|
||||
|
||||
if (!lcd->ops->backlight)
|
||||
return;
|
||||
|
||||
mutex_lock(&priv->bl_tempo_lock);
|
||||
if (!priv->bl_tempo)
|
||||
lcd->ops->backlight(lcd, on);
|
||||
mutex_unlock(&priv->bl_tempo_lock);
|
||||
}
|
||||
|
||||
static void charlcd_bl_off(struct work_struct *work)
|
||||
{
|
||||
struct delayed_work *dwork = to_delayed_work(work);
|
||||
struct charlcd_priv *priv =
|
||||
container_of(dwork, struct charlcd_priv, bl_work);
|
||||
|
||||
mutex_lock(&priv->bl_tempo_lock);
|
||||
if (priv->bl_tempo) {
|
||||
priv->bl_tempo = false;
|
||||
if (!(priv->flags & LCD_FLAG_L))
|
||||
priv->lcd.ops->backlight(&priv->lcd, 0);
|
||||
}
|
||||
mutex_unlock(&priv->bl_tempo_lock);
|
||||
}
|
||||
|
||||
/* turn the backlight on for a little while */
|
||||
void charlcd_poke(struct charlcd *lcd)
|
||||
{
|
||||
struct charlcd_priv *priv = to_priv(lcd);
|
||||
|
||||
if (!lcd->ops->backlight)
|
||||
return;
|
||||
|
||||
cancel_delayed_work_sync(&priv->bl_work);
|
||||
|
||||
mutex_lock(&priv->bl_tempo_lock);
|
||||
if (!priv->bl_tempo && !(priv->flags & LCD_FLAG_L))
|
||||
lcd->ops->backlight(lcd, 1);
|
||||
priv->bl_tempo = true;
|
||||
schedule_delayed_work(&priv->bl_work, LCD_BL_TEMPO_PERIOD * HZ);
|
||||
mutex_unlock(&priv->bl_tempo_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(charlcd_poke);
|
||||
|
||||
static void charlcd_gotoxy(struct charlcd *lcd)
|
||||
{
|
||||
struct charlcd_priv *priv = to_priv(lcd);
|
||||
unsigned int addr;
|
||||
|
||||
/*
|
||||
* we force the cursor to stay at the end of the
|
||||
* line if it wants to go farther
|
||||
*/
|
||||
addr = priv->addr.x < lcd->bwidth ? priv->addr.x & (lcd->hwidth - 1)
|
||||
: lcd->bwidth - 1;
|
||||
if (priv->addr.y & 1)
|
||||
addr += lcd->hwidth;
|
||||
if (priv->addr.y & 2)
|
||||
addr += lcd->bwidth;
|
||||
lcd->ops->write_cmd(lcd, LCD_CMD_SET_DDRAM_ADDR | addr);
|
||||
}
|
||||
|
||||
static void charlcd_home(struct charlcd *lcd)
|
||||
{
|
||||
struct charlcd_priv *priv = to_priv(lcd);
|
||||
|
||||
priv->addr.x = 0;
|
||||
priv->addr.y = 0;
|
||||
charlcd_gotoxy(lcd);
|
||||
}
|
||||
|
||||
static void charlcd_print(struct charlcd *lcd, char c)
|
||||
{
|
||||
struct charlcd_priv *priv = to_priv(lcd);
|
||||
|
||||
if (priv->addr.x < lcd->bwidth) {
|
||||
if (lcd->char_conv)
|
||||
c = lcd->char_conv[(unsigned char)c];
|
||||
lcd->ops->write_data(lcd, c);
|
||||
priv->addr.x++;
|
||||
}
|
||||
/* prevents the cursor from wrapping onto the next line */
|
||||
if (priv->addr.x == lcd->bwidth)
|
||||
charlcd_gotoxy(lcd);
|
||||
}
|
||||
|
||||
static void charlcd_clear_fast(struct charlcd *lcd)
|
||||
{
|
||||
int pos;
|
||||
|
||||
charlcd_home(lcd);
|
||||
|
||||
if (lcd->ops->clear_fast)
|
||||
lcd->ops->clear_fast(lcd);
|
||||
else
|
||||
for (pos = 0; pos < min(2, lcd->height) * lcd->hwidth; pos++)
|
||||
lcd->ops->write_data(lcd, ' ');
|
||||
|
||||
charlcd_home(lcd);
|
||||
}
|
||||
|
||||
/* clears the display and resets X/Y */
|
||||
static void charlcd_clear_display(struct charlcd *lcd)
|
||||
{
|
||||
struct charlcd_priv *priv = to_priv(lcd);
|
||||
|
||||
lcd->ops->write_cmd(lcd, LCD_CMD_DISPLAY_CLEAR);
|
||||
priv->addr.x = 0;
|
||||
priv->addr.y = 0;
|
||||
/* we must wait a few milliseconds (15) */
|
||||
long_sleep(15);
|
||||
}
|
||||
|
||||
static int charlcd_init_display(struct charlcd *lcd)
|
||||
{
|
||||
void (*write_cmd_raw)(struct charlcd *lcd, int cmd);
|
||||
struct charlcd_priv *priv = to_priv(lcd);
|
||||
u8 init;
|
||||
|
||||
if (lcd->ifwidth != 4 && lcd->ifwidth != 8)
|
||||
return -EINVAL;
|
||||
|
||||
priv->flags = ((lcd->height > 1) ? LCD_FLAG_N : 0) | LCD_FLAG_D |
|
||||
LCD_FLAG_C | LCD_FLAG_B;
|
||||
|
||||
long_sleep(20); /* wait 20 ms after power-up for the paranoid */
|
||||
|
||||
/*
|
||||
* 8-bit mode, 1 line, small fonts; let's do it 3 times, to make sure
|
||||
* the LCD is in 8-bit mode afterwards
|
||||
*/
|
||||
init = LCD_CMD_FUNCTION_SET | LCD_CMD_DATA_LEN_8BITS;
|
||||
if (lcd->ifwidth == 4) {
|
||||
init >>= 4;
|
||||
write_cmd_raw = lcd->ops->write_cmd_raw4;
|
||||
} else {
|
||||
write_cmd_raw = lcd->ops->write_cmd;
|
||||
}
|
||||
write_cmd_raw(lcd, init);
|
||||
long_sleep(10);
|
||||
write_cmd_raw(lcd, init);
|
||||
long_sleep(10);
|
||||
write_cmd_raw(lcd, init);
|
||||
long_sleep(10);
|
||||
|
||||
if (lcd->ifwidth == 4) {
|
||||
/* Switch to 4-bit mode, 1 line, small fonts */
|
||||
lcd->ops->write_cmd_raw4(lcd, LCD_CMD_FUNCTION_SET >> 4);
|
||||
long_sleep(10);
|
||||
}
|
||||
|
||||
/* set font height and lines number */
|
||||
lcd->ops->write_cmd(lcd,
|
||||
LCD_CMD_FUNCTION_SET |
|
||||
((lcd->ifwidth == 8) ? LCD_CMD_DATA_LEN_8BITS : 0) |
|
||||
((priv->flags & LCD_FLAG_F) ? LCD_CMD_FONT_5X10_DOTS : 0) |
|
||||
((priv->flags & LCD_FLAG_N) ? LCD_CMD_TWO_LINES : 0));
|
||||
long_sleep(10);
|
||||
|
||||
/* display off, cursor off, blink off */
|
||||
lcd->ops->write_cmd(lcd, LCD_CMD_DISPLAY_CTRL);
|
||||
long_sleep(10);
|
||||
|
||||
lcd->ops->write_cmd(lcd,
|
||||
LCD_CMD_DISPLAY_CTRL | /* set display mode */
|
||||
((priv->flags & LCD_FLAG_D) ? LCD_CMD_DISPLAY_ON : 0) |
|
||||
((priv->flags & LCD_FLAG_C) ? LCD_CMD_CURSOR_ON : 0) |
|
||||
((priv->flags & LCD_FLAG_B) ? LCD_CMD_BLINK_ON : 0));
|
||||
|
||||
charlcd_backlight(lcd, (priv->flags & LCD_FLAG_L) ? 1 : 0);
|
||||
|
||||
long_sleep(10);
|
||||
|
||||
/* entry mode set : increment, cursor shifting */
|
||||
lcd->ops->write_cmd(lcd, LCD_CMD_ENTRY_MODE | LCD_CMD_CURSOR_INC);
|
||||
|
||||
charlcd_clear_display(lcd);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* These are the file operation function for user access to /dev/lcd
|
||||
* This function can also be called from inside the kernel, by
|
||||
* setting file and ppos to NULL.
|
||||
*
|
||||
*/
|
||||
|
||||
static inline int handle_lcd_special_code(struct charlcd *lcd)
|
||||
{
|
||||
struct charlcd_priv *priv = to_priv(lcd);
|
||||
|
||||
/* LCD special codes */
|
||||
|
||||
int processed = 0;
|
||||
|
||||
char *esc = priv->esc_seq.buf + 2;
|
||||
int oldflags = priv->flags;
|
||||
|
||||
/* check for display mode flags */
|
||||
switch (*esc) {
|
||||
case 'D': /* Display ON */
|
||||
priv->flags |= LCD_FLAG_D;
|
||||
processed = 1;
|
||||
break;
|
||||
case 'd': /* Display OFF */
|
||||
priv->flags &= ~LCD_FLAG_D;
|
||||
processed = 1;
|
||||
break;
|
||||
case 'C': /* Cursor ON */
|
||||
priv->flags |= LCD_FLAG_C;
|
||||
processed = 1;
|
||||
break;
|
||||
case 'c': /* Cursor OFF */
|
||||
priv->flags &= ~LCD_FLAG_C;
|
||||
processed = 1;
|
||||
break;
|
||||
case 'B': /* Blink ON */
|
||||
priv->flags |= LCD_FLAG_B;
|
||||
processed = 1;
|
||||
break;
|
||||
case 'b': /* Blink OFF */
|
||||
priv->flags &= ~LCD_FLAG_B;
|
||||
processed = 1;
|
||||
break;
|
||||
case '+': /* Back light ON */
|
||||
priv->flags |= LCD_FLAG_L;
|
||||
processed = 1;
|
||||
break;
|
||||
case '-': /* Back light OFF */
|
||||
priv->flags &= ~LCD_FLAG_L;
|
||||
processed = 1;
|
||||
break;
|
||||
case '*': /* Flash back light */
|
||||
charlcd_poke(lcd);
|
||||
processed = 1;
|
||||
break;
|
||||
case 'f': /* Small Font */
|
||||
priv->flags &= ~LCD_FLAG_F;
|
||||
processed = 1;
|
||||
break;
|
||||
case 'F': /* Large Font */
|
||||
priv->flags |= LCD_FLAG_F;
|
||||
processed = 1;
|
||||
break;
|
||||
case 'n': /* One Line */
|
||||
priv->flags &= ~LCD_FLAG_N;
|
||||
processed = 1;
|
||||
break;
|
||||
case 'N': /* Two Lines */
|
||||
priv->flags |= LCD_FLAG_N;
|
||||
break;
|
||||
case 'l': /* Shift Cursor Left */
|
||||
if (priv->addr.x > 0) {
|
||||
/* back one char if not at end of line */
|
||||
if (priv->addr.x < lcd->bwidth)
|
||||
lcd->ops->write_cmd(lcd, LCD_CMD_SHIFT);
|
||||
priv->addr.x--;
|
||||
}
|
||||
processed = 1;
|
||||
break;
|
||||
case 'r': /* shift cursor right */
|
||||
if (priv->addr.x < lcd->width) {
|
||||
/* allow the cursor to pass the end of the line */
|
||||
if (priv->addr.x < (lcd->bwidth - 1))
|
||||
lcd->ops->write_cmd(lcd,
|
||||
LCD_CMD_SHIFT | LCD_CMD_SHIFT_RIGHT);
|
||||
priv->addr.x++;
|
||||
}
|
||||
processed = 1;
|
||||
break;
|
||||
case 'L': /* shift display left */
|
||||
lcd->ops->write_cmd(lcd, LCD_CMD_SHIFT | LCD_CMD_DISPLAY_SHIFT);
|
||||
processed = 1;
|
||||
break;
|
||||
case 'R': /* shift display right */
|
||||
lcd->ops->write_cmd(lcd,
|
||||
LCD_CMD_SHIFT | LCD_CMD_DISPLAY_SHIFT |
|
||||
LCD_CMD_SHIFT_RIGHT);
|
||||
processed = 1;
|
||||
break;
|
||||
case 'k': { /* kill end of line */
|
||||
int x;
|
||||
|
||||
for (x = priv->addr.x; x < lcd->bwidth; x++)
|
||||
lcd->ops->write_data(lcd, ' ');
|
||||
|
||||
/* restore cursor position */
|
||||
charlcd_gotoxy(lcd);
|
||||
processed = 1;
|
||||
break;
|
||||
}
|
||||
case 'I': /* reinitialize display */
|
||||
charlcd_init_display(lcd);
|
||||
processed = 1;
|
||||
break;
|
||||
case 'G': {
|
||||
/* Generator : LGcxxxxx...xx; must have <c> between '0'
|
||||
* and '7', representing the numerical ASCII code of the
|
||||
* redefined character, and <xx...xx> a sequence of 16
|
||||
* hex digits representing 8 bytes for each character.
|
||||
* Most LCDs will only use 5 lower bits of the 7 first
|
||||
* bytes.
|
||||
*/
|
||||
|
||||
unsigned char cgbytes[8];
|
||||
unsigned char cgaddr;
|
||||
int cgoffset;
|
||||
int shift;
|
||||
char value;
|
||||
int addr;
|
||||
|
||||
if (!strchr(esc, ';'))
|
||||
break;
|
||||
|
||||
esc++;
|
||||
|
||||
cgaddr = *(esc++) - '0';
|
||||
if (cgaddr > 7) {
|
||||
processed = 1;
|
||||
break;
|
||||
}
|
||||
|
||||
cgoffset = 0;
|
||||
shift = 0;
|
||||
value = 0;
|
||||
while (*esc && cgoffset < 8) {
|
||||
shift ^= 4;
|
||||
if (*esc >= '0' && *esc <= '9') {
|
||||
value |= (*esc - '0') << shift;
|
||||
} else if (*esc >= 'A' && *esc <= 'Z') {
|
||||
value |= (*esc - 'A' + 10) << shift;
|
||||
} else if (*esc >= 'a' && *esc <= 'z') {
|
||||
value |= (*esc - 'a' + 10) << shift;
|
||||
} else {
|
||||
esc++;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (shift == 0) {
|
||||
cgbytes[cgoffset++] = value;
|
||||
value = 0;
|
||||
}
|
||||
|
||||
esc++;
|
||||
}
|
||||
|
||||
lcd->ops->write_cmd(lcd, LCD_CMD_SET_CGRAM_ADDR | (cgaddr * 8));
|
||||
for (addr = 0; addr < cgoffset; addr++)
|
||||
lcd->ops->write_data(lcd, cgbytes[addr]);
|
||||
|
||||
/* ensures that we stop writing to CGRAM */
|
||||
charlcd_gotoxy(lcd);
|
||||
processed = 1;
|
||||
break;
|
||||
}
|
||||
case 'x': /* gotoxy : LxXXX[yYYY]; */
|
||||
case 'y': /* gotoxy : LyYYY[xXXX]; */
|
||||
if (!strchr(esc, ';'))
|
||||
break;
|
||||
|
||||
while (*esc) {
|
||||
if (*esc == 'x') {
|
||||
esc++;
|
||||
if (kstrtoul(esc, 10, &priv->addr.x) < 0)
|
||||
break;
|
||||
} else if (*esc == 'y') {
|
||||
esc++;
|
||||
if (kstrtoul(esc, 10, &priv->addr.y) < 0)
|
||||
break;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
charlcd_gotoxy(lcd);
|
||||
processed = 1;
|
||||
break;
|
||||
}
|
||||
|
||||
/* TODO: This indent party here got ugly, clean it! */
|
||||
/* Check whether one flag was changed */
|
||||
if (oldflags == priv->flags)
|
||||
return processed;
|
||||
|
||||
/* check whether one of B,C,D flags were changed */
|
||||
if ((oldflags ^ priv->flags) &
|
||||
(LCD_FLAG_B | LCD_FLAG_C | LCD_FLAG_D))
|
||||
/* set display mode */
|
||||
lcd->ops->write_cmd(lcd,
|
||||
LCD_CMD_DISPLAY_CTRL |
|
||||
((priv->flags & LCD_FLAG_D) ? LCD_CMD_DISPLAY_ON : 0) |
|
||||
((priv->flags & LCD_FLAG_C) ? LCD_CMD_CURSOR_ON : 0) |
|
||||
((priv->flags & LCD_FLAG_B) ? LCD_CMD_BLINK_ON : 0));
|
||||
/* check whether one of F,N flags was changed */
|
||||
else if ((oldflags ^ priv->flags) & (LCD_FLAG_F | LCD_FLAG_N))
|
||||
lcd->ops->write_cmd(lcd,
|
||||
LCD_CMD_FUNCTION_SET |
|
||||
((lcd->ifwidth == 8) ? LCD_CMD_DATA_LEN_8BITS : 0) |
|
||||
((priv->flags & LCD_FLAG_F) ? LCD_CMD_FONT_5X10_DOTS : 0) |
|
||||
((priv->flags & LCD_FLAG_N) ? LCD_CMD_TWO_LINES : 0));
|
||||
/* check whether L flag was changed */
|
||||
else if ((oldflags ^ priv->flags) & LCD_FLAG_L)
|
||||
charlcd_backlight(lcd, !!(priv->flags & LCD_FLAG_L));
|
||||
|
||||
return processed;
|
||||
}
|
||||
|
||||
static void charlcd_write_char(struct charlcd *lcd, char c)
|
||||
{
|
||||
struct charlcd_priv *priv = to_priv(lcd);
|
||||
|
||||
/* first, we'll test if we're in escape mode */
|
||||
if ((c != '\n') && priv->esc_seq.len >= 0) {
|
||||
/* yes, let's add this char to the buffer */
|
||||
priv->esc_seq.buf[priv->esc_seq.len++] = c;
|
||||
priv->esc_seq.buf[priv->esc_seq.len] = 0;
|
||||
} else {
|
||||
/* aborts any previous escape sequence */
|
||||
priv->esc_seq.len = -1;
|
||||
|
||||
switch (c) {
|
||||
case LCD_ESCAPE_CHAR:
|
||||
/* start of an escape sequence */
|
||||
priv->esc_seq.len = 0;
|
||||
priv->esc_seq.buf[priv->esc_seq.len] = 0;
|
||||
break;
|
||||
case '\b':
|
||||
/* go back one char and clear it */
|
||||
if (priv->addr.x > 0) {
|
||||
/*
|
||||
* check if we're not at the
|
||||
* end of the line
|
||||
*/
|
||||
if (priv->addr.x < lcd->bwidth)
|
||||
/* back one char */
|
||||
lcd->ops->write_cmd(lcd, LCD_CMD_SHIFT);
|
||||
priv->addr.x--;
|
||||
}
|
||||
/* replace with a space */
|
||||
lcd->ops->write_data(lcd, ' ');
|
||||
/* back one char again */
|
||||
lcd->ops->write_cmd(lcd, LCD_CMD_SHIFT);
|
||||
break;
|
||||
case '\014':
|
||||
/* quickly clear the display */
|
||||
charlcd_clear_fast(lcd);
|
||||
break;
|
||||
case '\n':
|
||||
/*
|
||||
* flush the remainder of the current line and
|
||||
* go to the beginning of the next line
|
||||
*/
|
||||
for (; priv->addr.x < lcd->bwidth; priv->addr.x++)
|
||||
lcd->ops->write_data(lcd, ' ');
|
||||
priv->addr.x = 0;
|
||||
priv->addr.y = (priv->addr.y + 1) % lcd->height;
|
||||
charlcd_gotoxy(lcd);
|
||||
break;
|
||||
case '\r':
|
||||
/* go to the beginning of the same line */
|
||||
priv->addr.x = 0;
|
||||
charlcd_gotoxy(lcd);
|
||||
break;
|
||||
case '\t':
|
||||
/* print a space instead of the tab */
|
||||
charlcd_print(lcd, ' ');
|
||||
break;
|
||||
default:
|
||||
/* simply print this char */
|
||||
charlcd_print(lcd, c);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* now we'll see if we're in an escape mode and if the current
|
||||
* escape sequence can be understood.
|
||||
*/
|
||||
if (priv->esc_seq.len >= 2) {
|
||||
int processed = 0;
|
||||
|
||||
if (!strcmp(priv->esc_seq.buf, "[2J")) {
|
||||
/* clear the display */
|
||||
charlcd_clear_fast(lcd);
|
||||
processed = 1;
|
||||
} else if (!strcmp(priv->esc_seq.buf, "[H")) {
|
||||
/* cursor to home */
|
||||
charlcd_home(lcd);
|
||||
processed = 1;
|
||||
}
|
||||
/* codes starting with ^[[L */
|
||||
else if ((priv->esc_seq.len >= 3) &&
|
||||
(priv->esc_seq.buf[0] == '[') &&
|
||||
(priv->esc_seq.buf[1] == 'L')) {
|
||||
processed = handle_lcd_special_code(lcd);
|
||||
}
|
||||
|
||||
/* LCD special escape codes */
|
||||
/*
|
||||
* flush the escape sequence if it's been processed
|
||||
* or if it is getting too long.
|
||||
*/
|
||||
if (processed || (priv->esc_seq.len >= LCD_ESCAPE_LEN))
|
||||
priv->esc_seq.len = -1;
|
||||
} /* escape codes */
|
||||
}
|
||||
|
||||
static struct charlcd *the_charlcd;
|
||||
|
||||
static ssize_t charlcd_write(struct file *file, const char __user *buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
const char __user *tmp = buf;
|
||||
char c;
|
||||
|
||||
for (; count-- > 0; (*ppos)++, tmp++) {
|
||||
if (!in_interrupt() && (((count + 1) & 0x1f) == 0))
|
||||
/*
|
||||
* let's be a little nice with other processes
|
||||
* that need some CPU
|
||||
*/
|
||||
schedule();
|
||||
|
||||
if (get_user(c, tmp))
|
||||
return -EFAULT;
|
||||
|
||||
charlcd_write_char(the_charlcd, c);
|
||||
}
|
||||
|
||||
return tmp - buf;
|
||||
}
|
||||
|
||||
static int charlcd_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct charlcd_priv *priv = to_priv(the_charlcd);
|
||||
|
||||
if (!atomic_dec_and_test(&charlcd_available))
|
||||
return -EBUSY; /* open only once at a time */
|
||||
|
||||
if (file->f_mode & FMODE_READ) /* device is write-only */
|
||||
return -EPERM;
|
||||
|
||||
if (priv->must_clear) {
|
||||
charlcd_clear_display(&priv->lcd);
|
||||
priv->must_clear = false;
|
||||
}
|
||||
return nonseekable_open(inode, file);
|
||||
}
|
||||
|
||||
static int charlcd_release(struct inode *inode, struct file *file)
|
||||
{
|
||||
atomic_inc(&charlcd_available);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct file_operations charlcd_fops = {
|
||||
.write = charlcd_write,
|
||||
.open = charlcd_open,
|
||||
.release = charlcd_release,
|
||||
.llseek = no_llseek,
|
||||
};
|
||||
|
||||
static struct miscdevice charlcd_dev = {
|
||||
.minor = LCD_MINOR,
|
||||
.name = "lcd",
|
||||
.fops = &charlcd_fops,
|
||||
};
|
||||
|
||||
static void charlcd_puts(struct charlcd *lcd, const char *s)
|
||||
{
|
||||
const char *tmp = s;
|
||||
int count = strlen(s);
|
||||
|
||||
for (; count-- > 0; tmp++) {
|
||||
if (!in_interrupt() && (((count + 1) & 0x1f) == 0))
|
||||
/*
|
||||
* let's be a little nice with other processes
|
||||
* that need some CPU
|
||||
*/
|
||||
schedule();
|
||||
|
||||
charlcd_write_char(lcd, *tmp);
|
||||
}
|
||||
}
|
||||
|
||||
/* initialize the LCD driver */
|
||||
static int charlcd_init(struct charlcd *lcd)
|
||||
{
|
||||
struct charlcd_priv *priv = to_priv(lcd);
|
||||
int ret;
|
||||
|
||||
if (lcd->ops->backlight) {
|
||||
mutex_init(&priv->bl_tempo_lock);
|
||||
INIT_DELAYED_WORK(&priv->bl_work, charlcd_bl_off);
|
||||
}
|
||||
|
||||
/*
|
||||
* before this line, we must NOT send anything to the display.
|
||||
* Since charlcd_init_display() needs to write data, we have to
|
||||
* enable mark the LCD initialized just before.
|
||||
*/
|
||||
ret = charlcd_init_display(lcd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* display a short message */
|
||||
#ifdef CONFIG_PANEL_CHANGE_MESSAGE
|
||||
#ifdef CONFIG_PANEL_BOOT_MESSAGE
|
||||
charlcd_puts(lcd, "\x1b[Lc\x1b[Lb\x1b[L*" CONFIG_PANEL_BOOT_MESSAGE);
|
||||
#endif
|
||||
#else
|
||||
charlcd_puts(lcd, "\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE "\n");
|
||||
#endif
|
||||
/* clear the display on the next device opening */
|
||||
priv->must_clear = true;
|
||||
charlcd_home(lcd);
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct charlcd *charlcd_alloc(unsigned int drvdata_size)
|
||||
{
|
||||
struct charlcd_priv *priv;
|
||||
struct charlcd *lcd;
|
||||
|
||||
priv = kzalloc(sizeof(*priv) + drvdata_size, GFP_KERNEL);
|
||||
if (!priv)
|
||||
return NULL;
|
||||
|
||||
priv->esc_seq.len = -1;
|
||||
|
||||
lcd = &priv->lcd;
|
||||
lcd->ifwidth = 8;
|
||||
lcd->bwidth = DEFAULT_LCD_BWIDTH;
|
||||
lcd->hwidth = DEFAULT_LCD_HWIDTH;
|
||||
lcd->drvdata = priv->drvdata;
|
||||
|
||||
return lcd;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(charlcd_alloc);
|
||||
|
||||
static int panel_notify_sys(struct notifier_block *this, unsigned long code,
|
||||
void *unused)
|
||||
{
|
||||
struct charlcd *lcd = the_charlcd;
|
||||
|
||||
switch (code) {
|
||||
case SYS_DOWN:
|
||||
charlcd_puts(lcd,
|
||||
"\x0cReloading\nSystem...\x1b[Lc\x1b[Lb\x1b[L+");
|
||||
break;
|
||||
case SYS_HALT:
|
||||
charlcd_puts(lcd, "\x0cSystem Halted.\x1b[Lc\x1b[Lb\x1b[L+");
|
||||
break;
|
||||
case SYS_POWER_OFF:
|
||||
charlcd_puts(lcd, "\x0cPower off.\x1b[Lc\x1b[Lb\x1b[L+");
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
static struct notifier_block panel_notifier = {
|
||||
panel_notify_sys,
|
||||
NULL,
|
||||
0
|
||||
};
|
||||
|
||||
/*
 * Register a charlcd instance: initialize the display, expose the misc
 * character device and hook a reboot notifier for shutdown messages.
 * Returns 0 on success or a negative error code.
 */
int charlcd_register(struct charlcd *lcd)
{
	int ret;

	ret = charlcd_init(lcd);
	if (ret)
		return ret;

	ret = misc_register(&charlcd_dev);
	if (ret)
		return ret;

	/* only publish the device once it is fully operational */
	the_charlcd = lcd;
	register_reboot_notifier(&panel_notifier);
	return 0;
}
EXPORT_SYMBOL_GPL(charlcd_register);
|
||||
|
||||
/*
 * Tear down a previously registered charlcd instance: print a farewell
 * message, remove the misc device, and stop any pending backlight work.
 * Always returns 0. Note: does not free the charlcd allocation itself.
 */
int charlcd_unregister(struct charlcd *lcd)
{
	struct charlcd_priv *priv = to_priv(lcd);

	unregister_reboot_notifier(&panel_notifier);
	charlcd_puts(lcd, "\x0cLCD driver unloaded.\x1b[Lc\x1b[Lb\x1b[L-");
	misc_deregister(&charlcd_dev);
	the_charlcd = NULL;
	if (lcd->ops->backlight) {
		/* make sure no delayed backlight work runs after we return */
		cancel_delayed_work_sync(&priv->bl_work);
		priv->lcd.ops->backlight(&priv->lcd, 0);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(charlcd_unregister);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
325
drivers/auxdisplay/hd44780.c
Normal file
325
drivers/auxdisplay/hd44780.c
Normal file
@ -0,0 +1,325 @@
|
||||
/*
|
||||
* HD44780 Character LCD driver for Linux
|
||||
*
|
||||
* Copyright (C) 2000-2008, Willy Tarreau <w@1wt.eu>
|
||||
* Copyright (C) 2016-2017 Glider bvba
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/gpio/consumer.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/property.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <misc/charlcd.h>
|
||||
|
||||
|
||||
/*
 * GPIO lines driving the HD44780 bus interface. The declaration order is
 * load-bearing: gpiod_set_array_value_cansleep() is invoked on contiguous
 * subsets of the pins[] array indexed by these values.
 */
enum hd44780_pin {
	/* Order does matter due to writing to GPIO array subsets! */
	PIN_DATA0,	/* Optional */
	PIN_DATA1,	/* Optional */
	PIN_DATA2,	/* Optional */
	PIN_DATA3,	/* Optional */
	PIN_DATA4,
	PIN_DATA5,
	PIN_DATA6,
	PIN_DATA7,
	PIN_CTRL_RS,
	PIN_CTRL_RW,	/* Optional */
	PIN_CTRL_E,
	PIN_CTRL_BL,	/* Optional */
	PIN_NUM
};

/* Per-device driver data: one GPIO descriptor per (possibly absent) pin. */
struct hd44780 {
	struct gpio_desc *pins[PIN_NUM];
};
|
||||
|
||||
static void hd44780_backlight(struct charlcd *lcd, int on)
|
||||
{
|
||||
struct hd44780 *hd = lcd->drvdata;
|
||||
|
||||
if (hd->pins[PIN_CTRL_BL])
|
||||
gpiod_set_value_cansleep(hd->pins[PIN_CTRL_BL], on);
|
||||
}
|
||||
|
||||
static void hd44780_strobe_gpio(struct hd44780 *hd)
|
||||
{
|
||||
/* Maintain the data during 20 us before the strobe */
|
||||
udelay(20);
|
||||
|
||||
gpiod_set_value_cansleep(hd->pins[PIN_CTRL_E], 1);
|
||||
|
||||
/* Maintain the strobe during 40 us */
|
||||
udelay(40);
|
||||
|
||||
gpiod_set_value_cansleep(hd->pins[PIN_CTRL_E], 0);
|
||||
}
|
||||
|
||||
/* write to an LCD panel register in 8 bit GPIO mode */
|
||||
static void hd44780_write_gpio8(struct hd44780 *hd, u8 val, unsigned int rs)
|
||||
{
|
||||
int values[10]; /* for DATA[0-7], RS, RW */
|
||||
unsigned int i, n;
|
||||
|
||||
for (i = 0; i < 8; i++)
|
||||
values[PIN_DATA0 + i] = !!(val & BIT(i));
|
||||
values[PIN_CTRL_RS] = rs;
|
||||
n = 9;
|
||||
if (hd->pins[PIN_CTRL_RW]) {
|
||||
values[PIN_CTRL_RW] = 0;
|
||||
n++;
|
||||
}
|
||||
|
||||
/* Present the data to the port */
|
||||
gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA0], values);
|
||||
|
||||
hd44780_strobe_gpio(hd);
|
||||
}
|
||||
|
||||
/* write to an LCD panel register in 4 bit GPIO mode */
|
||||
static void hd44780_write_gpio4(struct hd44780 *hd, u8 val, unsigned int rs)
|
||||
{
|
||||
int values[10]; /* for DATA[0-7], RS, RW, but DATA[0-3] is unused */
|
||||
unsigned int i, n;
|
||||
|
||||
/* High nibble + RS, RW */
|
||||
for (i = 4; i < 8; i++)
|
||||
values[PIN_DATA0 + i] = !!(val & BIT(i));
|
||||
values[PIN_CTRL_RS] = rs;
|
||||
n = 5;
|
||||
if (hd->pins[PIN_CTRL_RW]) {
|
||||
values[PIN_CTRL_RW] = 0;
|
||||
n++;
|
||||
}
|
||||
|
||||
/* Present the data to the port */
|
||||
gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA4],
|
||||
&values[PIN_DATA4]);
|
||||
|
||||
hd44780_strobe_gpio(hd);
|
||||
|
||||
/* Low nibble */
|
||||
for (i = 0; i < 4; i++)
|
||||
values[PIN_DATA4 + i] = !!(val & BIT(i));
|
||||
|
||||
/* Present the data to the port */
|
||||
gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA4],
|
||||
&values[PIN_DATA4]);
|
||||
|
||||
hd44780_strobe_gpio(hd);
|
||||
}
|
||||
|
||||
/* Send a command byte to the LCD panel in 8 bit GPIO mode. */
static void hd44780_write_cmd_gpio8(struct charlcd *lcd, int cmd)
{
	struct hd44780 *hd = lcd->drvdata;

	hd44780_write_gpio8(hd, cmd, 0);

	/* The shortest command takes at least 120 us */
	udelay(120);
}

/* Send a data byte (RS high) to the LCD panel in 8 bit GPIO mode. */
static void hd44780_write_data_gpio8(struct charlcd *lcd, int data)
{
	struct hd44780 *hd = lcd->drvdata;

	hd44780_write_gpio8(hd, data, 1);

	/* The shortest data takes at least 45 us */
	udelay(45);
}

/* charlcd callbacks for panels wired in 8-bit bus mode */
static const struct charlcd_ops hd44780_ops_gpio8 = {
	.write_cmd = hd44780_write_cmd_gpio8,
	.write_data = hd44780_write_data_gpio8,
	.backlight = hd44780_backlight,
};
|
||||
|
||||
/* Send a command byte to the LCD panel in 4 bit GPIO mode. */
static void hd44780_write_cmd_gpio4(struct charlcd *lcd, int cmd)
{
	struct hd44780 *hd = lcd->drvdata;

	hd44780_write_gpio4(hd, cmd, 0);

	/* The shortest command takes at least 120 us */
	udelay(120);
}

/*
 * Send 4 bits of a command to the LCD panel in raw 4 bit GPIO mode:
 * a single-nibble write, needed while switching the controller into
 * 4-bit mode during initialization.
 */
static void hd44780_write_cmd_raw_gpio4(struct charlcd *lcd, int cmd)
{
	int values[10];	/* for DATA[0-7], RS, RW, but DATA[0-3] is unused */
	struct hd44780 *hd = lcd->drvdata;
	unsigned int i, n;

	/* Command nibble + RS, RW */
	for (i = 0; i < 4; i++)
		values[PIN_DATA4 + i] = !!(cmd & BIT(i));
	values[PIN_CTRL_RS] = 0;
	n = 5;
	if (hd->pins[PIN_CTRL_RW]) {
		values[PIN_CTRL_RW] = 0;
		n++;
	}

	/* Present the data to the port */
	gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA4],
				       &values[PIN_DATA4]);

	hd44780_strobe_gpio(hd);
}

/* Send a data byte (RS high) to the LCD panel in 4 bit GPIO mode. */
static void hd44780_write_data_gpio4(struct charlcd *lcd, int data)
{
	struct hd44780 *hd = lcd->drvdata;

	hd44780_write_gpio4(hd, data, 1);

	/* The shortest data takes at least 45 us */
	udelay(45);
}

/* charlcd callbacks for panels wired in 4-bit bus mode */
static const struct charlcd_ops hd44780_ops_gpio4 = {
	.write_cmd = hd44780_write_cmd_gpio4,
	.write_cmd_raw4 = hd44780_write_cmd_raw_gpio4,
	.write_data = hd44780_write_data_gpio4,
	.backlight = hd44780_backlight,
};
|
||||
|
||||
/*
 * Bind the driver to a device-tree described HD44780 panel: look up the
 * data/control GPIOs, read the display geometry properties, and register
 * a charlcd instance in either 4-bit or 8-bit bus mode.
 * Returns 0 on success or a negative error code.
 */
static int hd44780_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	unsigned int i, base;
	struct charlcd *lcd;
	struct hd44780 *hd;
	int ifwidth, ret;

	/* Required pins: the number of "data" GPIOs selects the bus width */
	ifwidth = gpiod_count(dev, "data");
	if (ifwidth < 0)
		return ifwidth;

	switch (ifwidth) {
	case 4:
		base = PIN_DATA4;	/* 4-bit mode uses DB4-DB7 only */
		break;
	case 8:
		base = PIN_DATA0;
		break;
	default:
		return -EINVAL;
	}

	lcd = charlcd_alloc(sizeof(struct hd44780));
	if (!lcd)
		return -ENOMEM;

	hd = lcd->drvdata;

	for (i = 0; i < ifwidth; i++) {
		hd->pins[base + i] = devm_gpiod_get_index(dev, "data", i,
							  GPIOD_OUT_LOW);
		if (IS_ERR(hd->pins[base + i])) {
			ret = PTR_ERR(hd->pins[base + i]);
			goto fail;
		}
	}

	hd->pins[PIN_CTRL_E] = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(hd->pins[PIN_CTRL_E])) {
		ret = PTR_ERR(hd->pins[PIN_CTRL_E]);
		goto fail;
	}

	hd->pins[PIN_CTRL_RS] = devm_gpiod_get(dev, "rs", GPIOD_OUT_HIGH);
	if (IS_ERR(hd->pins[PIN_CTRL_RS])) {
		ret = PTR_ERR(hd->pins[PIN_CTRL_RS]);
		goto fail;
	}

	/* Optional pins */
	hd->pins[PIN_CTRL_RW] = devm_gpiod_get_optional(dev, "rw",
							GPIOD_OUT_LOW);
	if (IS_ERR(hd->pins[PIN_CTRL_RW])) {
		ret = PTR_ERR(hd->pins[PIN_CTRL_RW]);
		goto fail;
	}

	hd->pins[PIN_CTRL_BL] = devm_gpiod_get_optional(dev, "backlight",
							GPIOD_OUT_LOW);
	if (IS_ERR(hd->pins[PIN_CTRL_BL])) {
		ret = PTR_ERR(hd->pins[PIN_CTRL_BL]);
		goto fail;
	}

	/* Required properties */
	ret = device_property_read_u32(dev, "display-height", &lcd->height);
	if (ret)
		goto fail;
	ret = device_property_read_u32(dev, "display-width", &lcd->width);
	if (ret)
		goto fail;

	/*
	 * On displays with more than two rows, the internal buffer width is
	 * usually equal to the display width
	 */
	if (lcd->height > 2)
		lcd->bwidth = lcd->width;

	/* Optional properties: may override the buffer width guessed above */
	device_property_read_u32(dev, "internal-buffer-width", &lcd->bwidth);

	lcd->ifwidth = ifwidth;
	lcd->ops = ifwidth == 8 ? &hd44780_ops_gpio8 : &hd44780_ops_gpio4;

	ret = charlcd_register(lcd);
	if (ret)
		goto fail;

	platform_set_drvdata(pdev, lcd);
	return 0;

fail:
	/*
	 * NOTE(review): kfree(lcd) assumes the charlcd is at the start of
	 * the allocation made by charlcd_alloc() — confirm against
	 * struct charlcd_priv's layout.
	 */
	kfree(lcd);
	return ret;
}
|
||||
|
||||
/*
 * Unbind the driver: unregister the charlcd instance and release the
 * memory obtained from charlcd_alloc() in hd44780_probe().
 * charlcd_unregister() does not free the allocation itself, so without
 * the kfree() here every bind/unbind cycle leaked the charlcd.
 */
static int hd44780_remove(struct platform_device *pdev)
{
	struct charlcd *lcd = platform_get_drvdata(pdev);

	charlcd_unregister(lcd);
	kfree(lcd);
	return 0;
}
|
||||
|
||||
/* Device-tree match table; see the hit,hd44780 DT binding. */
static const struct of_device_id hd44780_of_match[] = {
	{ .compatible = "hit,hd44780" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, hd44780_of_match);

static struct platform_driver hd44780_driver = {
	.probe = hd44780_probe,
	.remove = hd44780_remove,
	.driver = {
		.name = "hd44780",
		.of_match_table = hd44780_of_match,
	},
};
module_platform_driver(hd44780_driver);

MODULE_DESCRIPTION("HD44780 Character LCD driver");
MODULE_AUTHOR("Geert Uytterhoeven <geert@linux-m68k.org>");
MODULE_LICENSE("GPL");
|
@ -339,6 +339,7 @@ config BLK_DEV_SX8
|
||||
|
||||
config BLK_DEV_RAM
|
||||
tristate "RAM block device support"
|
||||
select DAX if BLK_DEV_RAM_DAX
|
||||
---help---
|
||||
Saying Y here will allow you to use a portion of your RAM memory as
|
||||
a block device, so that you can make file systems on it, read and
|
||||
|
@ -21,6 +21,7 @@
|
||||
#include <linux/slab.h>
|
||||
#ifdef CONFIG_BLK_DEV_RAM_DAX
|
||||
#include <linux/pfn_t.h>
|
||||
#include <linux/dax.h>
|
||||
#endif
|
||||
|
||||
#include <linux/uaccess.h>
|
||||
@ -41,6 +42,9 @@ struct brd_device {
|
||||
|
||||
struct request_queue *brd_queue;
|
||||
struct gendisk *brd_disk;
|
||||
#ifdef CONFIG_BLK_DEV_RAM_DAX
|
||||
struct dax_device *dax_dev;
|
||||
#endif
|
||||
struct list_head brd_list;
|
||||
|
||||
/*
|
||||
@ -375,30 +379,38 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_RAM_DAX
|
||||
static long brd_direct_access(struct block_device *bdev, sector_t sector,
|
||||
void **kaddr, pfn_t *pfn, long size)
|
||||
static long __brd_direct_access(struct brd_device *brd, pgoff_t pgoff,
|
||||
long nr_pages, void **kaddr, pfn_t *pfn)
|
||||
{
|
||||
struct brd_device *brd = bdev->bd_disk->private_data;
|
||||
struct page *page;
|
||||
|
||||
if (!brd)
|
||||
return -ENODEV;
|
||||
page = brd_insert_page(brd, sector);
|
||||
page = brd_insert_page(brd, PFN_PHYS(pgoff) / 512);
|
||||
if (!page)
|
||||
return -ENOSPC;
|
||||
*kaddr = page_address(page);
|
||||
*pfn = page_to_pfn_t(page);
|
||||
|
||||
return PAGE_SIZE;
|
||||
return 1;
|
||||
}
|
||||
#else
|
||||
#define brd_direct_access NULL
|
||||
|
||||
static long brd_dax_direct_access(struct dax_device *dax_dev,
|
||||
pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
|
||||
{
|
||||
struct brd_device *brd = dax_get_private(dax_dev);
|
||||
|
||||
return __brd_direct_access(brd, pgoff, nr_pages, kaddr, pfn);
|
||||
}
|
||||
|
||||
static const struct dax_operations brd_dax_ops = {
|
||||
.direct_access = brd_dax_direct_access,
|
||||
};
|
||||
#endif
|
||||
|
||||
static const struct block_device_operations brd_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.rw_page = brd_rw_page,
|
||||
.direct_access = brd_direct_access,
|
||||
};
|
||||
|
||||
/*
|
||||
@ -469,9 +481,6 @@ static struct brd_device *brd_alloc(int i)
|
||||
blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
|
||||
brd->brd_queue->limits.discard_zeroes_data = 1;
|
||||
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
|
||||
#ifdef CONFIG_BLK_DEV_RAM_DAX
|
||||
queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue);
|
||||
#endif
|
||||
disk = brd->brd_disk = alloc_disk(max_part);
|
||||
if (!disk)
|
||||
goto out_free_queue;
|
||||
@ -484,8 +493,21 @@ static struct brd_device *brd_alloc(int i)
|
||||
sprintf(disk->disk_name, "ram%d", i);
|
||||
set_capacity(disk, rd_size * 2);
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_RAM_DAX
|
||||
queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue);
|
||||
brd->dax_dev = alloc_dax(brd, disk->disk_name, &brd_dax_ops);
|
||||
if (!brd->dax_dev)
|
||||
goto out_free_inode;
|
||||
#endif
|
||||
|
||||
|
||||
return brd;
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_RAM_DAX
|
||||
out_free_inode:
|
||||
kill_dax(brd->dax_dev);
|
||||
put_dax(brd->dax_dev);
|
||||
#endif
|
||||
out_free_queue:
|
||||
blk_cleanup_queue(brd->brd_queue);
|
||||
out_free_dev:
|
||||
@ -525,6 +547,10 @@ out:
|
||||
static void brd_del_one(struct brd_device *brd)
|
||||
{
|
||||
list_del(&brd->brd_list);
|
||||
#ifdef CONFIG_BLK_DEV_RAM_DAX
|
||||
kill_dax(brd->dax_dev);
|
||||
put_dax(brd->dax_dev);
|
||||
#endif
|
||||
del_gendisk(brd->brd_disk);
|
||||
brd_free(brd);
|
||||
}
|
||||
|
@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
|
||||
}
|
||||
|
||||
static int
|
||||
hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
|
||||
hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
|
||||
struct hpet_info *info)
|
||||
{
|
||||
struct hpet_timer __iomem *timer;
|
||||
|
@ -2302,7 +2302,7 @@ static int __init init(void)
|
||||
|
||||
pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
|
||||
if (!pdrvdata.debugfs_dir)
|
||||
pr_warning("Error creating debugfs dir for virtio-ports\n");
|
||||
pr_warn("Error creating debugfs dir for virtio-ports\n");
|
||||
INIT_LIST_HEAD(&pdrvdata.consoles);
|
||||
INIT_LIST_HEAD(&pdrvdata.portdevs);
|
||||
|
||||
|
@ -1,8 +1,13 @@
|
||||
menuconfig DEV_DAX
|
||||
menuconfig DAX
|
||||
tristate "DAX: direct access to differentiated memory"
|
||||
default m if NVDIMM_DAX
|
||||
depends on TRANSPARENT_HUGEPAGE
|
||||
select SRCU
|
||||
default m if NVDIMM_DAX
|
||||
|
||||
if DAX
|
||||
|
||||
config DEV_DAX
|
||||
tristate "Device DAX: direct access mapping device"
|
||||
depends on TRANSPARENT_HUGEPAGE
|
||||
help
|
||||
Support raw access to differentiated (persistence, bandwidth,
|
||||
latency...) memory via an mmap(2) capable character
|
||||
@ -11,7 +16,6 @@ menuconfig DEV_DAX
|
||||
baseline memory pool. Mappings of a /dev/daxX.Y device impose
|
||||
restrictions that make the mapping behavior deterministic.
|
||||
|
||||
if DEV_DAX
|
||||
|
||||
config DEV_DAX_PMEM
|
||||
tristate "PMEM DAX: direct access to persistent memory"
|
||||
|
@ -1,4 +1,7 @@
|
||||
obj-$(CONFIG_DEV_DAX) += dax.o
|
||||
obj-$(CONFIG_DAX) += dax.o
|
||||
obj-$(CONFIG_DEV_DAX) += device_dax.o
|
||||
obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o
|
||||
|
||||
dax-y := super.o
|
||||
dax_pmem-y := pmem.o
|
||||
device_dax-y := device.o
|
||||
|
@ -38,22 +38,18 @@ struct dax_region {
|
||||
};
|
||||
|
||||
/**
|
||||
* struct dax_dev - subdivision of a dax region
|
||||
* struct dev_dax - instance data for a subdivision of a dax region
|
||||
* @region - parent region
|
||||
* @inode - inode
|
||||
* @dev - device backing the character device
|
||||
* @cdev - core chardev data
|
||||
* @alive - !alive + srcu grace period == no new mappings can be established
|
||||
* @dax_dev - core dax functionality
|
||||
* @dev - device core
|
||||
* @id - child id in the region
|
||||
* @num_resources - number of physical address extents in this device
|
||||
* @res - array of physical address ranges
|
||||
*/
|
||||
struct dax_dev {
|
||||
struct dev_dax {
|
||||
struct dax_region *region;
|
||||
struct inode *inode;
|
||||
struct dax_device *dax_dev;
|
||||
struct device dev;
|
||||
struct cdev cdev;
|
||||
bool alive;
|
||||
int id;
|
||||
int num_resources;
|
||||
struct resource res[0];
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright(c) 2016 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2016 - 2017 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -12,14 +12,7 @@
|
||||
*/
|
||||
#ifndef __DAX_H__
|
||||
#define __DAX_H__
|
||||
struct device;
|
||||
struct dax_dev;
|
||||
struct resource;
|
||||
struct dax_region;
|
||||
void dax_region_put(struct dax_region *dax_region);
|
||||
struct dax_region *alloc_dax_region(struct device *parent,
|
||||
int region_id, struct resource *res, unsigned int align,
|
||||
void *addr, unsigned long flags);
|
||||
struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
|
||||
struct resource *res, int count);
|
||||
struct dax_device;
|
||||
struct dax_device *inode_dax(struct inode *inode);
|
||||
struct inode *dax_inode(struct dax_device *dax_dev);
|
||||
#endif /* __DAX_H__ */
|
||||
|
25
drivers/dax/device-dax.h
Normal file
25
drivers/dax/device-dax.h
Normal file
@ -0,0 +1,25 @@
|
||||
/*
|
||||
* Copyright(c) 2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*/
|
||||
#ifndef __DEVICE_DAX_H__
|
||||
#define __DEVICE_DAX_H__
|
||||
struct device;
|
||||
struct dev_dax;
|
||||
struct resource;
|
||||
struct dax_region;
|
||||
void dax_region_put(struct dax_region *dax_region);
|
||||
struct dax_region *alloc_dax_region(struct device *parent,
|
||||
int region_id, struct resource *res, unsigned int align,
|
||||
void *addr, unsigned long flags);
|
||||
struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
|
||||
struct resource *res, int count);
|
||||
#endif /* __DEVICE_DAX_H__ */
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright(c) 2016 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2016 - 2017 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -13,10 +13,7 @@
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/magic.h>
|
||||
#include <linux/mount.h>
|
||||
#include <linux/pfn_t.h>
|
||||
#include <linux/hash.h>
|
||||
#include <linux/cdev.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/dax.h>
|
||||
@ -25,16 +22,7 @@
|
||||
#include "dax-private.h"
|
||||
#include "dax.h"
|
||||
|
||||
static dev_t dax_devt;
|
||||
DEFINE_STATIC_SRCU(dax_srcu);
|
||||
static struct class *dax_class;
|
||||
static DEFINE_IDA(dax_minor_ida);
|
||||
static int nr_dax = CONFIG_NR_DEV_DAX;
|
||||
module_param(nr_dax, int, S_IRUGO);
|
||||
static struct vfsmount *dax_mnt;
|
||||
static struct kmem_cache *dax_cache __read_mostly;
|
||||
static struct super_block *dax_superblock __read_mostly;
|
||||
MODULE_PARM_DESC(nr_dax, "max number of device-dax instances");
|
||||
|
||||
/*
|
||||
* Rely on the fact that drvdata is set before the attributes are
|
||||
@ -87,117 +75,6 @@ static const struct attribute_group *dax_region_attribute_groups[] = {
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct inode *dax_alloc_inode(struct super_block *sb)
|
||||
{
|
||||
return kmem_cache_alloc(dax_cache, GFP_KERNEL);
|
||||
}
|
||||
|
||||
static void dax_i_callback(struct rcu_head *head)
|
||||
{
|
||||
struct inode *inode = container_of(head, struct inode, i_rcu);
|
||||
|
||||
kmem_cache_free(dax_cache, inode);
|
||||
}
|
||||
|
||||
static void dax_destroy_inode(struct inode *inode)
|
||||
{
|
||||
call_rcu(&inode->i_rcu, dax_i_callback);
|
||||
}
|
||||
|
||||
static const struct super_operations dax_sops = {
|
||||
.statfs = simple_statfs,
|
||||
.alloc_inode = dax_alloc_inode,
|
||||
.destroy_inode = dax_destroy_inode,
|
||||
.drop_inode = generic_delete_inode,
|
||||
};
|
||||
|
||||
static struct dentry *dax_mount(struct file_system_type *fs_type,
|
||||
int flags, const char *dev_name, void *data)
|
||||
{
|
||||
return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
|
||||
}
|
||||
|
||||
static struct file_system_type dax_type = {
|
||||
.name = "dax",
|
||||
.mount = dax_mount,
|
||||
.kill_sb = kill_anon_super,
|
||||
};
|
||||
|
||||
static int dax_test(struct inode *inode, void *data)
|
||||
{
|
||||
return inode->i_cdev == data;
|
||||
}
|
||||
|
||||
static int dax_set(struct inode *inode, void *data)
|
||||
{
|
||||
inode->i_cdev = data;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct inode *dax_inode_get(struct cdev *cdev, dev_t devt)
|
||||
{
|
||||
struct inode *inode;
|
||||
|
||||
inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
|
||||
dax_test, dax_set, cdev);
|
||||
|
||||
if (!inode)
|
||||
return NULL;
|
||||
|
||||
if (inode->i_state & I_NEW) {
|
||||
inode->i_mode = S_IFCHR;
|
||||
inode->i_flags = S_DAX;
|
||||
inode->i_rdev = devt;
|
||||
mapping_set_gfp_mask(&inode->i_data, GFP_USER);
|
||||
unlock_new_inode(inode);
|
||||
}
|
||||
return inode;
|
||||
}
|
||||
|
||||
static void init_once(void *inode)
|
||||
{
|
||||
inode_init_once(inode);
|
||||
}
|
||||
|
||||
static int dax_inode_init(void)
|
||||
{
|
||||
int rc;
|
||||
|
||||
dax_cache = kmem_cache_create("dax_cache", sizeof(struct inode), 0,
|
||||
(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
|
||||
SLAB_MEM_SPREAD|SLAB_ACCOUNT),
|
||||
init_once);
|
||||
if (!dax_cache)
|
||||
return -ENOMEM;
|
||||
|
||||
rc = register_filesystem(&dax_type);
|
||||
if (rc)
|
||||
goto err_register_fs;
|
||||
|
||||
dax_mnt = kern_mount(&dax_type);
|
||||
if (IS_ERR(dax_mnt)) {
|
||||
rc = PTR_ERR(dax_mnt);
|
||||
goto err_mount;
|
||||
}
|
||||
dax_superblock = dax_mnt->mnt_sb;
|
||||
|
||||
return 0;
|
||||
|
||||
err_mount:
|
||||
unregister_filesystem(&dax_type);
|
||||
err_register_fs:
|
||||
kmem_cache_destroy(dax_cache);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void dax_inode_exit(void)
|
||||
{
|
||||
kern_unmount(dax_mnt);
|
||||
unregister_filesystem(&dax_type);
|
||||
kmem_cache_destroy(dax_cache);
|
||||
}
|
||||
|
||||
static void dax_region_free(struct kref *kref)
|
||||
{
|
||||
struct dax_region *dax_region;
|
||||
@ -266,47 +143,47 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(alloc_dax_region);
|
||||
|
||||
static struct dax_dev *to_dax_dev(struct device *dev)
|
||||
static struct dev_dax *to_dev_dax(struct device *dev)
|
||||
{
|
||||
return container_of(dev, struct dax_dev, dev);
|
||||
return container_of(dev, struct dev_dax, dev);
|
||||
}
|
||||
|
||||
static ssize_t size_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct dax_dev *dax_dev = to_dax_dev(dev);
|
||||
struct dev_dax *dev_dax = to_dev_dax(dev);
|
||||
unsigned long long size = 0;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < dax_dev->num_resources; i++)
|
||||
size += resource_size(&dax_dev->res[i]);
|
||||
for (i = 0; i < dev_dax->num_resources; i++)
|
||||
size += resource_size(&dev_dax->res[i]);
|
||||
|
||||
return sprintf(buf, "%llu\n", size);
|
||||
}
|
||||
static DEVICE_ATTR_RO(size);
|
||||
|
||||
static struct attribute *dax_device_attributes[] = {
|
||||
static struct attribute *dev_dax_attributes[] = {
|
||||
&dev_attr_size.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static const struct attribute_group dax_device_attribute_group = {
|
||||
.attrs = dax_device_attributes,
|
||||
static const struct attribute_group dev_dax_attribute_group = {
|
||||
.attrs = dev_dax_attributes,
|
||||
};
|
||||
|
||||
static const struct attribute_group *dax_attribute_groups[] = {
|
||||
&dax_device_attribute_group,
|
||||
&dev_dax_attribute_group,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
|
||||
static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
|
||||
const char *func)
|
||||
{
|
||||
struct dax_region *dax_region = dax_dev->region;
|
||||
struct device *dev = &dax_dev->dev;
|
||||
struct dax_region *dax_region = dev_dax->region;
|
||||
struct device *dev = &dev_dax->dev;
|
||||
unsigned long mask;
|
||||
|
||||
if (!dax_dev->alive)
|
||||
if (!dax_alive(dev_dax->dax_dev))
|
||||
return -ENXIO;
|
||||
|
||||
/* prevent private mappings from being established */
|
||||
@ -341,23 +218,23 @@ static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
|
||||
}
|
||||
|
||||
/* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */
|
||||
__weak phys_addr_t dax_pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
|
||||
__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
|
||||
unsigned long size)
|
||||
{
|
||||
struct resource *res;
|
||||
phys_addr_t phys;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < dax_dev->num_resources; i++) {
|
||||
res = &dax_dev->res[i];
|
||||
for (i = 0; i < dev_dax->num_resources; i++) {
|
||||
res = &dev_dax->res[i];
|
||||
phys = pgoff * PAGE_SIZE + res->start;
|
||||
if (phys >= res->start && phys <= res->end)
|
||||
break;
|
||||
pgoff -= PHYS_PFN(resource_size(res));
|
||||
}
|
||||
|
||||
if (i < dax_dev->num_resources) {
|
||||
res = &dax_dev->res[i];
|
||||
if (i < dev_dax->num_resources) {
|
||||
res = &dev_dax->res[i];
|
||||
if (phys + size - 1 <= res->end)
|
||||
return phys;
|
||||
}
|
||||
@ -365,28 +242,29 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
|
||||
static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
|
||||
{
|
||||
struct device *dev = &dax_dev->dev;
|
||||
struct device *dev = &dev_dax->dev;
|
||||
struct dax_region *dax_region;
|
||||
int rc = VM_FAULT_SIGBUS;
|
||||
phys_addr_t phys;
|
||||
pfn_t pfn;
|
||||
unsigned int fault_size = PAGE_SIZE;
|
||||
|
||||
if (check_vma(dax_dev, vmf->vma, __func__))
|
||||
if (check_vma(dev_dax, vmf->vma, __func__))
|
||||
return VM_FAULT_SIGBUS;
|
||||
|
||||
dax_region = dax_dev->region;
|
||||
dax_region = dev_dax->region;
|
||||
if (dax_region->align > PAGE_SIZE) {
|
||||
dev_dbg(dev, "%s: alignment > fault size\n", __func__);
|
||||
dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n",
|
||||
__func__, dax_region->align, fault_size);
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
|
||||
if (fault_size != dax_region->align)
|
||||
return VM_FAULT_SIGBUS;
|
||||
|
||||
phys = dax_pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
|
||||
phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
|
||||
if (phys == -1) {
|
||||
dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
|
||||
vmf->pgoff);
|
||||
@ -405,28 +283,29 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
|
||||
return VM_FAULT_NOPAGE;
|
||||
}
|
||||
|
||||
static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
|
||||
static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
|
||||
{
|
||||
unsigned long pmd_addr = vmf->address & PMD_MASK;
|
||||
struct device *dev = &dax_dev->dev;
|
||||
struct device *dev = &dev_dax->dev;
|
||||
struct dax_region *dax_region;
|
||||
phys_addr_t phys;
|
||||
pgoff_t pgoff;
|
||||
pfn_t pfn;
|
||||
unsigned int fault_size = PMD_SIZE;
|
||||
|
||||
if (check_vma(dax_dev, vmf->vma, __func__))
|
||||
if (check_vma(dev_dax, vmf->vma, __func__))
|
||||
return VM_FAULT_SIGBUS;
|
||||
|
||||
dax_region = dax_dev->region;
|
||||
dax_region = dev_dax->region;
|
||||
if (dax_region->align > PMD_SIZE) {
|
||||
dev_dbg(dev, "%s: alignment > fault size\n", __func__);
|
||||
dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n",
|
||||
__func__, dax_region->align, fault_size);
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
|
||||
/* dax pmd mappings require pfn_t_devmap() */
|
||||
if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
|
||||
dev_dbg(dev, "%s: alignment > fault size\n", __func__);
|
||||
dev_dbg(dev, "%s: region lacks devmap flags\n", __func__);
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
|
||||
@ -441,7 +320,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
|
||||
return VM_FAULT_SIGBUS;
|
||||
|
||||
pgoff = linear_page_index(vmf->vma, pmd_addr);
|
||||
phys = dax_pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
|
||||
phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
|
||||
if (phys == -1) {
|
||||
dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
|
||||
pgoff);
|
||||
@ -455,10 +334,10 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
|
||||
static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
|
||||
static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
|
||||
{
|
||||
unsigned long pud_addr = vmf->address & PUD_MASK;
|
||||
struct device *dev = &dax_dev->dev;
|
||||
struct device *dev = &dev_dax->dev;
|
||||
struct dax_region *dax_region;
|
||||
phys_addr_t phys;
|
||||
pgoff_t pgoff;
|
||||
@ -466,18 +345,19 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
|
||||
unsigned int fault_size = PUD_SIZE;
|
||||
|
||||
|
||||
if (check_vma(dax_dev, vmf->vma, __func__))
|
||||
if (check_vma(dev_dax, vmf->vma, __func__))
|
||||
return VM_FAULT_SIGBUS;
|
||||
|
||||
dax_region = dax_dev->region;
|
||||
dax_region = dev_dax->region;
|
||||
if (dax_region->align > PUD_SIZE) {
|
||||
dev_dbg(dev, "%s: alignment > fault size\n", __func__);
|
||||
dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n",
|
||||
__func__, dax_region->align, fault_size);
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
|
||||
/* dax pud mappings require pfn_t_devmap() */
|
||||
if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
|
||||
dev_dbg(dev, "%s: alignment > fault size\n", __func__);
|
||||
dev_dbg(dev, "%s: region lacks devmap flags\n", __func__);
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
|
||||
@ -492,7 +372,7 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
|
||||
return VM_FAULT_SIGBUS;
|
||||
|
||||
pgoff = linear_page_index(vmf->vma, pud_addr);
|
||||
phys = dax_pgoff_to_phys(dax_dev, pgoff, PUD_SIZE);
|
||||
phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
|
||||
if (phys == -1) {
|
||||
dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
|
||||
pgoff);
|
||||
@ -505,65 +385,71 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
|
||||
vmf->flags & FAULT_FLAG_WRITE);
|
||||
}
|
||||
#else
|
||||
static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
|
||||
static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
|
||||
{
|
||||
return VM_FAULT_FALLBACK;
|
||||
}
|
||||
#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
|
||||
|
||||
static int dax_dev_huge_fault(struct vm_fault *vmf,
|
||||
static int dev_dax_huge_fault(struct vm_fault *vmf,
|
||||
enum page_entry_size pe_size)
|
||||
{
|
||||
int rc, id;
|
||||
struct file *filp = vmf->vma->vm_file;
|
||||
struct dax_dev *dax_dev = filp->private_data;
|
||||
struct dev_dax *dev_dax = filp->private_data;
|
||||
|
||||
dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
|
||||
dev_dbg(&dev_dax->dev, "%s: %s: %s (%#lx - %#lx) size = %d\n", __func__,
|
||||
current->comm, (vmf->flags & FAULT_FLAG_WRITE)
|
||||
? "write" : "read",
|
||||
vmf->vma->vm_start, vmf->vma->vm_end);
|
||||
vmf->vma->vm_start, vmf->vma->vm_end, pe_size);
|
||||
|
||||
id = srcu_read_lock(&dax_srcu);
|
||||
id = dax_read_lock();
|
||||
switch (pe_size) {
|
||||
case PE_SIZE_PTE:
|
||||
rc = __dax_dev_pte_fault(dax_dev, vmf);
|
||||
rc = __dev_dax_pte_fault(dev_dax, vmf);
|
||||
break;
|
||||
case PE_SIZE_PMD:
|
||||
rc = __dax_dev_pmd_fault(dax_dev, vmf);
|
||||
rc = __dev_dax_pmd_fault(dev_dax, vmf);
|
||||
break;
|
||||
case PE_SIZE_PUD:
|
||||
rc = __dax_dev_pud_fault(dax_dev, vmf);
|
||||
rc = __dev_dax_pud_fault(dev_dax, vmf);
|
||||
break;
|
||||
default:
|
||||
return VM_FAULT_FALLBACK;
|
||||
rc = VM_FAULT_SIGBUS;
|
||||
}
|
||||
srcu_read_unlock(&dax_srcu, id);
|
||||
dax_read_unlock(id);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int dax_dev_fault(struct vm_fault *vmf)
|
||||
static int dev_dax_fault(struct vm_fault *vmf)
|
||||
{
|
||||
return dax_dev_huge_fault(vmf, PE_SIZE_PTE);
|
||||
return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
|
||||
}
|
||||
|
||||
static const struct vm_operations_struct dax_dev_vm_ops = {
|
||||
.fault = dax_dev_fault,
|
||||
.huge_fault = dax_dev_huge_fault,
|
||||
static const struct vm_operations_struct dax_vm_ops = {
|
||||
.fault = dev_dax_fault,
|
||||
.huge_fault = dev_dax_huge_fault,
|
||||
};
|
||||
|
||||
static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
|
||||
{
|
||||
struct dax_dev *dax_dev = filp->private_data;
|
||||
int rc;
|
||||
struct dev_dax *dev_dax = filp->private_data;
|
||||
int rc, id;
|
||||
|
||||
dev_dbg(&dax_dev->dev, "%s\n", __func__);
|
||||
dev_dbg(&dev_dax->dev, "%s\n", __func__);
|
||||
|
||||
rc = check_vma(dax_dev, vma, __func__);
|
||||
/*
|
||||
* We lock to check dax_dev liveness and will re-check at
|
||||
* fault time.
|
||||
*/
|
||||
id = dax_read_lock();
|
||||
rc = check_vma(dev_dax, vma, __func__);
|
||||
dax_read_unlock(id);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
vma->vm_ops = &dax_dev_vm_ops;
|
||||
vma->vm_ops = &dax_vm_ops;
|
||||
vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
|
||||
return 0;
|
||||
}
|
||||
@ -574,13 +460,13 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
|
||||
unsigned long flags)
|
||||
{
|
||||
unsigned long off, off_end, off_align, len_align, addr_align, align;
|
||||
struct dax_dev *dax_dev = filp ? filp->private_data : NULL;
|
||||
struct dev_dax *dev_dax = filp ? filp->private_data : NULL;
|
||||
struct dax_region *dax_region;
|
||||
|
||||
if (!dax_dev || addr)
|
||||
if (!dev_dax || addr)
|
||||
goto out;
|
||||
|
||||
dax_region = dax_dev->region;
|
||||
dax_region = dev_dax->region;
|
||||
align = dax_region->align;
|
||||
off = pgoff << PAGE_SHIFT;
|
||||
off_end = off + len;
|
||||
@ -605,14 +491,15 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
|
||||
|
||||
static int dax_open(struct inode *inode, struct file *filp)
|
||||
{
|
||||
struct dax_dev *dax_dev;
|
||||
struct dax_device *dax_dev = inode_dax(inode);
|
||||
struct inode *__dax_inode = dax_inode(dax_dev);
|
||||
struct dev_dax *dev_dax = dax_get_private(dax_dev);
|
||||
|
||||
dax_dev = container_of(inode->i_cdev, struct dax_dev, cdev);
|
||||
dev_dbg(&dax_dev->dev, "%s\n", __func__);
|
||||
inode->i_mapping = dax_dev->inode->i_mapping;
|
||||
inode->i_mapping->host = dax_dev->inode;
|
||||
dev_dbg(&dev_dax->dev, "%s\n", __func__);
|
||||
inode->i_mapping = __dax_inode->i_mapping;
|
||||
inode->i_mapping->host = __dax_inode;
|
||||
filp->f_mapping = inode->i_mapping;
|
||||
filp->private_data = dax_dev;
|
||||
filp->private_data = dev_dax;
|
||||
inode->i_flags = S_DAX;
|
||||
|
||||
return 0;
|
||||
@ -620,9 +507,9 @@ static int dax_open(struct inode *inode, struct file *filp)
|
||||
|
||||
static int dax_release(struct inode *inode, struct file *filp)
|
||||
{
|
||||
struct dax_dev *dax_dev = filp->private_data;
|
||||
struct dev_dax *dev_dax = filp->private_data;
|
||||
|
||||
dev_dbg(&dax_dev->dev, "%s\n", __func__);
|
||||
dev_dbg(&dev_dax->dev, "%s\n", __func__);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -635,51 +522,54 @@ static const struct file_operations dax_fops = {
|
||||
.mmap = dax_mmap,
|
||||
};
|
||||
|
||||
static void dax_dev_release(struct device *dev)
|
||||
static void dev_dax_release(struct device *dev)
|
||||
{
|
||||
struct dax_dev *dax_dev = to_dax_dev(dev);
|
||||
struct dax_region *dax_region = dax_dev->region;
|
||||
struct dev_dax *dev_dax = to_dev_dax(dev);
|
||||
struct dax_region *dax_region = dev_dax->region;
|
||||
struct dax_device *dax_dev = dev_dax->dax_dev;
|
||||
|
||||
ida_simple_remove(&dax_region->ida, dax_dev->id);
|
||||
ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
|
||||
ida_simple_remove(&dax_region->ida, dev_dax->id);
|
||||
dax_region_put(dax_region);
|
||||
iput(dax_dev->inode);
|
||||
kfree(dax_dev);
|
||||
put_dax(dax_dev);
|
||||
kfree(dev_dax);
|
||||
}
|
||||
|
||||
static void unregister_dax_dev(void *dev)
|
||||
static void kill_dev_dax(struct dev_dax *dev_dax)
|
||||
{
|
||||
struct dax_dev *dax_dev = to_dax_dev(dev);
|
||||
struct cdev *cdev = &dax_dev->cdev;
|
||||
struct dax_device *dax_dev = dev_dax->dax_dev;
|
||||
struct inode *inode = dax_inode(dax_dev);
|
||||
|
||||
kill_dax(dax_dev);
|
||||
unmap_mapping_range(inode->i_mapping, 0, 0, 1);
|
||||
}
|
||||
|
||||
static void unregister_dev_dax(void *dev)
|
||||
{
|
||||
struct dev_dax *dev_dax = to_dev_dax(dev);
|
||||
struct dax_device *dax_dev = dev_dax->dax_dev;
|
||||
struct inode *inode = dax_inode(dax_dev);
|
||||
struct cdev *cdev = inode->i_cdev;
|
||||
|
||||
dev_dbg(dev, "%s\n", __func__);
|
||||
|
||||
/*
|
||||
* Note, rcu is not protecting the liveness of dax_dev, rcu is
|
||||
* ensuring that any fault handlers that might have seen
|
||||
* dax_dev->alive == true, have completed. Any fault handlers
|
||||
* that start after synchronize_srcu() has started will abort
|
||||
* upon seeing dax_dev->alive == false.
|
||||
*/
|
||||
dax_dev->alive = false;
|
||||
synchronize_srcu(&dax_srcu);
|
||||
unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
|
||||
cdev_del(cdev);
|
||||
device_unregister(dev);
|
||||
kill_dev_dax(dev_dax);
|
||||
cdev_device_del(cdev, dev);
|
||||
put_device(dev);
|
||||
}
|
||||
|
||||
struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
|
||||
struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
|
||||
struct resource *res, int count)
|
||||
{
|
||||
struct device *parent = dax_region->dev;
|
||||
struct dax_dev *dax_dev;
|
||||
int rc = 0, minor, i;
|
||||
struct dax_device *dax_dev;
|
||||
struct dev_dax *dev_dax;
|
||||
struct inode *inode;
|
||||
struct device *dev;
|
||||
struct cdev *cdev;
|
||||
dev_t dev_t;
|
||||
int rc = 0, i;
|
||||
|
||||
dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(*res) * count, GFP_KERNEL);
|
||||
if (!dax_dev)
|
||||
dev_dax = kzalloc(sizeof(*dev_dax) + sizeof(*res) * count, GFP_KERNEL);
|
||||
if (!dev_dax)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
@ -689,115 +579,79 @@ struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
|
||||
rc = -EINVAL;
|
||||
break;
|
||||
}
|
||||
dax_dev->res[i].start = res[i].start;
|
||||
dax_dev->res[i].end = res[i].end;
|
||||
dev_dax->res[i].start = res[i].start;
|
||||
dev_dax->res[i].end = res[i].end;
|
||||
}
|
||||
|
||||
if (i < count)
|
||||
goto err_id;
|
||||
|
||||
dax_dev->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
|
||||
if (dax_dev->id < 0) {
|
||||
rc = dax_dev->id;
|
||||
dev_dax->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
|
||||
if (dev_dax->id < 0) {
|
||||
rc = dev_dax->id;
|
||||
goto err_id;
|
||||
}
|
||||
|
||||
minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL);
|
||||
if (minor < 0) {
|
||||
rc = minor;
|
||||
goto err_minor;
|
||||
}
|
||||
|
||||
dev_t = MKDEV(MAJOR(dax_devt), minor);
|
||||
dev = &dax_dev->dev;
|
||||
dax_dev->inode = dax_inode_get(&dax_dev->cdev, dev_t);
|
||||
if (!dax_dev->inode) {
|
||||
rc = -ENOMEM;
|
||||
goto err_inode;
|
||||
}
|
||||
|
||||
/* device_initialize() so cdev can reference kobj parent */
|
||||
device_initialize(dev);
|
||||
|
||||
cdev = &dax_dev->cdev;
|
||||
cdev_init(cdev, &dax_fops);
|
||||
cdev->owner = parent->driver->owner;
|
||||
cdev->kobj.parent = &dev->kobj;
|
||||
rc = cdev_add(&dax_dev->cdev, dev_t, 1);
|
||||
if (rc)
|
||||
goto err_cdev;
|
||||
/*
|
||||
* No 'host' or dax_operations since there is no access to this
|
||||
* device outside of mmap of the resulting character device.
|
||||
*/
|
||||
dax_dev = alloc_dax(dev_dax, NULL, NULL);
|
||||
if (!dax_dev)
|
||||
goto err_dax;
|
||||
|
||||
/* from here on we're committed to teardown via dax_dev_release() */
|
||||
dax_dev->num_resources = count;
|
||||
dax_dev->alive = true;
|
||||
dax_dev->region = dax_region;
|
||||
dev = &dev_dax->dev;
|
||||
device_initialize(dev);
|
||||
|
||||
inode = dax_inode(dax_dev);
|
||||
cdev = inode->i_cdev;
|
||||
cdev_init(cdev, &dax_fops);
|
||||
cdev->owner = parent->driver->owner;
|
||||
|
||||
dev_dax->num_resources = count;
|
||||
dev_dax->dax_dev = dax_dev;
|
||||
dev_dax->region = dax_region;
|
||||
kref_get(&dax_region->kref);
|
||||
|
||||
dev->devt = dev_t;
|
||||
dev->devt = inode->i_rdev;
|
||||
dev->class = dax_class;
|
||||
dev->parent = parent;
|
||||
dev->groups = dax_attribute_groups;
|
||||
dev->release = dax_dev_release;
|
||||
dev_set_name(dev, "dax%d.%d", dax_region->id, dax_dev->id);
|
||||
rc = device_add(dev);
|
||||
dev->release = dev_dax_release;
|
||||
dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);
|
||||
|
||||
rc = cdev_device_add(cdev, dev);
|
||||
if (rc) {
|
||||
kill_dev_dax(dev_dax);
|
||||
put_device(dev);
|
||||
return ERR_PTR(rc);
|
||||
}
|
||||
|
||||
rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_dev, dev);
|
||||
rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev);
|
||||
if (rc)
|
||||
return ERR_PTR(rc);
|
||||
|
||||
return dax_dev;
|
||||
return dev_dax;
|
||||
|
||||
err_cdev:
|
||||
iput(dax_dev->inode);
|
||||
err_inode:
|
||||
ida_simple_remove(&dax_minor_ida, minor);
|
||||
err_minor:
|
||||
ida_simple_remove(&dax_region->ida, dax_dev->id);
|
||||
err_dax:
|
||||
ida_simple_remove(&dax_region->ida, dev_dax->id);
|
||||
err_id:
|
||||
kfree(dax_dev);
|
||||
kfree(dev_dax);
|
||||
|
||||
return ERR_PTR(rc);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(devm_create_dax_dev);
|
||||
EXPORT_SYMBOL_GPL(devm_create_dev_dax);
|
||||
|
||||
static int __init dax_init(void)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = dax_inode_init();
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
nr_dax = max(nr_dax, 256);
|
||||
rc = alloc_chrdev_region(&dax_devt, 0, nr_dax, "dax");
|
||||
if (rc)
|
||||
goto err_chrdev;
|
||||
|
||||
dax_class = class_create(THIS_MODULE, "dax");
|
||||
if (IS_ERR(dax_class)) {
|
||||
rc = PTR_ERR(dax_class);
|
||||
goto err_class;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_class:
|
||||
unregister_chrdev_region(dax_devt, nr_dax);
|
||||
err_chrdev:
|
||||
dax_inode_exit();
|
||||
return rc;
|
||||
return PTR_ERR_OR_ZERO(dax_class);
|
||||
}
|
||||
|
||||
static void __exit dax_exit(void)
|
||||
{
|
||||
class_destroy(dax_class);
|
||||
unregister_chrdev_region(dax_devt, nr_dax);
|
||||
ida_destroy(&dax_minor_ida);
|
||||
dax_inode_exit();
|
||||
}
|
||||
|
||||
MODULE_AUTHOR("Intel Corporation");
|
@ -16,7 +16,7 @@
|
||||
#include <linux/pfn_t.h>
|
||||
#include "../nvdimm/pfn.h"
|
||||
#include "../nvdimm/nd.h"
|
||||
#include "dax.h"
|
||||
#include "device-dax.h"
|
||||
|
||||
struct dax_pmem {
|
||||
struct device *dev;
|
||||
@ -61,8 +61,8 @@ static int dax_pmem_probe(struct device *dev)
|
||||
int rc;
|
||||
void *addr;
|
||||
struct resource res;
|
||||
struct dax_dev *dax_dev;
|
||||
struct nd_pfn_sb *pfn_sb;
|
||||
struct dev_dax *dev_dax;
|
||||
struct dax_pmem *dax_pmem;
|
||||
struct nd_region *nd_region;
|
||||
struct nd_namespace_io *nsio;
|
||||
@ -130,12 +130,12 @@ static int dax_pmem_probe(struct device *dev)
|
||||
return -ENOMEM;
|
||||
|
||||
/* TODO: support for subdividing a dax region... */
|
||||
dax_dev = devm_create_dax_dev(dax_region, &res, 1);
|
||||
dev_dax = devm_create_dev_dax(dax_region, &res, 1);
|
||||
|
||||
/* child dax_dev instances now own the lifetime of the dax_region */
|
||||
/* child dev_dax instances now own the lifetime of the dax_region */
|
||||
dax_region_put(dax_region);
|
||||
|
||||
return PTR_ERR_OR_ZERO(dax_dev);
|
||||
return PTR_ERR_OR_ZERO(dev_dax);
|
||||
}
|
||||
|
||||
static struct nd_device_driver dax_pmem_driver = {
|
||||
|
425
drivers/dax/super.c
Normal file
425
drivers/dax/super.c
Normal file
@ -0,0 +1,425 @@
|
||||
/*
|
||||
* Copyright(c) 2017 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*/
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mount.h>
|
||||
#include <linux/magic.h>
|
||||
#include <linux/cdev.h>
|
||||
#include <linux/hash.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/dax.h>
|
||||
#include <linux/fs.h>
|
||||
|
||||
static int nr_dax = CONFIG_NR_DEV_DAX;
|
||||
module_param(nr_dax, int, S_IRUGO);
|
||||
MODULE_PARM_DESC(nr_dax, "max number of dax device instances");
|
||||
|
||||
static dev_t dax_devt;
|
||||
DEFINE_STATIC_SRCU(dax_srcu);
|
||||
static struct vfsmount *dax_mnt;
|
||||
static DEFINE_IDA(dax_minor_ida);
|
||||
static struct kmem_cache *dax_cache __read_mostly;
|
||||
static struct super_block *dax_superblock __read_mostly;
|
||||
|
||||
#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
|
||||
static struct hlist_head dax_host_list[DAX_HASH_SIZE];
|
||||
static DEFINE_SPINLOCK(dax_host_lock);
|
||||
|
||||
int dax_read_lock(void)
|
||||
{
|
||||
return srcu_read_lock(&dax_srcu);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dax_read_lock);
|
||||
|
||||
void dax_read_unlock(int id)
|
||||
{
|
||||
srcu_read_unlock(&dax_srcu, id);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dax_read_unlock);
|
||||
|
||||
/**
|
||||
* struct dax_device - anchor object for dax services
|
||||
* @inode: core vfs
|
||||
* @cdev: optional character interface for "device dax"
|
||||
* @host: optional name for lookups where the device path is not available
|
||||
* @private: dax driver private data
|
||||
* @alive: !alive + rcu grace period == no new operations / mappings
|
||||
*/
|
||||
struct dax_device {
|
||||
struct hlist_node list;
|
||||
struct inode inode;
|
||||
struct cdev cdev;
|
||||
const char *host;
|
||||
void *private;
|
||||
bool alive;
|
||||
const struct dax_operations *ops;
|
||||
};
|
||||
|
||||
/**
|
||||
* dax_direct_access() - translate a device pgoff to an absolute pfn
|
||||
* @dax_dev: a dax_device instance representing the logical memory range
|
||||
* @pgoff: offset in pages from the start of the device to translate
|
||||
* @nr_pages: number of consecutive pages caller can handle relative to @pfn
|
||||
* @kaddr: output parameter that returns a virtual address mapping of pfn
|
||||
* @pfn: output parameter that returns an absolute pfn translation of @pgoff
|
||||
*
|
||||
* Return: negative errno if an error occurs, otherwise the number of
|
||||
* pages accessible at the device relative @pgoff.
|
||||
*/
|
||||
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
|
||||
void **kaddr, pfn_t *pfn)
|
||||
{
|
||||
long avail;
|
||||
|
||||
/*
|
||||
* The device driver is allowed to sleep, in order to make the
|
||||
* memory directly accessible.
|
||||
*/
|
||||
might_sleep();
|
||||
|
||||
if (!dax_dev)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (!dax_alive(dax_dev))
|
||||
return -ENXIO;
|
||||
|
||||
if (nr_pages < 0)
|
||||
return nr_pages;
|
||||
|
||||
avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
|
||||
kaddr, pfn);
|
||||
if (!avail)
|
||||
return -ERANGE;
|
||||
return min(avail, nr_pages);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dax_direct_access);
|
||||
|
||||
bool dax_alive(struct dax_device *dax_dev)
|
||||
{
|
||||
lockdep_assert_held(&dax_srcu);
|
||||
return dax_dev->alive;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dax_alive);
|
||||
|
||||
static int dax_host_hash(const char *host)
|
||||
{
|
||||
return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
|
||||
}
|
||||
|
||||
/*
|
||||
* Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
|
||||
* that any fault handlers or operations that might have seen
|
||||
* dax_alive(), have completed. Any operations that start after
|
||||
* synchronize_srcu() has run will abort upon seeing !dax_alive().
|
||||
*/
|
||||
void kill_dax(struct dax_device *dax_dev)
|
||||
{
|
||||
if (!dax_dev)
|
||||
return;
|
||||
|
||||
dax_dev->alive = false;
|
||||
|
||||
synchronize_srcu(&dax_srcu);
|
||||
|
||||
spin_lock(&dax_host_lock);
|
||||
hlist_del_init(&dax_dev->list);
|
||||
spin_unlock(&dax_host_lock);
|
||||
|
||||
dax_dev->private = NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kill_dax);
|
||||
|
||||
static struct inode *dax_alloc_inode(struct super_block *sb)
|
||||
{
|
||||
struct dax_device *dax_dev;
|
||||
|
||||
dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
|
||||
return &dax_dev->inode;
|
||||
}
|
||||
|
||||
static struct dax_device *to_dax_dev(struct inode *inode)
|
||||
{
|
||||
return container_of(inode, struct dax_device, inode);
|
||||
}
|
||||
|
||||
static void dax_i_callback(struct rcu_head *head)
|
||||
{
|
||||
struct inode *inode = container_of(head, struct inode, i_rcu);
|
||||
struct dax_device *dax_dev = to_dax_dev(inode);
|
||||
|
||||
kfree(dax_dev->host);
|
||||
dax_dev->host = NULL;
|
||||
ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
|
||||
kmem_cache_free(dax_cache, dax_dev);
|
||||
}
|
||||
|
||||
static void dax_destroy_inode(struct inode *inode)
|
||||
{
|
||||
struct dax_device *dax_dev = to_dax_dev(inode);
|
||||
|
||||
WARN_ONCE(dax_dev->alive,
|
||||
"kill_dax() must be called before final iput()\n");
|
||||
call_rcu(&inode->i_rcu, dax_i_callback);
|
||||
}
|
||||
|
||||
static const struct super_operations dax_sops = {
|
||||
.statfs = simple_statfs,
|
||||
.alloc_inode = dax_alloc_inode,
|
||||
.destroy_inode = dax_destroy_inode,
|
||||
.drop_inode = generic_delete_inode,
|
||||
};
|
||||
|
||||
static struct dentry *dax_mount(struct file_system_type *fs_type,
|
||||
int flags, const char *dev_name, void *data)
|
||||
{
|
||||
return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
|
||||
}
|
||||
|
||||
static struct file_system_type dax_fs_type = {
|
||||
.name = "dax",
|
||||
.mount = dax_mount,
|
||||
.kill_sb = kill_anon_super,
|
||||
};
|
||||
|
||||
static int dax_test(struct inode *inode, void *data)
|
||||
{
|
||||
dev_t devt = *(dev_t *) data;
|
||||
|
||||
return inode->i_rdev == devt;
|
||||
}
|
||||
|
||||
static int dax_set(struct inode *inode, void *data)
|
||||
{
|
||||
dev_t devt = *(dev_t *) data;
|
||||
|
||||
inode->i_rdev = devt;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct dax_device *dax_dev_get(dev_t devt)
|
||||
{
|
||||
struct dax_device *dax_dev;
|
||||
struct inode *inode;
|
||||
|
||||
inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
|
||||
dax_test, dax_set, &devt);
|
||||
|
||||
if (!inode)
|
||||
return NULL;
|
||||
|
||||
dax_dev = to_dax_dev(inode);
|
||||
if (inode->i_state & I_NEW) {
|
||||
dax_dev->alive = true;
|
||||
inode->i_cdev = &dax_dev->cdev;
|
||||
inode->i_mode = S_IFCHR;
|
||||
inode->i_flags = S_DAX;
|
||||
mapping_set_gfp_mask(&inode->i_data, GFP_USER);
|
||||
unlock_new_inode(inode);
|
||||
}
|
||||
|
||||
return dax_dev;
|
||||
}
|
||||
|
||||
static void dax_add_host(struct dax_device *dax_dev, const char *host)
|
||||
{
|
||||
int hash;
|
||||
|
||||
/*
|
||||
* Unconditionally init dax_dev since it's coming from a
|
||||
* non-zeroed slab cache
|
||||
*/
|
||||
INIT_HLIST_NODE(&dax_dev->list);
|
||||
dax_dev->host = host;
|
||||
if (!host)
|
||||
return;
|
||||
|
||||
hash = dax_host_hash(host);
|
||||
spin_lock(&dax_host_lock);
|
||||
hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
|
||||
spin_unlock(&dax_host_lock);
|
||||
}
|
||||
|
||||
struct dax_device *alloc_dax(void *private, const char *__host,
|
||||
const struct dax_operations *ops)
|
||||
{
|
||||
struct dax_device *dax_dev;
|
||||
const char *host;
|
||||
dev_t devt;
|
||||
int minor;
|
||||
|
||||
host = kstrdup(__host, GFP_KERNEL);
|
||||
if (__host && !host)
|
||||
return NULL;
|
||||
|
||||
minor = ida_simple_get(&dax_minor_ida, 0, nr_dax, GFP_KERNEL);
|
||||
if (minor < 0)
|
||||
goto err_minor;
|
||||
|
||||
devt = MKDEV(MAJOR(dax_devt), minor);
|
||||
dax_dev = dax_dev_get(devt);
|
||||
if (!dax_dev)
|
||||
goto err_dev;
|
||||
|
||||
dax_add_host(dax_dev, host);
|
||||
dax_dev->ops = ops;
|
||||
dax_dev->private = private;
|
||||
return dax_dev;
|
||||
|
||||
err_dev:
|
||||
ida_simple_remove(&dax_minor_ida, minor);
|
||||
err_minor:
|
||||
kfree(host);
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(alloc_dax);
|
||||
|
||||
void put_dax(struct dax_device *dax_dev)
|
||||
{
|
||||
if (!dax_dev)
|
||||
return;
|
||||
iput(&dax_dev->inode);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(put_dax);
|
||||
|
||||
/**
|
||||
* dax_get_by_host() - temporary lookup mechanism for filesystem-dax
|
||||
* @host: alternate name for the device registered by a dax driver
|
||||
*/
|
||||
struct dax_device *dax_get_by_host(const char *host)
|
||||
{
|
||||
struct dax_device *dax_dev, *found = NULL;
|
||||
int hash, id;
|
||||
|
||||
if (!host)
|
||||
return NULL;
|
||||
|
||||
hash = dax_host_hash(host);
|
||||
|
||||
id = dax_read_lock();
|
||||
spin_lock(&dax_host_lock);
|
||||
hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
|
||||
if (!dax_alive(dax_dev)
|
||||
|| strcmp(host, dax_dev->host) != 0)
|
||||
continue;
|
||||
|
||||
if (igrab(&dax_dev->inode))
|
||||
found = dax_dev;
|
||||
break;
|
||||
}
|
||||
spin_unlock(&dax_host_lock);
|
||||
dax_read_unlock(id);
|
||||
|
||||
return found;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dax_get_by_host);
|
||||
|
||||
/**
|
||||
* inode_dax: convert a public inode into its dax_dev
|
||||
* @inode: An inode with i_cdev pointing to a dax_dev
|
||||
*
|
||||
* Note this is not equivalent to to_dax_dev() which is for private
|
||||
* internal use where we know the inode filesystem type == dax_fs_type.
|
||||
*/
|
||||
struct dax_device *inode_dax(struct inode *inode)
|
||||
{
|
||||
struct cdev *cdev = inode->i_cdev;
|
||||
|
||||
return container_of(cdev, struct dax_device, cdev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(inode_dax);
|
||||
|
||||
struct inode *dax_inode(struct dax_device *dax_dev)
|
||||
{
|
||||
return &dax_dev->inode;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dax_inode);
|
||||
|
||||
void *dax_get_private(struct dax_device *dax_dev)
|
||||
{
|
||||
return dax_dev->private;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dax_get_private);
|
||||
|
||||
static void init_once(void *_dax_dev)
|
||||
{
|
||||
struct dax_device *dax_dev = _dax_dev;
|
||||
struct inode *inode = &dax_dev->inode;
|
||||
|
||||
inode_init_once(inode);
|
||||
}
|
||||
|
||||
static int __dax_fs_init(void)
|
||||
{
|
||||
int rc;
|
||||
|
||||
dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
|
||||
(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
|
||||
SLAB_MEM_SPREAD|SLAB_ACCOUNT),
|
||||
init_once);
|
||||
if (!dax_cache)
|
||||
return -ENOMEM;
|
||||
|
||||
rc = register_filesystem(&dax_fs_type);
|
||||
if (rc)
|
||||
goto err_register_fs;
|
||||
|
||||
dax_mnt = kern_mount(&dax_fs_type);
|
||||
if (IS_ERR(dax_mnt)) {
|
||||
rc = PTR_ERR(dax_mnt);
|
||||
goto err_mount;
|
||||
}
|
||||
dax_superblock = dax_mnt->mnt_sb;
|
||||
|
||||
return 0;
|
||||
|
||||
err_mount:
|
||||
unregister_filesystem(&dax_fs_type);
|
||||
err_register_fs:
|
||||
kmem_cache_destroy(dax_cache);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void __dax_fs_exit(void)
|
||||
{
|
||||
kern_unmount(dax_mnt);
|
||||
unregister_filesystem(&dax_fs_type);
|
||||
kmem_cache_destroy(dax_cache);
|
||||
}
|
||||
|
||||
static int __init dax_fs_init(void)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = __dax_fs_init();
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
nr_dax = max(nr_dax, 256);
|
||||
rc = alloc_chrdev_region(&dax_devt, 0, nr_dax, "dax");
|
||||
if (rc)
|
||||
__dax_fs_exit();
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void __exit dax_fs_exit(void)
|
||||
{
|
||||
unregister_chrdev_region(dax_devt, nr_dax);
|
||||
ida_destroy(&dax_minor_ida);
|
||||
__dax_fs_exit();
|
||||
}
|
||||
|
||||
MODULE_AUTHOR("Intel Corporation");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
subsys_initcall(dax_fs_init);
|
||||
module_exit(dax_fs_exit);
|
@ -20,6 +20,12 @@ config FPGA_REGION
|
||||
FPGA Regions allow loading FPGA images under control of
|
||||
the Device Tree.
|
||||
|
||||
config FPGA_MGR_ICE40_SPI
|
||||
tristate "Lattice iCE40 SPI"
|
||||
depends on OF && SPI
|
||||
help
|
||||
FPGA manager driver support for Lattice iCE40 FPGAs over SPI.
|
||||
|
||||
config FPGA_MGR_SOCFPGA
|
||||
tristate "Altera SOCFPGA FPGA Manager"
|
||||
depends on ARCH_SOCFPGA || COMPILE_TEST
|
||||
@ -33,6 +39,13 @@ config FPGA_MGR_SOCFPGA_A10
|
||||
help
|
||||
FPGA manager driver support for Altera Arria10 SoCFPGA.
|
||||
|
||||
config FPGA_MGR_TS73XX
|
||||
tristate "Technologic Systems TS-73xx SBC FPGA Manager"
|
||||
depends on ARCH_EP93XX && MACH_TS72XX
|
||||
help
|
||||
FPGA manager driver support for the Altera Cyclone II FPGA
|
||||
present on the TS-73xx SBC boards.
|
||||
|
||||
config FPGA_MGR_ZYNQ_FPGA
|
||||
tristate "Xilinx Zynq FPGA"
|
||||
depends on ARCH_ZYNQ || COMPILE_TEST
|
||||
|
@ -6,8 +6,10 @@
|
||||
obj-$(CONFIG_FPGA) += fpga-mgr.o
|
||||
|
||||
# FPGA Manager Drivers
|
||||
obj-$(CONFIG_FPGA_MGR_ICE40_SPI) += ice40-spi.o
|
||||
obj-$(CONFIG_FPGA_MGR_SOCFPGA) += socfpga.o
|
||||
obj-$(CONFIG_FPGA_MGR_SOCFPGA_A10) += socfpga-a10.o
|
||||
obj-$(CONFIG_FPGA_MGR_TS73XX) += ts73xx-fpga.o
|
||||
obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o
|
||||
|
||||
# FPGA Bridge Drivers
|
||||
|
@ -27,7 +27,7 @@ static DEFINE_IDA(fpga_bridge_ida);
|
||||
static struct class *fpga_bridge_class;
|
||||
|
||||
/* Lock for adding/removing bridges to linked lists*/
|
||||
spinlock_t bridge_list_lock;
|
||||
static spinlock_t bridge_list_lock;
|
||||
|
||||
static int fpga_bridge_of_node_match(struct device *dev, const void *data)
|
||||
{
|
||||
@ -146,11 +146,9 @@ EXPORT_SYMBOL_GPL(fpga_bridge_put);
|
||||
int fpga_bridges_enable(struct list_head *bridge_list)
|
||||
{
|
||||
struct fpga_bridge *bridge;
|
||||
struct list_head *node;
|
||||
int ret;
|
||||
|
||||
list_for_each(node, bridge_list) {
|
||||
bridge = list_entry(node, struct fpga_bridge, node);
|
||||
list_for_each_entry(bridge, bridge_list, node) {
|
||||
ret = fpga_bridge_enable(bridge);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -172,11 +170,9 @@ EXPORT_SYMBOL_GPL(fpga_bridges_enable);
|
||||
int fpga_bridges_disable(struct list_head *bridge_list)
|
||||
{
|
||||
struct fpga_bridge *bridge;
|
||||
struct list_head *node;
|
||||
int ret;
|
||||
|
||||
list_for_each(node, bridge_list) {
|
||||
bridge = list_entry(node, struct fpga_bridge, node);
|
||||
list_for_each_entry(bridge, bridge_list, node) {
|
||||
ret = fpga_bridge_disable(bridge);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -196,13 +192,10 @@ EXPORT_SYMBOL_GPL(fpga_bridges_disable);
|
||||
*/
|
||||
void fpga_bridges_put(struct list_head *bridge_list)
|
||||
{
|
||||
struct fpga_bridge *bridge;
|
||||
struct list_head *node, *next;
|
||||
struct fpga_bridge *bridge, *next;
|
||||
unsigned long flags;
|
||||
|
||||
list_for_each_safe(node, next, bridge_list) {
|
||||
bridge = list_entry(node, struct fpga_bridge, node);
|
||||
|
||||
list_for_each_entry_safe(bridge, next, bridge_list, node) {
|
||||
fpga_bridge_put(bridge);
|
||||
|
||||
spin_lock_irqsave(&bridge_list_lock, flags);
|
||||
|
@ -361,7 +361,7 @@ static struct attribute *fpga_mgr_attrs[] = {
|
||||
};
|
||||
ATTRIBUTE_GROUPS(fpga_mgr);
|
||||
|
||||
struct fpga_manager *__fpga_mgr_get(struct device *dev)
|
||||
static struct fpga_manager *__fpga_mgr_get(struct device *dev)
|
||||
{
|
||||
struct fpga_manager *mgr;
|
||||
int ret = -ENODEV;
|
||||
|
@ -337,8 +337,9 @@ static int child_regions_with_firmware(struct device_node *overlay)
|
||||
* The overlay must add either firmware-name or external-fpga-config property
|
||||
* to the FPGA Region.
|
||||
*
|
||||
* firmware-name : program the FPGA
|
||||
* external-fpga-config : FPGA is already programmed
|
||||
* firmware-name : program the FPGA
|
||||
* external-fpga-config : FPGA is already programmed
|
||||
* encrypted-fpga-config : FPGA bitstream is encrypted
|
||||
*
|
||||
* The overlay can add other FPGA regions, but child FPGA regions cannot have a
|
||||
* firmware-name property since those regions don't exist yet.
|
||||
@ -373,6 +374,9 @@ static int fpga_region_notify_pre_apply(struct fpga_region *region,
|
||||
if (of_property_read_bool(nd->overlay, "external-fpga-config"))
|
||||
info->flags |= FPGA_MGR_EXTERNAL_CONFIG;
|
||||
|
||||
if (of_property_read_bool(nd->overlay, "encrypted-fpga-config"))
|
||||
info->flags |= FPGA_MGR_ENCRYPTED_BITSTREAM;
|
||||
|
||||
of_property_read_string(nd->overlay, "firmware-name", &firmware_name);
|
||||
|
||||
of_property_read_u32(nd->overlay, "region-unfreeze-timeout-us",
|
||||
|
207
drivers/fpga/ice40-spi.c
Normal file
207
drivers/fpga/ice40-spi.c
Normal file
@ -0,0 +1,207 @@
|
||||
/*
|
||||
* FPGA Manager Driver for Lattice iCE40.
|
||||
*
|
||||
* Copyright (c) 2016 Joel Holdsworth
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; version 2 of the License.
|
||||
*
|
||||
* This driver adds support to the FPGA manager for configuring the SRAM of
|
||||
* Lattice iCE40 FPGAs through slave SPI.
|
||||
*/
|
||||
|
||||
#include <linux/fpga/fpga-mgr.h>
|
||||
#include <linux/gpio/consumer.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of_gpio.h>
|
||||
#include <linux/spi/spi.h>
|
||||
#include <linux/stringify.h>
|
||||
|
||||
#define ICE40_SPI_MAX_SPEED 25000000 /* Hz */
|
||||
#define ICE40_SPI_MIN_SPEED 1000000 /* Hz */
|
||||
|
||||
#define ICE40_SPI_RESET_DELAY 1 /* us (>200ns) */
|
||||
#define ICE40_SPI_HOUSEKEEPING_DELAY 1200 /* us */
|
||||
|
||||
#define ICE40_SPI_NUM_ACTIVATION_BYTES DIV_ROUND_UP(49, 8)
|
||||
|
||||
struct ice40_fpga_priv {
|
||||
struct spi_device *dev;
|
||||
struct gpio_desc *reset;
|
||||
struct gpio_desc *cdone;
|
||||
};
|
||||
|
||||
static enum fpga_mgr_states ice40_fpga_ops_state(struct fpga_manager *mgr)
|
||||
{
|
||||
struct ice40_fpga_priv *priv = mgr->priv;
|
||||
|
||||
return gpiod_get_value(priv->cdone) ? FPGA_MGR_STATE_OPERATING :
|
||||
FPGA_MGR_STATE_UNKNOWN;
|
||||
}
|
||||
|
||||
static int ice40_fpga_ops_write_init(struct fpga_manager *mgr,
|
||||
struct fpga_image_info *info,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct ice40_fpga_priv *priv = mgr->priv;
|
||||
struct spi_device *dev = priv->dev;
|
||||
struct spi_message message;
|
||||
struct spi_transfer assert_cs_then_reset_delay = {
|
||||
.cs_change = 1,
|
||||
.delay_usecs = ICE40_SPI_RESET_DELAY
|
||||
};
|
||||
struct spi_transfer housekeeping_delay_then_release_cs = {
|
||||
.delay_usecs = ICE40_SPI_HOUSEKEEPING_DELAY
|
||||
};
|
||||
int ret;
|
||||
|
||||
if ((info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
|
||||
dev_err(&dev->dev,
|
||||
"Partial reconfiguration is not supported\n");
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
|
||||
/* Lock the bus, assert CRESET_B and SS_B and delay >200ns */
|
||||
spi_bus_lock(dev->master);
|
||||
|
||||
gpiod_set_value(priv->reset, 1);
|
||||
|
||||
spi_message_init(&message);
|
||||
spi_message_add_tail(&assert_cs_then_reset_delay, &message);
|
||||
ret = spi_sync_locked(dev, &message);
|
||||
|
||||
/* Come out of reset */
|
||||
gpiod_set_value(priv->reset, 0);
|
||||
|
||||
/* Abort if the chip-select failed */
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
/* Check CDONE is de-asserted i.e. the FPGA is reset */
|
||||
if (gpiod_get_value(priv->cdone)) {
|
||||
dev_err(&dev->dev, "Device reset failed, CDONE is asserted\n");
|
||||
ret = -EIO;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* Wait for the housekeeping to complete, and release SS_B */
|
||||
spi_message_init(&message);
|
||||
spi_message_add_tail(&housekeeping_delay_then_release_cs, &message);
|
||||
ret = spi_sync_locked(dev, &message);
|
||||
|
||||
fail:
|
||||
spi_bus_unlock(dev->master);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ice40_fpga_ops_write(struct fpga_manager *mgr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct ice40_fpga_priv *priv = mgr->priv;
|
||||
|
||||
return spi_write(priv->dev, buf, count);
|
||||
}
|
||||
|
||||
static int ice40_fpga_ops_write_complete(struct fpga_manager *mgr,
|
||||
struct fpga_image_info *info)
|
||||
{
|
||||
struct ice40_fpga_priv *priv = mgr->priv;
|
||||
struct spi_device *dev = priv->dev;
|
||||
const u8 padding[ICE40_SPI_NUM_ACTIVATION_BYTES] = {0};
|
||||
|
||||
/* Check CDONE is asserted */
|
||||
if (!gpiod_get_value(priv->cdone)) {
|
||||
dev_err(&dev->dev,
|
||||
"CDONE was not asserted after firmware transfer\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/* Send of zero-padding to activate the firmware */
|
||||
return spi_write(dev, padding, sizeof(padding));
|
||||
}
|
||||
|
||||
static const struct fpga_manager_ops ice40_fpga_ops = {
|
||||
.state = ice40_fpga_ops_state,
|
||||
.write_init = ice40_fpga_ops_write_init,
|
||||
.write = ice40_fpga_ops_write,
|
||||
.write_complete = ice40_fpga_ops_write_complete,
|
||||
};
|
||||
|
||||
static int ice40_fpga_probe(struct spi_device *spi)
|
||||
{
|
||||
struct device *dev = &spi->dev;
|
||||
struct ice40_fpga_priv *priv;
|
||||
int ret;
|
||||
|
||||
priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
||||
priv->dev = spi;
|
||||
|
||||
/* Check board setup data. */
|
||||
if (spi->max_speed_hz > ICE40_SPI_MAX_SPEED) {
|
||||
dev_err(dev, "SPI speed is too high, maximum speed is "
|
||||
__stringify(ICE40_SPI_MAX_SPEED) "\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (spi->max_speed_hz < ICE40_SPI_MIN_SPEED) {
|
||||
dev_err(dev, "SPI speed is too low, minimum speed is "
|
||||
__stringify(ICE40_SPI_MIN_SPEED) "\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (spi->mode & SPI_CPHA) {
|
||||
dev_err(dev, "Bad SPI mode, CPHA not supported\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Set up the GPIOs */
|
||||
priv->cdone = devm_gpiod_get(dev, "cdone", GPIOD_IN);
|
||||
if (IS_ERR(priv->cdone)) {
|
||||
ret = PTR_ERR(priv->cdone);
|
||||
dev_err(dev, "Failed to get CDONE GPIO: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
priv->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
|
||||
if (IS_ERR(priv->reset)) {
|
||||
ret = PTR_ERR(priv->reset);
|
||||
dev_err(dev, "Failed to get CRESET_B GPIO: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Register with the FPGA manager */
|
||||
return fpga_mgr_register(dev, "Lattice iCE40 FPGA Manager",
|
||||
&ice40_fpga_ops, priv);
|
||||
}
|
||||
|
||||
static int ice40_fpga_remove(struct spi_device *spi)
|
||||
{
|
||||
fpga_mgr_unregister(&spi->dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id ice40_fpga_of_match[] = {
|
||||
{ .compatible = "lattice,ice40-fpga-mgr", },
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, ice40_fpga_of_match);
|
||||
|
||||
static struct spi_driver ice40_fpga_driver = {
|
||||
.probe = ice40_fpga_probe,
|
||||
.remove = ice40_fpga_remove,
|
||||
.driver = {
|
||||
.name = "ice40spi",
|
||||
.of_match_table = of_match_ptr(ice40_fpga_of_match),
|
||||
},
|
||||
};
|
||||
|
||||
module_spi_driver(ice40_fpga_driver);
|
||||
|
||||
MODULE_AUTHOR("Joel Holdsworth <joel@airwebreathe.org.uk>");
|
||||
MODULE_DESCRIPTION("Lattice iCE40 FPGA Manager");
|
||||
MODULE_LICENSE("GPL v2");
|
156
drivers/fpga/ts73xx-fpga.c
Normal file
156
drivers/fpga/ts73xx-fpga.c
Normal file
@ -0,0 +1,156 @@
|
||||
/*
|
||||
* Technologic Systems TS-73xx SBC FPGA loader
|
||||
*
|
||||
* Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com>
|
||||
*
|
||||
* FPGA Manager Driver for the on-board Altera Cyclone II FPGA found on
|
||||
* TS-7300, heavily based on load_fpga.c in their vendor tree.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; version 2 of the License.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/fpga/fpga-mgr.h>
|
||||
|
||||
#define TS73XX_FPGA_DATA_REG 0
|
||||
#define TS73XX_FPGA_CONFIG_REG 1
|
||||
|
||||
#define TS73XX_FPGA_WRITE_DONE 0x1
|
||||
#define TS73XX_FPGA_WRITE_DONE_TIMEOUT 1000 /* us */
|
||||
#define TS73XX_FPGA_RESET 0x2
|
||||
#define TS73XX_FPGA_RESET_LOW_DELAY 30 /* us */
|
||||
#define TS73XX_FPGA_RESET_HIGH_DELAY 80 /* us */
|
||||
#define TS73XX_FPGA_LOAD_OK 0x4
|
||||
#define TS73XX_FPGA_CONFIG_LOAD 0x8
|
||||
|
||||
struct ts73xx_fpga_priv {
|
||||
void __iomem *io_base;
|
||||
struct device *dev;
|
||||
};
|
||||
|
||||
static enum fpga_mgr_states ts73xx_fpga_state(struct fpga_manager *mgr)
|
||||
{
|
||||
return FPGA_MGR_STATE_UNKNOWN;
|
||||
}
|
||||
|
||||
static int ts73xx_fpga_write_init(struct fpga_manager *mgr,
|
||||
struct fpga_image_info *info,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct ts73xx_fpga_priv *priv = mgr->priv;
|
||||
|
||||
/* Reset the FPGA */
|
||||
writeb(0, priv->io_base + TS73XX_FPGA_CONFIG_REG);
|
||||
udelay(TS73XX_FPGA_RESET_LOW_DELAY);
|
||||
writeb(TS73XX_FPGA_RESET, priv->io_base + TS73XX_FPGA_CONFIG_REG);
|
||||
udelay(TS73XX_FPGA_RESET_HIGH_DELAY);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ts73xx_fpga_write(struct fpga_manager *mgr, const char *buf,
|
||||
size_t count)
|
||||
{
|
||||
struct ts73xx_fpga_priv *priv = mgr->priv;
|
||||
size_t i = 0;
|
||||
int ret;
|
||||
u8 reg;
|
||||
|
||||
while (count--) {
|
||||
ret = readb_poll_timeout(priv->io_base + TS73XX_FPGA_CONFIG_REG,
|
||||
reg, !(reg & TS73XX_FPGA_WRITE_DONE),
|
||||
1, TS73XX_FPGA_WRITE_DONE_TIMEOUT);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
writeb(buf[i], priv->io_base + TS73XX_FPGA_DATA_REG);
|
||||
i++;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ts73xx_fpga_write_complete(struct fpga_manager *mgr,
|
||||
struct fpga_image_info *info)
|
||||
{
|
||||
struct ts73xx_fpga_priv *priv = mgr->priv;
|
||||
u8 reg;
|
||||
|
||||
usleep_range(1000, 2000);
|
||||
reg = readb(priv->io_base + TS73XX_FPGA_CONFIG_REG);
|
||||
reg |= TS73XX_FPGA_CONFIG_LOAD;
|
||||
writeb(reg, priv->io_base + TS73XX_FPGA_CONFIG_REG);
|
||||
|
||||
usleep_range(1000, 2000);
|
||||
reg = readb(priv->io_base + TS73XX_FPGA_CONFIG_REG);
|
||||
reg &= ~TS73XX_FPGA_CONFIG_LOAD;
|
||||
writeb(reg, priv->io_base + TS73XX_FPGA_CONFIG_REG);
|
||||
|
||||
reg = readb(priv->io_base + TS73XX_FPGA_CONFIG_REG);
|
||||
if ((reg & TS73XX_FPGA_LOAD_OK) != TS73XX_FPGA_LOAD_OK)
|
||||
return -ETIMEDOUT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct fpga_manager_ops ts73xx_fpga_ops = {
|
||||
.state = ts73xx_fpga_state,
|
||||
.write_init = ts73xx_fpga_write_init,
|
||||
.write = ts73xx_fpga_write,
|
||||
.write_complete = ts73xx_fpga_write_complete,
|
||||
};
|
||||
|
||||
static int ts73xx_fpga_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device *kdev = &pdev->dev;
|
||||
struct ts73xx_fpga_priv *priv;
|
||||
struct resource *res;
|
||||
|
||||
priv = devm_kzalloc(kdev, sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
||||
priv->dev = kdev;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
priv->io_base = devm_ioremap_resource(kdev, res);
|
||||
if (IS_ERR(priv->io_base)) {
|
||||
dev_err(kdev, "unable to remap registers\n");
|
||||
return PTR_ERR(priv->io_base);
|
||||
}
|
||||
|
||||
return fpga_mgr_register(kdev, "TS-73xx FPGA Manager",
|
||||
&ts73xx_fpga_ops, priv);
|
||||
}
|
||||
|
||||
static int ts73xx_fpga_remove(struct platform_device *pdev)
|
||||
{
|
||||
fpga_mgr_unregister(&pdev->dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct platform_driver ts73xx_fpga_driver = {
|
||||
.driver = {
|
||||
.name = "ts73xx-fpga-mgr",
|
||||
},
|
||||
.probe = ts73xx_fpga_probe,
|
||||
.remove = ts73xx_fpga_remove,
|
||||
};
|
||||
module_platform_driver(ts73xx_fpga_driver);
|
||||
|
||||
MODULE_AUTHOR("Florian Fainelli <f.fainelli@gmail.com>");
|
||||
MODULE_DESCRIPTION("TS-73xx FPGA Manager driver");
|
||||
MODULE_LICENSE("GPL v2");
|
@ -72,6 +72,10 @@
|
||||
#define CTRL_PCAP_PR_MASK BIT(27)
|
||||
/* Enable PCAP */
|
||||
#define CTRL_PCAP_MODE_MASK BIT(26)
|
||||
/* Lower rate to allow decrypt on the fly */
|
||||
#define CTRL_PCAP_RATE_EN_MASK BIT(25)
|
||||
/* System booted in secure mode */
|
||||
#define CTRL_SEC_EN_MASK BIT(7)
|
||||
|
||||
/* Miscellaneous Control Register bit definitions */
|
||||
/* Internal PCAP loopback */
|
||||
@ -266,6 +270,17 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* check if bitstream is encrypted & and system's still secure */
|
||||
if (info->flags & FPGA_MGR_ENCRYPTED_BITSTREAM) {
|
||||
ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
|
||||
if (!(ctrl & CTRL_SEC_EN_MASK)) {
|
||||
dev_err(&mgr->dev,
|
||||
"System not secure, can't use crypted bitstreams\n");
|
||||
err = -EINVAL;
|
||||
goto out_err;
|
||||
}
|
||||
}
|
||||
|
||||
/* don't globally reset PL if we're doing partial reconfig */
|
||||
if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
|
||||
if (!zynq_fpga_has_sync(buf, count)) {
|
||||
@ -337,12 +352,19 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
|
||||
|
||||
/* set configuration register with following options:
|
||||
* - enable PCAP interface
|
||||
* - set throughput for maximum speed
|
||||
* - set throughput for maximum speed (if bistream not crypted)
|
||||
* - set CPU in user mode
|
||||
*/
|
||||
ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
|
||||
zynq_fpga_write(priv, CTRL_OFFSET,
|
||||
(CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK | ctrl));
|
||||
if (info->flags & FPGA_MGR_ENCRYPTED_BITSTREAM)
|
||||
zynq_fpga_write(priv, CTRL_OFFSET,
|
||||
(CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK
|
||||
| CTRL_PCAP_RATE_EN_MASK | ctrl));
|
||||
else
|
||||
zynq_fpga_write(priv, CTRL_OFFSET,
|
||||
(CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK
|
||||
| ctrl));
|
||||
|
||||
|
||||
/* We expect that the command queue is empty right now. */
|
||||
status = zynq_fpga_read(priv, STATUS_OFFSET);
|
||||
|
@ -333,7 +333,7 @@ static int create_gpadl_header(void *kbuffer, u32 size,
|
||||
* Gpadl is u32 and we are using a pointer which could
|
||||
* be 64-bit
|
||||
* This is governed by the guest/host protocol and
|
||||
* so the hypervisor gurantees that this is ok.
|
||||
* so the hypervisor guarantees that this is ok.
|
||||
*/
|
||||
for (i = 0; i < pfncurr; i++)
|
||||
gpadl_body->pfn[i] = slow_virt_to_phys(
|
||||
@ -380,7 +380,7 @@ nomem:
|
||||
}
|
||||
|
||||
/*
|
||||
* vmbus_establish_gpadl - Estabish a GPADL for the specified buffer
|
||||
* vmbus_establish_gpadl - Establish a GPADL for the specified buffer
|
||||
*
|
||||
* @channel: a channel
|
||||
* @kbuffer: from kmalloc or vmalloc
|
||||
@ -731,7 +731,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
|
||||
/* Setup the descriptor */
|
||||
desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
|
||||
desc.flags = flags;
|
||||
desc.dataoffset8 = descsize >> 3; /* in 8-bytes grandularity */
|
||||
desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
|
||||
desc.length8 = (u16)(packetlen_aligned >> 3);
|
||||
desc.transactionid = requestid;
|
||||
desc.rangecount = pagecount;
|
||||
@ -792,7 +792,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
|
||||
/* Setup the descriptor */
|
||||
desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
|
||||
desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
|
||||
desc->dataoffset8 = desc_size >> 3; /* in 8-bytes grandularity */
|
||||
desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
|
||||
desc->length8 = (u16)(packetlen_aligned >> 3);
|
||||
desc->transactionid = requestid;
|
||||
desc->rangecount = 1;
|
||||
@ -842,7 +842,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
|
||||
/* Setup the descriptor */
|
||||
desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
|
||||
desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
|
||||
desc.dataoffset8 = descsize >> 3; /* in 8-bytes grandularity */
|
||||
desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
|
||||
desc.length8 = (u16)(packetlen_aligned >> 3);
|
||||
desc.transactionid = requestid;
|
||||
desc.rangecount = 1;
|
||||
|
@ -1080,30 +1080,30 @@ static void vmbus_onversion_response(
|
||||
}
|
||||
|
||||
/* Channel message dispatch table */
|
||||
struct vmbus_channel_message_table_entry
|
||||
channel_message_table[CHANNELMSG_COUNT] = {
|
||||
{CHANNELMSG_INVALID, 0, NULL},
|
||||
{CHANNELMSG_OFFERCHANNEL, 0, vmbus_onoffer},
|
||||
{CHANNELMSG_RESCIND_CHANNELOFFER, 0, vmbus_onoffer_rescind},
|
||||
{CHANNELMSG_REQUESTOFFERS, 0, NULL},
|
||||
{CHANNELMSG_ALLOFFERS_DELIVERED, 1, vmbus_onoffers_delivered},
|
||||
{CHANNELMSG_OPENCHANNEL, 0, NULL},
|
||||
{CHANNELMSG_OPENCHANNEL_RESULT, 1, vmbus_onopen_result},
|
||||
{CHANNELMSG_CLOSECHANNEL, 0, NULL},
|
||||
{CHANNELMSG_GPADL_HEADER, 0, NULL},
|
||||
{CHANNELMSG_GPADL_BODY, 0, NULL},
|
||||
{CHANNELMSG_GPADL_CREATED, 1, vmbus_ongpadl_created},
|
||||
{CHANNELMSG_GPADL_TEARDOWN, 0, NULL},
|
||||
{CHANNELMSG_GPADL_TORNDOWN, 1, vmbus_ongpadl_torndown},
|
||||
{CHANNELMSG_RELID_RELEASED, 0, NULL},
|
||||
{CHANNELMSG_INITIATE_CONTACT, 0, NULL},
|
||||
{CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response},
|
||||
{CHANNELMSG_UNLOAD, 0, NULL},
|
||||
{CHANNELMSG_UNLOAD_RESPONSE, 1, vmbus_unload_response},
|
||||
{CHANNELMSG_18, 0, NULL},
|
||||
{CHANNELMSG_19, 0, NULL},
|
||||
{CHANNELMSG_20, 0, NULL},
|
||||
{CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL},
|
||||
const struct vmbus_channel_message_table_entry
|
||||
channel_message_table[CHANNELMSG_COUNT] = {
|
||||
{ CHANNELMSG_INVALID, 0, NULL },
|
||||
{ CHANNELMSG_OFFERCHANNEL, 0, vmbus_onoffer },
|
||||
{ CHANNELMSG_RESCIND_CHANNELOFFER, 0, vmbus_onoffer_rescind },
|
||||
{ CHANNELMSG_REQUESTOFFERS, 0, NULL },
|
||||
{ CHANNELMSG_ALLOFFERS_DELIVERED, 1, vmbus_onoffers_delivered },
|
||||
{ CHANNELMSG_OPENCHANNEL, 0, NULL },
|
||||
{ CHANNELMSG_OPENCHANNEL_RESULT, 1, vmbus_onopen_result },
|
||||
{ CHANNELMSG_CLOSECHANNEL, 0, NULL },
|
||||
{ CHANNELMSG_GPADL_HEADER, 0, NULL },
|
||||
{ CHANNELMSG_GPADL_BODY, 0, NULL },
|
||||
{ CHANNELMSG_GPADL_CREATED, 1, vmbus_ongpadl_created },
|
||||
{ CHANNELMSG_GPADL_TEARDOWN, 0, NULL },
|
||||
{ CHANNELMSG_GPADL_TORNDOWN, 1, vmbus_ongpadl_torndown },
|
||||
{ CHANNELMSG_RELID_RELEASED, 0, NULL },
|
||||
{ CHANNELMSG_INITIATE_CONTACT, 0, NULL },
|
||||
{ CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response },
|
||||
{ CHANNELMSG_UNLOAD, 0, NULL },
|
||||
{ CHANNELMSG_UNLOAD_RESPONSE, 1, vmbus_unload_response },
|
||||
{ CHANNELMSG_18, 0, NULL },
|
||||
{ CHANNELMSG_19, 0, NULL },
|
||||
{ CHANNELMSG_20, 0, NULL },
|
||||
{ CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL },
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -296,44 +296,47 @@ struct vmbus_channel *relid2channel(u32 relid)
|
||||
|
||||
/*
|
||||
* vmbus_on_event - Process a channel event notification
|
||||
*
|
||||
* For batched channels (default) optimize host to guest signaling
|
||||
* by ensuring:
|
||||
* 1. While reading the channel, we disable interrupts from host.
|
||||
* 2. Ensure that we process all posted messages from the host
|
||||
* before returning from this callback.
|
||||
* 3. Once we return, enable signaling from the host. Once this
|
||||
* state is set we check to see if additional packets are
|
||||
* available to read. In this case we repeat the process.
|
||||
* If this tasklet has been running for a long time
|
||||
* then reschedule ourselves.
|
||||
*/
|
||||
void vmbus_on_event(unsigned long data)
|
||||
{
|
||||
struct vmbus_channel *channel = (void *) data;
|
||||
void (*callback_fn)(void *);
|
||||
unsigned long time_limit = jiffies + 2;
|
||||
|
||||
/*
|
||||
* A channel once created is persistent even when there
|
||||
* is no driver handling the device. An unloading driver
|
||||
* sets the onchannel_callback to NULL on the same CPU
|
||||
* as where this interrupt is handled (in an interrupt context).
|
||||
* Thus, checking and invoking the driver specific callback takes
|
||||
* care of orderly unloading of the driver.
|
||||
*/
|
||||
callback_fn = READ_ONCE(channel->onchannel_callback);
|
||||
if (unlikely(callback_fn == NULL))
|
||||
return;
|
||||
do {
|
||||
void (*callback_fn)(void *);
|
||||
|
||||
(*callback_fn)(channel->channel_callback_context);
|
||||
|
||||
if (channel->callback_mode == HV_CALL_BATCHED) {
|
||||
/*
|
||||
* This callback reads the messages sent by the host.
|
||||
* We can optimize host to guest signaling by ensuring:
|
||||
* 1. While reading the channel, we disable interrupts from
|
||||
* host.
|
||||
* 2. Ensure that we process all posted messages from the host
|
||||
* before returning from this callback.
|
||||
* 3. Once we return, enable signaling from the host. Once this
|
||||
* state is set we check to see if additional packets are
|
||||
* available to read. In this case we repeat the process.
|
||||
/* A channel once created is persistent even when
|
||||
* there is no driver handling the device. An
|
||||
* unloading driver sets the onchannel_callback to NULL.
|
||||
*/
|
||||
if (hv_end_read(&channel->inbound) != 0) {
|
||||
hv_begin_read(&channel->inbound);
|
||||
callback_fn = READ_ONCE(channel->onchannel_callback);
|
||||
if (unlikely(callback_fn == NULL))
|
||||
return;
|
||||
|
||||
tasklet_schedule(&channel->callback_event);
|
||||
}
|
||||
}
|
||||
(*callback_fn)(channel->channel_callback_context);
|
||||
|
||||
if (channel->callback_mode != HV_CALL_BATCHED)
|
||||
return;
|
||||
|
||||
if (likely(hv_end_read(&channel->inbound) == 0))
|
||||
return;
|
||||
|
||||
hv_begin_read(&channel->inbound);
|
||||
} while (likely(time_before(jiffies, time_limit)));
|
||||
|
||||
/* The time limit (2 jiffies) has been reached */
|
||||
tasklet_schedule(&channel->callback_event);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -254,7 +254,10 @@ int hv_synic_init(unsigned int cpu)
|
||||
shared_sint.as_uint64 = 0;
|
||||
shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
|
||||
shared_sint.masked = false;
|
||||
shared_sint.auto_eoi = true;
|
||||
if (ms_hyperv.hints & HV_X64_DEPRECATING_AEOI_RECOMMENDED)
|
||||
shared_sint.auto_eoi = false;
|
||||
else
|
||||
shared_sint.auto_eoi = true;
|
||||
|
||||
hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
|
||||
shared_sint.as_uint64);
|
||||
|
@ -722,8 +722,6 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
|
||||
5*HZ);
|
||||
post_status(&dm_device);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static void hv_online_page(struct page *pg)
|
||||
|
@ -186,8 +186,6 @@ static void fcopy_send_data(struct work_struct *dummy)
|
||||
}
|
||||
}
|
||||
kfree(smsg_out);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -69,7 +69,7 @@ static const int fw_versions[] = {
|
||||
*
|
||||
* While the request/response protocol is guaranteed by the host, we further
|
||||
* ensure this by serializing packet processing in this driver - we do not
|
||||
* read additional packets from the VMBUs until the current packet is fully
|
||||
* read additional packets from the VMBUS until the current packet is fully
|
||||
* handled.
|
||||
*/
|
||||
|
||||
@ -397,7 +397,7 @@ kvp_send_key(struct work_struct *dummy)
|
||||
* the max lengths specified. We will however, reserve room
|
||||
* for the string terminating character - in the utf16s_utf8s()
|
||||
* function we limit the size of the buffer where the converted
|
||||
* string is placed to HV_KVP_EXCHANGE_MAX_*_SIZE -1 to gaurantee
|
||||
* string is placed to HV_KVP_EXCHANGE_MAX_*_SIZE -1 to guarantee
|
||||
* that the strings can be properly terminated!
|
||||
*/
|
||||
|
||||
@ -483,8 +483,6 @@ kvp_send_key(struct work_struct *dummy)
|
||||
}
|
||||
|
||||
kfree(message);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -533,7 +531,7 @@ kvp_respond_to_host(struct hv_kvp_msg *msg_to_host, int error)
|
||||
*/
|
||||
if (error) {
|
||||
/*
|
||||
* Something failed or we have timedout;
|
||||
* Something failed or we have timed out;
|
||||
* terminate the current host-side iteration.
|
||||
*/
|
||||
goto response_done;
|
||||
@ -607,8 +605,8 @@ response_done:
|
||||
* This callback is invoked when we get a KVP message from the host.
|
||||
* The host ensures that only one KVP transaction can be active at a time.
|
||||
* KVP implementation in Linux needs to forward the key to a user-mde
|
||||
* component to retrive the corresponding value. Consequently, we cannot
|
||||
* respond to the host in the conext of this callback. Since the host
|
||||
* component to retrieve the corresponding value. Consequently, we cannot
|
||||
* respond to the host in the context of this callback. Since the host
|
||||
* guarantees that at most only one transaction can be active at a time,
|
||||
* we stash away the transaction state in a set of global variables.
|
||||
*/
|
||||
|
@ -212,8 +212,6 @@ static void vss_send_op(void)
|
||||
}
|
||||
|
||||
kfree(vss_msg);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static void vss_handle_request(struct work_struct *dummy)
|
||||
|
@ -218,8 +218,8 @@ struct hv_per_cpu_context {
|
||||
|
||||
struct hv_context {
|
||||
/* We only support running on top of Hyper-V
|
||||
* So at this point this really can only contain the Hyper-V ID
|
||||
*/
|
||||
* So at this point this really can only contain the Hyper-V ID
|
||||
*/
|
||||
u64 guestid;
|
||||
|
||||
void *tsc_page;
|
||||
@ -248,14 +248,6 @@ struct hv_context {
|
||||
|
||||
extern struct hv_context hv_context;
|
||||
|
||||
struct hv_ring_buffer_debug_info {
|
||||
u32 current_interrupt_mask;
|
||||
u32 current_read_index;
|
||||
u32 current_write_index;
|
||||
u32 bytes_avail_toread;
|
||||
u32 bytes_avail_towrite;
|
||||
};
|
||||
|
||||
/* Hv Interface */
|
||||
|
||||
extern int hv_init(void);
|
||||
@ -289,9 +281,6 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
|
||||
void *buffer, u32 buflen, u32 *buffer_actual_len,
|
||||
u64 *requestid, bool raw);
|
||||
|
||||
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
|
||||
struct hv_ring_buffer_debug_info *debug_info);
|
||||
|
||||
/*
|
||||
* Maximum channels is determined by the size of the interrupt page
|
||||
* which is PAGE_SIZE. 1/2 of PAGE_SIZE is for send endpoint interrupt
|
||||
@ -376,7 +365,7 @@ struct vmbus_channel_message_table_entry {
|
||||
void (*message_handler)(struct vmbus_channel_message_header *msg);
|
||||
};
|
||||
|
||||
extern struct vmbus_channel_message_table_entry
|
||||
extern const struct vmbus_channel_message_table_entry
|
||||
channel_message_table[CHANNELMSG_COUNT];
|
||||
|
||||
|
||||
@ -403,17 +392,17 @@ int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep);
|
||||
void vmbus_on_event(unsigned long data);
|
||||
void vmbus_on_msg_dpc(unsigned long data);
|
||||
|
||||
int hv_kvp_init(struct hv_util_service *);
|
||||
int hv_kvp_init(struct hv_util_service *srv);
|
||||
void hv_kvp_deinit(void);
|
||||
void hv_kvp_onchannelcallback(void *);
|
||||
void hv_kvp_onchannelcallback(void *context);
|
||||
|
||||
int hv_vss_init(struct hv_util_service *);
|
||||
int hv_vss_init(struct hv_util_service *srv);
|
||||
void hv_vss_deinit(void);
|
||||
void hv_vss_onchannelcallback(void *);
|
||||
void hv_vss_onchannelcallback(void *context);
|
||||
|
||||
int hv_fcopy_init(struct hv_util_service *);
|
||||
int hv_fcopy_init(struct hv_util_service *srv);
|
||||
void hv_fcopy_deinit(void);
|
||||
void hv_fcopy_onchannelcallback(void *);
|
||||
void hv_fcopy_onchannelcallback(void *context);
|
||||
void vmbus_initiate_unload(bool crash);
|
||||
|
||||
static inline void hv_poll_channel(struct vmbus_channel *channel,
|
||||
|
@ -73,8 +73,6 @@ static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
|
||||
*/
|
||||
if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
|
||||
vmbus_setevent(channel);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/* Get the next write location for the specified ring buffer. */
|
||||
@ -208,6 +206,7 @@ void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
|
||||
ring_info->ring_buffer->interrupt_mask;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
|
||||
|
||||
/* Initialize the ring buffer. */
|
||||
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
|
||||
@ -267,14 +266,13 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
|
||||
int hv_ringbuffer_write(struct vmbus_channel *channel,
|
||||
const struct kvec *kv_list, u32 kv_count)
|
||||
{
|
||||
int i = 0;
|
||||
int i;
|
||||
u32 bytes_avail_towrite;
|
||||
u32 totalbytes_towrite = 0;
|
||||
|
||||
u32 totalbytes_towrite = sizeof(u64);
|
||||
u32 next_write_location;
|
||||
u32 old_write;
|
||||
u64 prev_indices = 0;
|
||||
unsigned long flags = 0;
|
||||
u64 prev_indices;
|
||||
unsigned long flags;
|
||||
struct hv_ring_buffer_info *outring_info = &channel->outbound;
|
||||
|
||||
if (channel->rescind)
|
||||
@ -283,8 +281,6 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
|
||||
for (i = 0; i < kv_count; i++)
|
||||
totalbytes_towrite += kv_list[i].iov_len;
|
||||
|
||||
totalbytes_towrite += sizeof(u64);
|
||||
|
||||
spin_lock_irqsave(&outring_info->ring_lock, flags);
|
||||
|
||||
bytes_avail_towrite = hv_get_bytes_to_write(outring_info);
|
||||
@ -341,18 +337,16 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
|
||||
u64 *requestid, bool raw)
|
||||
{
|
||||
u32 bytes_avail_toread;
|
||||
u32 next_read_location = 0;
|
||||
u32 next_read_location;
|
||||
u64 prev_indices = 0;
|
||||
struct vmpacket_descriptor desc;
|
||||
u32 offset;
|
||||
u32 packetlen;
|
||||
int ret = 0;
|
||||
struct hv_ring_buffer_info *inring_info = &channel->inbound;
|
||||
|
||||
if (buflen <= 0)
|
||||
return -EINVAL;
|
||||
|
||||
|
||||
*buffer_actual_len = 0;
|
||||
*requestid = 0;
|
||||
|
||||
@ -363,7 +357,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
|
||||
* No error is set when there is even no header, drivers are
|
||||
* supposed to analyze buffer_actual_len.
|
||||
*/
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
init_cached_read_index(channel);
|
||||
@ -408,5 +402,5 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
|
||||
|
||||
hv_signal_on_read(channel);
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
@ -787,8 +787,6 @@ static void vmbus_shutdown(struct device *child_device)
|
||||
|
||||
if (drv->shutdown)
|
||||
drv->shutdown(dev);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
@ -855,7 +853,7 @@ void vmbus_on_msg_dpc(unsigned long data)
|
||||
struct hv_message *msg = (struct hv_message *)page_addr +
|
||||
VMBUS_MESSAGE_SINT;
|
||||
struct vmbus_channel_message_header *hdr;
|
||||
struct vmbus_channel_message_table_entry *entry;
|
||||
const struct vmbus_channel_message_table_entry *entry;
|
||||
struct onmessage_work_context *ctx;
|
||||
u32 message_type = msg->header.message_type;
|
||||
|
||||
|
@ -200,6 +200,7 @@ config BLK_DEV_DM_BUILTIN
|
||||
config BLK_DEV_DM
|
||||
tristate "Device mapper support"
|
||||
select BLK_DEV_DM_BUILTIN
|
||||
select DAX
|
||||
---help---
|
||||
Device-mapper is a low level volume manager. It works by allowing
|
||||
people to specify mappings for ranges of logical sectors. Various
|
||||
|
@ -58,6 +58,7 @@ struct mapped_device {
|
||||
struct target_type *immutable_target_type;
|
||||
|
||||
struct gendisk *disk;
|
||||
struct dax_device *dax_dev;
|
||||
char name[16];
|
||||
|
||||
void *interface_ptr;
|
||||
|
@ -9,6 +9,7 @@
|
||||
#include <linux/init.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/bio.h>
|
||||
#include <linux/dax.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/device-mapper.h>
|
||||
|
||||
@ -141,22 +142,20 @@ static int linear_iterate_devices(struct dm_target *ti,
|
||||
return fn(ti, lc->dev, lc->start, ti->len, data);
|
||||
}
|
||||
|
||||
static long linear_direct_access(struct dm_target *ti, sector_t sector,
|
||||
void **kaddr, pfn_t *pfn, long size)
|
||||
static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
|
||||
long nr_pages, void **kaddr, pfn_t *pfn)
|
||||
{
|
||||
long ret;
|
||||
struct linear_c *lc = ti->private;
|
||||
struct block_device *bdev = lc->dev->bdev;
|
||||
struct blk_dax_ctl dax = {
|
||||
.sector = linear_map_sector(ti, sector),
|
||||
.size = size,
|
||||
};
|
||||
long ret;
|
||||
struct dax_device *dax_dev = lc->dev->dax_dev;
|
||||
sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
|
||||
|
||||
ret = bdev_direct_access(bdev, &dax);
|
||||
*kaddr = dax.addr;
|
||||
*pfn = dax.pfn;
|
||||
|
||||
return ret;
|
||||
dev_sector = linear_map_sector(ti, sector);
|
||||
ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
|
||||
if (ret)
|
||||
return ret;
|
||||
return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
|
||||
}
|
||||
|
||||
static struct target_type linear_target = {
|
||||
@ -169,7 +168,7 @@ static struct target_type linear_target = {
|
||||
.status = linear_status,
|
||||
.prepare_ioctl = linear_prepare_ioctl,
|
||||
.iterate_devices = linear_iterate_devices,
|
||||
.direct_access = linear_direct_access,
|
||||
.direct_access = linear_dax_direct_access,
|
||||
};
|
||||
|
||||
int __init dm_linear_init(void)
|
||||
|
@ -2302,8 +2302,8 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
|
||||
return do_origin(o->dev, bio);
|
||||
}
|
||||
|
||||
static long origin_direct_access(struct dm_target *ti, sector_t sector,
|
||||
void **kaddr, pfn_t *pfn, long size)
|
||||
static long origin_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
|
||||
long nr_pages, void **kaddr, pfn_t *pfn)
|
||||
{
|
||||
DMWARN("device does not support dax.");
|
||||
return -EIO;
|
||||
@ -2368,7 +2368,7 @@ static struct target_type origin_target = {
|
||||
.postsuspend = origin_postsuspend,
|
||||
.status = origin_status,
|
||||
.iterate_devices = origin_iterate_devices,
|
||||
.direct_access = origin_direct_access,
|
||||
.direct_access = origin_dax_direct_access,
|
||||
};
|
||||
|
||||
static struct target_type snapshot_target = {
|
||||
|
@ -11,6 +11,7 @@
|
||||
#include <linux/init.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/bio.h>
|
||||
#include <linux/dax.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/log2.h>
|
||||
|
||||
@ -308,27 +309,25 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
|
||||
return DM_MAPIO_REMAPPED;
|
||||
}
|
||||
|
||||
static long stripe_direct_access(struct dm_target *ti, sector_t sector,
|
||||
void **kaddr, pfn_t *pfn, long size)
|
||||
static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
|
||||
long nr_pages, void **kaddr, pfn_t *pfn)
|
||||
{
|
||||
sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
|
||||
struct stripe_c *sc = ti->private;
|
||||
uint32_t stripe;
|
||||
struct dax_device *dax_dev;
|
||||
struct block_device *bdev;
|
||||
struct blk_dax_ctl dax = {
|
||||
.size = size,
|
||||
};
|
||||
uint32_t stripe;
|
||||
long ret;
|
||||
|
||||
stripe_map_sector(sc, sector, &stripe, &dax.sector);
|
||||
|
||||
dax.sector += sc->stripe[stripe].physical_start;
|
||||
stripe_map_sector(sc, sector, &stripe, &dev_sector);
|
||||
dev_sector += sc->stripe[stripe].physical_start;
|
||||
dax_dev = sc->stripe[stripe].dev->dax_dev;
|
||||
bdev = sc->stripe[stripe].dev->bdev;
|
||||
|
||||
ret = bdev_direct_access(bdev, &dax);
|
||||
*kaddr = dax.addr;
|
||||
*pfn = dax.pfn;
|
||||
|
||||
return ret;
|
||||
ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
|
||||
if (ret)
|
||||
return ret;
|
||||
return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -448,7 +447,7 @@ static struct target_type stripe_target = {
|
||||
.status = stripe_status,
|
||||
.iterate_devices = stripe_iterate_devices,
|
||||
.io_hints = stripe_io_hints,
|
||||
.direct_access = stripe_direct_access,
|
||||
.direct_access = stripe_dax_direct_access,
|
||||
};
|
||||
|
||||
int __init dm_stripe_init(void)
|
||||
|
@ -142,8 +142,8 @@ static void io_err_release_clone_rq(struct request *clone)
|
||||
{
|
||||
}
|
||||
|
||||
static long io_err_direct_access(struct dm_target *ti, sector_t sector,
|
||||
void **kaddr, pfn_t *pfn, long size)
|
||||
static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
|
||||
long nr_pages, void **kaddr, pfn_t *pfn)
|
||||
{
|
||||
return -EIO;
|
||||
}
|
||||
@ -157,7 +157,7 @@ static struct target_type error_target = {
|
||||
.map = io_err_map,
|
||||
.clone_and_map_rq = io_err_clone_and_map_rq,
|
||||
.release_clone_rq = io_err_release_clone_rq,
|
||||
.direct_access = io_err_direct_access,
|
||||
.direct_access = io_err_dax_direct_access,
|
||||
};
|
||||
|
||||
int __init dm_target_init(void)
|
||||
|
@ -16,6 +16,7 @@
|
||||
#include <linux/blkpg.h>
|
||||
#include <linux/bio.h>
|
||||
#include <linux/mempool.h>
|
||||
#include <linux/dax.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/idr.h>
|
||||
#include <linux/hdreg.h>
|
||||
@ -629,6 +630,7 @@ static int open_table_device(struct table_device *td, dev_t dev,
|
||||
}
|
||||
|
||||
td->dm_dev.bdev = bdev;
|
||||
td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -642,7 +644,9 @@ static void close_table_device(struct table_device *td, struct mapped_device *md
|
||||
|
||||
bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
|
||||
blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
|
||||
put_dax(td->dm_dev.dax_dev);
|
||||
td->dm_dev.bdev = NULL;
|
||||
td->dm_dev.dax_dev = NULL;
|
||||
}
|
||||
|
||||
static struct table_device *find_table_device(struct list_head *l, dev_t dev,
|
||||
@ -908,31 +912,49 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
|
||||
|
||||
static long dm_blk_direct_access(struct block_device *bdev, sector_t sector,
|
||||
void **kaddr, pfn_t *pfn, long size)
|
||||
static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
|
||||
sector_t sector, int *srcu_idx)
|
||||
{
|
||||
struct mapped_device *md = bdev->bd_disk->private_data;
|
||||
struct dm_table *map;
|
||||
struct dm_target *ti;
|
||||
int srcu_idx;
|
||||
long len, ret = -EIO;
|
||||
|
||||
map = dm_get_live_table(md, &srcu_idx);
|
||||
map = dm_get_live_table(md, srcu_idx);
|
||||
if (!map)
|
||||
goto out;
|
||||
return NULL;
|
||||
|
||||
ti = dm_table_find_target(map, sector);
|
||||
if (!dm_target_is_valid(ti))
|
||||
return NULL;
|
||||
|
||||
return ti;
|
||||
}
|
||||
|
||||
static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
|
||||
long nr_pages, void **kaddr, pfn_t *pfn)
|
||||
{
|
||||
struct mapped_device *md = dax_get_private(dax_dev);
|
||||
sector_t sector = pgoff * PAGE_SECTORS;
|
||||
struct dm_target *ti;
|
||||
long len, ret = -EIO;
|
||||
int srcu_idx;
|
||||
|
||||
ti = dm_dax_get_live_target(md, sector, &srcu_idx);
|
||||
|
||||
if (!ti)
|
||||
goto out;
|
||||
|
||||
len = max_io_len(sector, ti) << SECTOR_SHIFT;
|
||||
size = min(len, size);
|
||||
|
||||
if (!ti->type->direct_access)
|
||||
goto out;
|
||||
len = max_io_len(sector, ti) / PAGE_SECTORS;
|
||||
if (len < 1)
|
||||
goto out;
|
||||
nr_pages = min(len, nr_pages);
|
||||
if (ti->type->direct_access)
|
||||
ret = ti->type->direct_access(ti, sector, kaddr, pfn, size);
|
||||
out:
|
||||
ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
|
||||
|
||||
out:
|
||||
dm_put_live_table(md, srcu_idx);
|
||||
return min(ret, size);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1437,6 +1459,7 @@ static int next_free_minor(int *minor)
|
||||
}
|
||||
|
||||
static const struct block_device_operations dm_blk_dops;
|
||||
static const struct dax_operations dm_dax_ops;
|
||||
|
||||
static void dm_wq_work(struct work_struct *work);
|
||||
|
||||
@ -1483,6 +1506,12 @@ static void cleanup_mapped_device(struct mapped_device *md)
|
||||
if (md->bs)
|
||||
bioset_free(md->bs);
|
||||
|
||||
if (md->dax_dev) {
|
||||
kill_dax(md->dax_dev);
|
||||
put_dax(md->dax_dev);
|
||||
md->dax_dev = NULL;
|
||||
}
|
||||
|
||||
if (md->disk) {
|
||||
spin_lock(&_minor_lock);
|
||||
md->disk->private_data = NULL;
|
||||
@ -1510,6 +1539,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
|
||||
static struct mapped_device *alloc_dev(int minor)
|
||||
{
|
||||
int r, numa_node_id = dm_get_numa_node();
|
||||
struct dax_device *dax_dev;
|
||||
struct mapped_device *md;
|
||||
void *old_md;
|
||||
|
||||
@ -1574,6 +1604,12 @@ static struct mapped_device *alloc_dev(int minor)
|
||||
md->disk->queue = md->queue;
|
||||
md->disk->private_data = md;
|
||||
sprintf(md->disk->disk_name, "dm-%d", minor);
|
||||
|
||||
dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops);
|
||||
if (!dax_dev)
|
||||
goto bad;
|
||||
md->dax_dev = dax_dev;
|
||||
|
||||
add_disk(md->disk);
|
||||
format_dev_t(md->name, MKDEV(_major, minor));
|
||||
|
||||
@ -2775,12 +2811,15 @@ static const struct block_device_operations dm_blk_dops = {
|
||||
.open = dm_blk_open,
|
||||
.release = dm_blk_close,
|
||||
.ioctl = dm_blk_ioctl,
|
||||
.direct_access = dm_blk_direct_access,
|
||||
.getgeo = dm_blk_getgeo,
|
||||
.pr_ops = &dm_pr_ops,
|
||||
.owner = THIS_MODULE
|
||||
};
|
||||
|
||||
static const struct dax_operations dm_dax_ops = {
|
||||
.direct_access = dm_dax_direct_access,
|
||||
};
|
||||
|
||||
/*
|
||||
* module hooks
|
||||
*/
|
||||
|
@ -495,6 +495,7 @@ config VEXPRESS_SYSCFG
|
||||
config PANEL
|
||||
tristate "Parallel port LCD/Keypad Panel support"
|
||||
depends on PARPORT
|
||||
select CHARLCD
|
||||
---help---
|
||||
Say Y here if you have an HD44780 or KS-0074 LCD connected to your
|
||||
parallel port. This driver also features 4 and 6-key keypads. The LCD
|
||||
@ -771,6 +772,14 @@ config PANEL_BOOT_MESSAGE
|
||||
|
||||
endif # PANEL
|
||||
|
||||
config ASPEED_LPC_CTRL
|
||||
depends on (ARCH_ASPEED || COMPILE_TEST) && REGMAP && MFD_SYSCON
|
||||
tristate "Aspeed ast2400/2500 HOST LPC to BMC bridge control"
|
||||
---help---
|
||||
Control Aspeed ast2400/2500 HOST LPC to BMC mappings through
|
||||
ioctl()s, the driver also provides a read/write interface to a BMC ram
|
||||
region where the host LPC read/write region can be buffered.
|
||||
|
||||
source "drivers/misc/c2port/Kconfig"
|
||||
source "drivers/misc/eeprom/Kconfig"
|
||||
source "drivers/misc/cb710/Kconfig"
|
||||
|
@ -54,6 +54,7 @@ obj-$(CONFIG_ECHO) += echo/
|
||||
obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
|
||||
obj-$(CONFIG_CXL_BASE) += cxl/
|
||||
obj-$(CONFIG_PANEL) += panel.o
|
||||
obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
|
||||
|
||||
lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o
|
||||
lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o
|
||||
|
267
drivers/misc/aspeed-lpc-ctrl.c
Normal file
267
drivers/misc/aspeed-lpc-ctrl.c
Normal file
@ -0,0 +1,267 @@
|
||||
/*
|
||||
* Copyright 2017 IBM Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/mfd/syscon.h>
|
||||
#include <linux/miscdevice.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/poll.h>
|
||||
#include <linux/regmap.h>
|
||||
|
||||
#include <linux/aspeed-lpc-ctrl.h>
|
||||
|
||||
#define DEVICE_NAME "aspeed-lpc-ctrl"
|
||||
|
||||
#define HICR7 0x8
|
||||
#define HICR8 0xc
|
||||
|
||||
struct aspeed_lpc_ctrl {
|
||||
struct miscdevice miscdev;
|
||||
struct regmap *regmap;
|
||||
phys_addr_t mem_base;
|
||||
resource_size_t mem_size;
|
||||
u32 pnor_size;
|
||||
u32 pnor_base;
|
||||
};
|
||||
|
||||
static struct aspeed_lpc_ctrl *file_aspeed_lpc_ctrl(struct file *file)
|
||||
{
|
||||
return container_of(file->private_data, struct aspeed_lpc_ctrl,
|
||||
miscdev);
|
||||
}
|
||||
|
||||
static int aspeed_lpc_ctrl_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
{
|
||||
struct aspeed_lpc_ctrl *lpc_ctrl = file_aspeed_lpc_ctrl(file);
|
||||
unsigned long vsize = vma->vm_end - vma->vm_start;
|
||||
pgprot_t prot = vma->vm_page_prot;
|
||||
|
||||
if (vma->vm_pgoff + vsize > lpc_ctrl->mem_base + lpc_ctrl->mem_size)
|
||||
return -EINVAL;
|
||||
|
||||
/* ast2400/2500 AHB accesses are not cache coherent */
|
||||
prot = pgprot_dmacoherent(prot);
|
||||
|
||||
if (remap_pfn_range(vma, vma->vm_start,
|
||||
(lpc_ctrl->mem_base >> PAGE_SHIFT) + vma->vm_pgoff,
|
||||
vsize, prot))
|
||||
return -EAGAIN;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static long aspeed_lpc_ctrl_ioctl(struct file *file, unsigned int cmd,
|
||||
unsigned long param)
|
||||
{
|
||||
struct aspeed_lpc_ctrl *lpc_ctrl = file_aspeed_lpc_ctrl(file);
|
||||
void __user *p = (void __user *)param;
|
||||
struct aspeed_lpc_ctrl_mapping map;
|
||||
u32 addr;
|
||||
u32 size;
|
||||
long rc;
|
||||
|
||||
if (copy_from_user(&map, p, sizeof(map)))
|
||||
return -EFAULT;
|
||||
|
||||
if (map.flags != 0)
|
||||
return -EINVAL;
|
||||
|
||||
switch (cmd) {
|
||||
case ASPEED_LPC_CTRL_IOCTL_GET_SIZE:
|
||||
/* The flash windows don't report their size */
|
||||
if (map.window_type != ASPEED_LPC_CTRL_WINDOW_MEMORY)
|
||||
return -EINVAL;
|
||||
|
||||
/* Support more than one window id in the future */
|
||||
if (map.window_id != 0)
|
||||
return -EINVAL;
|
||||
|
||||
map.size = lpc_ctrl->mem_size;
|
||||
|
||||
return copy_to_user(p, &map, sizeof(map)) ? -EFAULT : 0;
|
||||
case ASPEED_LPC_CTRL_IOCTL_MAP:
|
||||
|
||||
/*
|
||||
* The top half of HICR7 is the MSB of the BMC address of the
|
||||
* mapping.
|
||||
* The bottom half of HICR7 is the MSB of the HOST LPC
|
||||
* firmware space address of the mapping.
|
||||
*
|
||||
* The 1 bits in the top of half of HICR8 represent the bits
|
||||
* (in the requested address) that should be ignored and
|
||||
* replaced with those from the top half of HICR7.
|
||||
* The 1 bits in the bottom half of HICR8 represent the bits
|
||||
* (in the requested address) that should be kept and pass
|
||||
* into the BMC address space.
|
||||
*/
|
||||
|
||||
/*
|
||||
* It doesn't make sense to talk about a size or offset with
|
||||
* low 16 bits set. Both HICR7 and HICR8 talk about the top 16
|
||||
* bits of addresses and sizes.
|
||||
*/
|
||||
|
||||
if ((map.size & 0x0000ffff) || (map.offset & 0x0000ffff))
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* Because of the way the masks work in HICR8 offset has to
|
||||
* be a multiple of size.
|
||||
*/
|
||||
if (map.offset & (map.size - 1))
|
||||
return -EINVAL;
|
||||
|
||||
if (map.window_type == ASPEED_LPC_CTRL_WINDOW_FLASH) {
|
||||
addr = lpc_ctrl->pnor_base;
|
||||
size = lpc_ctrl->pnor_size;
|
||||
} else if (map.window_type == ASPEED_LPC_CTRL_WINDOW_MEMORY) {
|
||||
addr = lpc_ctrl->mem_base;
|
||||
size = lpc_ctrl->mem_size;
|
||||
} else {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Check overflow first! */
|
||||
if (map.offset + map.size < map.offset ||
|
||||
map.offset + map.size > size)
|
||||
return -EINVAL;
|
||||
|
||||
if (map.size == 0 || map.size > size)
|
||||
return -EINVAL;
|
||||
|
||||
addr += map.offset;
|
||||
|
||||
/*
|
||||
* addr (host lpc address) is safe regardless of values. This
|
||||
* simply changes the address the host has to request on its
|
||||
* side of the LPC bus. This cannot impact the hosts own
|
||||
* memory space by surprise as LPC specific accessors are
|
||||
* required. The only strange thing that could be done is
|
||||
* setting the lower 16 bits but the shift takes care of that.
|
||||
*/
|
||||
|
||||
rc = regmap_write(lpc_ctrl->regmap, HICR7,
|
||||
(addr | (map.addr >> 16)));
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
return regmap_write(lpc_ctrl->regmap, HICR8,
|
||||
(~(map.size - 1)) | ((map.size >> 16) - 1));
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static const struct file_operations aspeed_lpc_ctrl_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.mmap = aspeed_lpc_ctrl_mmap,
|
||||
.unlocked_ioctl = aspeed_lpc_ctrl_ioctl,
|
||||
};
|
||||
|
||||
static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct aspeed_lpc_ctrl *lpc_ctrl;
|
||||
struct device_node *node;
|
||||
struct resource resm;
|
||||
struct device *dev;
|
||||
int rc;
|
||||
|
||||
dev = &pdev->dev;
|
||||
|
||||
lpc_ctrl = devm_kzalloc(dev, sizeof(*lpc_ctrl), GFP_KERNEL);
|
||||
if (!lpc_ctrl)
|
||||
return -ENOMEM;
|
||||
|
||||
node = of_parse_phandle(dev->of_node, "flash", 0);
|
||||
if (!node) {
|
||||
dev_err(dev, "Didn't find host pnor flash node\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
rc = of_address_to_resource(node, 1, &resm);
|
||||
of_node_put(node);
|
||||
if (rc) {
|
||||
dev_err(dev, "Couldn't address to resource for flash\n");
|
||||
return rc;
|
||||
}
|
||||
|
||||
lpc_ctrl->pnor_size = resource_size(&resm);
|
||||
lpc_ctrl->pnor_base = resm.start;
|
||||
|
||||
dev_set_drvdata(&pdev->dev, lpc_ctrl);
|
||||
|
||||
node = of_parse_phandle(dev->of_node, "memory-region", 0);
|
||||
if (!node) {
|
||||
dev_err(dev, "Didn't find reserved memory\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
rc = of_address_to_resource(node, 0, &resm);
|
||||
of_node_put(node);
|
||||
if (rc) {
|
||||
dev_err(dev, "Couldn't address to resource for reserved memory\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
lpc_ctrl->mem_size = resource_size(&resm);
|
||||
lpc_ctrl->mem_base = resm.start;
|
||||
|
||||
lpc_ctrl->regmap = syscon_node_to_regmap(
|
||||
pdev->dev.parent->of_node);
|
||||
if (IS_ERR(lpc_ctrl->regmap)) {
|
||||
dev_err(dev, "Couldn't get regmap\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
lpc_ctrl->miscdev.minor = MISC_DYNAMIC_MINOR;
|
||||
lpc_ctrl->miscdev.name = DEVICE_NAME;
|
||||
lpc_ctrl->miscdev.fops = &aspeed_lpc_ctrl_fops;
|
||||
lpc_ctrl->miscdev.parent = dev;
|
||||
rc = misc_register(&lpc_ctrl->miscdev);
|
||||
if (rc)
|
||||
dev_err(dev, "Unable to register device\n");
|
||||
else
|
||||
dev_info(dev, "Loaded at 0x%08x (0x%08x)\n",
|
||||
lpc_ctrl->mem_base, lpc_ctrl->mem_size);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int aspeed_lpc_ctrl_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct aspeed_lpc_ctrl *lpc_ctrl = dev_get_drvdata(&pdev->dev);
|
||||
|
||||
misc_deregister(&lpc_ctrl->miscdev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id aspeed_lpc_ctrl_match[] = {
|
||||
{ .compatible = "aspeed,ast2400-lpc-ctrl" },
|
||||
{ .compatible = "aspeed,ast2500-lpc-ctrl" },
|
||||
{ },
|
||||
};
|
||||
|
||||
static struct platform_driver aspeed_lpc_ctrl_driver = {
|
||||
.driver = {
|
||||
.name = DEVICE_NAME,
|
||||
.of_match_table = aspeed_lpc_ctrl_match,
|
||||
},
|
||||
.probe = aspeed_lpc_ctrl_probe,
|
||||
.remove = aspeed_lpc_ctrl_remove,
|
||||
};
|
||||
|
||||
module_platform_driver(aspeed_lpc_ctrl_driver);
|
||||
|
||||
MODULE_DEVICE_TABLE(of, aspeed_lpc_ctrl_match);
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Cyril Bur <cyrilbur@gmail.com>");
|
||||
MODULE_DESCRIPTION("Control for aspeed 2400/2500 LPC HOST to BMC mappings");
|
File diff suppressed because it is too large
Load Diff
@ -20,6 +20,7 @@ if LIBNVDIMM
|
||||
config BLK_DEV_PMEM
|
||||
tristate "PMEM: Persistent memory block device support"
|
||||
default LIBNVDIMM
|
||||
select DAX
|
||||
select ND_BTT if BTT
|
||||
select ND_PFN if NVDIMM_PFN
|
||||
help
|
||||
|
@ -246,7 +246,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
|
||||
if (rw == READ) {
|
||||
if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align)))
|
||||
return -EIO;
|
||||
return memcpy_from_pmem(buf, nsio->addr + offset, size);
|
||||
return memcpy_mcsafe(buf, nsio->addr + offset, size);
|
||||
}
|
||||
|
||||
if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
|
||||
|
@ -28,6 +28,7 @@
|
||||
#include <linux/pfn_t.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/pmem.h>
|
||||
#include <linux/dax.h>
|
||||
#include <linux/nd.h>
|
||||
#include "pmem.h"
|
||||
#include "pfn.h"
|
||||
@ -88,7 +89,7 @@ static int read_pmem(struct page *page, unsigned int off,
|
||||
int rc;
|
||||
void *mem = kmap_atomic(page);
|
||||
|
||||
rc = memcpy_from_pmem(mem + off, pmem_addr, len);
|
||||
rc = memcpy_mcsafe(mem + off, pmem_addr, len);
|
||||
kunmap_atomic(mem);
|
||||
if (rc)
|
||||
return -EIO;
|
||||
@ -199,13 +200,13 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
|
||||
}
|
||||
|
||||
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
|
||||
__weak long pmem_direct_access(struct block_device *bdev, sector_t sector,
|
||||
void **kaddr, pfn_t *pfn, long size)
|
||||
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
|
||||
long nr_pages, void **kaddr, pfn_t *pfn)
|
||||
{
|
||||
struct pmem_device *pmem = bdev->bd_queue->queuedata;
|
||||
resource_size_t offset = sector * 512 + pmem->data_offset;
|
||||
resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
|
||||
|
||||
if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
|
||||
if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
|
||||
PFN_PHYS(nr_pages))))
|
||||
return -EIO;
|
||||
*kaddr = pmem->virt_addr + offset;
|
||||
*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
|
||||
@ -215,26 +216,41 @@ __weak long pmem_direct_access(struct block_device *bdev, sector_t sector,
|
||||
* requested range.
|
||||
*/
|
||||
if (unlikely(pmem->bb.count))
|
||||
return size;
|
||||
return pmem->size - pmem->pfn_pad - offset;
|
||||
return nr_pages;
|
||||
return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
|
||||
}
|
||||
|
||||
static const struct block_device_operations pmem_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.rw_page = pmem_rw_page,
|
||||
.direct_access = pmem_direct_access,
|
||||
.revalidate_disk = nvdimm_revalidate_disk,
|
||||
};
|
||||
|
||||
static long pmem_dax_direct_access(struct dax_device *dax_dev,
|
||||
pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
|
||||
{
|
||||
struct pmem_device *pmem = dax_get_private(dax_dev);
|
||||
|
||||
return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
|
||||
}
|
||||
|
||||
static const struct dax_operations pmem_dax_ops = {
|
||||
.direct_access = pmem_dax_direct_access,
|
||||
};
|
||||
|
||||
static void pmem_release_queue(void *q)
|
||||
{
|
||||
blk_cleanup_queue(q);
|
||||
}
|
||||
|
||||
static void pmem_release_disk(void *disk)
|
||||
static void pmem_release_disk(void *__pmem)
|
||||
{
|
||||
del_gendisk(disk);
|
||||
put_disk(disk);
|
||||
struct pmem_device *pmem = __pmem;
|
||||
|
||||
kill_dax(pmem->dax_dev);
|
||||
put_dax(pmem->dax_dev);
|
||||
del_gendisk(pmem->disk);
|
||||
put_disk(pmem->disk);
|
||||
}
|
||||
|
||||
static int pmem_attach_disk(struct device *dev,
|
||||
@ -245,6 +261,7 @@ static int pmem_attach_disk(struct device *dev,
|
||||
struct vmem_altmap __altmap, *altmap = NULL;
|
||||
struct resource *res = &nsio->res;
|
||||
struct nd_pfn *nd_pfn = NULL;
|
||||
struct dax_device *dax_dev;
|
||||
int nid = dev_to_node(dev);
|
||||
struct nd_pfn_sb *pfn_sb;
|
||||
struct pmem_device *pmem;
|
||||
@ -325,6 +342,7 @@ static int pmem_attach_disk(struct device *dev,
|
||||
disk = alloc_disk_node(0, nid);
|
||||
if (!disk)
|
||||
return -ENOMEM;
|
||||
pmem->disk = disk;
|
||||
|
||||
disk->fops = &pmem_fops;
|
||||
disk->queue = q;
|
||||
@ -336,9 +354,16 @@ static int pmem_attach_disk(struct device *dev,
|
||||
return -ENOMEM;
|
||||
nvdimm_badblocks_populate(nd_region, &pmem->bb, res);
|
||||
disk->bb = &pmem->bb;
|
||||
device_add_disk(dev, disk);
|
||||
|
||||
if (devm_add_action_or_reset(dev, pmem_release_disk, disk))
|
||||
dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
|
||||
if (!dax_dev) {
|
||||
put_disk(disk);
|
||||
return -ENOMEM;
|
||||
}
|
||||
pmem->dax_dev = dax_dev;
|
||||
|
||||
device_add_disk(dev, disk);
|
||||
if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
|
||||
return -ENOMEM;
|
||||
|
||||
revalidate_disk(disk);
|
||||
|
@ -5,8 +5,6 @@
|
||||
#include <linux/pfn_t.h>
|
||||
#include <linux/fs.h>
|
||||
|
||||
long pmem_direct_access(struct block_device *bdev, sector_t sector,
|
||||
void **kaddr, pfn_t *pfn, long size);
|
||||
/* this definition is in it's own header for tools/testing/nvdimm to consume */
|
||||
struct pmem_device {
|
||||
/* One contiguous memory region per device */
|
||||
@ -20,5 +18,10 @@ struct pmem_device {
|
||||
/* trim size when namespace capacity has been section aligned */
|
||||
u32 pfn_pad;
|
||||
struct badblocks bb;
|
||||
struct dax_device *dax_dev;
|
||||
struct gendisk *disk;
|
||||
};
|
||||
|
||||
long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
|
||||
long nr_pages, void **kaddr, pfn_t *pfn);
|
||||
#endif /* __NVDIMM_PMEM_H__ */
|
||||
|
@ -64,6 +64,43 @@ static int pps_cdev_fasync(int fd, struct file *file, int on)
|
||||
return fasync_helper(fd, file, on, &pps->async_queue);
|
||||
}
|
||||
|
||||
static int pps_cdev_pps_fetch(struct pps_device *pps, struct pps_fdata *fdata)
|
||||
{
|
||||
unsigned int ev = pps->last_ev;
|
||||
int err = 0;
|
||||
|
||||
/* Manage the timeout */
|
||||
if (fdata->timeout.flags & PPS_TIME_INVALID)
|
||||
err = wait_event_interruptible(pps->queue,
|
||||
ev != pps->last_ev);
|
||||
else {
|
||||
unsigned long ticks;
|
||||
|
||||
dev_dbg(pps->dev, "timeout %lld.%09d\n",
|
||||
(long long) fdata->timeout.sec,
|
||||
fdata->timeout.nsec);
|
||||
ticks = fdata->timeout.sec * HZ;
|
||||
ticks += fdata->timeout.nsec / (NSEC_PER_SEC / HZ);
|
||||
|
||||
if (ticks != 0) {
|
||||
err = wait_event_interruptible_timeout(
|
||||
pps->queue,
|
||||
ev != pps->last_ev,
|
||||
ticks);
|
||||
if (err == 0)
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
}
|
||||
|
||||
/* Check for pending signals */
|
||||
if (err == -ERESTARTSYS) {
|
||||
dev_dbg(pps->dev, "pending signal caught\n");
|
||||
return -EINTR;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static long pps_cdev_ioctl(struct file *file,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
@ -144,7 +181,6 @@ static long pps_cdev_ioctl(struct file *file,
|
||||
|
||||
case PPS_FETCH: {
|
||||
struct pps_fdata fdata;
|
||||
unsigned int ev;
|
||||
|
||||
dev_dbg(pps->dev, "PPS_FETCH\n");
|
||||
|
||||
@ -152,36 +188,9 @@ static long pps_cdev_ioctl(struct file *file,
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
|
||||
ev = pps->last_ev;
|
||||
|
||||
/* Manage the timeout */
|
||||
if (fdata.timeout.flags & PPS_TIME_INVALID)
|
||||
err = wait_event_interruptible(pps->queue,
|
||||
ev != pps->last_ev);
|
||||
else {
|
||||
unsigned long ticks;
|
||||
|
||||
dev_dbg(pps->dev, "timeout %lld.%09d\n",
|
||||
(long long) fdata.timeout.sec,
|
||||
fdata.timeout.nsec);
|
||||
ticks = fdata.timeout.sec * HZ;
|
||||
ticks += fdata.timeout.nsec / (NSEC_PER_SEC / HZ);
|
||||
|
||||
if (ticks != 0) {
|
||||
err = wait_event_interruptible_timeout(
|
||||
pps->queue,
|
||||
ev != pps->last_ev,
|
||||
ticks);
|
||||
if (err == 0)
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
}
|
||||
|
||||
/* Check for pending signals */
|
||||
if (err == -ERESTARTSYS) {
|
||||
dev_dbg(pps->dev, "pending signal caught\n");
|
||||
return -EINTR;
|
||||
}
|
||||
err = pps_cdev_pps_fetch(pps, &fdata);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* Return the fetched timestamp */
|
||||
spin_lock_irq(&pps->lock);
|
||||
@ -242,6 +251,57 @@ static long pps_cdev_ioctl(struct file *file,
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
static long pps_cdev_compat_ioctl(struct file *file,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
struct pps_device *pps = file->private_data;
|
||||
void __user *uarg = (void __user *) arg;
|
||||
|
||||
cmd = _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(void *));
|
||||
|
||||
if (cmd == PPS_FETCH) {
|
||||
struct pps_fdata_compat compat;
|
||||
struct pps_fdata fdata;
|
||||
int err;
|
||||
|
||||
dev_dbg(pps->dev, "PPS_FETCH\n");
|
||||
|
||||
err = copy_from_user(&compat, uarg, sizeof(struct pps_fdata_compat));
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
|
||||
memcpy(&fdata.timeout, &compat.timeout,
|
||||
sizeof(struct pps_ktime_compat));
|
||||
|
||||
err = pps_cdev_pps_fetch(pps, &fdata);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* Return the fetched timestamp */
|
||||
spin_lock_irq(&pps->lock);
|
||||
|
||||
compat.info.assert_sequence = pps->assert_sequence;
|
||||
compat.info.clear_sequence = pps->clear_sequence;
|
||||
compat.info.current_mode = pps->current_mode;
|
||||
|
||||
memcpy(&compat.info.assert_tu, &pps->assert_tu,
|
||||
sizeof(struct pps_ktime_compat));
|
||||
memcpy(&compat.info.clear_tu, &pps->clear_tu,
|
||||
sizeof(struct pps_ktime_compat));
|
||||
|
||||
spin_unlock_irq(&pps->lock);
|
||||
|
||||
return copy_to_user(uarg, &compat,
|
||||
sizeof(struct pps_fdata_compat)) ? -EFAULT : 0;
|
||||
}
|
||||
|
||||
return pps_cdev_ioctl(file, cmd, arg);
|
||||
}
|
||||
#else
|
||||
#define pps_cdev_compat_ioctl NULL
|
||||
#endif
|
||||
|
||||
static int pps_cdev_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct pps_device *pps = container_of(inode->i_cdev,
|
||||
@ -268,6 +328,7 @@ static const struct file_operations pps_cdev_fops = {
|
||||
.llseek = no_llseek,
|
||||
.poll = pps_cdev_poll,
|
||||
.fasync = pps_cdev_fasync,
|
||||
.compat_ioctl = pps_cdev_compat_ioctl,
|
||||
.unlocked_ioctl = pps_cdev_ioctl,
|
||||
.open = pps_cdev_open,
|
||||
.release = pps_cdev_release,
|
||||
|
@ -108,15 +108,11 @@ static struct attribute *rio_dev_attrs[] = {
|
||||
&dev_attr_lprev.attr,
|
||||
&dev_attr_destid.attr,
|
||||
&dev_attr_modalias.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static const struct attribute_group rio_dev_group = {
|
||||
.attrs = rio_dev_attrs,
|
||||
};
|
||||
|
||||
const struct attribute_group *rio_dev_groups[] = {
|
||||
&rio_dev_group,
|
||||
/* Switch-only attributes */
|
||||
&dev_attr_routes.attr,
|
||||
&dev_attr_lnext.attr,
|
||||
&dev_attr_hopcount.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
@ -259,46 +255,40 @@ static struct bin_attribute rio_config_attr = {
|
||||
.write = rio_write_config,
|
||||
};
|
||||
|
||||
/**
|
||||
* rio_create_sysfs_dev_files - create RIO specific sysfs files
|
||||
* @rdev: device whose entries should be created
|
||||
*
|
||||
* Create files when @rdev is added to sysfs.
|
||||
*/
|
||||
int rio_create_sysfs_dev_files(struct rio_dev *rdev)
|
||||
static struct bin_attribute *rio_dev_bin_attrs[] = {
|
||||
&rio_config_attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static umode_t rio_dev_is_attr_visible(struct kobject *kobj,
|
||||
struct attribute *attr, int n)
|
||||
{
|
||||
int err = 0;
|
||||
struct rio_dev *rdev = to_rio_dev(kobj_to_dev(kobj));
|
||||
umode_t mode = attr->mode;
|
||||
|
||||
err = device_create_bin_file(&rdev->dev, &rio_config_attr);
|
||||
|
||||
if (!err && (rdev->pef & RIO_PEF_SWITCH)) {
|
||||
err |= device_create_file(&rdev->dev, &dev_attr_routes);
|
||||
err |= device_create_file(&rdev->dev, &dev_attr_lnext);
|
||||
err |= device_create_file(&rdev->dev, &dev_attr_hopcount);
|
||||
if (!(rdev->pef & RIO_PEF_SWITCH) &&
|
||||
(attr == &dev_attr_routes.attr ||
|
||||
attr == &dev_attr_lnext.attr ||
|
||||
attr == &dev_attr_hopcount.attr)) {
|
||||
/*
|
||||
* Hide switch-specific attributes for a non-switch device.
|
||||
*/
|
||||
mode = 0;
|
||||
}
|
||||
|
||||
if (err)
|
||||
pr_warning("RIO: Failed to create attribute file(s) for %s\n",
|
||||
rio_name(rdev));
|
||||
|
||||
return err;
|
||||
return mode;
|
||||
}
|
||||
|
||||
/**
|
||||
* rio_remove_sysfs_dev_files - cleanup RIO specific sysfs files
|
||||
* @rdev: device whose entries we should free
|
||||
*
|
||||
* Cleanup when @rdev is removed from sysfs.
|
||||
*/
|
||||
void rio_remove_sysfs_dev_files(struct rio_dev *rdev)
|
||||
{
|
||||
device_remove_bin_file(&rdev->dev, &rio_config_attr);
|
||||
if (rdev->pef & RIO_PEF_SWITCH) {
|
||||
device_remove_file(&rdev->dev, &dev_attr_routes);
|
||||
device_remove_file(&rdev->dev, &dev_attr_lnext);
|
||||
device_remove_file(&rdev->dev, &dev_attr_hopcount);
|
||||
}
|
||||
}
|
||||
static const struct attribute_group rio_dev_group = {
|
||||
.attrs = rio_dev_attrs,
|
||||
.is_visible = rio_dev_is_attr_visible,
|
||||
.bin_attrs = rio_dev_bin_attrs,
|
||||
};
|
||||
|
||||
const struct attribute_group *rio_dev_groups[] = {
|
||||
&rio_dev_group,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static ssize_t bus_scan_store(struct bus_type *bus, const char *buf,
|
||||
size_t count)
|
||||
|
@ -192,8 +192,6 @@ int rio_add_device(struct rio_dev *rdev)
|
||||
}
|
||||
spin_unlock(&rio_global_list_lock);
|
||||
|
||||
rio_create_sysfs_dev_files(rdev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rio_add_device);
|
||||
@ -220,7 +218,6 @@ void rio_del_device(struct rio_dev *rdev, enum rio_device_state state)
|
||||
}
|
||||
}
|
||||
spin_unlock(&rio_global_list_lock);
|
||||
rio_remove_sysfs_dev_files(rdev);
|
||||
device_unregister(&rdev->dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rio_del_device);
|
||||
|
@ -27,8 +27,6 @@ extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
|
||||
u8 hopcount, u32 from);
|
||||
extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid,
|
||||
u8 hopcount);
|
||||
extern int rio_create_sysfs_dev_files(struct rio_dev *rdev);
|
||||
extern void rio_remove_sysfs_dev_files(struct rio_dev *rdev);
|
||||
extern int rio_lock_device(struct rio_mport *port, u16 destid,
|
||||
u8 hopcount, int wait_ms);
|
||||
extern int rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount);
|
||||
|
@ -14,6 +14,7 @@ config BLK_DEV_XPRAM
|
||||
|
||||
config DCSSBLK
|
||||
def_tristate m
|
||||
select DAX
|
||||
prompt "DCSSBLK support"
|
||||
depends on S390 && BLOCK
|
||||
help
|
||||
|
@ -18,6 +18,7 @@
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pfn_t.h>
|
||||
#include <linux/dax.h>
|
||||
#include <asm/extmem.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
@ -30,8 +31,8 @@ static int dcssblk_open(struct block_device *bdev, fmode_t mode);
|
||||
static void dcssblk_release(struct gendisk *disk, fmode_t mode);
|
||||
static blk_qc_t dcssblk_make_request(struct request_queue *q,
|
||||
struct bio *bio);
|
||||
static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
|
||||
void **kaddr, pfn_t *pfn, long size);
|
||||
static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
|
||||
long nr_pages, void **kaddr, pfn_t *pfn);
|
||||
|
||||
static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
|
||||
|
||||
@ -40,7 +41,10 @@ static const struct block_device_operations dcssblk_devops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = dcssblk_open,
|
||||
.release = dcssblk_release,
|
||||
.direct_access = dcssblk_direct_access,
|
||||
};
|
||||
|
||||
static const struct dax_operations dcssblk_dax_ops = {
|
||||
.direct_access = dcssblk_dax_direct_access,
|
||||
};
|
||||
|
||||
struct dcssblk_dev_info {
|
||||
@ -57,6 +61,7 @@ struct dcssblk_dev_info {
|
||||
struct request_queue *dcssblk_queue;
|
||||
int num_of_segments;
|
||||
struct list_head seg_list;
|
||||
struct dax_device *dax_dev;
|
||||
};
|
||||
|
||||
struct segment_info {
|
||||
@ -389,6 +394,8 @@ removeseg:
|
||||
}
|
||||
list_del(&dev_info->lh);
|
||||
|
||||
kill_dax(dev_info->dax_dev);
|
||||
put_dax(dev_info->dax_dev);
|
||||
del_gendisk(dev_info->gd);
|
||||
blk_cleanup_queue(dev_info->dcssblk_queue);
|
||||
dev_info->gd->queue = NULL;
|
||||
@ -654,6 +661,13 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
|
||||
if (rc)
|
||||
goto put_dev;
|
||||
|
||||
dev_info->dax_dev = alloc_dax(dev_info, dev_info->gd->disk_name,
|
||||
&dcssblk_dax_ops);
|
||||
if (!dev_info->dax_dev) {
|
||||
rc = -ENOMEM;
|
||||
goto put_dev;
|
||||
}
|
||||
|
||||
get_device(&dev_info->dev);
|
||||
device_add_disk(&dev_info->dev, dev_info->gd);
|
||||
|
||||
@ -752,6 +766,8 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
|
||||
}
|
||||
|
||||
list_del(&dev_info->lh);
|
||||
kill_dax(dev_info->dax_dev);
|
||||
put_dax(dev_info->dax_dev);
|
||||
del_gendisk(dev_info->gd);
|
||||
blk_cleanup_queue(dev_info->dcssblk_queue);
|
||||
dev_info->gd->queue = NULL;
|
||||
@ -883,21 +899,26 @@ fail:
|
||||
}
|
||||
|
||||
static long
|
||||
dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
|
||||
void **kaddr, pfn_t *pfn, long size)
|
||||
__dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
|
||||
long nr_pages, void **kaddr, pfn_t *pfn)
|
||||
{
|
||||
struct dcssblk_dev_info *dev_info;
|
||||
unsigned long offset, dev_sz;
|
||||
resource_size_t offset = pgoff * PAGE_SIZE;
|
||||
unsigned long dev_sz;
|
||||
|
||||
dev_info = bdev->bd_disk->private_data;
|
||||
if (!dev_info)
|
||||
return -ENODEV;
|
||||
dev_sz = dev_info->end - dev_info->start + 1;
|
||||
offset = secnum * 512;
|
||||
*kaddr = (void *) dev_info->start + offset;
|
||||
*pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), PFN_DEV);
|
||||
|
||||
return dev_sz - offset;
|
||||
return (dev_sz - offset) / PAGE_SIZE;
|
||||
}
|
||||
|
||||
static long
|
||||
dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
|
||||
long nr_pages, void **kaddr, pfn_t *pfn)
|
||||
{
|
||||
struct dcssblk_dev_info *dev_info = dax_get_private(dax_dev);
|
||||
|
||||
return __dcssblk_direct_access(dev_info, pgoff, nr_pages, kaddr, pfn);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -76,9 +76,16 @@ static struct vme_bridge *find_bridge(struct vme_resource *resource)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
/**
|
||||
* vme_free_consistent - Allocate contiguous memory.
|
||||
* @resource: Pointer to VME resource.
|
||||
* @size: Size of allocation required.
|
||||
* @dma: Pointer to variable to store physical address of allocation.
|
||||
*
|
||||
* Allocate a contiguous block of memory for use by the driver. This is used to
|
||||
* create the buffers for the slave windows.
|
||||
*
|
||||
* Return: Virtual address of allocation on success, NULL on failure.
|
||||
*/
|
||||
void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
|
||||
dma_addr_t *dma)
|
||||
@ -111,8 +118,14 @@ void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
|
||||
}
|
||||
EXPORT_SYMBOL(vme_alloc_consistent);
|
||||
|
||||
/*
|
||||
* Free previously allocated contiguous block of memory.
|
||||
/**
|
||||
* vme_free_consistent - Free previously allocated memory.
|
||||
* @resource: Pointer to VME resource.
|
||||
* @size: Size of allocation to free.
|
||||
* @vaddr: Virtual address of allocation.
|
||||
* @dma: Physical address of allocation.
|
||||
*
|
||||
* Free previously allocated block of contiguous memory.
|
||||
*/
|
||||
void vme_free_consistent(struct vme_resource *resource, size_t size,
|
||||
void *vaddr, dma_addr_t dma)
|
||||
@ -145,6 +158,16 @@ void vme_free_consistent(struct vme_resource *resource, size_t size,
|
||||
}
|
||||
EXPORT_SYMBOL(vme_free_consistent);
|
||||
|
||||
/**
|
||||
* vme_get_size - Helper function returning size of a VME window
|
||||
* @resource: Pointer to VME slave or master resource.
|
||||
*
|
||||
* Determine the size of the VME window provided. This is a helper
|
||||
* function, wrappering the call to vme_master_get or vme_slave_get
|
||||
* depending on the type of window resource handed to it.
|
||||
*
|
||||
* Return: Size of the window on success, zero on failure.
|
||||
*/
|
||||
size_t vme_get_size(struct vme_resource *resource)
|
||||
{
|
||||
int enabled, retval;
|
||||
@ -259,9 +282,16 @@ static u32 vme_get_aspace(int am)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Request a slave image with specific attributes, return some unique
|
||||
* identifier.
|
||||
/**
|
||||
* vme_slave_request - Request a VME slave window resource.
|
||||
* @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
|
||||
* @address: Required VME address space.
|
||||
* @cycle: Required VME data transfer cycle type.
|
||||
*
|
||||
* Request use of a VME window resource capable of being set for the requested
|
||||
* address space and data transfer cycle.
|
||||
*
|
||||
* Return: Pointer to VME resource on success, NULL on failure.
|
||||
*/
|
||||
struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
|
||||
u32 cycle)
|
||||
@ -327,6 +357,23 @@ err_bus:
|
||||
}
|
||||
EXPORT_SYMBOL(vme_slave_request);
|
||||
|
||||
/**
|
||||
* vme_slave_set - Set VME slave window configuration.
|
||||
* @resource: Pointer to VME slave resource.
|
||||
* @enabled: State to which the window should be configured.
|
||||
* @vme_base: Base address for the window.
|
||||
* @size: Size of the VME window.
|
||||
* @buf_base: Based address of buffer used to provide VME slave window storage.
|
||||
* @aspace: VME address space for the VME window.
|
||||
* @cycle: VME data transfer cycle type for the VME window.
|
||||
*
|
||||
* Set configuration for provided VME slave window.
|
||||
*
|
||||
* Return: Zero on success, -EINVAL if operation is not supported on this
|
||||
* device, if an invalid resource has been provided or invalid
|
||||
* attributes are provided. Hardware specific errors may also be
|
||||
* returned.
|
||||
*/
|
||||
int vme_slave_set(struct vme_resource *resource, int enabled,
|
||||
unsigned long long vme_base, unsigned long long size,
|
||||
dma_addr_t buf_base, u32 aspace, u32 cycle)
|
||||
@ -362,6 +409,21 @@ int vme_slave_set(struct vme_resource *resource, int enabled,
|
||||
}
|
||||
EXPORT_SYMBOL(vme_slave_set);
|
||||
|
||||
/**
|
||||
* vme_slave_get - Retrieve VME slave window configuration.
|
||||
* @resource: Pointer to VME slave resource.
|
||||
* @enabled: Pointer to variable for storing state.
|
||||
* @vme_base: Pointer to variable for storing window base address.
|
||||
* @size: Pointer to variable for storing window size.
|
||||
* @buf_base: Pointer to variable for storing slave buffer base address.
|
||||
* @aspace: Pointer to variable for storing VME address space.
|
||||
* @cycle: Pointer to variable for storing VME data transfer cycle type.
|
||||
*
|
||||
* Return configuration for provided VME slave window.
|
||||
*
|
||||
* Return: Zero on success, -EINVAL if operation is not supported on this
|
||||
* device or if an invalid resource has been provided.
|
||||
*/
|
||||
int vme_slave_get(struct vme_resource *resource, int *enabled,
|
||||
unsigned long long *vme_base, unsigned long long *size,
|
||||
dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
|
||||
@ -386,6 +448,12 @@ int vme_slave_get(struct vme_resource *resource, int *enabled,
|
||||
}
|
||||
EXPORT_SYMBOL(vme_slave_get);
|
||||
|
||||
/**
|
||||
* vme_slave_free - Free VME slave window
|
||||
* @resource: Pointer to VME slave resource.
|
||||
*
|
||||
* Free the provided slave resource so that it may be reallocated.
|
||||
*/
|
||||
void vme_slave_free(struct vme_resource *resource)
|
||||
{
|
||||
struct vme_slave_resource *slave_image;
|
||||
@ -415,9 +483,17 @@ void vme_slave_free(struct vme_resource *resource)
|
||||
}
|
||||
EXPORT_SYMBOL(vme_slave_free);
|
||||
|
||||
/*
|
||||
* Request a master image with specific attributes, return some unique
|
||||
* identifier.
|
||||
/**
|
||||
* vme_master_request - Request a VME master window resource.
|
||||
* @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
|
||||
* @address: Required VME address space.
|
||||
* @cycle: Required VME data transfer cycle type.
|
||||
* @dwidth: Required VME data transfer width.
|
||||
*
|
||||
* Request use of a VME window resource capable of being set for the requested
|
||||
* address space, data transfer cycle and width.
|
||||
*
|
||||
* Return: Pointer to VME resource on success, NULL on failure.
|
||||
*/
|
||||
struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
|
||||
u32 cycle, u32 dwidth)
|
||||
@ -486,6 +562,23 @@ err_bus:
|
||||
}
|
||||
EXPORT_SYMBOL(vme_master_request);
|
||||
|
||||
/**
|
||||
* vme_master_set - Set VME master window configuration.
|
||||
* @resource: Pointer to VME master resource.
|
||||
* @enabled: State to which the window should be configured.
|
||||
* @vme_base: Base address for the window.
|
||||
* @size: Size of the VME window.
|
||||
* @aspace: VME address space for the VME window.
|
||||
* @cycle: VME data transfer cycle type for the VME window.
|
||||
* @dwidth: VME data transfer width for the VME window.
|
||||
*
|
||||
* Set configuration for provided VME master window.
|
||||
*
|
||||
* Return: Zero on success, -EINVAL if operation is not supported on this
|
||||
* device, if an invalid resource has been provided or invalid
|
||||
* attributes are provided. Hardware specific errors may also be
|
||||
* returned.
|
||||
*/
|
||||
int vme_master_set(struct vme_resource *resource, int enabled,
|
||||
unsigned long long vme_base, unsigned long long size, u32 aspace,
|
||||
u32 cycle, u32 dwidth)
|
||||
@ -522,6 +615,21 @@ int vme_master_set(struct vme_resource *resource, int enabled,
|
||||
}
|
||||
EXPORT_SYMBOL(vme_master_set);
|
||||
|
||||
/**
|
||||
* vme_master_get - Retrieve VME master window configuration.
|
||||
* @resource: Pointer to VME master resource.
|
||||
* @enabled: Pointer to variable for storing state.
|
||||
* @vme_base: Pointer to variable for storing window base address.
|
||||
* @size: Pointer to variable for storing window size.
|
||||
* @aspace: Pointer to variable for storing VME address space.
|
||||
* @cycle: Pointer to variable for storing VME data transfer cycle type.
|
||||
* @dwidth: Pointer to variable for storing VME data transfer width.
|
||||
*
|
||||
* Return configuration for provided VME master window.
|
||||
*
|
||||
* Return: Zero on success, -EINVAL if operation is not supported on this
|
||||
* device or if an invalid resource has been provided.
|
||||
*/
|
||||
int vme_master_get(struct vme_resource *resource, int *enabled,
|
||||
unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
|
||||
u32 *cycle, u32 *dwidth)
|
||||
@ -546,8 +654,20 @@ int vme_master_get(struct vme_resource *resource, int *enabled,
|
||||
}
|
||||
EXPORT_SYMBOL(vme_master_get);
|
||||
|
||||
/*
|
||||
* Read data out of VME space into a buffer.
|
||||
/**
|
||||
* vme_master_write - Read data from VME space into a buffer.
|
||||
* @resource: Pointer to VME master resource.
|
||||
* @buf: Pointer to buffer where data should be transferred.
|
||||
* @count: Number of bytes to transfer.
|
||||
* @offset: Offset into VME master window at which to start transfer.
|
||||
*
|
||||
* Perform read of count bytes of data from location on VME bus which maps into
|
||||
* the VME master window at offset to buf.
|
||||
*
|
||||
* Return: Number of bytes read, -EINVAL if resource is not a VME master
|
||||
* resource or read operation is not supported. -EFAULT returned if
|
||||
* invalid offset is provided. Hardware specific errors may also be
|
||||
* returned.
|
||||
*/
|
||||
ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
|
||||
loff_t offset)
|
||||
@ -583,8 +703,20 @@ ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
|
||||
}
|
||||
EXPORT_SYMBOL(vme_master_read);
|
||||
|
||||
/*
|
||||
* Write data out to VME space from a buffer.
|
||||
/**
|
||||
* vme_master_write - Write data out to VME space from a buffer.
|
||||
* @resource: Pointer to VME master resource.
|
||||
* @buf: Pointer to buffer holding data to transfer.
|
||||
* @count: Number of bytes to transfer.
|
||||
* @offset: Offset into VME master window at which to start transfer.
|
||||
*
|
||||
* Perform write of count bytes of data from buf to location on VME bus which
|
||||
* maps into the VME master window at offset.
|
||||
*
|
||||
* Return: Number of bytes written, -EINVAL if resource is not a VME master
|
||||
* resource or write operation is not supported. -EFAULT returned if
|
||||
* invalid offset is provided. Hardware specific errors may also be
|
||||
* returned.
|
||||
*/
|
||||
ssize_t vme_master_write(struct vme_resource *resource, void *buf,
|
||||
size_t count, loff_t offset)
|
||||
@ -619,8 +751,24 @@ ssize_t vme_master_write(struct vme_resource *resource, void *buf,
|
||||
}
|
||||
EXPORT_SYMBOL(vme_master_write);
|
||||
|
||||
/*
|
||||
* Perform RMW cycle to provided location.
|
||||
/**
|
||||
* vme_master_rmw - Perform read-modify-write cycle.
|
||||
* @resource: Pointer to VME master resource.
|
||||
* @mask: Bits to be compared and swapped in operation.
|
||||
* @compare: Bits to be compared with data read from offset.
|
||||
* @swap: Bits to be swapped in data read from offset.
|
||||
* @offset: Offset into VME master window at which to perform operation.
|
||||
*
|
||||
* Perform read-modify-write cycle on provided location:
|
||||
* - Location on VME bus is read.
|
||||
* - Bits selected by mask are compared with compare.
|
||||
* - Where a selected bit matches that in compare and are selected in swap,
|
||||
* the bit is swapped.
|
||||
* - Result written back to location on VME bus.
|
||||
*
|
||||
* Return: Bytes written on success, -EINVAL if resource is not a VME master
|
||||
* resource or RMW operation is not supported. Hardware specific
|
||||
* errors may also be returned.
|
||||
*/
|
||||
unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
|
||||
unsigned int compare, unsigned int swap, loff_t offset)
|
||||
@ -644,6 +792,17 @@ unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
|
||||
}
|
||||
EXPORT_SYMBOL(vme_master_rmw);
|
||||
|
||||
/**
|
||||
* vme_master_mmap - Mmap region of VME master window.
|
||||
* @resource: Pointer to VME master resource.
|
||||
* @vma: Pointer to definition of user mapping.
|
||||
*
|
||||
* Memory map a region of the VME master window into user space.
|
||||
*
|
||||
* Return: Zero on success, -EINVAL if resource is not a VME master
|
||||
* resource or -EFAULT if map exceeds window size. Other generic mmap
|
||||
* errors may also be returned.
|
||||
*/
|
||||
int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
|
||||
{
|
||||
struct vme_master_resource *image;
|
||||
@ -670,6 +829,12 @@ int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
|
||||
}
|
||||
EXPORT_SYMBOL(vme_master_mmap);
|
||||
|
||||
/**
|
||||
* vme_master_free - Free VME master window
|
||||
* @resource: Pointer to VME master resource.
|
||||
*
|
||||
* Free the provided master resource so that it may be reallocated.
|
||||
*/
|
||||
void vme_master_free(struct vme_resource *resource)
|
||||
{
|
||||
struct vme_master_resource *master_image;
|
||||
@ -699,9 +864,15 @@ void vme_master_free(struct vme_resource *resource)
|
||||
}
|
||||
EXPORT_SYMBOL(vme_master_free);
|
||||
|
||||
/*
|
||||
* Request a DMA controller with specific attributes, return some unique
|
||||
* identifier.
|
||||
/**
|
||||
* vme_dma_request - Request a DMA controller.
|
||||
* @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
|
||||
* @route: Required src/destination combination.
|
||||
*
|
||||
* Request a VME DMA controller with capability to perform transfers bewteen
|
||||
* requested source/destination combination.
|
||||
*
|
||||
* Return: Pointer to VME DMA resource on success, NULL on failure.
|
||||
*/
|
||||
struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
|
||||
{
|
||||
@ -768,8 +939,15 @@ err_bus:
|
||||
}
|
||||
EXPORT_SYMBOL(vme_dma_request);
|
||||
|
||||
/*
|
||||
* Start new list
|
||||
/**
|
||||
* vme_new_dma_list - Create new VME DMA list.
|
||||
* @resource: Pointer to VME DMA resource.
|
||||
*
|
||||
* Create a new VME DMA list. It is the responsibility of the user to free
|
||||
* the list once it is no longer required with vme_dma_list_free().
|
||||
*
|
||||
* Return: Pointer to new VME DMA list, NULL on allocation failure or invalid
|
||||
* VME DMA resource.
|
||||
*/
|
||||
struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
|
||||
{
|
||||
@ -796,8 +974,16 @@ struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
|
||||
}
|
||||
EXPORT_SYMBOL(vme_new_dma_list);
|
||||
|
||||
/*
|
||||
* Create "Pattern" type attributes
|
||||
/**
|
||||
* vme_dma_pattern_attribute - Create "Pattern" type VME DMA list attribute.
|
||||
* @pattern: Value to use used as pattern
|
||||
* @type: Type of pattern to be written.
|
||||
*
|
||||
* Create VME DMA list attribute for pattern generation. It is the
|
||||
* responsibility of the user to free used attributes using
|
||||
* vme_dma_free_attribute().
|
||||
*
|
||||
* Return: Pointer to VME DMA attribute, NULL on failure.
|
||||
*/
|
||||
struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
|
||||
{
|
||||
@ -831,8 +1017,15 @@ err_attr:
|
||||
}
|
||||
EXPORT_SYMBOL(vme_dma_pattern_attribute);
|
||||
|
||||
/*
|
||||
* Create "PCI" type attributes
|
||||
/**
|
||||
* vme_dma_pci_attribute - Create "PCI" type VME DMA list attribute.
|
||||
* @address: PCI base address for DMA transfer.
|
||||
*
|
||||
* Create VME DMA list attribute pointing to a location on PCI for DMA
|
||||
* transfers. It is the responsibility of the user to free used attributes
|
||||
* using vme_dma_free_attribute().
|
||||
*
|
||||
* Return: Pointer to VME DMA attribute, NULL on failure.
|
||||
*/
|
||||
struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
|
||||
{
|
||||
@ -869,8 +1062,18 @@ err_attr:
|
||||
}
|
||||
EXPORT_SYMBOL(vme_dma_pci_attribute);
|
||||
|
||||
/*
|
||||
* Create "VME" type attributes
|
||||
/**
|
||||
* vme_dma_vme_attribute - Create "VME" type VME DMA list attribute.
|
||||
* @address: VME base address for DMA transfer.
|
||||
* @aspace: VME address space to use for DMA transfer.
|
||||
* @cycle: VME bus cycle to use for DMA transfer.
|
||||
* @dwidth: VME data width to use for DMA transfer.
|
||||
*
|
||||
* Create VME DMA list attribute pointing to a location on the VME bus for DMA
|
||||
* transfers. It is the responsibility of the user to free used attributes
|
||||
* using vme_dma_free_attribute().
|
||||
*
|
||||
* Return: Pointer to VME DMA attribute, NULL on failure.
|
||||
*/
|
||||
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
|
||||
u32 aspace, u32 cycle, u32 dwidth)
|
||||
@ -908,8 +1111,12 @@ err_attr:
|
||||
}
|
||||
EXPORT_SYMBOL(vme_dma_vme_attribute);
|
||||
|
||||
/*
|
||||
* Free attribute
|
||||
/**
|
||||
* vme_dma_free_attribute - Free DMA list attribute.
|
||||
* @attributes: Pointer to DMA list attribute.
|
||||
*
|
||||
* Free VME DMA list attribute. VME DMA list attributes can be safely freed
|
||||
* once vme_dma_list_add() has returned.
|
||||
*/
|
||||
void vme_dma_free_attribute(struct vme_dma_attr *attributes)
|
||||
{
|
||||
@ -918,6 +1125,23 @@ void vme_dma_free_attribute(struct vme_dma_attr *attributes)
|
||||
}
|
||||
EXPORT_SYMBOL(vme_dma_free_attribute);
|
||||
|
||||
/**
|
||||
* vme_dma_list_add - Add enty to a VME DMA list.
|
||||
* @list: Pointer to VME list.
|
||||
* @src: Pointer to DMA list attribute to use as source.
|
||||
* @dest: Pointer to DMA list attribute to use as destination.
|
||||
* @count: Number of bytes to transfer.
|
||||
*
|
||||
* Add an entry to the provided VME DMA list. Entry requires pointers to source
|
||||
* and destination DMA attributes and a count.
|
||||
*
|
||||
* Please note, the attributes supported as source and destinations for
|
||||
* transfers are hardware dependent.
|
||||
*
|
||||
* Return: Zero on success, -EINVAL if operation is not supported on this
|
||||
* device or if the link list has already been submitted for execution.
|
||||
* Hardware specific errors also possible.
|
||||
*/
|
||||
int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
|
||||
struct vme_dma_attr *dest, size_t count)
|
||||
{
|
||||
@ -942,6 +1166,16 @@ int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
|
||||
}
|
||||
EXPORT_SYMBOL(vme_dma_list_add);
|
||||
|
||||
/**
|
||||
* vme_dma_list_exec - Queue a VME DMA list for execution.
|
||||
* @list: Pointer to VME list.
|
||||
*
|
||||
* Queue the provided VME DMA list for execution. The call will return once the
|
||||
* list has been executed.
|
||||
*
|
||||
* Return: Zero on success, -EINVAL if operation is not supported on this
|
||||
* device. Hardware specific errors also possible.
|
||||
*/
|
||||
int vme_dma_list_exec(struct vme_dma_list *list)
|
||||
{
|
||||
struct vme_bridge *bridge = list->parent->parent;
|
||||
@ -962,6 +1196,15 @@ int vme_dma_list_exec(struct vme_dma_list *list)
|
||||
}
|
||||
EXPORT_SYMBOL(vme_dma_list_exec);
|
||||
|
||||
/**
|
||||
* vme_dma_list_free - Free a VME DMA list.
|
||||
* @list: Pointer to VME list.
|
||||
*
|
||||
* Free the provided DMA list and all its entries.
|
||||
*
|
||||
* Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource
|
||||
* is still in use. Hardware specific errors also possible.
|
||||
*/
|
||||
int vme_dma_list_free(struct vme_dma_list *list)
|
||||
{
|
||||
struct vme_bridge *bridge = list->parent->parent;
|
||||
@ -994,6 +1237,15 @@ int vme_dma_list_free(struct vme_dma_list *list)
|
||||
}
|
||||
EXPORT_SYMBOL(vme_dma_list_free);
|
||||
|
||||
/**
|
||||
* vme_dma_free - Free a VME DMA resource.
|
||||
* @resource: Pointer to VME DMA resource.
|
||||
*
|
||||
* Free the provided DMA resource so that it may be reallocated.
|
||||
*
|
||||
* Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource
|
||||
* is still active.
|
||||
*/
|
||||
int vme_dma_free(struct vme_resource *resource)
|
||||
{
|
||||
struct vme_dma_resource *ctrlr;
|
||||
@ -1099,6 +1351,22 @@ void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
|
||||
}
|
||||
EXPORT_SYMBOL(vme_irq_handler);
|
||||
|
||||
/**
|
||||
* vme_irq_request - Request a specific VME interrupt.
|
||||
* @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
|
||||
* @level: Interrupt priority being requested.
|
||||
* @statid: Interrupt vector being requested.
|
||||
* @callback: Pointer to callback function called when VME interrupt/vector
|
||||
* received.
|
||||
* @priv_data: Generic pointer that will be passed to the callback function.
|
||||
*
|
||||
* Request callback to be attached as a handler for VME interrupts with provided
|
||||
* level and statid.
|
||||
*
|
||||
* Return: Zero on success, -EINVAL on invalid vme device, level or if the
|
||||
* function is not supported, -EBUSY if the level/statid combination is
|
||||
* already in use. Hardware specific errors also possible.
|
||||
*/
|
||||
int vme_irq_request(struct vme_dev *vdev, int level, int statid,
|
||||
void (*callback)(int, int, void *),
|
||||
void *priv_data)
|
||||
@ -1142,6 +1410,14 @@ int vme_irq_request(struct vme_dev *vdev, int level, int statid,
|
||||
}
|
||||
EXPORT_SYMBOL(vme_irq_request);
|
||||
|
||||
/**
|
||||
* vme_irq_free - Free a VME interrupt.
|
||||
* @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
|
||||
* @level: Interrupt priority of interrupt being freed.
|
||||
* @statid: Interrupt vector of interrupt being freed.
|
||||
*
|
||||
* Remove previously attached callback from VME interrupt priority/vector.
|
||||
*/
|
||||
void vme_irq_free(struct vme_dev *vdev, int level, int statid)
|
||||
{
|
||||
struct vme_bridge *bridge;
|
||||
@ -1177,6 +1453,18 @@ void vme_irq_free(struct vme_dev *vdev, int level, int statid)
|
||||
}
|
||||
EXPORT_SYMBOL(vme_irq_free);
|
||||
|
||||
/**
|
||||
* vme_irq_generate - Generate VME interrupt.
|
||||
* @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
|
||||
* @level: Interrupt priority at which to assert the interrupt.
|
||||
* @statid: Interrupt vector to associate with the interrupt.
|
||||
*
|
||||
* Generate a VME interrupt of the provided level and with the provided
|
||||
* statid.
|
||||
*
|
||||
* Return: Zero on success, -EINVAL on invalid vme device, level or if the
|
||||
* function is not supported. Hardware specific errors also possible.
|
||||
*/
|
||||
int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
|
||||
{
|
||||
struct vme_bridge *bridge;
|
||||
@ -1201,8 +1489,15 @@ int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
|
||||
}
|
||||
EXPORT_SYMBOL(vme_irq_generate);
|
||||
|
||||
/*
|
||||
* Request the location monitor, return resource or NULL
|
||||
/**
|
||||
* vme_lm_request - Request a VME location monitor
|
||||
* @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
|
||||
*
|
||||
* Allocate a location monitor resource to the driver. A location monitor
|
||||
* allows the driver to monitor accesses to a contiguous number of
|
||||
* addresses on the VME bus.
|
||||
*
|
||||
* Return: Pointer to a VME resource on success or NULL on failure.
|
||||
*/
|
||||
struct vme_resource *vme_lm_request(struct vme_dev *vdev)
|
||||
{
|
||||
@ -1218,7 +1513,7 @@ struct vme_resource *vme_lm_request(struct vme_dev *vdev)
|
||||
goto err_bus;
|
||||
}
|
||||
|
||||
/* Loop through DMA resources */
|
||||
/* Loop through LM resources */
|
||||
list_for_each(lm_pos, &bridge->lm_resources) {
|
||||
lm = list_entry(lm_pos,
|
||||
struct vme_lm_resource, list);
|
||||
@ -1264,6 +1559,17 @@ err_bus:
|
||||
}
|
||||
EXPORT_SYMBOL(vme_lm_request);
|
||||
|
||||
/**
|
||||
* vme_lm_count - Determine number of VME Addresses monitored
|
||||
* @resource: Pointer to VME location monitor resource.
|
||||
*
|
||||
* The number of contiguous addresses monitored is hardware dependent.
|
||||
* Return the number of contiguous addresses monitored by the
|
||||
* location monitor.
|
||||
*
|
||||
* Return: Count of addresses monitored or -EINVAL when provided with an
|
||||
* invalid location monitor resource.
|
||||
*/
|
||||
int vme_lm_count(struct vme_resource *resource)
|
||||
{
|
||||
struct vme_lm_resource *lm;
|
||||
@ -1279,6 +1585,20 @@ int vme_lm_count(struct vme_resource *resource)
|
||||
}
|
||||
EXPORT_SYMBOL(vme_lm_count);
|
||||
|
||||
/**
|
||||
* vme_lm_set - Configure location monitor
|
||||
* @resource: Pointer to VME location monitor resource.
|
||||
* @lm_base: Base address to monitor.
|
||||
* @aspace: VME address space to monitor.
|
||||
* @cycle: VME bus cycle type to monitor.
|
||||
*
|
||||
* Set the base address, address space and cycle type of accesses to be
|
||||
* monitored by the location monitor.
|
||||
*
|
||||
* Return: Zero on success, -EINVAL when provided with an invalid location
|
||||
* monitor resource or function is not supported. Hardware specific
|
||||
* errors may also be returned.
|
||||
*/
|
||||
int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
|
||||
u32 aspace, u32 cycle)
|
||||
{
|
||||
@ -1301,6 +1621,20 @@ int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
|
||||
}
|
||||
EXPORT_SYMBOL(vme_lm_set);
|
||||
|
||||
/**
|
||||
* vme_lm_get - Retrieve location monitor settings
|
||||
* @resource: Pointer to VME location monitor resource.
|
||||
* @lm_base: Pointer used to output the base address monitored.
|
||||
* @aspace: Pointer used to output the address space monitored.
|
||||
* @cycle: Pointer used to output the VME bus cycle type monitored.
|
||||
*
|
||||
* Retrieve the base address, address space and cycle type of accesses to
|
||||
* be monitored by the location monitor.
|
||||
*
|
||||
* Return: Zero on success, -EINVAL when provided with an invalid location
|
||||
* monitor resource or function is not supported. Hardware specific
|
||||
* errors may also be returned.
|
||||
*/
|
||||
int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
|
||||
u32 *aspace, u32 *cycle)
|
||||
{
|
||||
@ -1323,6 +1657,21 @@ int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
|
||||
}
|
||||
EXPORT_SYMBOL(vme_lm_get);
|
||||
|
||||
/**
|
||||
* vme_lm_attach - Provide callback for location monitor address
|
||||
* @resource: Pointer to VME location monitor resource.
|
||||
* @monitor: Offset to which callback should be attached.
|
||||
* @callback: Pointer to callback function called when triggered.
|
||||
* @data: Generic pointer that will be passed to the callback function.
|
||||
*
|
||||
* Attach a callback to the specificed offset into the location monitors
|
||||
* monitored addresses. A generic pointer is provided to allow data to be
|
||||
* passed to the callback when called.
|
||||
*
|
||||
* Return: Zero on success, -EINVAL when provided with an invalid location
|
||||
* monitor resource or function is not supported. Hardware specific
|
||||
* errors may also be returned.
|
||||
*/
|
||||
int vme_lm_attach(struct vme_resource *resource, int monitor,
|
||||
void (*callback)(void *), void *data)
|
||||
{
|
||||
@ -1345,6 +1694,18 @@ int vme_lm_attach(struct vme_resource *resource, int monitor,
|
||||
}
|
||||
EXPORT_SYMBOL(vme_lm_attach);
|
||||
|
||||
/**
|
||||
* vme_lm_detach - Remove callback for location monitor address
|
||||
* @resource: Pointer to VME location monitor resource.
|
||||
* @monitor: Offset to which callback should be removed.
|
||||
*
|
||||
* Remove the callback associated with the specificed offset into the
|
||||
* location monitors monitored addresses.
|
||||
*
|
||||
* Return: Zero on success, -EINVAL when provided with an invalid location
|
||||
* monitor resource or function is not supported. Hardware specific
|
||||
* errors may also be returned.
|
||||
*/
|
||||
int vme_lm_detach(struct vme_resource *resource, int monitor)
|
||||
{
|
||||
struct vme_bridge *bridge = find_bridge(resource);
|
||||
@ -1366,6 +1727,18 @@ int vme_lm_detach(struct vme_resource *resource, int monitor)
|
||||
}
|
||||
EXPORT_SYMBOL(vme_lm_detach);
|
||||
|
||||
/**
|
||||
* vme_lm_free - Free allocated VME location monitor
|
||||
* @resource: Pointer to VME location monitor resource.
|
||||
*
|
||||
* Free allocation of a VME location monitor.
|
||||
*
|
||||
* WARNING: This function currently expects that any callbacks that have
|
||||
* been attached to the location monitor have been removed.
|
||||
*
|
||||
* Return: Zero on success, -EINVAL when provided with an invalid location
|
||||
* monitor resource.
|
||||
*/
|
||||
void vme_lm_free(struct vme_resource *resource)
|
||||
{
|
||||
struct vme_lm_resource *lm;
|
||||
@ -1392,6 +1765,16 @@ void vme_lm_free(struct vme_resource *resource)
|
||||
}
|
||||
EXPORT_SYMBOL(vme_lm_free);
|
||||
|
||||
/**
|
||||
* vme_slot_num - Retrieve slot ID
|
||||
* @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
|
||||
*
|
||||
* Retrieve the slot ID associated with the provided VME device.
|
||||
*
|
||||
* Return: The slot ID on success, -EINVAL if VME bridge cannot be determined
|
||||
* or the function is not supported. Hardware specific errors may also
|
||||
* be returned.
|
||||
*/
|
||||
int vme_slot_num(struct vme_dev *vdev)
|
||||
{
|
||||
struct vme_bridge *bridge;
|
||||
@ -1411,6 +1794,15 @@ int vme_slot_num(struct vme_dev *vdev)
|
||||
}
|
||||
EXPORT_SYMBOL(vme_slot_num);
|
||||
|
||||
/**
|
||||
* vme_bus_num - Retrieve bus number
|
||||
* @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
|
||||
*
|
||||
* Retrieve the bus enumeration associated with the provided VME device.
|
||||
*
|
||||
* Return: The bus number on success, -EINVAL if VME bridge cannot be
|
||||
* determined.
|
||||
*/
|
||||
int vme_bus_num(struct vme_dev *vdev)
|
||||
{
|
||||
struct vme_bridge *bridge;
|
||||
@ -1556,6 +1948,15 @@ static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* vme_register_driver - Register a VME driver
|
||||
* @drv: Pointer to VME driver structure to register.
|
||||
* @ndevs: Maximum number of devices to allow to be enumerated.
|
||||
*
|
||||
* Register a VME device driver with the VME subsystem.
|
||||
*
|
||||
* Return: Zero on success, error value on registration failure.
|
||||
*/
|
||||
int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
|
||||
{
|
||||
int err;
|
||||
@ -1576,6 +1977,12 @@ int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
|
||||
}
|
||||
EXPORT_SYMBOL(vme_register_driver);
|
||||
|
||||
/**
|
||||
* vme_unregister_driver - Unregister a VME driver
|
||||
* @drv: Pointer to VME driver structure to unregister.
|
||||
*
|
||||
* Unregister a VME device driver from the VME subsystem.
|
||||
*/
|
||||
void vme_unregister_driver(struct vme_driver *drv)
|
||||
{
|
||||
struct vme_dev *dev, *dev_tmp;
|
||||
|
@ -86,6 +86,12 @@ config W1_SLAVE_DS2433_CRC
|
||||
Each block has 30 bytes of data and a two byte CRC16.
|
||||
Full block writes are only allowed if the CRC is valid.
|
||||
|
||||
config W1_SLAVE_DS2438
|
||||
tristate "DS2438 Smart Battery Monitor 0x26 family support"
|
||||
help
|
||||
Say Y here if you want to use a 1-wire
|
||||
DS2438 Smart Battery Monitor device support
|
||||
|
||||
config W1_SLAVE_DS2760
|
||||
tristate "Dallas 2760 battery monitor chip (HP iPAQ & others)"
|
||||
help
|
||||
|
@ -11,6 +11,7 @@ obj-$(CONFIG_W1_SLAVE_DS2406) += w1_ds2406.o
|
||||
obj-$(CONFIG_W1_SLAVE_DS2423) += w1_ds2423.o
|
||||
obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o
|
||||
obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o
|
||||
obj-$(CONFIG_W1_SLAVE_DS2438) += w1_ds2438.o
|
||||
obj-$(CONFIG_W1_SLAVE_DS2760) += w1_ds2760.o
|
||||
obj-$(CONFIG_W1_SLAVE_DS2780) += w1_ds2780.o
|
||||
obj-$(CONFIG_W1_SLAVE_DS2781) += w1_ds2781.o
|
||||
|
390
drivers/w1/slaves/w1_ds2438.c
Normal file
390
drivers/w1/slaves/w1_ds2438.c
Normal file
@ -0,0 +1,390 @@
|
||||
/*
|
||||
* 1-Wire implementation for the ds2438 chip
|
||||
*
|
||||
* Copyright (c) 2017 Mariusz Bialonczyk <manio@skyboo.net>
|
||||
*
|
||||
* This source code is licensed under the GNU General Public License,
|
||||
* Version 2. See the file COPYING for more details.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/delay.h>
|
||||
|
||||
#include "../w1.h"
|
||||
#include "../w1_family.h"
|
||||
|
||||
#define W1_DS2438_RETRIES 3
|
||||
|
||||
/* Memory commands */
|
||||
#define W1_DS2438_READ_SCRATCH 0xBE
|
||||
#define W1_DS2438_WRITE_SCRATCH 0x4E
|
||||
#define W1_DS2438_COPY_SCRATCH 0x48
|
||||
#define W1_DS2438_RECALL_MEMORY 0xB8
|
||||
/* Register commands */
|
||||
#define W1_DS2438_CONVERT_TEMP 0x44
|
||||
#define W1_DS2438_CONVERT_VOLTAGE 0xB4
|
||||
|
||||
#define DS2438_PAGE_SIZE 8
|
||||
#define DS2438_ADC_INPUT_VAD 0
|
||||
#define DS2438_ADC_INPUT_VDD 1
|
||||
#define DS2438_MAX_CONVERSION_TIME 10 /* ms */
|
||||
|
||||
/* Page #0 definitions */
|
||||
#define DS2438_STATUS_REG 0x00 /* Status/Configuration Register */
|
||||
#define DS2438_STATUS_IAD (1 << 0) /* Current A/D Control Bit */
|
||||
#define DS2438_STATUS_CA (1 << 1) /* Current Accumulator Configuration */
|
||||
#define DS2438_STATUS_EE (1 << 2) /* Current Accumulator Shadow Selector bit */
|
||||
#define DS2438_STATUS_AD (1 << 3) /* Voltage A/D Input Select Bit */
|
||||
#define DS2438_STATUS_TB (1 << 4) /* Temperature Busy Flag */
|
||||
#define DS2438_STATUS_NVB (1 << 5) /* Nonvolatile Memory Busy Flag */
|
||||
#define DS2438_STATUS_ADB (1 << 6) /* A/D Converter Busy Flag */
|
||||
|
||||
#define DS2438_TEMP_LSB 0x01
|
||||
#define DS2438_TEMP_MSB 0x02
|
||||
#define DS2438_VOLTAGE_LSB 0x03
|
||||
#define DS2438_VOLTAGE_MSB 0x04
|
||||
#define DS2438_CURRENT_LSB 0x05
|
||||
#define DS2438_CURRENT_MSB 0x06
|
||||
#define DS2438_THRESHOLD 0x07
|
||||
|
||||
int w1_ds2438_get_page(struct w1_slave *sl, int pageno, u8 *buf)
|
||||
{
|
||||
unsigned int retries = W1_DS2438_RETRIES;
|
||||
u8 w1_buf[2];
|
||||
u8 crc;
|
||||
size_t count;
|
||||
|
||||
while (retries--) {
|
||||
crc = 0;
|
||||
|
||||
if (w1_reset_select_slave(sl))
|
||||
continue;
|
||||
w1_buf[0] = W1_DS2438_RECALL_MEMORY;
|
||||
w1_buf[1] = 0x00;
|
||||
w1_write_block(sl->master, w1_buf, 2);
|
||||
|
||||
if (w1_reset_select_slave(sl))
|
||||
continue;
|
||||
w1_buf[0] = W1_DS2438_READ_SCRATCH;
|
||||
w1_buf[1] = 0x00;
|
||||
w1_write_block(sl->master, w1_buf, 2);
|
||||
|
||||
count = w1_read_block(sl->master, buf, DS2438_PAGE_SIZE + 1);
|
||||
if (count == DS2438_PAGE_SIZE + 1) {
|
||||
crc = w1_calc_crc8(buf, DS2438_PAGE_SIZE);
|
||||
|
||||
/* check for correct CRC */
|
||||
if ((u8)buf[DS2438_PAGE_SIZE] == crc)
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
int w1_ds2438_get_temperature(struct w1_slave *sl, int16_t *temperature)
|
||||
{
|
||||
unsigned int retries = W1_DS2438_RETRIES;
|
||||
u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
|
||||
unsigned int tm = DS2438_MAX_CONVERSION_TIME;
|
||||
unsigned long sleep_rem;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&sl->master->bus_mutex);
|
||||
|
||||
while (retries--) {
|
||||
if (w1_reset_select_slave(sl))
|
||||
continue;
|
||||
w1_write_8(sl->master, W1_DS2438_CONVERT_TEMP);
|
||||
|
||||
mutex_unlock(&sl->master->bus_mutex);
|
||||
sleep_rem = msleep_interruptible(tm);
|
||||
if (sleep_rem != 0) {
|
||||
ret = -1;
|
||||
goto post_unlock;
|
||||
}
|
||||
|
||||
if (mutex_lock_interruptible(&sl->master->bus_mutex) != 0) {
|
||||
ret = -1;
|
||||
goto post_unlock;
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
if (w1_ds2438_get_page(sl, 0, w1_buf) == 0) {
|
||||
*temperature = (((int16_t) w1_buf[DS2438_TEMP_MSB]) << 8) | ((uint16_t) w1_buf[DS2438_TEMP_LSB]);
|
||||
ret = 0;
|
||||
} else
|
||||
ret = -1;
|
||||
|
||||
mutex_unlock(&sl->master->bus_mutex);
|
||||
|
||||
post_unlock:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int w1_ds2438_change_config_bit(struct w1_slave *sl, u8 mask, u8 value)
|
||||
{
|
||||
unsigned int retries = W1_DS2438_RETRIES;
|
||||
u8 w1_buf[3];
|
||||
u8 status;
|
||||
int perform_write = 0;
|
||||
|
||||
while (retries--) {
|
||||
if (w1_reset_select_slave(sl))
|
||||
continue;
|
||||
w1_buf[0] = W1_DS2438_RECALL_MEMORY;
|
||||
w1_buf[1] = 0x00;
|
||||
w1_write_block(sl->master, w1_buf, 2);
|
||||
|
||||
if (w1_reset_select_slave(sl))
|
||||
continue;
|
||||
w1_buf[0] = W1_DS2438_READ_SCRATCH;
|
||||
w1_buf[1] = 0x00;
|
||||
w1_write_block(sl->master, w1_buf, 2);
|
||||
|
||||
/* reading one byte of result */
|
||||
status = w1_read_8(sl->master);
|
||||
|
||||
/* if bit0=1, set a value to a mask for easy compare */
|
||||
if (value)
|
||||
value = mask;
|
||||
|
||||
if ((status & mask) == value)
|
||||
return 0; /* already set as requested */
|
||||
else {
|
||||
/* changing bit */
|
||||
status ^= mask;
|
||||
perform_write = 1;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if (perform_write) {
|
||||
retries = W1_DS2438_RETRIES;
|
||||
while (retries--) {
|
||||
if (w1_reset_select_slave(sl))
|
||||
continue;
|
||||
w1_buf[0] = W1_DS2438_WRITE_SCRATCH;
|
||||
w1_buf[1] = 0x00;
|
||||
w1_buf[2] = status;
|
||||
w1_write_block(sl->master, w1_buf, 3);
|
||||
|
||||
if (w1_reset_select_slave(sl))
|
||||
continue;
|
||||
w1_buf[0] = W1_DS2438_COPY_SCRATCH;
|
||||
w1_buf[1] = 0x00;
|
||||
w1_write_block(sl->master, w1_buf, 2);
|
||||
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
uint16_t w1_ds2438_get_voltage(struct w1_slave *sl, int adc_input, uint16_t *voltage)
|
||||
{
|
||||
unsigned int retries = W1_DS2438_RETRIES;
|
||||
u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
|
||||
unsigned int tm = DS2438_MAX_CONVERSION_TIME;
|
||||
unsigned long sleep_rem;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&sl->master->bus_mutex);
|
||||
|
||||
if (w1_ds2438_change_config_bit(sl, DS2438_STATUS_AD, adc_input)) {
|
||||
ret = -1;
|
||||
goto pre_unlock;
|
||||
}
|
||||
|
||||
while (retries--) {
|
||||
if (w1_reset_select_slave(sl))
|
||||
continue;
|
||||
w1_write_8(sl->master, W1_DS2438_CONVERT_VOLTAGE);
|
||||
|
||||
mutex_unlock(&sl->master->bus_mutex);
|
||||
sleep_rem = msleep_interruptible(tm);
|
||||
if (sleep_rem != 0) {
|
||||
ret = -1;
|
||||
goto post_unlock;
|
||||
}
|
||||
|
||||
if (mutex_lock_interruptible(&sl->master->bus_mutex) != 0) {
|
||||
ret = -1;
|
||||
goto post_unlock;
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
if (w1_ds2438_get_page(sl, 0, w1_buf) == 0) {
|
||||
*voltage = (((uint16_t) w1_buf[DS2438_VOLTAGE_MSB]) << 8) | ((uint16_t) w1_buf[DS2438_VOLTAGE_LSB]);
|
||||
ret = 0;
|
||||
} else
|
||||
ret = -1;
|
||||
|
||||
pre_unlock:
|
||||
mutex_unlock(&sl->master->bus_mutex);
|
||||
|
||||
post_unlock:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t iad_write(struct file *filp, struct kobject *kobj,
|
||||
struct bin_attribute *bin_attr, char *buf,
|
||||
loff_t off, size_t count)
|
||||
{
|
||||
struct w1_slave *sl = kobj_to_w1_slave(kobj);
|
||||
int ret;
|
||||
|
||||
if (count != 1 || off != 0)
|
||||
return -EFAULT;
|
||||
|
||||
mutex_lock(&sl->master->bus_mutex);
|
||||
|
||||
if (w1_ds2438_change_config_bit(sl, DS2438_STATUS_IAD, *buf & 0x01) == 0)
|
||||
ret = 1;
|
||||
else
|
||||
ret = -EIO;
|
||||
|
||||
mutex_unlock(&sl->master->bus_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t page0_read(struct file *filp, struct kobject *kobj,
|
||||
struct bin_attribute *bin_attr, char *buf,
|
||||
loff_t off, size_t count)
|
||||
{
|
||||
struct w1_slave *sl = kobj_to_w1_slave(kobj);
|
||||
int ret;
|
||||
u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
|
||||
|
||||
if (off != 0)
|
||||
return 0;
|
||||
if (!buf)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&sl->master->bus_mutex);
|
||||
|
||||
if (w1_ds2438_get_page(sl, 0, w1_buf) == 0) {
|
||||
memcpy(buf, &w1_buf, DS2438_PAGE_SIZE);
|
||||
ret = DS2438_PAGE_SIZE;
|
||||
} else
|
||||
ret = -EIO;
|
||||
|
||||
mutex_unlock(&sl->master->bus_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t temperature_read(struct file *filp, struct kobject *kobj,
|
||||
struct bin_attribute *bin_attr, char *buf,
|
||||
loff_t off, size_t count)
|
||||
{
|
||||
struct w1_slave *sl = kobj_to_w1_slave(kobj);
|
||||
int ret;
|
||||
ssize_t c = PAGE_SIZE;
|
||||
int16_t temp;
|
||||
|
||||
if (off != 0)
|
||||
return 0;
|
||||
if (!buf)
|
||||
return -EINVAL;
|
||||
|
||||
if (w1_ds2438_get_temperature(sl, &temp) == 0) {
|
||||
c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", temp);
|
||||
ret = PAGE_SIZE - c;
|
||||
} else
|
||||
ret = -EIO;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t vad_read(struct file *filp, struct kobject *kobj,
|
||||
struct bin_attribute *bin_attr, char *buf,
|
||||
loff_t off, size_t count)
|
||||
{
|
||||
struct w1_slave *sl = kobj_to_w1_slave(kobj);
|
||||
int ret;
|
||||
ssize_t c = PAGE_SIZE;
|
||||
uint16_t voltage;
|
||||
|
||||
if (off != 0)
|
||||
return 0;
|
||||
if (!buf)
|
||||
return -EINVAL;
|
||||
|
||||
if (w1_ds2438_get_voltage(sl, DS2438_ADC_INPUT_VAD, &voltage) == 0) {
|
||||
c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", voltage);
|
||||
ret = PAGE_SIZE - c;
|
||||
} else
|
||||
ret = -EIO;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t vdd_read(struct file *filp, struct kobject *kobj,
|
||||
struct bin_attribute *bin_attr, char *buf,
|
||||
loff_t off, size_t count)
|
||||
{
|
||||
struct w1_slave *sl = kobj_to_w1_slave(kobj);
|
||||
int ret;
|
||||
ssize_t c = PAGE_SIZE;
|
||||
uint16_t voltage;
|
||||
|
||||
if (off != 0)
|
||||
return 0;
|
||||
if (!buf)
|
||||
return -EINVAL;
|
||||
|
||||
if (w1_ds2438_get_voltage(sl, DS2438_ADC_INPUT_VDD, &voltage) == 0) {
|
||||
c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", voltage);
|
||||
ret = PAGE_SIZE - c;
|
||||
} else
|
||||
ret = -EIO;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static BIN_ATTR(iad, S_IRUGO | S_IWUSR | S_IWGRP, NULL, iad_write, 1);
|
||||
static BIN_ATTR_RO(page0, DS2438_PAGE_SIZE);
|
||||
static BIN_ATTR_RO(temperature, 0/* real length varies */);
|
||||
static BIN_ATTR_RO(vad, 0/* real length varies */);
|
||||
static BIN_ATTR_RO(vdd, 0/* real length varies */);
|
||||
|
||||
static struct bin_attribute *w1_ds2438_bin_attrs[] = {
|
||||
&bin_attr_iad,
|
||||
&bin_attr_page0,
|
||||
&bin_attr_temperature,
|
||||
&bin_attr_vad,
|
||||
&bin_attr_vdd,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static const struct attribute_group w1_ds2438_group = {
|
||||
.bin_attrs = w1_ds2438_bin_attrs,
|
||||
};
|
||||
|
||||
static const struct attribute_group *w1_ds2438_groups[] = {
|
||||
&w1_ds2438_group,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct w1_family_ops w1_ds2438_fops = {
|
||||
.groups = w1_ds2438_groups,
|
||||
};
|
||||
|
||||
static struct w1_family w1_ds2438_family = {
|
||||
.fid = W1_FAMILY_DS2438,
|
||||
.fops = &w1_ds2438_fops,
|
||||
};
|
||||
module_w1_family(w1_ds2438_family);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Mariusz Bialonczyk <manio@skyboo.net>");
|
||||
MODULE_DESCRIPTION("1-wire driver for Maxim/Dallas DS2438 Smart Battery Monitor");
|
||||
MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2438));
|
@ -24,11 +24,13 @@
|
||||
#define DS2760_DATA_SIZE 0x40
|
||||
|
||||
#define DS2760_PROTECTION_REG 0x00
|
||||
|
||||
#define DS2760_STATUS_REG 0x01
|
||||
#define DS2760_STATUS_IE (1 << 2)
|
||||
#define DS2760_STATUS_SWEN (1 << 3)
|
||||
#define DS2760_STATUS_RNAOP (1 << 4)
|
||||
#define DS2760_STATUS_PMOD (1 << 5)
|
||||
#define DS2760_STATUS_IE (1 << 2)
|
||||
#define DS2760_STATUS_SWEN (1 << 3)
|
||||
#define DS2760_STATUS_RNAOP (1 << 4)
|
||||
#define DS2760_STATUS_PMOD (1 << 5)
|
||||
|
||||
#define DS2760_EEPROM_REG 0x07
|
||||
#define DS2760_SPECIAL_FEATURE_REG 0x08
|
||||
#define DS2760_VOLTAGE_MSB 0x0c
|
||||
|
@ -29,6 +29,7 @@
|
||||
#define W1_COUNTER_DS2423 0x1D
|
||||
#define W1_THERM_DS1822 0x22
|
||||
#define W1_EEPROM_DS2433 0x23
|
||||
#define W1_FAMILY_DS2438 0x26
|
||||
#define W1_THERM_DS18B20 0x28
|
||||
#define W1_FAMILY_DS2408 0x29
|
||||
#define W1_EEPROM_DS2431 0x2D
|
||||
|
@ -14,6 +14,8 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/zorro.h>
|
||||
|
||||
#include "zorro.h"
|
||||
|
||||
|
||||
/**
|
||||
* zorro_match_device - Tell if a Zorro device structure has a matching
|
||||
@ -161,12 +163,13 @@ static int zorro_uevent(struct device *dev, struct kobj_uevent_env *env)
|
||||
}
|
||||
|
||||
struct bus_type zorro_bus_type = {
|
||||
.name = "zorro",
|
||||
.dev_name = "zorro",
|
||||
.match = zorro_bus_match,
|
||||
.uevent = zorro_uevent,
|
||||
.probe = zorro_device_probe,
|
||||
.remove = zorro_device_remove,
|
||||
.name = "zorro",
|
||||
.dev_name = "zorro",
|
||||
.dev_groups = zorro_device_attribute_groups,
|
||||
.match = zorro_bus_match,
|
||||
.uevent = zorro_uevent,
|
||||
.probe = zorro_device_probe,
|
||||
.remove = zorro_device_remove,
|
||||
};
|
||||
EXPORT_SYMBOL(zorro_bus_type);
|
||||
|
||||
|
@ -23,33 +23,33 @@
|
||||
|
||||
/* show configuration fields */
|
||||
#define zorro_config_attr(name, field, format_string) \
|
||||
static ssize_t \
|
||||
show_##name(struct device *dev, struct device_attribute *attr, char *buf) \
|
||||
static ssize_t name##_show(struct device *dev, \
|
||||
struct device_attribute *attr, char *buf) \
|
||||
{ \
|
||||
struct zorro_dev *z; \
|
||||
\
|
||||
z = to_zorro_dev(dev); \
|
||||
return sprintf(buf, format_string, z->field); \
|
||||
} \
|
||||
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
|
||||
static DEVICE_ATTR_RO(name);
|
||||
|
||||
zorro_config_attr(id, id, "0x%08x\n");
|
||||
zorro_config_attr(type, rom.er_Type, "0x%02x\n");
|
||||
zorro_config_attr(slotaddr, slotaddr, "0x%04x\n");
|
||||
zorro_config_attr(slotsize, slotsize, "0x%04x\n");
|
||||
|
||||
static ssize_t
|
||||
show_serial(struct device *dev, struct device_attribute *attr, char *buf)
|
||||
static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct zorro_dev *z;
|
||||
|
||||
z = to_zorro_dev(dev);
|
||||
return sprintf(buf, "0x%08x\n", be32_to_cpu(z->rom.er_SerialNumber));
|
||||
}
|
||||
static DEVICE_ATTR_RO(serial);
|
||||
|
||||
static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
|
||||
|
||||
static ssize_t zorro_show_resource(struct device *dev, struct device_attribute *attr, char *buf)
|
||||
static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct zorro_dev *z = to_zorro_dev(dev);
|
||||
|
||||
@ -58,8 +58,27 @@ static ssize_t zorro_show_resource(struct device *dev, struct device_attribute *
|
||||
(unsigned long)zorro_resource_end(z),
|
||||
zorro_resource_flags(z));
|
||||
}
|
||||
static DEVICE_ATTR_RO(resource);
|
||||
|
||||
static DEVICE_ATTR(resource, S_IRUGO, zorro_show_resource, NULL);
|
||||
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct zorro_dev *z = to_zorro_dev(dev);
|
||||
|
||||
return sprintf(buf, ZORRO_DEVICE_MODALIAS_FMT "\n", z->id);
|
||||
}
|
||||
static DEVICE_ATTR_RO(modalias);
|
||||
|
||||
static struct attribute *zorro_device_attrs[] = {
|
||||
&dev_attr_id.attr,
|
||||
&dev_attr_type.attr,
|
||||
&dev_attr_serial.attr,
|
||||
&dev_attr_slotaddr.attr,
|
||||
&dev_attr_slotsize.attr,
|
||||
&dev_attr_resource.attr,
|
||||
&dev_attr_modalias.attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
static ssize_t zorro_read_config(struct file *filp, struct kobject *kobj,
|
||||
struct bin_attribute *bin_attr,
|
||||
@ -88,32 +107,17 @@ static struct bin_attribute zorro_config_attr = {
|
||||
.read = zorro_read_config,
|
||||
};
|
||||
|
||||
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct zorro_dev *z = to_zorro_dev(dev);
|
||||
static struct bin_attribute *zorro_device_bin_attrs[] = {
|
||||
&zorro_config_attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
return sprintf(buf, ZORRO_DEVICE_MODALIAS_FMT "\n", z->id);
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(modalias, S_IRUGO, modalias_show, NULL);
|
||||
|
||||
int zorro_create_sysfs_dev_files(struct zorro_dev *z)
|
||||
{
|
||||
struct device *dev = &z->dev;
|
||||
int error;
|
||||
|
||||
/* current configuration's attributes */
|
||||
if ((error = device_create_file(dev, &dev_attr_id)) ||
|
||||
(error = device_create_file(dev, &dev_attr_type)) ||
|
||||
(error = device_create_file(dev, &dev_attr_serial)) ||
|
||||
(error = device_create_file(dev, &dev_attr_slotaddr)) ||
|
||||
(error = device_create_file(dev, &dev_attr_slotsize)) ||
|
||||
(error = device_create_file(dev, &dev_attr_resource)) ||
|
||||
(error = device_create_file(dev, &dev_attr_modalias)) ||
|
||||
(error = sysfs_create_bin_file(&dev->kobj, &zorro_config_attr)))
|
||||
return error;
|
||||
|
||||
return 0;
|
||||
}
|
||||
static const struct attribute_group zorro_device_attr_group = {
|
||||
.attrs = zorro_device_attrs,
|
||||
.bin_attrs = zorro_device_bin_attrs,
|
||||
};
|
||||
|
||||
const struct attribute_group *zorro_device_attribute_groups[] = {
|
||||
&zorro_device_attr_group,
|
||||
NULL
|
||||
};
|
||||
|
@ -197,9 +197,6 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
|
||||
put_device(&z->dev);
|
||||
continue;
|
||||
}
|
||||
error = zorro_create_sysfs_dev_files(z);
|
||||
if (error)
|
||||
dev_err(&z->dev, "Error creating sysfs files\n");
|
||||
}
|
||||
|
||||
/* Mark all available Zorro II memory */
|
||||
|
@ -5,5 +5,4 @@ extern void zorro_name_device(struct zorro_dev *z);
|
||||
static inline void zorro_name_device(struct zorro_dev *dev) { }
|
||||
#endif
|
||||
|
||||
extern int zorro_create_sysfs_dev_files(struct zorro_dev *z);
|
||||
|
||||
extern const struct attribute_group *zorro_device_attribute_groups[];
|
||||
|
121
fs/block_dev.c
121
fs/block_dev.c
@ -18,6 +18,7 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/blkpg.h>
|
||||
#include <linux/magic.h>
|
||||
#include <linux/dax.h>
|
||||
#include <linux/buffer_head.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/pagevec.h>
|
||||
@ -717,50 +718,18 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bdev_write_page);
|
||||
|
||||
/**
|
||||
* bdev_direct_access() - Get the address for directly-accessibly memory
|
||||
* @bdev: The device containing the memory
|
||||
* @dax: control and output parameters for ->direct_access
|
||||
*
|
||||
* If a block device is made up of directly addressable memory, this function
|
||||
* will tell the caller the PFN and the address of the memory. The address
|
||||
* may be directly dereferenced within the kernel without the need to call
|
||||
* ioremap(), kmap() or similar. The PFN is suitable for inserting into
|
||||
* page tables.
|
||||
*
|
||||
* Return: negative errno if an error occurs, otherwise the number of bytes
|
||||
* accessible at this address.
|
||||
*/
|
||||
long bdev_direct_access(struct block_device *bdev, struct blk_dax_ctl *dax)
|
||||
int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
|
||||
pgoff_t *pgoff)
|
||||
{
|
||||
sector_t sector = dax->sector;
|
||||
long avail, size = dax->size;
|
||||
const struct block_device_operations *ops = bdev->bd_disk->fops;
|
||||
phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;
|
||||
|
||||
/*
|
||||
* The device driver is allowed to sleep, in order to make the
|
||||
* memory directly accessible.
|
||||
*/
|
||||
might_sleep();
|
||||
|
||||
if (size < 0)
|
||||
return size;
|
||||
if (!blk_queue_dax(bdev_get_queue(bdev)) || !ops->direct_access)
|
||||
return -EOPNOTSUPP;
|
||||
if ((sector + DIV_ROUND_UP(size, 512)) >
|
||||
part_nr_sects_read(bdev->bd_part))
|
||||
return -ERANGE;
|
||||
sector += get_start_sect(bdev);
|
||||
if (sector % (PAGE_SIZE / 512))
|
||||
if (pgoff)
|
||||
*pgoff = PHYS_PFN(phys_off);
|
||||
if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
|
||||
return -EINVAL;
|
||||
avail = ops->direct_access(bdev, sector, &dax->addr, &dax->pfn, size);
|
||||
if (!avail)
|
||||
return -ERANGE;
|
||||
if (avail > 0 && avail & ~PAGE_MASK)
|
||||
return -ENXIO;
|
||||
return min(avail, size);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bdev_direct_access);
|
||||
EXPORT_SYMBOL(bdev_dax_pgoff);
|
||||
|
||||
/**
|
||||
* bdev_dax_supported() - Check if the device supports dax for filesystem
|
||||
@ -774,63 +743,47 @@ EXPORT_SYMBOL_GPL(bdev_direct_access);
|
||||
*/
|
||||
int bdev_dax_supported(struct super_block *sb, int blocksize)
|
||||
{
|
||||
struct blk_dax_ctl dax = {
|
||||
.sector = 0,
|
||||
.size = PAGE_SIZE,
|
||||
};
|
||||
int err;
|
||||
struct block_device *bdev = sb->s_bdev;
|
||||
struct dax_device *dax_dev;
|
||||
pgoff_t pgoff;
|
||||
int err, id;
|
||||
void *kaddr;
|
||||
pfn_t pfn;
|
||||
long len;
|
||||
|
||||
if (blocksize != PAGE_SIZE) {
|
||||
vfs_msg(sb, KERN_ERR, "error: unsupported blocksize for dax");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
err = bdev_direct_access(sb->s_bdev, &dax);
|
||||
if (err < 0) {
|
||||
switch (err) {
|
||||
case -EOPNOTSUPP:
|
||||
vfs_msg(sb, KERN_ERR,
|
||||
"error: device does not support dax");
|
||||
break;
|
||||
case -EINVAL:
|
||||
vfs_msg(sb, KERN_ERR,
|
||||
"error: unaligned partition for dax");
|
||||
break;
|
||||
default:
|
||||
vfs_msg(sb, KERN_ERR,
|
||||
"error: dax access failed (%d)", err);
|
||||
}
|
||||
err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
|
||||
if (err) {
|
||||
vfs_msg(sb, KERN_ERR, "error: unaligned partition for dax");
|
||||
return err;
|
||||
}
|
||||
|
||||
dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
|
||||
if (!dax_dev) {
|
||||
vfs_msg(sb, KERN_ERR, "error: device does not support dax");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
id = dax_read_lock();
|
||||
len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
|
||||
dax_read_unlock(id);
|
||||
|
||||
put_dax(dax_dev);
|
||||
|
||||
if (len < 1) {
|
||||
vfs_msg(sb, KERN_ERR,
|
||||
"error: dax access failed (%ld)", len);
|
||||
return len < 0 ? len : -EIO;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bdev_dax_supported);
|
||||
|
||||
/**
|
||||
* bdev_dax_capable() - Return if the raw device is capable for dax
|
||||
* @bdev: The device for raw block device access
|
||||
*/
|
||||
bool bdev_dax_capable(struct block_device *bdev)
|
||||
{
|
||||
struct blk_dax_ctl dax = {
|
||||
.size = PAGE_SIZE,
|
||||
};
|
||||
|
||||
if (!IS_ENABLED(CONFIG_FS_DAX))
|
||||
return false;
|
||||
|
||||
dax.sector = 0;
|
||||
if (bdev_direct_access(bdev, &dax) < 0)
|
||||
return false;
|
||||
|
||||
dax.sector = bdev->bd_part->nr_sects - (PAGE_SIZE / 512);
|
||||
if (bdev_direct_access(bdev, &dax) < 0)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* pseudo-fs
|
||||
*/
|
||||
|
@ -471,6 +471,85 @@ int cdev_add(struct cdev *p, dev_t dev, unsigned count)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdev_set_parent() - set the parent kobject for a char device
|
||||
* @p: the cdev structure
|
||||
* @kobj: the kobject to take a reference to
|
||||
*
|
||||
* cdev_set_parent() sets a parent kobject which will be referenced
|
||||
* appropriately so the parent is not freed before the cdev. This
|
||||
* should be called before cdev_add.
|
||||
*/
|
||||
void cdev_set_parent(struct cdev *p, struct kobject *kobj)
|
||||
{
|
||||
WARN_ON(!kobj->state_initialized);
|
||||
p->kobj.parent = kobj;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdev_device_add() - add a char device and it's corresponding
|
||||
* struct device, linkink
|
||||
* @dev: the device structure
|
||||
* @cdev: the cdev structure
|
||||
*
|
||||
* cdev_device_add() adds the char device represented by @cdev to the system,
|
||||
* just as cdev_add does. It then adds @dev to the system using device_add
|
||||
* The dev_t for the char device will be taken from the struct device which
|
||||
* needs to be initialized first. This helper function correctly takes a
|
||||
* reference to the parent device so the parent will not get released until
|
||||
* all references to the cdev are released.
|
||||
*
|
||||
* This helper uses dev->devt for the device number. If it is not set
|
||||
* it will not add the cdev and it will be equivalent to device_add.
|
||||
*
|
||||
* This function should be used whenever the struct cdev and the
|
||||
* struct device are members of the same structure whose lifetime is
|
||||
* managed by the struct device.
|
||||
*
|
||||
* NOTE: Callers must assume that userspace was able to open the cdev and
|
||||
* can call cdev fops callbacks at any time, even if this function fails.
|
||||
*/
|
||||
int cdev_device_add(struct cdev *cdev, struct device *dev)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
if (dev->devt) {
|
||||
cdev_set_parent(cdev, &dev->kobj);
|
||||
|
||||
rc = cdev_add(cdev, dev->devt, 1);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = device_add(dev);
|
||||
if (rc)
|
||||
cdev_del(cdev);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdev_device_del() - inverse of cdev_device_add
|
||||
* @dev: the device structure
|
||||
* @cdev: the cdev structure
|
||||
*
|
||||
* cdev_device_del() is a helper function to call cdev_del and device_del.
|
||||
* It should be used whenever cdev_device_add is used.
|
||||
*
|
||||
* If dev->devt is not set it will not remove the cdev and will be equivalent
|
||||
* to device_del.
|
||||
*
|
||||
* NOTE: This guarantees that associated sysfs callbacks are not running
|
||||
* or runnable, however any cdevs already open will remain and their fops
|
||||
* will still be callable even after this function returns.
|
||||
*/
|
||||
void cdev_device_del(struct cdev *cdev, struct device *dev)
|
||||
{
|
||||
device_del(dev);
|
||||
if (dev->devt)
|
||||
cdev_del(cdev);
|
||||
}
|
||||
|
||||
static void cdev_unmap(dev_t dev, unsigned count)
|
||||
{
|
||||
kobj_unmap(cdev_map, dev, count);
|
||||
@ -482,6 +561,10 @@ static void cdev_unmap(dev_t dev, unsigned count)
|
||||
*
|
||||
* cdev_del() removes @p from the system, possibly freeing the structure
|
||||
* itself.
|
||||
*
|
||||
* NOTE: This guarantees that cdev device will no longer be able to be
|
||||
* opened, however any cdevs already open will remain and their fops will
|
||||
* still be callable even after cdev_del returns.
|
||||
*/
|
||||
void cdev_del(struct cdev *p)
|
||||
{
|
||||
@ -570,5 +653,8 @@ EXPORT_SYMBOL(cdev_init);
|
||||
EXPORT_SYMBOL(cdev_alloc);
|
||||
EXPORT_SYMBOL(cdev_del);
|
||||
EXPORT_SYMBOL(cdev_add);
|
||||
EXPORT_SYMBOL(cdev_set_parent);
|
||||
EXPORT_SYMBOL(cdev_device_add);
|
||||
EXPORT_SYMBOL(cdev_device_del);
|
||||
EXPORT_SYMBOL(__register_chrdev);
|
||||
EXPORT_SYMBOL(__unregister_chrdev);
|
||||
|
291
fs/dax.c
291
fs/dax.c
@ -55,32 +55,6 @@ static int __init init_dax_wait_table(void)
|
||||
}
|
||||
fs_initcall(init_dax_wait_table);
|
||||
|
||||
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
|
||||
{
|
||||
struct request_queue *q = bdev->bd_queue;
|
||||
long rc = -EIO;
|
||||
|
||||
dax->addr = ERR_PTR(-EIO);
|
||||
if (blk_queue_enter(q, true) != 0)
|
||||
return rc;
|
||||
|
||||
rc = bdev_direct_access(bdev, dax);
|
||||
if (rc < 0) {
|
||||
dax->addr = ERR_PTR(rc);
|
||||
blk_queue_exit(q);
|
||||
return rc;
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void dax_unmap_atomic(struct block_device *bdev,
|
||||
const struct blk_dax_ctl *dax)
|
||||
{
|
||||
if (IS_ERR(dax->addr))
|
||||
return;
|
||||
blk_queue_exit(bdev->bd_queue);
|
||||
}
|
||||
|
||||
static int dax_is_pmd_entry(void *entry)
|
||||
{
|
||||
return (unsigned long)entry & RADIX_DAX_PMD;
|
||||
@ -101,26 +75,6 @@ static int dax_is_empty_entry(void *entry)
|
||||
return (unsigned long)entry & RADIX_DAX_EMPTY;
|
||||
}
|
||||
|
||||
struct page *read_dax_sector(struct block_device *bdev, sector_t n)
|
||||
{
|
||||
struct page *page = alloc_pages(GFP_KERNEL, 0);
|
||||
struct blk_dax_ctl dax = {
|
||||
.size = PAGE_SIZE,
|
||||
.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
|
||||
};
|
||||
long rc;
|
||||
|
||||
if (!page)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
rc = dax_map_atomic(bdev, &dax);
|
||||
if (rc < 0)
|
||||
return ERR_PTR(rc);
|
||||
memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
|
||||
dax_unmap_atomic(bdev, &dax);
|
||||
return page;
|
||||
}
|
||||
|
||||
/*
|
||||
* DAX radix tree locking
|
||||
*/
|
||||
@ -573,21 +527,30 @@ static int dax_load_hole(struct address_space *mapping, void **entry,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
|
||||
struct page *to, unsigned long vaddr)
|
||||
static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
|
||||
sector_t sector, size_t size, struct page *to,
|
||||
unsigned long vaddr)
|
||||
{
|
||||
struct blk_dax_ctl dax = {
|
||||
.sector = sector,
|
||||
.size = size,
|
||||
};
|
||||
void *vto;
|
||||
void *vto, *kaddr;
|
||||
pgoff_t pgoff;
|
||||
pfn_t pfn;
|
||||
long rc;
|
||||
int id;
|
||||
|
||||
if (dax_map_atomic(bdev, &dax) < 0)
|
||||
return PTR_ERR(dax.addr);
|
||||
rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
id = dax_read_lock();
|
||||
rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
|
||||
if (rc < 0) {
|
||||
dax_read_unlock(id);
|
||||
return rc;
|
||||
}
|
||||
vto = kmap_atomic(to);
|
||||
copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
|
||||
copy_user_page(vto, (void __force *)kaddr, vaddr, to);
|
||||
kunmap_atomic(vto);
|
||||
dax_unmap_atomic(bdev, &dax);
|
||||
dax_read_unlock(id);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -755,12 +718,16 @@ unlock_pte:
|
||||
}
|
||||
|
||||
static int dax_writeback_one(struct block_device *bdev,
|
||||
struct address_space *mapping, pgoff_t index, void *entry)
|
||||
struct dax_device *dax_dev, struct address_space *mapping,
|
||||
pgoff_t index, void *entry)
|
||||
{
|
||||
struct radix_tree_root *page_tree = &mapping->page_tree;
|
||||
struct blk_dax_ctl dax;
|
||||
void *entry2, **slot;
|
||||
int ret = 0;
|
||||
void *entry2, **slot, *kaddr;
|
||||
long ret = 0, id;
|
||||
sector_t sector;
|
||||
pgoff_t pgoff;
|
||||
size_t size;
|
||||
pfn_t pfn;
|
||||
|
||||
/*
|
||||
* A page got tagged dirty in DAX mapping? Something is seriously
|
||||
@ -809,26 +776,29 @@ static int dax_writeback_one(struct block_device *bdev,
|
||||
* 'entry'. This allows us to flush for PMD_SIZE and not have to
|
||||
* worry about partial PMD writebacks.
|
||||
*/
|
||||
dax.sector = dax_radix_sector(entry);
|
||||
dax.size = PAGE_SIZE << dax_radix_order(entry);
|
||||
sector = dax_radix_sector(entry);
|
||||
size = PAGE_SIZE << dax_radix_order(entry);
|
||||
|
||||
id = dax_read_lock();
|
||||
ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
|
||||
if (ret)
|
||||
goto dax_unlock;
|
||||
|
||||
/*
|
||||
* We cannot hold tree_lock while calling dax_map_atomic() because it
|
||||
* eventually calls cond_resched().
|
||||
* dax_direct_access() may sleep, so cannot hold tree_lock over
|
||||
* its invocation.
|
||||
*/
|
||||
ret = dax_map_atomic(bdev, &dax);
|
||||
if (ret < 0) {
|
||||
put_locked_mapping_entry(mapping, index, entry);
|
||||
return ret;
|
||||
}
|
||||
ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn);
|
||||
if (ret < 0)
|
||||
goto dax_unlock;
|
||||
|
||||
if (WARN_ON_ONCE(ret < dax.size)) {
|
||||
if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) {
|
||||
ret = -EIO;
|
||||
goto unmap;
|
||||
goto dax_unlock;
|
||||
}
|
||||
|
||||
dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(dax.pfn));
|
||||
wb_cache_pmem(dax.addr, dax.size);
|
||||
dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
|
||||
wb_cache_pmem(kaddr, size);
|
||||
/*
|
||||
* After we have flushed the cache, we can clear the dirty tag. There
|
||||
* cannot be new dirty data in the pfn after the flush has completed as
|
||||
@ -838,8 +808,8 @@ static int dax_writeback_one(struct block_device *bdev,
|
||||
spin_lock_irq(&mapping->tree_lock);
|
||||
radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
|
||||
spin_unlock_irq(&mapping->tree_lock);
|
||||
unmap:
|
||||
dax_unmap_atomic(bdev, &dax);
|
||||
dax_unlock:
|
||||
dax_read_unlock(id);
|
||||
put_locked_mapping_entry(mapping, index, entry);
|
||||
return ret;
|
||||
|
||||
@ -860,6 +830,7 @@ int dax_writeback_mapping_range(struct address_space *mapping,
|
||||
struct inode *inode = mapping->host;
|
||||
pgoff_t start_index, end_index;
|
||||
pgoff_t indices[PAGEVEC_SIZE];
|
||||
struct dax_device *dax_dev;
|
||||
struct pagevec pvec;
|
||||
bool done = false;
|
||||
int i, ret = 0;
|
||||
@ -870,6 +841,10 @@ int dax_writeback_mapping_range(struct address_space *mapping,
|
||||
if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
|
||||
return 0;
|
||||
|
||||
dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
|
||||
if (!dax_dev)
|
||||
return -EIO;
|
||||
|
||||
start_index = wbc->range_start >> PAGE_SHIFT;
|
||||
end_index = wbc->range_end >> PAGE_SHIFT;
|
||||
|
||||
@ -890,38 +865,49 @@ int dax_writeback_mapping_range(struct address_space *mapping,
|
||||
break;
|
||||
}
|
||||
|
||||
ret = dax_writeback_one(bdev, mapping, indices[i],
|
||||
pvec.pages[i]);
|
||||
if (ret < 0)
|
||||
ret = dax_writeback_one(bdev, dax_dev, mapping,
|
||||
indices[i], pvec.pages[i]);
|
||||
if (ret < 0) {
|
||||
put_dax(dax_dev);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
put_dax(dax_dev);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
|
||||
|
||||
static int dax_insert_mapping(struct address_space *mapping,
|
||||
struct block_device *bdev, sector_t sector, size_t size,
|
||||
void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
|
||||
struct block_device *bdev, struct dax_device *dax_dev,
|
||||
sector_t sector, size_t size, void **entryp,
|
||||
struct vm_area_struct *vma, struct vm_fault *vmf)
|
||||
{
|
||||
unsigned long vaddr = vmf->address;
|
||||
struct blk_dax_ctl dax = {
|
||||
.sector = sector,
|
||||
.size = size,
|
||||
};
|
||||
void *ret;
|
||||
void *entry = *entryp;
|
||||
void *ret, *kaddr;
|
||||
pgoff_t pgoff;
|
||||
int id, rc;
|
||||
pfn_t pfn;
|
||||
|
||||
if (dax_map_atomic(bdev, &dax) < 0)
|
||||
return PTR_ERR(dax.addr);
|
||||
dax_unmap_atomic(bdev, &dax);
|
||||
rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector, 0);
|
||||
id = dax_read_lock();
|
||||
rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
|
||||
if (rc < 0) {
|
||||
dax_read_unlock(id);
|
||||
return rc;
|
||||
}
|
||||
dax_read_unlock(id);
|
||||
|
||||
ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0);
|
||||
if (IS_ERR(ret))
|
||||
return PTR_ERR(ret);
|
||||
*entryp = ret;
|
||||
|
||||
return vm_insert_mixed(vma, vaddr, dax.pfn);
|
||||
return vm_insert_mixed(vma, vaddr, pfn);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -970,24 +956,34 @@ static bool dax_range_is_aligned(struct block_device *bdev,
|
||||
return true;
|
||||
}
|
||||
|
||||
int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
|
||||
unsigned int offset, unsigned int length)
|
||||
int __dax_zero_page_range(struct block_device *bdev,
|
||||
struct dax_device *dax_dev, sector_t sector,
|
||||
unsigned int offset, unsigned int size)
|
||||
{
|
||||
struct blk_dax_ctl dax = {
|
||||
.sector = sector,
|
||||
.size = PAGE_SIZE,
|
||||
};
|
||||
|
||||
if (dax_range_is_aligned(bdev, offset, length)) {
|
||||
sector_t start_sector = dax.sector + (offset >> 9);
|
||||
if (dax_range_is_aligned(bdev, offset, size)) {
|
||||
sector_t start_sector = sector + (offset >> 9);
|
||||
|
||||
return blkdev_issue_zeroout(bdev, start_sector,
|
||||
length >> 9, GFP_NOFS, true);
|
||||
size >> 9, GFP_NOFS, true);
|
||||
} else {
|
||||
if (dax_map_atomic(bdev, &dax) < 0)
|
||||
return PTR_ERR(dax.addr);
|
||||
clear_pmem(dax.addr + offset, length);
|
||||
dax_unmap_atomic(bdev, &dax);
|
||||
pgoff_t pgoff;
|
||||
long rc, id;
|
||||
void *kaddr;
|
||||
pfn_t pfn;
|
||||
|
||||
rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
id = dax_read_lock();
|
||||
rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr,
|
||||
&pfn);
|
||||
if (rc < 0) {
|
||||
dax_read_unlock(id);
|
||||
return rc;
|
||||
}
|
||||
clear_pmem(kaddr + offset, size);
|
||||
dax_read_unlock(id);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@ -1002,9 +998,12 @@ static loff_t
|
||||
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
|
||||
struct iomap *iomap)
|
||||
{
|
||||
struct block_device *bdev = iomap->bdev;
|
||||
struct dax_device *dax_dev = iomap->dax_dev;
|
||||
struct iov_iter *iter = data;
|
||||
loff_t end = pos + length, done = 0;
|
||||
ssize_t ret = 0;
|
||||
int id;
|
||||
|
||||
if (iov_iter_rw(iter) == READ) {
|
||||
end = min(end, i_size_read(inode));
|
||||
@ -1029,34 +1028,42 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
|
||||
(end - 1) >> PAGE_SHIFT);
|
||||
}
|
||||
|
||||
id = dax_read_lock();
|
||||
while (pos < end) {
|
||||
unsigned offset = pos & (PAGE_SIZE - 1);
|
||||
struct blk_dax_ctl dax = { 0 };
|
||||
const size_t size = ALIGN(length + offset, PAGE_SIZE);
|
||||
const sector_t sector = dax_iomap_sector(iomap, pos);
|
||||
ssize_t map_len;
|
||||
pgoff_t pgoff;
|
||||
void *kaddr;
|
||||
pfn_t pfn;
|
||||
|
||||
if (fatal_signal_pending(current)) {
|
||||
ret = -EINTR;
|
||||
break;
|
||||
}
|
||||
|
||||
dax.sector = dax_iomap_sector(iomap, pos);
|
||||
dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
|
||||
map_len = dax_map_atomic(iomap->bdev, &dax);
|
||||
ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
|
||||
&kaddr, &pfn);
|
||||
if (map_len < 0) {
|
||||
ret = map_len;
|
||||
break;
|
||||
}
|
||||
|
||||
dax.addr += offset;
|
||||
map_len = PFN_PHYS(map_len);
|
||||
kaddr += offset;
|
||||
map_len -= offset;
|
||||
if (map_len > end - pos)
|
||||
map_len = end - pos;
|
||||
|
||||
if (iov_iter_rw(iter) == WRITE)
|
||||
map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
|
||||
map_len = copy_from_iter_pmem(kaddr, map_len, iter);
|
||||
else
|
||||
map_len = copy_to_iter(dax.addr, map_len, iter);
|
||||
dax_unmap_atomic(iomap->bdev, &dax);
|
||||
map_len = copy_to_iter(kaddr, map_len, iter);
|
||||
if (map_len <= 0) {
|
||||
ret = map_len ? map_len : -EFAULT;
|
||||
break;
|
||||
@ -1066,6 +1073,7 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
|
||||
length -= map_len;
|
||||
done += map_len;
|
||||
}
|
||||
dax_read_unlock(id);
|
||||
|
||||
return done ? done : ret;
|
||||
}
|
||||
@ -1172,8 +1180,8 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
|
||||
clear_user_highpage(vmf->cow_page, vaddr);
|
||||
break;
|
||||
case IOMAP_MAPPED:
|
||||
error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
|
||||
vmf->cow_page, vaddr);
|
||||
error = copy_user_dax(iomap.bdev, iomap.dax_dev,
|
||||
sector, PAGE_SIZE, vmf->cow_page, vaddr);
|
||||
break;
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
@ -1198,8 +1206,8 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
|
||||
mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
|
||||
major = VM_FAULT_MAJOR;
|
||||
}
|
||||
error = dax_insert_mapping(mapping, iomap.bdev, sector,
|
||||
PAGE_SIZE, &entry, vmf->vma, vmf);
|
||||
error = dax_insert_mapping(mapping, iomap.bdev, iomap.dax_dev,
|
||||
sector, PAGE_SIZE, &entry, vmf->vma, vmf);
|
||||
/* -EBUSY is fine, somebody else faulted on the same PTE */
|
||||
if (error == -EBUSY)
|
||||
error = 0;
|
||||
@ -1249,41 +1257,48 @@ static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
|
||||
loff_t pos, void **entryp)
|
||||
{
|
||||
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
|
||||
const sector_t sector = dax_iomap_sector(iomap, pos);
|
||||
struct dax_device *dax_dev = iomap->dax_dev;
|
||||
struct block_device *bdev = iomap->bdev;
|
||||
struct inode *inode = mapping->host;
|
||||
struct blk_dax_ctl dax = {
|
||||
.sector = dax_iomap_sector(iomap, pos),
|
||||
.size = PMD_SIZE,
|
||||
};
|
||||
long length = dax_map_atomic(bdev, &dax);
|
||||
void *ret = NULL;
|
||||
const size_t size = PMD_SIZE;
|
||||
void *ret = NULL, *kaddr;
|
||||
long length = 0;
|
||||
pgoff_t pgoff;
|
||||
pfn_t pfn;
|
||||
int id;
|
||||
|
||||
if (length < 0) /* dax_map_atomic() failed */
|
||||
if (bdev_dax_pgoff(bdev, sector, size, &pgoff) != 0)
|
||||
goto fallback;
|
||||
if (length < PMD_SIZE)
|
||||
goto unmap_fallback;
|
||||
if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR)
|
||||
goto unmap_fallback;
|
||||
if (!pfn_t_devmap(dax.pfn))
|
||||
goto unmap_fallback;
|
||||
|
||||
dax_unmap_atomic(bdev, &dax);
|
||||
id = dax_read_lock();
|
||||
length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
|
||||
if (length < 0)
|
||||
goto unlock_fallback;
|
||||
length = PFN_PHYS(length);
|
||||
|
||||
ret = dax_insert_mapping_entry(mapping, vmf, *entryp, dax.sector,
|
||||
if (length < size)
|
||||
goto unlock_fallback;
|
||||
if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR)
|
||||
goto unlock_fallback;
|
||||
if (!pfn_t_devmap(pfn))
|
||||
goto unlock_fallback;
|
||||
dax_read_unlock(id);
|
||||
|
||||
ret = dax_insert_mapping_entry(mapping, vmf, *entryp, sector,
|
||||
RADIX_DAX_PMD);
|
||||
if (IS_ERR(ret))
|
||||
goto fallback;
|
||||
*entryp = ret;
|
||||
|
||||
trace_dax_pmd_insert_mapping(inode, vmf, length, dax.pfn, ret);
|
||||
trace_dax_pmd_insert_mapping(inode, vmf, length, pfn, ret);
|
||||
return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
|
||||
dax.pfn, vmf->flags & FAULT_FLAG_WRITE);
|
||||
pfn, vmf->flags & FAULT_FLAG_WRITE);
|
||||
|
||||
unmap_fallback:
|
||||
dax_unmap_atomic(bdev, &dax);
|
||||
unlock_fallback:
|
||||
dax_read_unlock(id);
|
||||
fallback:
|
||||
trace_dax_pmd_insert_mapping_fallback(inode, vmf, length,
|
||||
dax.pfn, ret);
|
||||
trace_dax_pmd_insert_mapping_fallback(inode, vmf, length, pfn, ret);
|
||||
return VM_FAULT_FALLBACK;
|
||||
}
|
||||
|
||||
|
@ -799,6 +799,7 @@ int ext2_get_block(struct inode *inode, sector_t iblock,
|
||||
static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
|
||||
unsigned flags, struct iomap *iomap)
|
||||
{
|
||||
struct block_device *bdev;
|
||||
unsigned int blkbits = inode->i_blkbits;
|
||||
unsigned long first_block = offset >> blkbits;
|
||||
unsigned long max_blocks = (length + (1 << blkbits) - 1) >> blkbits;
|
||||
@ -812,8 +813,13 @@ static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
|
||||
return ret;
|
||||
|
||||
iomap->flags = 0;
|
||||
iomap->bdev = inode->i_sb->s_bdev;
|
||||
bdev = inode->i_sb->s_bdev;
|
||||
iomap->bdev = bdev;
|
||||
iomap->offset = (u64)first_block << blkbits;
|
||||
if (blk_queue_dax(bdev->bd_queue))
|
||||
iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
|
||||
else
|
||||
iomap->dax_dev = NULL;
|
||||
|
||||
if (ret == 0) {
|
||||
iomap->type = IOMAP_HOLE;
|
||||
@ -835,6 +841,7 @@ static int
|
||||
ext2_iomap_end(struct inode *inode, loff_t offset, loff_t length,
|
||||
ssize_t written, unsigned flags, struct iomap *iomap)
|
||||
{
|
||||
put_dax(iomap->dax_dev);
|
||||
if (iomap->type == IOMAP_MAPPED &&
|
||||
written < length &&
|
||||
(flags & IOMAP_WRITE))
|
||||
|
@ -3305,6 +3305,7 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
|
||||
static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
|
||||
unsigned flags, struct iomap *iomap)
|
||||
{
|
||||
struct block_device *bdev;
|
||||
unsigned int blkbits = inode->i_blkbits;
|
||||
unsigned long first_block = offset >> blkbits;
|
||||
unsigned long last_block = (offset + length - 1) >> blkbits;
|
||||
@ -3373,7 +3374,12 @@ retry:
|
||||
}
|
||||
|
||||
iomap->flags = 0;
|
||||
iomap->bdev = inode->i_sb->s_bdev;
|
||||
bdev = inode->i_sb->s_bdev;
|
||||
iomap->bdev = bdev;
|
||||
if (blk_queue_dax(bdev->bd_queue))
|
||||
iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
|
||||
else
|
||||
iomap->dax_dev = NULL;
|
||||
iomap->offset = first_block << blkbits;
|
||||
|
||||
if (ret == 0) {
|
||||
@ -3406,6 +3412,7 @@ static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
|
||||
int blkbits = inode->i_blkbits;
|
||||
bool truncate = false;
|
||||
|
||||
put_dax(iomap->dax_dev);
|
||||
if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT))
|
||||
return 0;
|
||||
|
||||
|
@ -360,7 +360,8 @@ static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
|
||||
sector_t sector = iomap->blkno +
|
||||
(((pos & ~(PAGE_SIZE - 1)) - iomap->offset) >> 9);
|
||||
|
||||
return __dax_zero_page_range(iomap->bdev, sector, offset, bytes);
|
||||
return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, sector,
|
||||
offset, bytes);
|
||||
}
|
||||
|
||||
static loff_t
|
||||
|
@ -976,6 +976,7 @@ xfs_file_iomap_begin(
|
||||
int nimaps = 1, error = 0;
|
||||
bool shared = false, trimmed = false;
|
||||
unsigned lockmode;
|
||||
struct block_device *bdev;
|
||||
|
||||
if (XFS_FORCED_SHUTDOWN(mp))
|
||||
return -EIO;
|
||||
@ -1063,6 +1064,14 @@ xfs_file_iomap_begin(
|
||||
}
|
||||
|
||||
xfs_bmbt_to_iomap(ip, iomap, &imap);
|
||||
|
||||
/* optionally associate a dax device with the iomap bdev */
|
||||
bdev = iomap->bdev;
|
||||
if (blk_queue_dax(bdev->bd_queue))
|
||||
iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
|
||||
else
|
||||
iomap->dax_dev = NULL;
|
||||
|
||||
if (shared)
|
||||
iomap->flags |= IOMAP_F_SHARED;
|
||||
return 0;
|
||||
@ -1140,6 +1149,7 @@ xfs_file_iomap_end(
|
||||
unsigned flags,
|
||||
struct iomap *iomap)
|
||||
{
|
||||
put_dax(iomap->dax_dev);
|
||||
if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
|
||||
return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
|
||||
length, written, iomap);
|
||||
|
@ -1916,28 +1916,12 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
|
||||
|
||||
#endif /* CONFIG_BLK_DEV_INTEGRITY */
|
||||
|
||||
/**
|
||||
* struct blk_dax_ctl - control and output parameters for ->direct_access
|
||||
* @sector: (input) offset relative to a block_device
|
||||
* @addr: (output) kernel virtual address for @sector populated by driver
|
||||
* @pfn: (output) page frame number for @addr populated by driver
|
||||
* @size: (input) number of bytes requested
|
||||
*/
|
||||
struct blk_dax_ctl {
|
||||
sector_t sector;
|
||||
void *addr;
|
||||
long size;
|
||||
pfn_t pfn;
|
||||
};
|
||||
|
||||
struct block_device_operations {
|
||||
int (*open) (struct block_device *, fmode_t);
|
||||
void (*release) (struct gendisk *, fmode_t);
|
||||
int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
|
||||
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
|
||||
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
|
||||
long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
|
||||
long);
|
||||
unsigned int (*check_events) (struct gendisk *disk,
|
||||
unsigned int clearing);
|
||||
/* ->media_changed() is DEPRECATED, use ->check_events() instead */
|
||||
@ -1956,9 +1940,8 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
|
||||
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
|
||||
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
|
||||
struct writeback_control *);
|
||||
extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *);
|
||||
extern int bdev_dax_supported(struct super_block *, int);
|
||||
extern bool bdev_dax_capable(struct block_device *);
|
||||
int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
|
||||
#else /* CONFIG_BLOCK */
|
||||
|
||||
struct block_device;
|
||||
|
@ -4,6 +4,7 @@
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/kdev_t.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/device.h>
|
||||
|
||||
struct file_operations;
|
||||
struct inode;
|
||||
@ -26,6 +27,10 @@ void cdev_put(struct cdev *p);
|
||||
|
||||
int cdev_add(struct cdev *, dev_t, unsigned);
|
||||
|
||||
void cdev_set_parent(struct cdev *p, struct kobject *kobj);
|
||||
int cdev_device_add(struct cdev *cdev, struct device *dev);
|
||||
void cdev_device_del(struct cdev *cdev, struct device *dev);
|
||||
|
||||
void cdev_del(struct cdev *);
|
||||
|
||||
void cd_forget(struct inode *);
|
||||
|
@ -7,6 +7,28 @@
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
struct iomap_ops;
|
||||
struct dax_device;
|
||||
struct dax_operations {
|
||||
/*
|
||||
* direct_access: translate a device-relative
|
||||
* logical-page-offset into an absolute physical pfn. Return the
|
||||
* number of pages available for DAX at that pfn.
|
||||
*/
|
||||
long (*direct_access)(struct dax_device *, pgoff_t, long,
|
||||
void **, pfn_t *);
|
||||
};
|
||||
|
||||
int dax_read_lock(void);
|
||||
void dax_read_unlock(int id);
|
||||
struct dax_device *dax_get_by_host(const char *host);
|
||||
struct dax_device *alloc_dax(void *private, const char *host,
|
||||
const struct dax_operations *ops);
|
||||
void put_dax(struct dax_device *dax_dev);
|
||||
bool dax_alive(struct dax_device *dax_dev);
|
||||
void kill_dax(struct dax_device *dax_dev);
|
||||
void *dax_get_private(struct dax_device *dax_dev);
|
||||
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
|
||||
void **kaddr, pfn_t *pfn);
|
||||
|
||||
/*
|
||||
* We use lowest available bit in exceptional entry for locking, one bit for
|
||||
@ -48,17 +70,13 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping,
|
||||
pgoff_t index, void *entry, bool wake_all);
|
||||
|
||||
#ifdef CONFIG_FS_DAX
|
||||
struct page *read_dax_sector(struct block_device *bdev, sector_t n);
|
||||
int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
|
||||
int __dax_zero_page_range(struct block_device *bdev,
|
||||
struct dax_device *dax_dev, sector_t sector,
|
||||
unsigned int offset, unsigned int length);
|
||||
#else
|
||||
static inline struct page *read_dax_sector(struct block_device *bdev,
|
||||
sector_t n)
|
||||
{
|
||||
return ERR_PTR(-ENXIO);
|
||||
}
|
||||
static inline int __dax_zero_page_range(struct block_device *bdev,
|
||||
sector_t sector, unsigned int offset, unsigned int length)
|
||||
struct dax_device *dax_dev, sector_t sector,
|
||||
unsigned int offset, unsigned int length)
|
||||
{
|
||||
return -ENXIO;
|
||||
}
|
||||
|
@ -128,13 +128,15 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
|
||||
* < 0 : error
|
||||
* >= 0 : the number of bytes accessible at the address
|
||||
*/
|
||||
typedef long (*dm_direct_access_fn) (struct dm_target *ti, sector_t sector,
|
||||
void **kaddr, pfn_t *pfn, long size);
|
||||
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
|
||||
long nr_pages, void **kaddr, pfn_t *pfn);
|
||||
#define PAGE_SECTORS (PAGE_SIZE / 512)
|
||||
|
||||
void dm_error(const char *message);
|
||||
|
||||
struct dm_dev {
|
||||
struct block_device *bdev;
|
||||
struct dax_device *dax_dev;
|
||||
fmode_t mode;
|
||||
char name[16];
|
||||
};
|
||||
@ -176,7 +178,7 @@ struct target_type {
|
||||
dm_busy_fn busy;
|
||||
dm_iterate_devices_fn iterate_devices;
|
||||
dm_io_hints_fn io_hints;
|
||||
dm_direct_access_fn direct_access;
|
||||
dm_dax_direct_access_fn direct_access;
|
||||
|
||||
/* For internal device-mapper use. */
|
||||
struct list_head list;
|
||||
|
@ -70,6 +70,7 @@ enum fpga_mgr_states {
|
||||
*/
|
||||
#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
|
||||
#define FPGA_MGR_EXTERNAL_CONFIG BIT(1)
|
||||
#define FPGA_MGR_ENCRYPTED_BITSTREAM BIT(2)
|
||||
|
||||
/**
|
||||
* struct fpga_image_info - information specific to a FPGA image
|
||||
|
@ -491,6 +491,12 @@ struct vmbus_channel_rescind_offer {
|
||||
u32 child_relid;
|
||||
} __packed;
|
||||
|
||||
static inline u32
|
||||
hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
|
||||
{
|
||||
return rbi->ring_buffer->pending_send_sz;
|
||||
}
|
||||
|
||||
/*
|
||||
* Request Offer -- no parameters, SynIC message contains the partition ID
|
||||
* Set Snoop -- no parameters, SynIC message contains the partition ID
|
||||
@ -524,10 +530,10 @@ struct vmbus_channel_open_channel {
|
||||
u32 target_vp;
|
||||
|
||||
/*
|
||||
* The upstream ring buffer begins at offset zero in the memory
|
||||
* described by RingBufferGpadlHandle. The downstream ring buffer
|
||||
* follows it at this offset (in pages).
|
||||
*/
|
||||
* The upstream ring buffer begins at offset zero in the memory
|
||||
* described by RingBufferGpadlHandle. The downstream ring buffer
|
||||
* follows it at this offset (in pages).
|
||||
*/
|
||||
u32 downstream_ringbuffer_pageoffset;
|
||||
|
||||
/* User-specific data to be passed along to the server endpoint. */
|
||||
@ -1013,7 +1019,7 @@ extern int vmbus_open(struct vmbus_channel *channel,
|
||||
u32 recv_ringbuffersize,
|
||||
void *userdata,
|
||||
u32 userdatalen,
|
||||
void(*onchannel_callback)(void *context),
|
||||
void (*onchannel_callback)(void *context),
|
||||
void *context);
|
||||
|
||||
extern void vmbus_close(struct vmbus_channel *channel);
|
||||
@ -1155,6 +1161,17 @@ static inline void *hv_get_drvdata(struct hv_device *dev)
|
||||
return dev_get_drvdata(&dev->device);
|
||||
}
|
||||
|
||||
struct hv_ring_buffer_debug_info {
|
||||
u32 current_interrupt_mask;
|
||||
u32 current_read_index;
|
||||
u32 current_write_index;
|
||||
u32 bytes_avail_toread;
|
||||
u32 bytes_avail_towrite;
|
||||
};
|
||||
|
||||
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
|
||||
struct hv_ring_buffer_debug_info *debug_info);
|
||||
|
||||
/* Vmbus interface */
|
||||
#define vmbus_driver_register(driver) \
|
||||
__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
|
||||
@ -1428,7 +1445,7 @@ struct hyperv_service_callback {
|
||||
char *log_msg;
|
||||
uuid_le data;
|
||||
struct vmbus_channel *channel;
|
||||
void (*callback) (void *context);
|
||||
void (*callback)(void *context);
|
||||
};
|
||||
|
||||
#define MAX_SRV_VER 0x7ffffff
|
||||
@ -1504,8 +1521,6 @@ static inline void hv_signal_on_read(struct vmbus_channel *channel)
|
||||
cached_write_sz = hv_get_cached_bytes_to_write(rbi);
|
||||
if (cached_write_sz < pending_sz)
|
||||
vmbus_setevent(channel);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static inline void
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user