linux-next/drivers/firewire/core.h
Linus Torvalds 2f78d8e249 IEEE 1394 (FireWire) subsystem updates post v3.4:

Merge tag 'firewire-updates' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394

Pull IEEE 1394 (FireWire) subsystem updates from Stefan Richter:

 - Fix mismatch between DMA mapping direction (was wrong) and DMA
   synchronization direction (was correct) of isochronous reception
   buffers of userspace drivers if vma-mapped for R/W access.  For
   example, libdc1394 was affected.

 - more consistent retry strategy in device discovery/rediscovery, and
   improved failure diagnostics

 - various small cleanups, e.g. use SCSI layer's DMA mapping API in
   firewire-sbp2

* tag 'firewire-updates' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394:
  firewire: sbp2: document the absence of alignment requirements
  firewire: sbp2: remove superfluous blk_queue_max_segment_size() call
  firewire: sbp2: use scsi_dma_(un)map
  firewire: sbp2: give correct DMA device to scsi framework
  firewire: core: fw_device_refresh(): clean up error handling
  firewire: core: log config rom reading errors
  firewire: core: log error in case of failed bus manager lock
  firewire: move rcode_string() to core
  firewire: core: improve reread_config_rom() interface
  firewire: core: wait for inaccessible devices after bus reset
  firewire: ohci: omit spinlock IRQ flags where possible
  firewire: ohci: correct signedness of a local variable
  firewire: core: fix DMA mapping direction
  firewire: use module_pci_driver
2012-05-24 12:57:47 -07:00
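
For context on the first item above: a buffer that both the device and the CPU may write (here because userspace vma-maps it for R/W access) must be DMA-mapped with DMA_BIDIRECTIONAL, and the mapping direction must agree with the direction passed to the dma_sync_* calls. The sketch below only illustrates that rule with the generic DMA API; it is not the actual patch from this pull, and the helper names are made up.

    #include <linux/dma-mapping.h>
    #include <linux/mm.h>

    /* Map one page of an isochronous reception buffer that userspace has
     * vma-mapped for R/W access, so both the device and the CPU may write it. */
    static dma_addr_t map_iso_rx_page(struct device *dev, struct page *page)
    {
            /* Wrong (the mismatch described above): DMA_FROM_DEVICE does not
             * cover CPU writes and does not match the DMA_BIDIRECTIONAL
             * direction used by the sync calls:
             *
             *     return dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
             */

            /* Correct: the mapping direction matches the later syncs. */
            return dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
    }

    static void sync_iso_rx_page_for_cpu(struct device *dev, dma_addr_t bus_addr)
    {
            /* The synchronization direction was already correct. */
            dma_sync_single_for_cpu(dev, bus_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
    }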


#ifndef _FIREWIRE_CORE_H
#define _FIREWIRE_CORE_H
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/mm_types.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/atomic.h>
struct device;
struct fw_card;
struct fw_device;
struct fw_iso_buffer;
struct fw_iso_context;
struct fw_iso_packet;
struct fw_node;
struct fw_packet;
/* -card */
extern __printf(2, 3)
void fw_err(const struct fw_card *card, const char *fmt, ...);
extern __printf(2, 3)
void fw_notice(const struct fw_card *card, const char *fmt, ...);
/* bitfields within the PHY registers */
#define PHY_LINK_ACTIVE 0x80
#define PHY_CONTENDER 0x40
#define PHY_BUS_RESET 0x40
#define PHY_EXTENDED_REGISTERS 0xe0
#define PHY_BUS_SHORT_RESET 0x40
#define PHY_INT_STATUS_BITS 0x3c
#define PHY_ENABLE_ACCEL 0x02
#define PHY_ENABLE_MULTI 0x01
#define PHY_PAGE_SELECT 0xe0
#define BANDWIDTH_AVAILABLE_INITIAL 4915
#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
#define BROADCAST_CHANNEL_VALID (1 << 30)
#define CSR_STATE_BIT_CMSTR (1 << 8)
#define CSR_STATE_BIT_ABDICATE (1 << 10)
struct fw_card_driver {
        /*
         * Enable the given card with the given initial config rom.
         * This function is expected to activate the card, and either
         * enable the PHY or set the link_on bit and initiate a bus
         * reset.
         */
        int (*enable)(struct fw_card *card,
                      const __be32 *config_rom, size_t length);

        int (*read_phy_reg)(struct fw_card *card, int address);
        int (*update_phy_reg)(struct fw_card *card, int address,
                              int clear_bits, int set_bits);

        /*
         * Update the config rom for an enabled card. This function
         * should change the config rom that is presented on the bus
         * and initiate a bus reset.
         */
        int (*set_config_rom)(struct fw_card *card,
                              const __be32 *config_rom, size_t length);

        void (*send_request)(struct fw_card *card, struct fw_packet *packet);
        void (*send_response)(struct fw_card *card, struct fw_packet *packet);
        /* Calling cancel is valid once a packet has been submitted. */
        int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);
        /*
         * Allow the specified node ID to do direct DMA out and in of
         * host memory. The card will disable this for all nodes when
         * a bus reset happens, so the driver needs to reenable it
         * after a bus reset. Returns 0 on success, -ENODEV if the
         * card doesn't support this, -ESTALE if the generation doesn't
         * match.
         */
        int (*enable_phys_dma)(struct fw_card *card,
                               int node_id, int generation);

        u32 (*read_csr)(struct fw_card *card, int csr_offset);
        void (*write_csr)(struct fw_card *card, int csr_offset, u32 value);

        struct fw_iso_context *
        (*allocate_iso_context)(struct fw_card *card,
                                int type, int channel, size_t header_size);
        void (*free_iso_context)(struct fw_iso_context *ctx);

        int (*start_iso)(struct fw_iso_context *ctx,
                         s32 cycle, u32 sync, u32 tags);
        int (*set_iso_channels)(struct fw_iso_context *ctx, u64 *channels);
        int (*queue_iso)(struct fw_iso_context *ctx,
                         struct fw_iso_packet *packet,
                         struct fw_iso_buffer *buffer,
                         unsigned long payload);
        void (*flush_queue_iso)(struct fw_iso_context *ctx);
        int (*flush_iso_completions)(struct fw_iso_context *ctx);
        int (*stop_iso)(struct fw_iso_context *ctx);
};
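/*
 * Illustrative only: a low-level controller driver such as firewire-ohci
 * fills in one fw_card_driver instance and hands it to fw_card_initialize()
 * and fw_card_add().  A hypothetical skeleton, with made-up my_* callbacks,
 * might look like:
 *
 *      static const struct fw_card_driver my_card_driver = {
 *              .enable                 = my_enable,
 *              .read_phy_reg           = my_read_phy_reg,
 *              .update_phy_reg         = my_update_phy_reg,
 *              .set_config_rom         = my_set_config_rom,
 *              .send_request           = my_send_request,
 *              .send_response          = my_send_response,
 *              .cancel_packet          = my_cancel_packet,
 *              .enable_phys_dma        = my_enable_phys_dma,
 *              .read_csr               = my_read_csr,
 *              .write_csr              = my_write_csr,
 *              .allocate_iso_context   = my_allocate_iso_context,
 *              .free_iso_context       = my_free_iso_context,
 *              .start_iso              = my_start_iso,
 *              .set_iso_channels       = my_set_iso_channels,
 *              .queue_iso              = my_queue_iso,
 *              .flush_queue_iso        = my_flush_queue_iso,
 *              .flush_iso_completions  = my_flush_iso_completions,
 *              .stop_iso               = my_stop_iso,
 *      };
 *
 *      fw_card_initialize(&my_card, &my_card_driver, parent_device);
 *      err = fw_card_add(&my_card, max_receive, link_speed, guid);
 */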
void fw_card_initialize(struct fw_card *card,
                        const struct fw_card_driver *driver, struct device *device);
int fw_card_add(struct fw_card *card,
                u32 max_receive, u32 link_speed, u64 guid);
void fw_core_remove_card(struct fw_card *card);
int fw_compute_block_crc(__be32 *block);
void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset);
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
/* -cdev */
extern const struct file_operations fw_device_ops;
void fw_device_cdev_update(struct fw_device *device);
void fw_device_cdev_remove(struct fw_device *device);
void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p);
/* -device */
extern struct rw_semaphore fw_device_rwsem;
extern struct idr fw_device_idr;
extern int fw_cdev_major;
static inline struct fw_device *fw_device_get(struct fw_device *device)
{
        get_device(&device->device);
        return device;
}

static inline void fw_device_put(struct fw_device *device)
{
        put_device(&device->device);
}
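/*
 * Typical usage: a reference obtained with fw_device_get() or
 * fw_device_get_by_devt() must eventually be dropped with fw_device_put(),
 * for example:
 *
 *      struct fw_device *device = fw_device_get_by_devt(devt);
 *
 *      if (device) {
 *              ... use device ...
 *              fw_device_put(device);
 *      }
 */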
struct fw_device *fw_device_get_by_devt(dev_t devt);
int fw_device_set_broadcast_channel(struct device *dev, void *gen);
void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
/* -iso */
int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count);
int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
                          enum dma_data_direction direction);
int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
                          struct vm_area_struct *vma);
/* -topology */
enum {
        FW_NODE_CREATED,
        FW_NODE_UPDATED,
        FW_NODE_DESTROYED,
        FW_NODE_LINK_ON,
        FW_NODE_LINK_OFF,
        FW_NODE_INITIATED_RESET,
};

struct fw_node {
        u16 node_id;
        u8 color;
        u8 port_count;
        u8 link_on:1;
        u8 initiated_reset:1;
        u8 b_path:1;
        u8 phy_speed:2;  /* As in the self ID packet. */
        u8 max_speed:2;  /* Minimum of all phy-speeds on the path from the
                          * local node to this node. */
        u8 max_depth:4;  /* Maximum depth to any leaf node */
        u8 max_hops:4;   /* Max hops in this sub tree */
        atomic_t ref_count;

        /* For serializing node topology into a list. */
        struct list_head link;

        /* Upper layer specific data. */
        void *data;

        struct fw_node *ports[0];
};
static inline struct fw_node *fw_node_get(struct fw_node *node)
{
        atomic_inc(&node->ref_count);
        return node;
}

static inline void fw_node_put(struct fw_node *node)
{
        if (atomic_dec_and_test(&node->ref_count))
                kfree(node);
}

void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
                              int generation, int self_id_count, u32 *self_ids,
                              bool bm_abdicate);
void fw_destroy_nodes(struct fw_card *card);
/*
 * Check whether new_generation is the immediate successor of old_generation.
 * Take counter roll-over at 255 (as per OHCI) into account.
 */
static inline bool is_next_generation(int new_generation, int old_generation)
{
        return (new_generation & 0xff) == ((old_generation + 1) & 0xff);
}
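/*
 * For example, with the 8-bit roll-over described above:
 * is_next_generation(1, 0) and is_next_generation(0, 255) are true, while
 * is_next_generation(255, 0) and is_next_generation(2, 0) are false.
 */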
/* -transaction */
#define TCODE_LINK_INTERNAL 0xe
#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
#define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0)
#define TCODE_IS_LINK_INTERNAL(tcode) ((tcode) == TCODE_LINK_INTERNAL)
#define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0)
#define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0)
#define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4)
#define TCODE_HAS_RESPONSE_DATA(tcode) (((tcode) & 12) != 0)
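/*
 * Examples, using the transaction code values from
 * <linux/firewire-constants.h> (TCODE_READ_BLOCK_REQUEST = 0x5,
 * TCODE_READ_QUADLET_RESPONSE = 0x6): TCODE_IS_READ_REQUEST(0x5) and
 * TCODE_IS_BLOCK_PACKET(0x5) are true, TCODE_IS_RESPONSE(0x6) is true,
 * and TCODE_HAS_REQUEST_DATA(0x5) is false because read requests carry
 * no payload.
 */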
#define LOCAL_BUS 0xffc0
void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
int fw_get_response_length(struct fw_request *request);
void fw_fill_response(struct fw_packet *response, u32 *request_header,
                      int rcode, void *payload, size_t length);
#define FW_PHY_CONFIG_NO_NODE_ID -1
#define FW_PHY_CONFIG_CURRENT_GAP_COUNT -1
void fw_send_phy_config(struct fw_card *card,
                        int node_id, int generation, int gap_count);
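/*
 * PHY packets are one quadlet followed by its bitwise inverse.  Per
 * IEEE 1394a, a PHY configuration packet with both the R (force root)
 * and T (gap count) bits cleared is interpreted as a ping of the node
 * named in its phy_ID field, which is what the check below tests for.
 */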
static inline bool is_ping_packet(u32 *data)
{
        return (data[0] & 0xc0ffffff) == 0 && ~data[0] == data[1];
}
#endif /* _FIREWIRE_CORE_H */