#ifndef _FIREWIRE_CORE_H
#define _FIREWIRE_CORE_H

#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/mm_types.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <linux/atomic.h>

struct device;
struct fw_card;
struct fw_device;
struct fw_iso_buffer;
struct fw_iso_context;
struct fw_iso_packet;
struct fw_node;
struct fw_packet;


/* -card */

extern __printf(2, 3)
void fw_err(const struct fw_card *card, const char *fmt, ...);
extern __printf(2, 3)
void fw_notice(const struct fw_card *card, const char *fmt, ...);

/* bitfields within the PHY registers */
#define PHY_LINK_ACTIVE         0x80
#define PHY_CONTENDER           0x40
#define PHY_BUS_RESET           0x40
#define PHY_EXTENDED_REGISTERS  0xe0
#define PHY_BUS_SHORT_RESET     0x40
#define PHY_INT_STATUS_BITS     0x3c
#define PHY_ENABLE_ACCEL        0x02
#define PHY_ENABLE_MULTI        0x01
#define PHY_PAGE_SELECT         0xe0

#define BANDWIDTH_AVAILABLE_INITIAL     4915
#define BROADCAST_CHANNEL_INITIAL       (1 << 31 | 31)
#define BROADCAST_CHANNEL_VALID         (1 << 30)

#define CSR_STATE_BIT_CMSTR     (1 << 8)
#define CSR_STATE_BIT_ABDICATE  (1 << 10)

struct fw_card_driver {
        /*
         * Enable the given card with the given initial config rom.
         * This function is expected to activate the card, and either
         * enable the PHY or set the link_on bit and initiate a bus
         * reset.
         */
        int (*enable)(struct fw_card *card,
                      const __be32 *config_rom, size_t length);

        int (*read_phy_reg)(struct fw_card *card, int address);
        int (*update_phy_reg)(struct fw_card *card, int address,
                              int clear_bits, int set_bits);

        /*
         * Update the config rom for an enabled card. This function
         * should change the config rom that is presented on the bus
         * and initiate a bus reset.
         */
        int (*set_config_rom)(struct fw_card *card,
                              const __be32 *config_rom, size_t length);

        void (*send_request)(struct fw_card *card, struct fw_packet *packet);
        void (*send_response)(struct fw_card *card, struct fw_packet *packet);
        /* Calling cancel is valid once a packet has been submitted. */
        int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);

        /*
         * Allow the specified node ID to do direct DMA out and in of
         * host memory. The card will disable this for all nodes when
         * a bus reset happens, so the driver needs to re-enable it
         * after a bus reset. Returns 0 on success, -ENODEV if the
         * card doesn't support this, -ESTALE if the generation
         * doesn't match.
         */
        int (*enable_phys_dma)(struct fw_card *card,
                               int node_id, int generation);

        u32 (*read_csr)(struct fw_card *card, int csr_offset);
        void (*write_csr)(struct fw_card *card, int csr_offset, u32 value);

        struct fw_iso_context *
        (*allocate_iso_context)(struct fw_card *card,
                                int type, int channel, size_t header_size);
        void (*free_iso_context)(struct fw_iso_context *ctx);

        int (*start_iso)(struct fw_iso_context *ctx,
                         s32 cycle, u32 sync, u32 tags);

        int (*set_iso_channels)(struct fw_iso_context *ctx, u64 *channels);

        int (*queue_iso)(struct fw_iso_context *ctx,
                         struct fw_iso_packet *packet,
                         struct fw_iso_buffer *buffer,
                         unsigned long payload);

        void (*flush_queue_iso)(struct fw_iso_context *ctx);

        int (*flush_iso_completions)(struct fw_iso_context *ctx);

        int (*stop_iso)(struct fw_iso_context *ctx);
};

void fw_card_initialize(struct fw_card *card,
                const const struct fw_card_driver *driver, struct device *device);
int fw_card_add(struct fw_card *card,
                u32 max_receive, u32 link_speed, u64 guid);
void fw_core_remove_card(struct fw_card *card);
int fw_compute_block_crc(__be32 *block);
void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset);
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
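
/*
 * Illustrative sketch only (not part of the in-tree documentation): a
 * low-level controller driver is expected to fill in a fw_card_driver
 * ops table and register its card with the declarations above, roughly
 * as follows. All example_* names are hypothetical; the real user of
 * this interface is drivers/firewire/ohci.c.
 *
 *	static const struct fw_card_driver example_driver = {
 *		.enable		 = example_enable,
 *		.read_phy_reg	 = example_read_phy_reg,
 *		.update_phy_reg	 = example_update_phy_reg,
 *		.set_config_rom	 = example_set_config_rom,
 *		.send_request	 = example_send_request,
 *		.send_response	 = example_send_response,
 *		.cancel_packet	 = example_cancel_packet,
 *		.enable_phys_dma = example_enable_phys_dma,
 *		...
 *	};
 *
 *	fw_card_initialize(&example_card->card, &example_driver, dev);
 *	err = fw_card_add(&example_card->card, max_receive, link_speed, guid);
 *
 * fw_card_add() publishes the card to the core; fw_core_remove_card()
 * undoes this when the controller driver is removed.
 */
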
/* -cdev */

extern const struct file_operations fw_device_ops;

void fw_device_cdev_update(struct fw_device *device);
void fw_device_cdev_remove(struct fw_device *device);
void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p);


/* -device */

extern struct rw_semaphore fw_device_rwsem;
extern struct idr fw_device_idr;
extern int fw_cdev_major;

static inline struct fw_device *fw_device_get(struct fw_device *device)
{
        get_device(&device->device);

        return device;
}

static inline void fw_device_put(struct fw_device *device)
{
        put_device(&device->device);
}

struct fw_device *fw_device_get_by_devt(dev_t devt);
int fw_device_set_broadcast_channel(struct device *dev, void *gen);
void fw_node_event(struct fw_card *card, struct fw_node *node, int event);


/* -iso */

int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count);
int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
                          enum dma_data_direction direction);
int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
                          struct vm_area_struct *vma);


/* -topology */

enum {
        FW_NODE_CREATED,
        FW_NODE_UPDATED,
        FW_NODE_DESTROYED,
        FW_NODE_LINK_ON,
        FW_NODE_LINK_OFF,
        FW_NODE_INITIATED_RESET,
};

struct fw_node {
        u16 node_id;
        u8 color;
        u8 port_count;
        u8 link_on:1;
        u8 initiated_reset:1;
        u8 b_path:1;
        u8 phy_speed:2; /* As in the self ID packet. */
        u8 max_speed:2; /* Minimum of all phy-speeds on the path from the
                         * local node to this node. */
        u8 max_depth:4; /* Maximum depth to any leaf node */
        u8 max_hops:4;  /* Max hops in this sub tree */
        atomic_t ref_count;

        /* For serializing node topology into a list. */
        struct list_head link;

        /* Upper layer specific data. */
        void *data;

        struct fw_node *ports[0];
};

static inline struct fw_node *fw_node_get(struct fw_node *node)
{
        atomic_inc(&node->ref_count);

        return node;
}

static inline void fw_node_put(struct fw_node *node)
{
        if (atomic_dec_and_test(&node->ref_count))
                kfree(node);
}

void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
        int generation, int self_id_count, u32 *self_ids, bool bm_abdicate);
void fw_destroy_nodes(struct fw_card *card);

/*
 * Check whether new_generation is the immediate successor of old_generation.
 * Take counter roll-over at 255 (as per OHCI) into account.
 */
static inline bool is_next_generation(int new_generation, int old_generation)
{
        return (new_generation & 0xff) == ((old_generation + 1) & 0xff);
}
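
/*
 * Illustrative examples (not from the original source): with the 8-bit
 * OHCI generation counter, is_next_generation(1, 0) and
 * is_next_generation(0, 255) both return true, the latter because the
 * counter wraps from 255 back to 0, whereas is_next_generation(2, 0)
 * returns false.
 */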

/* -transaction */

#define TCODE_LINK_INTERNAL             0xe

#define TCODE_IS_READ_REQUEST(tcode)    (((tcode) & ~1) == 4)
#define TCODE_IS_BLOCK_PACKET(tcode)    (((tcode) & 1) != 0)
#define TCODE_IS_LINK_INTERNAL(tcode)   ((tcode) == TCODE_LINK_INTERNAL)
#define TCODE_IS_REQUEST(tcode)         (((tcode) & 2) == 0)
#define TCODE_IS_RESPONSE(tcode)        (((tcode) & 2) != 0)
#define TCODE_HAS_REQUEST_DATA(tcode)   (((tcode) & 12) != 4)
#define TCODE_HAS_RESPONSE_DATA(tcode)  (((tcode) & 12) != 0)

#define LOCAL_BUS 0xffc0

void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
int fw_get_response_length(struct fw_request *request);
void fw_fill_response(struct fw_packet *response, u32 *request_header,
                      int rcode, void *payload, size_t length);

#define FW_PHY_CONFIG_NO_NODE_ID        -1
#define FW_PHY_CONFIG_CURRENT_GAP_COUNT -1
void fw_send_phy_config(struct fw_card *card,
                        int node_id, int generation, int gap_count);

static inline bool is_ping_packet(u32 *data)
{
        return (data[0] & 0xc0ffffff) == 0 && ~data[0] == data[1];
}

#endif /* _FIREWIRE_CORE_H */