/*
 * Core IEEE1394 transaction logic
 *
 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include "core.h"

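/*
 * Quadlet layout of asynchronous packet headers, as built and parsed by the
 * macros below:  header[0] carries pri, tcode, rt, tlabel and the destination
 * node ID; header[1] carries the source node ID plus either the rcode (in
 * responses) or the upper 16 bits of the 48-bit offset (in requests);
 * header[2] holds the lower 32 offset bits; header[3] holds data_length and
 * extended_tcode for block/lock packets, or the data quadlet itself for
 * quadlet writes and quadlet read responses.
 */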
#define HEADER_PRI(pri)			((pri) << 0)
#define HEADER_TCODE(tcode)		((tcode) << 4)
#define HEADER_RETRY(retry)		((retry) << 8)
#define HEADER_TLABEL(tlabel)		((tlabel) << 10)
#define HEADER_DESTINATION(destination)	((destination) << 16)
#define HEADER_SOURCE(source)		((source) << 16)
#define HEADER_RCODE(rcode)		((rcode) << 12)
#define HEADER_OFFSET_HIGH(offset_high)	((offset_high) << 0)
#define HEADER_DATA_LENGTH(length)	((length) << 16)
#define HEADER_EXTENDED_TCODE(tcode)	((tcode) << 0)

#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_TLABEL(q)		(((q) >> 10) & 0x3f)
#define HEADER_GET_RCODE(q)		(((q) >> 12) & 0x0f)
#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_SOURCE(q)		(((q) >> 16) & 0xffff)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)

#define HEADER_DESTINATION_IS_BROADCAST(q) \
	(((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f))

#define PHY_PACKET_CONFIG	0x0
#define PHY_PACKET_LINK_ON	0x1
#define PHY_PACKET_SELF_ID	0x2

#define PHY_CONFIG_GAP_COUNT(gap_count)	(((gap_count) << 16) | (1 << 22))
#define PHY_CONFIG_ROOT_ID(node_id)	((((node_id) & 0x3f) << 24) | (1 << 23))
#define PHY_IDENTIFIER(id)		((id) << 30)

static int close_transaction(struct fw_transaction *transaction,
			     struct fw_card *card, int rcode)
{
	struct fw_transaction *t;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(t, &card->transaction_list, link) {
		if (t == transaction) {
			list_del(&t->link);
			card->tlabel_mask &= ~(1ULL << t->tlabel);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (&t->link != &card->transaction_list) {
		t->callback(card, rcode, NULL, 0, t->callback_data);
		return 0;
	}

	return -ENOENT;
}

/*
 * Only valid for transactions that are potentially pending (i.e. they
 * have been sent).
 */
int fw_cancel_transaction(struct fw_card *card,
			  struct fw_transaction *transaction)
{
	/*
	 * Cancel the packet transmission if it's still queued.  That
	 * will call the packet transmission callback which cancels
	 * the transaction.
	 */

	if (card->driver->cancel_packet(card, &transaction->packet) == 0)
		return 0;

	/*
	 * If the request packet has already been sent, we need to see
	 * if the transaction is still pending and remove it in that case.
	 */

	return close_transaction(transaction, card, RCODE_CANCELLED);
}
EXPORT_SYMBOL(fw_cancel_transaction);
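
#if 0
/*
 * Usage sketch, not built:  cancel a transaction that was submitted earlier
 * with fw_send_request().  The function and variable names are illustrative
 * only; @t must still be owned by the caller at this point.
 */
static void example_cancel(struct fw_card *card, struct fw_transaction *t)
{
	/*
	 * 0 means the transaction was found and torn down; its completion
	 * callback is (or will be) invoked.  -ENOENT means it was no longer
	 * pending, i.e. it had already completed.
	 */
	if (fw_cancel_transaction(card, t) < 0)
		fw_notify("transaction had already completed\n");
}
#endif  /*  0  */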

static void transmit_complete_callback(struct fw_packet *packet,
				       struct fw_card *card, int status)
{
	struct fw_transaction *t =
	    container_of(packet, struct fw_transaction, packet);

	switch (status) {
	case ACK_COMPLETE:
		close_transaction(t, card, RCODE_COMPLETE);
		break;
	case ACK_PENDING:
		t->timestamp = packet->timestamp;
		break;
	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:
		close_transaction(t, card, RCODE_BUSY);
		break;
	case ACK_DATA_ERROR:
		close_transaction(t, card, RCODE_DATA_ERROR);
		break;
	case ACK_TYPE_ERROR:
		close_transaction(t, card, RCODE_TYPE_ERROR);
		break;
	default:
		/*
		 * In this case the ack is really a juju-specific rcode,
		 * so just forward it to the callback.
		 */
		close_transaction(t, card, status);
		break;
	}
}

static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
		int destination_id, int source_id, int generation, int speed,
		unsigned long long offset, void *payload, size_t length)
{
	int ext_tcode;

	if (tcode == TCODE_STREAM_DATA) {
		packet->header[0] =
			HEADER_DATA_LENGTH(length) |
			destination_id |
			HEADER_TCODE(TCODE_STREAM_DATA);
		packet->header_length = 4;
		packet->payload = payload;
		packet->payload_length = length;

		goto common;
	}

	if (tcode > 0x10) {
		ext_tcode = tcode & ~0x10;
		tcode = TCODE_LOCK_REQUEST;
	} else
		ext_tcode = 0;

	packet->header[0] =
		HEADER_RETRY(RETRY_X) |
		HEADER_TLABEL(tlabel) |
		HEADER_TCODE(tcode) |
		HEADER_DESTINATION(destination_id);
	packet->header[1] =
		HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
	packet->header[2] =
		offset;

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		packet->header[3] = *(u32 *)payload;
		packet->header_length = 16;
		packet->payload_length = 0;
		break;

	case TCODE_LOCK_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		packet->header[3] =
			HEADER_DATA_LENGTH(length) |
			HEADER_EXTENDED_TCODE(ext_tcode);
		packet->header_length = 16;
		packet->payload = payload;
		packet->payload_length = length;
		break;

	case TCODE_READ_QUADLET_REQUEST:
		packet->header_length = 12;
		packet->payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		packet->header[3] =
			HEADER_DATA_LENGTH(length) |
			HEADER_EXTENDED_TCODE(ext_tcode);
		packet->header_length = 16;
		packet->payload_length = 0;
		break;

	default:
		WARN(1, "wrong tcode %d", tcode);
	}
 common:
	packet->speed = speed;
	packet->generation = generation;
	packet->ack = 0;
	packet->payload_bus = 0;
}

/**
 * This function provides low-level access to the IEEE1394 transaction
 * logic.  Most kernel code would use the blocking convenience wrapper
 * fw_run_transaction() instead.  fw_send_request() is primarily provided
 * as a flexible, one-stop entry point for language bindings and protocol
 * bindings.
 *
 * FIXME: Document this function further, in particular the possible
 * values for rcode in the callback.  In short, we map ACK_COMPLETE to
 * RCODE_COMPLETE and internal errors to RCODE_SEND_ERROR (which is out
 * of range for standard IEEE 1394 rcodes).  All other rcodes are
 * forwarded unchanged.  For all errors, payload is NULL and length is 0.
 *
 * The callback is not guaranteed to have been called by the time this
 * function returns, although it has been in some cases (ACK_COMPLETE
 * and errors).
 *
 * The payload is only used for write requests and must not be freed
 * until the callback has been called.
 *
 * @param card the card from which to send the request
 * @param t the transaction; allocated by the caller, it must stay valid
 *   until the callback has been called
 * @param tcode the tcode for this transaction.  Do not use
 *   TCODE_LOCK_REQUEST directly, instead use TCODE_LOCK_MASK_SWAP
 *   etc. to specify tcode and ext_tcode.
 * @param destination_id the destination node ID (bus ID and PHY ID
 *   concatenated)
 * @param generation the generation for which destination_id is valid
 * @param speed the speed to use for sending the request
 * @param offset the 48-bit offset on the destination node
 * @param payload the data payload for the request subaction
 * @param length the length in bytes of the data to read or write
 * @param callback function to be called when the transaction is completed
 * @param callback_data pointer to arbitrary data, which will be
 *   passed to the callback
 *
 * In case of asynchronous stream packets, i.e. TCODE_STREAM_DATA, the caller
 * needs to synthesize @destination_id with fw_stream_packet_destination_id().
 */
void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
		     int destination_id, int generation, int speed,
		     unsigned long long offset, void *payload, size_t length,
		     fw_transaction_callback_t callback, void *callback_data)
{
	unsigned long flags;
	int tlabel;

	/*
	 * Bump the flush timer up 100ms first of all so we
	 * don't race with a flush timer callback.
	 */

	mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10));

	/*
	 * Allocate tlabel from the bitmap and put the transaction on
	 * the list while holding the card spinlock.
	 */

	spin_lock_irqsave(&card->lock, flags);

	tlabel = card->current_tlabel;
	if (card->tlabel_mask & (1ULL << tlabel)) {
		spin_unlock_irqrestore(&card->lock, flags);
		callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
		return;
	}

	card->current_tlabel = (card->current_tlabel + 1) & 0x3f;
	card->tlabel_mask |= (1ULL << tlabel);

	t->node_id = destination_id;
	t->tlabel = tlabel;
	t->callback = callback;
	t->callback_data = callback_data;

	fw_fill_request(&t->packet, tcode, t->tlabel,
			destination_id, card->node_id, generation,
			speed, offset, payload, length);
	t->packet.callback = transmit_complete_callback;

	list_add_tail(&t->link, &card->transaction_list);

	spin_unlock_irqrestore(&card->lock, flags);

	card->driver->send_request(card, &t->packet);
}
EXPORT_SYMBOL(fw_send_request);
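
#if 0
/*
 * Usage sketch, not built:  a fire-and-forget quadlet write submitted with
 * fw_send_request().  All names here are illustrative.  Per the contract
 * above, the struct fw_transaction and the payload must stay valid until the
 * completion callback has run, so both live in one kmalloc'ed object which
 * the callback frees.  The quadlet is assumed to be passed in bus (big
 * endian) byte order, as incoming quadlet payloads are treated elsewhere in
 * this file (cf. handle_registers()).
 */
struct example_write {
	struct fw_transaction t;
	__be32 data;
};

static void example_write_done(struct fw_card *card, int rcode,
			       void *payload, size_t length, void *data)
{
	struct example_write *e = data;

	if (rcode != RCODE_COMPLETE)
		fw_notify("example write failed, rcode %d\n", rcode);
	kfree(e);
}

static int example_write_quadlet(struct fw_card *card, int destination_id,
				 int generation, int speed,
				 unsigned long long offset, __be32 value)
{
	struct example_write *e = kmalloc(sizeof(*e), GFP_KERNEL);

	if (e == NULL)
		return -ENOMEM;

	e->data = value;
	fw_send_request(card, &e->t, TCODE_WRITE_QUADLET_REQUEST,
			destination_id, generation, speed, offset,
			&e->data, 4, example_write_done, e);
	return 0;
}
#endif  /*  0  */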

struct transaction_callback_data {
	struct completion done;
	void *payload;
	int rcode;
};

static void transaction_callback(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct transaction_callback_data *d = data;

	if (rcode == RCODE_COMPLETE)
		memcpy(d->payload, payload, length);
	d->rcode = rcode;
	complete(&d->done);
}

/**
 * fw_run_transaction - send request and sleep until transaction is completed
 *
 * Returns the RCODE.
 */
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
		       int generation, int speed, unsigned long long offset,
		       void *payload, size_t length)
{
	struct transaction_callback_data d;
	struct fw_transaction t;

	init_completion(&d.done);
	d.payload = payload;
	fw_send_request(card, &t, tcode, destination_id, generation, speed,
			offset, payload, length, transaction_callback, &d);
	wait_for_completion(&d.done);

	return d.rcode;
}
EXPORT_SYMBOL(fw_run_transaction);
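
#if 0
/*
 * Usage sketch, not built:  a blocking quadlet read through
 * fw_run_transaction().  "device" is an illustrative struct fw_device; error
 * handling around bus resets and generations is omitted.  On RCODE_COMPLETE
 * the quadlet is copied into @buf in bus byte order.  Note that the return
 * value is an RCODE_*, not a negative errno.
 */
static int example_read_quadlet(struct fw_device *device,
				unsigned long long offset, __be32 *buf)
{
	return fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
				  device->node_id, device->generation,
				  device->max_speed, offset, buf, 4);
}
#endif  /*  0  */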

static DEFINE_MUTEX(phy_config_mutex);
static DECLARE_COMPLETION(phy_config_done);

static void transmit_phy_packet_callback(struct fw_packet *packet,
					 struct fw_card *card, int status)
{
	complete(&phy_config_done);
}

static struct fw_packet phy_config_packet = {
	.header_length	= 8,
	.payload_length	= 0,
	.speed		= SCODE_100,
	.callback	= transmit_phy_packet_callback,
};

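/*
 * Broadcast a PHY configuration packet:  a single quadlet built from the
 * PHY_CONFIG_* macros above, transmitted together with its bitwise
 * complement.  It asks the node selected by @node_id to become root and
 * updates the gap count of all nodes on the bus.  phy_config_mutex
 * serializes users of the one static packet; we wait up to 100ms for the
 * transmit-complete callback before giving up.
 */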
void fw_send_phy_config(struct fw_card *card,
			int node_id, int generation, int gap_count)
{
	long timeout = DIV_ROUND_UP(HZ, 10);
	u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
		   PHY_CONFIG_ROOT_ID(node_id) |
		   PHY_CONFIG_GAP_COUNT(gap_count);

	mutex_lock(&phy_config_mutex);

	phy_config_packet.header[0] = data;
	phy_config_packet.header[1] = ~data;
	phy_config_packet.generation = generation;
	INIT_COMPLETION(phy_config_done);

	card->driver->send_request(card, &phy_config_packet);
	wait_for_completion_timeout(&phy_config_done, timeout);

	mutex_unlock(&phy_config_mutex);
}

void fw_flush_transactions(struct fw_card *card)
{
	struct fw_transaction *t, *next;
	struct list_head list;
	unsigned long flags;

	INIT_LIST_HEAD(&list);
	spin_lock_irqsave(&card->lock, flags);
	list_splice_init(&card->transaction_list, &list);
	card->tlabel_mask = 0;
	spin_unlock_irqrestore(&card->lock, flags);

	list_for_each_entry_safe(t, next, &list, link) {
		card->driver->cancel_packet(card, &t->packet);

		/*
		 * At this point cancel_packet will never call the
		 * transaction callback, since we just took all the
		 * transactions out of the list.  So do it here.
		 */
		t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
	}
}

static struct fw_address_handler *lookup_overlapping_address_handler(
	struct list_head *list, unsigned long long offset, size_t length)
{
	struct fw_address_handler *handler;

	list_for_each_entry(handler, list, link) {
		if (handler->offset < offset + length &&
		    offset < handler->offset + handler->length)
			return handler;
	}

	return NULL;
}

static struct fw_address_handler *lookup_enclosing_address_handler(
	struct list_head *list, unsigned long long offset, size_t length)
{
	struct fw_address_handler *handler;

	list_for_each_entry(handler, list, link) {
		if (handler->offset <= offset &&
		    offset + length <= handler->offset + handler->length)
			return handler;
	}

	return NULL;
}

static DEFINE_SPINLOCK(address_handler_lock);
static LIST_HEAD(address_handler_list);

const struct fw_address_region fw_high_memory_region =
	{ .start = 0x000100000000ULL, .end = 0xffffe0000000ULL,  };
EXPORT_SYMBOL(fw_high_memory_region);

#if 0
const struct fw_address_region fw_low_memory_region =
	{ .start = 0x000000000000ULL, .end = 0x000100000000ULL,  };
const struct fw_address_region fw_private_region =
	{ .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL,  };
const struct fw_address_region fw_csr_region =
	{ .start = CSR_REGISTER_BASE,
	  .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END,  };
const struct fw_address_region fw_unit_space_region =
	{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
#endif  /*  0  */

/**
 * fw_core_add_address_handler - register for incoming requests
 * @handler: callback
 * @region: region in the IEEE 1212 node space address range
 *
 * region->start, ->end, and handler->length have to be quadlet-aligned.
 *
 * When a request is received that falls within the specified address range,
 * the specified callback is invoked.  The parameters passed to the callback
 * give the details of the particular request.
 *
 * Return value:  0 on success, non-zero otherwise.
 * The start offset of the handler's address region is determined by
 * fw_core_add_address_handler() and is returned in handler->offset.
 */
int fw_core_add_address_handler(struct fw_address_handler *handler,
				const struct fw_address_region *region)
{
	struct fw_address_handler *other;
	unsigned long flags;
	int ret = -EBUSY;

	if (region->start & 0xffff000000000003ULL ||
	    region->end   & 0xffff000000000003ULL ||
	    region->start >= region->end ||
	    handler->length & 3 ||
	    handler->length == 0)
		return -EINVAL;

	spin_lock_irqsave(&address_handler_lock, flags);

	handler->offset = region->start;
	while (handler->offset + handler->length <= region->end) {
		other =
		    lookup_overlapping_address_handler(&address_handler_list,
						       handler->offset,
						       handler->length);
		if (other != NULL) {
			handler->offset += other->length;
		} else {
			list_add_tail(&handler->link, &address_handler_list);
			ret = 0;
			break;
		}
	}

	spin_unlock_irqrestore(&address_handler_lock, flags);

	return ret;
}
EXPORT_SYMBOL(fw_core_add_address_handler);
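
#if 0
/*
 * Usage sketch, not built:  claim a quadlet-aligned range in the high memory
 * region and answer incoming quadlet reads, mirroring the pattern used by
 * handle_topology_map() below.  The handler and function names are
 * illustrative; the start address actually granted is reported back in
 * example_handler.offset.
 */
static void example_address_callback(struct fw_card *card,
				     struct fw_request *request,
				     int tcode, int destination, int source,
				     int generation, int speed,
				     unsigned long long offset,
				     void *payload, size_t length,
				     void *callback_data)
{
	if (tcode == TCODE_READ_QUADLET_REQUEST) {
		/* Read data is returned by filling in the request payload. */
		*(__be32 *)payload = cpu_to_be32(0x12345678);
		fw_send_response(card, request, RCODE_COMPLETE);
	} else {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
	}
}

static struct fw_address_handler example_handler = {
	.length			= 0x100,
	.address_callback	= example_address_callback,
};

static int example_register(void)
{
	return fw_core_add_address_handler(&example_handler,
					   &fw_high_memory_region);
}
#endif  /*  0  */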

/**
 * fw_core_remove_address_handler - unregister an address handler
 */
void fw_core_remove_address_handler(struct fw_address_handler *handler)
{
	unsigned long flags;

	spin_lock_irqsave(&address_handler_lock, flags);
	list_del(&handler->link);
	spin_unlock_irqrestore(&address_handler_lock, flags);
}
EXPORT_SYMBOL(fw_core_remove_address_handler);

struct fw_request {
	struct fw_packet response;
	u32 request_header[4];
	int ack;
	u32 length;
	u32 data[];
};

static void free_response_callback(struct fw_packet *packet,
				   struct fw_card *card, int status)
{
	struct fw_request *request;

	request = container_of(packet, struct fw_request, response);
	kfree(request);
}

void fw_fill_response(struct fw_packet *response, u32 *request_header,
		      int rcode, void *payload, size_t length)
{
	int tcode, tlabel, extended_tcode, source, destination;

	/*
	 * A response is addressed back to the requester:  its source is
	 * the request's destination and its destination is the request's
	 * source.
	 */
	tcode          = HEADER_GET_TCODE(request_header[0]);
	tlabel         = HEADER_GET_TLABEL(request_header[0]);
	source         = HEADER_GET_DESTINATION(request_header[0]);
	destination    = HEADER_GET_SOURCE(request_header[1]);
	extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]);

	response->header[0] =
		HEADER_RETRY(RETRY_1) |
		HEADER_TLABEL(tlabel) |
		HEADER_DESTINATION(destination);
	response->header[1] =
		HEADER_SOURCE(source) |
		HEADER_RCODE(rcode);
	response->header[2] = 0;

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE);
		response->header_length = 12;
		response->payload_length = 0;
		break;

	case TCODE_READ_QUADLET_REQUEST:
		response->header[0] |=
			HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE);
		if (payload != NULL)
			response->header[3] = *(u32 *)payload;
		else
			response->header[3] = 0;
		response->header_length = 16;
		response->payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		response->header[0] |= HEADER_TCODE(tcode + 2);
		response->header[3] =
			HEADER_DATA_LENGTH(length) |
			HEADER_EXTENDED_TCODE(extended_tcode);
		response->header_length = 16;
		response->payload = payload;
		response->payload_length = length;
		break;

	default:
		WARN(1, "wrong tcode %d", tcode);
	}

	response->payload_bus = 0;
}
EXPORT_SYMBOL(fw_fill_response);

static struct fw_request *allocate_request(struct fw_packet *p)
{
	struct fw_request *request;
	u32 *data, length;
	int request_tcode, t;

	request_tcode = HEADER_GET_TCODE(p->header[0]);
	switch (request_tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		data = &p->header[3];
		length = 4;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		data = p->payload;
		length = HEADER_GET_DATA_LENGTH(p->header[3]);
		break;

	case TCODE_READ_QUADLET_REQUEST:
		data = NULL;
		length = 4;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		data = NULL;
		length = HEADER_GET_DATA_LENGTH(p->header[3]);
		break;

	default:
		fw_error("ERROR - corrupt request received - %08x %08x %08x\n",
			 p->header[0], p->header[1], p->header[2]);
		return NULL;
	}

	request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
	if (request == NULL)
		return NULL;

	/*
	 * Stamp the response 4000 bus cycles (125us each, i.e. 0.5s) after
	 * the request's timestamp; the low 13 bits are the cycle count,
	 * which wraps at 8000 and carries into the cycleSeconds field
	 * above it.
	 */
	t = (p->timestamp & 0x1fff) + 4000;
	if (t >= 8000)
		t = (p->timestamp & ~0x1fff) + 0x2000 + t - 8000;
	else
		t = (p->timestamp & ~0x1fff) + t;

	request->response.speed = p->speed;
	request->response.timestamp = t;
	request->response.generation = p->generation;
	request->response.ack = 0;
	request->response.callback = free_response_callback;
	request->ack = p->ack;
	request->length = length;
	if (data)
		memcpy(request->data, data, length);

	memcpy(request->request_header, p->header, sizeof(p->header));

	return request;
}

void fw_send_response(struct fw_card *card,
		      struct fw_request *request, int rcode)
{
	/* unified transaction or broadcast transaction: don't respond */
	if (request->ack != ACK_PENDING ||
	    HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) {
		kfree(request);
		return;
	}

	if (rcode == RCODE_COMPLETE)
		fw_fill_response(&request->response, request->request_header,
				 rcode, request->data, request->length);
	else
		fw_fill_response(&request->response, request->request_header,
				 rcode, NULL, 0);

	card->driver->send_response(card, &request->response);
}
EXPORT_SYMBOL(fw_send_response);

void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
{
	struct fw_address_handler *handler;
	struct fw_request *request;
	unsigned long long offset;
	unsigned long flags;
	int tcode, destination, source;

	if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
		return;

	request = allocate_request(p);
	if (request == NULL) {
		/* FIXME: send statically allocated busy packet. */
		return;
	}

	offset      =
		((unsigned long long)
		 HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2];
	tcode       = HEADER_GET_TCODE(p->header[0]);
	destination = HEADER_GET_DESTINATION(p->header[0]);
	source      = HEADER_GET_SOURCE(p->header[1]);

	spin_lock_irqsave(&address_handler_lock, flags);
	handler = lookup_enclosing_address_handler(&address_handler_list,
						   offset, request->length);
	spin_unlock_irqrestore(&address_handler_lock, flags);

	/*
	 * FIXME: lookup the fw_node corresponding to the sender of
	 * this request and pass that to the address handler instead
	 * of the node ID.  We may also want to move the address
	 * allocations to fw_node so we only do this callback if the
	 * upper layers registered it for this node.
	 */

	if (handler == NULL)
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
	else
		handler->address_callback(card, request,
					  tcode, destination, source,
					  p->generation, p->speed, offset,
					  request->data, request->length,
					  handler->callback_data);
}
EXPORT_SYMBOL(fw_core_handle_request);

void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
	struct fw_transaction *t;
	unsigned long flags;
	u32 *data;
	size_t data_length;
	int tcode, tlabel, destination, source, rcode;

	tcode       = HEADER_GET_TCODE(p->header[0]);
	tlabel      = HEADER_GET_TLABEL(p->header[0]);
	destination = HEADER_GET_DESTINATION(p->header[0]);
	source      = HEADER_GET_SOURCE(p->header[1]);
	rcode       = HEADER_GET_RCODE(p->header[1]);

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(t, &card->transaction_list, link) {
		if (t->node_id == source && t->tlabel == tlabel) {
			list_del(&t->link);
			card->tlabel_mask &= ~(1ULL << t->tlabel);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (&t->link == &card->transaction_list) {
		fw_notify("Unsolicited response (source %x, tlabel %x)\n",
			  source, tlabel);
		return;
	}

	/*
	 * FIXME: sanity-check the packet - is the length correct, do the
	 * tcode and addresses match?
	 */

	switch (tcode) {
	case TCODE_READ_QUADLET_RESPONSE:
		data = (u32 *) &p->header[3];
		data_length = 4;
		break;

	case TCODE_WRITE_RESPONSE:
		data = NULL;
		data_length = 0;
		break;

	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_RESPONSE:
		data = p->payload;
		data_length = HEADER_GET_DATA_LENGTH(p->header[3]);
		break;

	default:
		/* Should never happen, this is just to shut up gcc. */
		data = NULL;
		data_length = 0;
		break;
	}

	/*
	 * This response handler may run while the request packet's
	 * completion is still pending in the driver.  Cancel the packet
	 * now so that its callback won't run later.
	 */
	card->driver->cancel_packet(card, &t->packet);

	t->callback(card, rcode, data, data_length, t->callback_data);
}
EXPORT_SYMBOL(fw_core_handle_response);

static const struct fw_address_region topology_map_region =
	{ .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
	  .end   = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };

static void handle_topology_map(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		int speed, unsigned long long offset,
		void *payload, size_t length, void *callback_data)
{
	int start;

	if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
		return;
	}

	if ((offset & 3) > 0 || (length & 3) > 0) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
		return;
	}

	start = (offset - topology_map_region.start) / 4;
	memcpy(payload, &card->topology_map[start], length);

	fw_send_response(card, request, RCODE_COMPLETE);
}

static struct fw_address_handler topology_map = {
	.length			= 0x400,
	.address_callback	= handle_topology_map,
};

static const struct fw_address_region registers_region =
	{ .start = CSR_REGISTER_BASE,
	  .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };

static void handle_registers(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		int speed, unsigned long long offset,
		void *payload, size_t length, void *callback_data)
{
	int reg = offset & ~CSR_REGISTER_BASE;
	unsigned long long bus_time;
	__be32 *data = payload;
	int rcode = RCODE_COMPLETE;

	switch (reg) {
	case CSR_CYCLE_TIME:
	case CSR_BUS_TIME:
		if (!TCODE_IS_READ_REQUEST(tcode) || length != 4) {
			rcode = RCODE_TYPE_ERROR;
			break;
		}

		bus_time = card->driver->get_bus_time(card);
		if (reg == CSR_CYCLE_TIME)
			*data = cpu_to_be32(bus_time);
		else
			*data = cpu_to_be32(bus_time >> 25);
		break;

	case CSR_BROADCAST_CHANNEL:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = cpu_to_be32(card->broadcast_channel);
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->broadcast_channel =
			    (be32_to_cpu(*data) & BROADCAST_CHANNEL_VALID) |
			    BROADCAST_CHANNEL_INITIAL;
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		/*
		 * FIXME: these registers are handled by the OHCI hardware and
		 * the stack never sees these requests.  If we add support for
		 * a new type of controller that doesn't handle them in
		 * hardware, we need to deal with these transactions here.
		 */
		BUG();
		break;

	case CSR_BUSY_TIMEOUT:
		/* FIXME: Implement this. */

	default:
		rcode = RCODE_ADDRESS_ERROR;
		break;
	}

	fw_send_response(card, request, rcode);
}

static struct fw_address_handler registers = {
	.length			= 0x400,
	.address_callback	= handle_registers,
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
MODULE_LICENSE("GPL");

static const u32 vendor_textual_descriptor[] = {
	/* textual descriptor leaf () */
	0x00060000,
	0x00000000,
	0x00000000,
	0x4c696e75,		/* L i n u */
	0x78204669,		/* x   F i */
	0x72657769,		/* r e w i */
	0x72650000,		/* r e     */
};

static const u32 model_textual_descriptor[] = {
	/* model descriptor leaf () */
	0x00030000,
	0x00000000,
	0x00000000,
	0x4a756a75,		/* J u j u */
};

static struct fw_descriptor vendor_id_descriptor = {
	.length = ARRAY_SIZE(vendor_textual_descriptor),
	.immediate = 0x03d00d1e,
	.key = 0x81000000,
	.data = vendor_textual_descriptor,
};

static struct fw_descriptor model_id_descriptor = {
	.length = ARRAY_SIZE(model_textual_descriptor),
	.immediate = 0x17000001,
	.key = 0x81000000,
	.data = model_textual_descriptor,
};

static int __init fw_core_init(void)
{
	int ret;

	ret = bus_register(&fw_bus_type);
	if (ret < 0)
		return ret;

	fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
	if (fw_cdev_major < 0) {
		bus_unregister(&fw_bus_type);
		return fw_cdev_major;
	}

	fw_core_add_address_handler(&topology_map, &topology_map_region);
	fw_core_add_address_handler(&registers, &registers_region);
	fw_core_add_descriptor(&vendor_id_descriptor);
	fw_core_add_descriptor(&model_id_descriptor);

	return 0;
}

static void __exit fw_core_cleanup(void)
{
	unregister_chrdev(fw_cdev_major, "firewire");
	bus_unregister(&fw_bus_type);
	idr_destroy(&fw_device_idr);
}

module_init(fw_core_init);
module_exit(fw_core_cleanup);