Diffstat (limited to 'drivers/firewire/core-transaction.c')
-rw-r--r--	drivers/firewire/core-transaction.c	203
1 file changed, 141 insertions(+), 62 deletions(-)
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index b42a0bde849..eb6935c8ad9 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -31,11 +31,13 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
+#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#include <asm/byteorder.h>
@@ -72,6 +74,15 @@
#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
#define PHY_IDENTIFIER(id) ((id) << 30)
+/* returns 0 if the split timeout handler is already running */
+static int try_cancel_split_timeout(struct fw_transaction *t)
+{
+ if (t->is_split_transaction)
+ return del_timer(&t->split_timeout_timer);
+ else
+ return 1;
+}
+
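Review note: del_timer() returns 0 when the timer is not pending, which for an armed split-timeout timer means its handler has already fired (or is firing) and is about to finish the transaction itself. Transactions that never went split (is_split_transaction == false) have no timer to race with, so the helper reports success and the caller may close the transaction immediately.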
static int close_transaction(struct fw_transaction *transaction,
struct fw_card *card, int rcode)
{
@@ -81,7 +92,7 @@ static int close_transaction(struct fw_transaction *transaction,
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
if (t == transaction) {
- if (!del_timer(&t->split_timeout_timer)) {
+ if (!try_cancel_split_timeout(t)) {
spin_unlock_irqrestore(&card->lock, flags);
goto timed_out;
}
@@ -141,16 +152,28 @@ static void split_transaction_timeout_callback(unsigned long data)
card->tlabel_mask &= ~(1ULL << t->tlabel);
spin_unlock_irqrestore(&card->lock, flags);
- card->driver->cancel_packet(card, &t->packet);
-
- /*
- * At this point cancel_packet will never call the transaction
- * callback, since we just took the transaction out of the list.
- * So do it here.
- */
t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
}
+static void start_split_transaction_timeout(struct fw_transaction *t,
+ struct fw_card *card)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+
+ if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) {
+ spin_unlock_irqrestore(&card->lock, flags);
+ return;
+ }
+
+ t->is_split_transaction = true;
+ mod_timer(&t->split_timeout_timer,
+ jiffies + card->split_timeout_jiffies);
+
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+
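Review note: the split-transaction timer is now armed from transmit_complete_callback() only once the request has been acknowledged ack_pending (next hunk), instead of unconditionally at submission time in fw_send_request() (the removed FIXME further down). Unified transactions therefore never start the timer, and split_transaction_timeout_callback() above no longer needs card->driver->cancel_packet(): by the time the split timeout can fire at all, the request packet has long been acknowledged and left the AT queue.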
static void transmit_complete_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
@@ -162,7 +185,7 @@ static void transmit_complete_callback(struct fw_packet *packet,
close_transaction(t, card, RCODE_COMPLETE);
break;
case ACK_PENDING:
- t->timestamp = packet->timestamp;
+ start_split_transaction_timeout(t, card);
break;
case ACK_BUSY_X:
case ACK_BUSY_A:
@@ -250,7 +273,7 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
break;
default:
- WARN(1, "wrong tcode %d", tcode);
+ WARN(1, "wrong tcode %d\n", tcode);
}
common:
packet->speed = speed;
@@ -305,8 +328,8 @@ static int allocate_tlabel(struct fw_card *card)
* It will contain tag, channel, and sy data instead of a node ID then.
*
* The payload buffer at @data is going to be DMA-mapped except in case of
- * quadlet-sized payload or of local (loopback) requests. Hence make sure that
- * the buffer complies with the restrictions for DMA-mapped memory. The
+ * @length <= 8 or of local (loopback) requests. Hence make sure that the
+ * buffer complies with the restrictions of the streaming DMA mapping API.
* @payload must not be freed before the @callback is called.
*
* In case of request types without payload, @data is NULL and @length is 0.
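As a concrete illustration of the DMA constraint described above, here is a minimal caller sketch; it is not part of this patch, and the write_ctx/write_done/send_block_write names are invented. Block payloads should come from kmalloc()/kzalloc() (stack and vmalloc memory are not suitable for streaming DMA) and must stay allocated until the completion callback has run:

	/* Hypothetical caller; assumes <linux/firewire.h> and <linux/slab.h>. */
	struct write_ctx {
		struct fw_transaction t;
		void *buf;		/* kmalloc'ed, never on the stack */
	};

	static void write_done(struct fw_card *card, int rcode,
			       void *data, size_t length, void *callback_data)
	{
		struct write_ctx *ctx = callback_data;

		/* the payload may be freed only after the callback has run */
		kfree(ctx->buf);
		kfree(ctx);
	}

	static int send_block_write(struct fw_card *card, int node_id, int generation,
				    int speed, unsigned long long offset, size_t len)
	{
		struct write_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

		if (!ctx)
			return -ENOMEM;
		ctx->buf = kzalloc(len, GFP_KERNEL);
		if (!ctx->buf) {
			kfree(ctx);
			return -ENOMEM;
		}
		fw_send_request(card, &ctx->t, TCODE_WRITE_BLOCK_REQUEST,
				node_id, generation, speed, offset,
				ctx->buf, len, write_done, ctx);
		return 0;
	}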
@@ -349,11 +372,9 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
t->node_id = destination_id;
t->tlabel = tlabel;
t->card = card;
+ t->is_split_transaction = false;
setup_timer(&t->split_timeout_timer,
split_transaction_timeout_callback, (unsigned long)t);
- /* FIXME: start this timer later, relative to t->timestamp */
- mod_timer(&t->split_timeout_timer,
- jiffies + card->split_timeout_jiffies);
t->callback = callback;
t->callback_data = callback_data;
@@ -392,7 +413,8 @@ static void transaction_callback(struct fw_card *card, int rcode,
*
* Returns the RCODE. See fw_send_request() for parameter documentation.
* Unlike fw_send_request(), @data points to the payload of the request or/and
- * to the payload of the response.
+ * to the payload of the response. DMA mapping restrictions apply to outbound
+ * request payloads of >= 8 bytes but not to inbound response payloads.
*/
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
int generation, int speed, unsigned long long offset,
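For comparison, a hedged sketch of using the blocking wrapper documented in the hunk above (the read_rom_header name and the device handling are my assumptions, not part of the patch). A quadlet read has no outbound payload and the inbound response payload is exempt from the DMA rules, so an on-stack buffer is fine here:

	static int read_rom_header(struct fw_device *device, u32 *value)
	{
		__be32 q;
		int rcode;

		/* production code must take care when sampling generation/node_id */
		rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
					   device->node_id, device->generation,
					   device->max_speed,
					   CSR_REGISTER_BASE + CSR_CONFIG_ROM,
					   &q, sizeof(q));
		if (rcode != RCODE_COMPLETE)
			return -EIO;

		*value = be32_to_cpu(q);
		return 0;
	}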
@@ -423,7 +445,8 @@ static void transmit_phy_packet_callback(struct fw_packet *packet,
}
static struct fw_packet phy_config_packet = {
- .header_length = 8,
+ .header_length = 12,
+ .header[0] = TCODE_LINK_INTERNAL << 4,
.payload_length = 0,
.speed = SCODE_100,
.callback = transmit_phy_packet_callback,
@@ -451,10 +474,10 @@ void fw_send_phy_config(struct fw_card *card,
mutex_lock(&phy_config_mutex);
- phy_config_packet.header[0] = data;
- phy_config_packet.header[1] = ~data;
+ phy_config_packet.header[1] = data;
+ phy_config_packet.header[2] = ~data;
phy_config_packet.generation = generation;
- INIT_COMPLETION(phy_config_done);
+ reinit_completion(&phy_config_done);
card->driver->send_request(card, &phy_config_packet);
wait_for_completion_timeout(&phy_config_done, timeout);
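Review note: the PHY configuration packet grows from two to three header quadlets. header[0] now carries only the driver-internal TCODE_LINK_INTERNAL marker, which appears intended to let the low-level driver identify PHY packets by tcode rather than by a special header length, while the PHY-config quadlet and its bitwise inverse move to header[1] and header[2]. INIT_COMPLETION() to reinit_completion() is just the renamed completion API.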
@@ -467,7 +490,7 @@ static struct fw_address_handler *lookup_overlapping_address_handler(
{
struct fw_address_handler *handler;
- list_for_each_entry(handler, list, link) {
+ list_for_each_entry_rcu(handler, list, link) {
if (handler->offset < offset + length &&
offset < handler->offset + handler->length)
return handler;
@@ -488,7 +511,7 @@ static struct fw_address_handler *lookup_enclosing_address_handler(
{
struct fw_address_handler *handler;
- list_for_each_entry(handler, list, link) {
+ list_for_each_entry_rcu(handler, list, link) {
if (is_enclosing_handler(handler, offset, length))
return handler;
}
@@ -496,16 +519,17 @@ static struct fw_address_handler *lookup_enclosing_address_handler(
return NULL;
}
-static DEFINE_SPINLOCK(address_handler_lock);
+static DEFINE_SPINLOCK(address_handler_list_lock);
static LIST_HEAD(address_handler_list);
const struct fw_address_region fw_high_memory_region =
- { .start = 0x000100000000ULL, .end = 0xffffe0000000ULL, };
+ { .start = FW_MAX_PHYSICAL_RANGE, .end = 0xffffe0000000ULL, };
EXPORT_SYMBOL(fw_high_memory_region);
+static const struct fw_address_region low_memory_region =
+ { .start = 0x000000000000ULL, .end = FW_MAX_PHYSICAL_RANGE, };
+
#if 0
-const struct fw_address_region fw_low_memory_region =
- { .start = 0x000000000000ULL, .end = 0x000100000000ULL, };
const struct fw_address_region fw_private_region =
{ .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, };
const struct fw_address_region fw_csr_region =
@@ -532,6 +556,7 @@ static bool is_in_fcp_region(u64 offset, size_t length)
* the specified callback is invoked. The parameters passed to the callback
* give the details of the particular request.
*
+ * To be called in process context.
* Return value: 0 on success, non-zero otherwise.
*
* The start offset of the handler's address region is determined by
@@ -543,7 +568,6 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
const struct fw_address_region *region)
{
struct fw_address_handler *other;
- unsigned long flags;
int ret = -EBUSY;
if (region->start & 0xffff000000000003ULL ||
@@ -553,7 +577,7 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
handler->length == 0)
return -EINVAL;
- spin_lock_irqsave(&address_handler_lock, flags);
+ spin_lock(&address_handler_list_lock);
handler->offset = region->start;
while (handler->offset + handler->length <= region->end) {
@@ -566,13 +590,13 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
if (other != NULL) {
handler->offset += other->length;
} else {
- list_add_tail(&handler->link, &address_handler_list);
+ list_add_tail_rcu(&handler->link, &address_handler_list);
ret = 0;
break;
}
}
- spin_unlock_irqrestore(&address_handler_lock, flags);
+ spin_unlock(&address_handler_list_lock);
return ret;
}
@@ -580,14 +604,18 @@ EXPORT_SYMBOL(fw_core_add_address_handler);
/**
* fw_core_remove_address_handler() - unregister an address handler
+ *
+ * To be called in process context.
+ *
+ * When fw_core_remove_address_handler() returns, @handler->callback() is
+ * guaranteed to not run on any CPU anymore.
*/
void fw_core_remove_address_handler(struct fw_address_handler *handler)
{
- unsigned long flags;
-
- spin_lock_irqsave(&address_handler_lock, flags);
- list_del(&handler->link);
- spin_unlock_irqrestore(&address_handler_lock, flags);
+ spin_lock(&address_handler_list_lock);
+ list_del_rcu(&handler->link);
+ spin_unlock(&address_handler_list_lock);
+ synchronize_rcu();
}
EXPORT_SYMBOL(fw_core_remove_address_handler);
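With the RCU conversion, request dispatch looks handlers up and invokes address_callback() inside an RCU read-side critical section (later hunks), so callbacks keep running in atomic context and must not sleep, while registration and removal serialize on address_handler_list_lock in process context; the synchronize_rcu() above is what backs the new guarantee that the callback no longer runs once removal returns. A hedged usage sketch of the API — all my_* names are invented:

	static void my_region_callback(struct fw_card *card, struct fw_request *request,
				       int tcode, int destination, int source,
				       int generation, unsigned long long offset,
				       void *payload, size_t length, void *callback_data)
	{
		/* atomic context: no sleeping here */
		fw_send_response(card, request, RCODE_COMPLETE);
	}

	static struct fw_address_handler my_handler = {
		.length           = 0x1000,
		.address_callback = my_region_callback,
	};

	static int my_register(void)
	{
		/* the core picks a free offset within the region; see my_handler.offset */
		return fw_core_add_address_handler(&my_handler, &fw_high_memory_region);
	}

	static void my_unregister(void)
	{
		/* once this returns, my_region_callback() cannot be running anywhere */
		fw_core_remove_address_handler(&my_handler);
	}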
@@ -638,7 +666,7 @@ int fw_get_response_length(struct fw_request *r)
}
default:
- WARN(1, "wrong tcode %d", tcode);
+ WARN(1, "wrong tcode %d\n", tcode);
return 0;
}
}
@@ -694,7 +722,7 @@ void fw_fill_response(struct fw_packet *response, u32 *request_header,
break;
default:
- WARN(1, "wrong tcode %d", tcode);
+ WARN(1, "wrong tcode %d\n", tcode);
}
response->payload_mapped = false;
@@ -748,7 +776,7 @@ static struct fw_request *allocate_request(struct fw_card *card,
break;
default:
- fw_error("ERROR - corrupt request received - %08x %08x %08x\n",
+ fw_notice(card, "ERROR - corrupt request received - %08x %08x %08x\n",
p->header[0], p->header[1], p->header[2]);
return NULL;
}
@@ -798,13 +826,21 @@ void fw_send_response(struct fw_card *card,
}
EXPORT_SYMBOL(fw_send_response);
+/**
+ * fw_get_request_speed() - returns speed at which the @request was received
+ */
+int fw_get_request_speed(struct fw_request *request)
+{
+ return request->response.speed;
+}
+EXPORT_SYMBOL(fw_get_request_speed);
+
static void handle_exclusive_region_request(struct fw_card *card,
struct fw_packet *p,
struct fw_request *request,
unsigned long long offset)
{
struct fw_address_handler *handler;
- unsigned long flags;
int tcode, destination, source;
destination = HEADER_GET_DESTINATION(p->header[0]);
@@ -813,27 +849,19 @@ static void handle_exclusive_region_request(struct fw_card *card,
if (tcode == TCODE_LOCK_REQUEST)
tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]);
- spin_lock_irqsave(&address_handler_lock, flags);
+ rcu_read_lock();
handler = lookup_enclosing_address_handler(&address_handler_list,
offset, request->length);
- spin_unlock_irqrestore(&address_handler_lock, flags);
-
- /*
- * FIXME: lookup the fw_node corresponding to the sender of
- * this request and pass that to the address handler instead
- * of the node ID. We may also want to move the address
- * allocations to fw_node so we only do this callback if the
- * upper layers registered it for this node.
- */
-
- if (handler == NULL)
- fw_send_response(card, request, RCODE_ADDRESS_ERROR);
- else
+ if (handler)
handler->address_callback(card, request,
tcode, destination, source,
p->generation, offset,
request->data, request->length,
handler->callback_data);
+ rcu_read_unlock();
+
+ if (!handler)
+ fw_send_response(card, request, RCODE_ADDRESS_ERROR);
}
static void handle_fcp_region_request(struct fw_card *card,
@@ -842,7 +870,6 @@ static void handle_fcp_region_request(struct fw_card *card,
unsigned long long offset)
{
struct fw_address_handler *handler;
- unsigned long flags;
int tcode, destination, source;
if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
@@ -864,8 +891,8 @@ static void handle_fcp_region_request(struct fw_card *card,
return;
}
- spin_lock_irqsave(&address_handler_lock, flags);
- list_for_each_entry(handler, &address_handler_list, link) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(handler, &address_handler_list, link) {
if (is_enclosing_handler(handler, offset, request->length))
handler->address_callback(card, NULL, tcode,
destination, source,
@@ -874,7 +901,7 @@ static void handle_fcp_region_request(struct fw_card *card,
request->length,
handler->callback_data);
}
- spin_unlock_irqrestore(&address_handler_lock, flags);
+ rcu_read_unlock();
fw_send_response(card, request, RCODE_COMPLETE);
}
@@ -925,7 +952,7 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
if (t->node_id == source && t->tlabel == tlabel) {
- if (!del_timer(&t->split_timeout_timer)) {
+ if (!try_cancel_split_timeout(t)) {
spin_unlock_irqrestore(&card->lock, flags);
goto timed_out;
}
@@ -938,7 +965,7 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
if (&t->link == &card->transaction_list) {
timed_out:
- fw_notify("Unsolicited response (source %x, tlabel %x)\n",
+ fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
source, tlabel);
return;
}
@@ -982,6 +1009,32 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
}
EXPORT_SYMBOL(fw_core_handle_response);
+/**
+ * fw_rcode_string - convert a firewire result code to an error description
+ * @rcode: the result code
+ */
+const char *fw_rcode_string(int rcode)
+{
+ static const char *const names[] = {
+ [RCODE_COMPLETE] = "no error",
+ [RCODE_CONFLICT_ERROR] = "conflict error",
+ [RCODE_DATA_ERROR] = "data error",
+ [RCODE_TYPE_ERROR] = "type error",
+ [RCODE_ADDRESS_ERROR] = "address error",
+ [RCODE_SEND_ERROR] = "send error",
+ [RCODE_CANCELLED] = "timeout",
+ [RCODE_BUSY] = "busy",
+ [RCODE_GENERATION] = "bus reset",
+ [RCODE_NO_ACK] = "no ack",
+ };
+
+ if ((unsigned int)rcode < ARRAY_SIZE(names) && names[rcode])
+ return names[rcode];
+ else
+ return "unknown";
+}
+EXPORT_SYMBOL(fw_rcode_string);
+
static const struct fw_address_region topology_map_region =
{ .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
.end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };
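A possible use of the fw_rcode_string() helper added above (the log_transaction_result name is invented):

	static void log_transaction_result(struct fw_card *card, int rcode)
	{
		if (rcode != RCODE_COMPLETE)
			fw_notice(card, "transaction failed: %s (rcode %d)\n",
				  fw_rcode_string(rcode), rcode);
	}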
@@ -1024,8 +1077,8 @@ static void update_split_timeout(struct fw_card *card)
cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19);
- cycles = max(cycles, 800u); /* minimum as per the spec */
- cycles = min(cycles, 3u * 8000u); /* maximum OHCI timeout */
+ /* minimum per IEEE 1394, maximum which doesn't overflow OHCI */
+ cycles = clamp(cycles, 800u, 3u * 8000u);
card->split_timeout_cycles = cycles;
card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
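Worked example of the clamped conversion, assuming the IEEE 1394 default split timeout of 100 ms (SPLIT_TIMEOUT_HI = 0, a cycle count of 800 in bits 31..19 of SPLIT_TIMEOUT_LO) and HZ = 250:

	cycles = 0 * 8000 + ((800 << 19) >> 19);	/* = 800 */
	cycles = clamp(cycles, 800u, 3u * 8000u);	/* still 800 */
	/* split_timeout_jiffies = DIV_ROUND_UP(800 * 250, 8000) = 25 jiffies = 100 ms */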
@@ -1151,6 +1204,23 @@ static struct fw_address_handler registers = {
.address_callback = handle_registers,
};
+static void handle_low_memory(struct fw_card *card, struct fw_request *request,
+ int tcode, int destination, int source, int generation,
+ unsigned long long offset, void *payload, size_t length,
+ void *callback_data)
+{
+ /*
+ * This catches requests not handled by the physical DMA unit,
+ * i.e., wrong transaction types or unauthorized source nodes.
+ */
+ fw_send_response(card, request, RCODE_TYPE_ERROR);
+}
+
+static struct fw_address_handler low_memory = {
+ .length = FW_MAX_PHYSICAL_RANGE,
+ .address_callback = handle_low_memory,
+};
+
MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
MODULE_LICENSE("GPL");
@@ -1192,18 +1262,26 @@ static int __init fw_core_init(void)
{
int ret;
+ fw_workqueue = alloc_workqueue("firewire", WQ_MEM_RECLAIM, 0);
+ if (!fw_workqueue)
+ return -ENOMEM;
+
ret = bus_register(&fw_bus_type);
- if (ret < 0)
+ if (ret < 0) {
+ destroy_workqueue(fw_workqueue);
return ret;
+ }
fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
if (fw_cdev_major < 0) {
bus_unregister(&fw_bus_type);
+ destroy_workqueue(fw_workqueue);
return fw_cdev_major;
}
fw_core_add_address_handler(&topology_map, &topology_map_region);
fw_core_add_address_handler(&registers, &registers_region);
+ fw_core_add_address_handler(&low_memory, &low_memory_region);
fw_core_add_descriptor(&vendor_id_descriptor);
fw_core_add_descriptor(&model_id_descriptor);
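Review note: fw_workqueue is created with WQ_MEM_RECLAIM, presumably because this stack's work items can sit in the memory-reclaim path when a FireWire storage device (sbp2) backs a block device; the error paths above and fw_core_cleanup() in the next hunk tear it down again.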
@@ -1214,6 +1292,7 @@ static void __exit fw_core_cleanup(void)
{
unregister_chrdev(fw_cdev_major, "firewire");
bus_unregister(&fw_bus_type);
+ destroy_workqueue(fw_workqueue);
idr_destroy(&fw_device_idr);
}