Commit 8326f5e1 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'firewire-updates-6.11' of...

Merge tag 'firewire-updates-6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394

Pull firewire updates from Takashi Sakamoto:
 "There are many lines of changes for FireWire subsystem, but there is
  practically no functional change.

  Most of the changes are for code refactoring, some KUnit tests for the
  added helper functions, and new tracepoint events for both the core
  functions and the 1394 OHCI driver.

  The tracepoint events now cover the verbose logging enabled by the debug
  parameter of the firewire-ohci kernel module. The parameter will be
  removed at some point in the future, thus it is now deprecated"

* tag 'firewire-updates-6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394: (32 commits)
  firewire: core: move copy_port_status() helper function to TP_fast_assign() block
  Revert "firewire: ohci: use common macro to interpret be32 data in le32 buffer"
  firewire: ohci: add tracepoints event for data of Self-ID DMA
  firewire: ohci: use inline functions to operate data of self-ID DMA
  firewire: ohci: add static inline functions to deserialize for Self-ID DMA operation
  firewire: ohci: use static function to handle endian issue on PowerPC platform
  firewire: ohci: use common macro to interpret be32 data in le32 buffer
  firewire: core: Fix spelling mistakes in tracepoint messages
  firewire: ohci: add tracepoints event for hardIRQ event
  firewire: ohci: add support for Linux kernel tracepoints
  firewire: core: add tracepoints events for completions of packets in isochronous context
  firewire: core: add tracepoints events for queueing packets of isochronous context
  firewire: core: add tracepoints events for flushing completions of isochronous context
  firewire: core: add tracepoints events for flushing of isochronous context
  firewire: core: add tracepoints events for starting/stopping of isochronous context
  firewire: core: add tracepoints events for setting channels of multichannel context
  firewire: core: add tracepoints events for allocation/deallocation of isochronous context
  firewire: core: undefine macros after use in tracepoints events
  firewire: core: record card index in tracepoints event for self ID sequence
  firewire: core: use inline helper functions to serialize phy config packet
  ...
parents 13a78715 06dcc4c9
...@@ -4,3 +4,5 @@ CONFIG_FIREWIRE=y ...@@ -4,3 +4,5 @@ CONFIG_FIREWIRE=y
CONFIG_FIREWIRE_KUNIT_UAPI_TEST=y CONFIG_FIREWIRE_KUNIT_UAPI_TEST=y
CONFIG_FIREWIRE_KUNIT_DEVICE_ATTRIBUTE_TEST=y CONFIG_FIREWIRE_KUNIT_DEVICE_ATTRIBUTE_TEST=y
CONFIG_FIREWIRE_KUNIT_PACKET_SERDES_TEST=y CONFIG_FIREWIRE_KUNIT_PACKET_SERDES_TEST=y
CONFIG_FIREWIRE_KUNIT_SELF_ID_SEQUENCE_HELPER_TEST=y
CONFIG_FIREWIRE_KUNIT_OHCI_SERDES_TEST=y
...@@ -66,6 +66,21 @@ config FIREWIRE_KUNIT_PACKET_SERDES_TEST ...@@ -66,6 +66,21 @@ config FIREWIRE_KUNIT_PACKET_SERDES_TEST
For more information on KUnit and unit tests in general, refer For more information on KUnit and unit tests in general, refer
to the KUnit documentation in Documentation/dev-tools/kunit/. to the KUnit documentation in Documentation/dev-tools/kunit/.
config FIREWIRE_KUNIT_SELF_ID_SEQUENCE_HELPER_TEST
tristate "KUnit tests for helpers of self ID sequence" if !KUNIT_ALL_TESTS
depends on FIREWIRE && KUNIT
default KUNIT_ALL_TESTS
help
This builds the KUnit tests for helpers of self ID sequence.
KUnit tests run during boot and output the results to the debug
log in TAP format (https://testanything.org/). Only useful for
kernel devs running KUnit test harness and are not for inclusion
into a production build.
For more information on KUnit and unit tests in general, refer
to the KUnit documentation in Documentation/dev-tools/kunit/.
config FIREWIRE_OHCI config FIREWIRE_OHCI
tristate "OHCI-1394 controllers" tristate "OHCI-1394 controllers"
depends on PCI && FIREWIRE && MMU depends on PCI && FIREWIRE && MMU
...@@ -77,6 +92,22 @@ config FIREWIRE_OHCI ...@@ -77,6 +92,22 @@ config FIREWIRE_OHCI
To compile this driver as a module, say M here: The module will be To compile this driver as a module, say M here: The module will be
called firewire-ohci. called firewire-ohci.
config FIREWIRE_KUNIT_OHCI_SERDES_TEST
tristate "KUnit tests for serialization/deserialization of data in buffers/registers" if !KUNIT_ALL_TESTS
depends on FIREWIRE && KUNIT
default KUNIT_ALL_TESTS
help
This builds the KUnit tests to check serialization and deserialization
of data in buffers and registers defined in 1394 OHCI specification.
KUnit tests run during boot and output the results to the debug
log in TAP format (https://testanything.org/). Only useful for
kernel devs running KUnit test harness and are not for inclusion
into a production build.
For more information on KUnit and unit tests in general, refer
to the KUnit documentation in Documentation/dev-tools/kunit/.
config FIREWIRE_SBP2 config FIREWIRE_SBP2
tristate "Storage devices (SBP-2 protocol)" tristate "Storage devices (SBP-2 protocol)"
depends on FIREWIRE && SCSI depends on FIREWIRE && SCSI
......
...@@ -18,3 +18,5 @@ obj-$(CONFIG_PROVIDE_OHCI1394_DMA_INIT) += init_ohci1394_dma.o ...@@ -18,3 +18,5 @@ obj-$(CONFIG_PROVIDE_OHCI1394_DMA_INIT) += init_ohci1394_dma.o
obj-$(CONFIG_FIREWIRE_KUNIT_UAPI_TEST) += uapi-test.o obj-$(CONFIG_FIREWIRE_KUNIT_UAPI_TEST) += uapi-test.o
obj-$(CONFIG_FIREWIRE_KUNIT_PACKET_SERDES_TEST) += packet-serdes-test.o obj-$(CONFIG_FIREWIRE_KUNIT_PACKET_SERDES_TEST) += packet-serdes-test.o
obj-$(CONFIG_FIREWIRE_KUNIT_SELF_ID_SEQUENCE_HELPER_TEST) += self-id-sequence-helper-test.o
obj-$(CONFIG_FIREWIRE_KUNIT_OHCI_SERDES_TEST) += ohci-serdes-test.o
...@@ -22,6 +22,8 @@ ...@@ -22,6 +22,8 @@
#include "core.h" #include "core.h"
#include <trace/events/firewire.h>
/* /*
* Isochronous DMA context management * Isochronous DMA context management
*/ */
...@@ -148,12 +150,20 @@ struct fw_iso_context *fw_iso_context_create(struct fw_card *card, ...@@ -148,12 +150,20 @@ struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
ctx->callback.sc = callback; ctx->callback.sc = callback;
ctx->callback_data = callback_data; ctx->callback_data = callback_data;
trace_isoc_outbound_allocate(ctx, channel, speed);
trace_isoc_inbound_single_allocate(ctx, channel, header_size);
trace_isoc_inbound_multiple_allocate(ctx);
return ctx; return ctx;
} }
EXPORT_SYMBOL(fw_iso_context_create); EXPORT_SYMBOL(fw_iso_context_create);
void fw_iso_context_destroy(struct fw_iso_context *ctx) void fw_iso_context_destroy(struct fw_iso_context *ctx)
{ {
trace_isoc_outbound_destroy(ctx);
trace_isoc_inbound_single_destroy(ctx);
trace_isoc_inbound_multiple_destroy(ctx);
ctx->card->driver->free_iso_context(ctx); ctx->card->driver->free_iso_context(ctx);
} }
EXPORT_SYMBOL(fw_iso_context_destroy); EXPORT_SYMBOL(fw_iso_context_destroy);
...@@ -161,12 +171,18 @@ EXPORT_SYMBOL(fw_iso_context_destroy); ...@@ -161,12 +171,18 @@ EXPORT_SYMBOL(fw_iso_context_destroy);
int fw_iso_context_start(struct fw_iso_context *ctx, int fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags) int cycle, int sync, int tags)
{ {
trace_isoc_outbound_start(ctx, cycle);
trace_isoc_inbound_single_start(ctx, cycle, sync, tags);
trace_isoc_inbound_multiple_start(ctx, cycle, sync, tags);
return ctx->card->driver->start_iso(ctx, cycle, sync, tags); return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
} }
EXPORT_SYMBOL(fw_iso_context_start); EXPORT_SYMBOL(fw_iso_context_start);
int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels) int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels)
{ {
trace_isoc_inbound_multiple_channels(ctx, *channels);
return ctx->card->driver->set_iso_channels(ctx, channels); return ctx->card->driver->set_iso_channels(ctx, channels);
} }
...@@ -175,24 +191,40 @@ int fw_iso_context_queue(struct fw_iso_context *ctx, ...@@ -175,24 +191,40 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_buffer *buffer, struct fw_iso_buffer *buffer,
unsigned long payload) unsigned long payload)
{ {
trace_isoc_outbound_queue(ctx, payload, packet);
trace_isoc_inbound_single_queue(ctx, payload, packet);
trace_isoc_inbound_multiple_queue(ctx, payload, packet);
return ctx->card->driver->queue_iso(ctx, packet, buffer, payload); return ctx->card->driver->queue_iso(ctx, packet, buffer, payload);
} }
EXPORT_SYMBOL(fw_iso_context_queue); EXPORT_SYMBOL(fw_iso_context_queue);
void fw_iso_context_queue_flush(struct fw_iso_context *ctx) void fw_iso_context_queue_flush(struct fw_iso_context *ctx)
{ {
trace_isoc_outbound_flush(ctx);
trace_isoc_inbound_single_flush(ctx);
trace_isoc_inbound_multiple_flush(ctx);
ctx->card->driver->flush_queue_iso(ctx); ctx->card->driver->flush_queue_iso(ctx);
} }
EXPORT_SYMBOL(fw_iso_context_queue_flush); EXPORT_SYMBOL(fw_iso_context_queue_flush);
int fw_iso_context_flush_completions(struct fw_iso_context *ctx) int fw_iso_context_flush_completions(struct fw_iso_context *ctx)
{ {
trace_isoc_outbound_flush_completions(ctx);
trace_isoc_inbound_single_flush_completions(ctx);
trace_isoc_inbound_multiple_flush_completions(ctx);
return ctx->card->driver->flush_iso_completions(ctx); return ctx->card->driver->flush_iso_completions(ctx);
} }
EXPORT_SYMBOL(fw_iso_context_flush_completions); EXPORT_SYMBOL(fw_iso_context_flush_completions);
int fw_iso_context_stop(struct fw_iso_context *ctx) int fw_iso_context_stop(struct fw_iso_context *ctx)
{ {
trace_isoc_outbound_stop(ctx);
trace_isoc_inbound_single_stop(ctx);
trace_isoc_inbound_multiple_stop(ctx);
return ctx->card->driver->stop_iso(ctx); return ctx->card->driver->stop_iso(ctx);
} }
EXPORT_SYMBOL(fw_iso_context_stop); EXPORT_SYMBOL(fw_iso_context_stop);
......
...@@ -20,84 +20,9 @@ ...@@ -20,84 +20,9 @@
#include <asm/byteorder.h> #include <asm/byteorder.h>
#include "core.h" #include "core.h"
#include "phy-packet-definitions.h"
#include <trace/events/firewire.h> #include <trace/events/firewire.h>
#define SELF_ID_PHY_ID(q) (((q) >> 24) & 0x3f)
#define SELF_ID_EXTENDED(q) (((q) >> 23) & 0x01)
#define SELF_ID_LINK_ON(q) (((q) >> 22) & 0x01)
#define SELF_ID_GAP_COUNT(q) (((q) >> 16) & 0x3f)
#define SELF_ID_PHY_SPEED(q) (((q) >> 14) & 0x03)
#define SELF_ID_CONTENDER(q) (((q) >> 11) & 0x01)
#define SELF_ID_PHY_INITIATOR(q) (((q) >> 1) & 0x01)
#define SELF_ID_MORE_PACKETS(q) (((q) >> 0) & 0x01)
#define SELF_ID_EXT_SEQUENCE(q) (((q) >> 20) & 0x07)
#define SELFID_PORT_CHILD 0x3
#define SELFID_PORT_PARENT 0x2
#define SELFID_PORT_NCONN 0x1
#define SELFID_PORT_NONE 0x0
static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
{
u32 q;
int port_type, shift, seq;
*total_port_count = 0;
*child_port_count = 0;
shift = 6;
q = *sid;
seq = 0;
while (1) {
port_type = (q >> shift) & 0x03;
switch (port_type) {
case SELFID_PORT_CHILD:
(*child_port_count)++;
fallthrough;
case SELFID_PORT_PARENT:
case SELFID_PORT_NCONN:
(*total_port_count)++;
fallthrough;
case SELFID_PORT_NONE:
break;
}
shift -= 2;
if (shift == 0) {
if (!SELF_ID_MORE_PACKETS(q))
return sid + 1;
shift = 16;
sid++;
q = *sid;
/*
* Check that the extra packets actually are
* extended self ID packets and that the
* sequence numbers in the extended self ID
* packets increase as expected.
*/
if (!SELF_ID_EXTENDED(q) ||
seq != SELF_ID_EXT_SEQUENCE(q))
return NULL;
seq++;
}
}
}
static int get_port_type(u32 *sid, int port_index)
{
int index, shift;
index = (port_index + 5) / 8;
shift = 16 - ((port_index + 5) & 7) * 2;
return (sid[index] >> shift) & 0x03;
}
static struct fw_node *fw_node_create(u32 sid, int port_count, int color) static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
{ {
struct fw_node *node; struct fw_node *node;
...@@ -107,10 +32,11 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color) ...@@ -107,10 +32,11 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
return NULL; return NULL;
node->color = color; node->color = color;
node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid); node->node_id = LOCAL_BUS | phy_packet_self_id_get_phy_id(sid);
node->link_on = SELF_ID_LINK_ON(sid); node->link_on = phy_packet_self_id_zero_get_link_active(sid);
node->phy_speed = SELF_ID_PHY_SPEED(sid); // NOTE: Only two bits, thus only for SCODE_100, SCODE_200, SCODE_400, and SCODE_BETA.
node->initiated_reset = SELF_ID_PHY_INITIATOR(sid); node->phy_speed = phy_packet_self_id_zero_get_scode(sid);
node->initiated_reset = phy_packet_self_id_zero_get_initiated_reset(sid);
node->port_count = port_count; node->port_count = port_count;
refcount_set(&node->ref_count, 1); refcount_set(&node->ref_count, 1);
...@@ -169,13 +95,16 @@ static inline struct fw_node *fw_node(struct list_head *l) ...@@ -169,13 +95,16 @@ static inline struct fw_node *fw_node(struct list_head *l)
* internally consistent. On success this function returns the * internally consistent. On success this function returns the
* fw_node corresponding to the local card otherwise NULL. * fw_node corresponding to the local card otherwise NULL.
*/ */
static struct fw_node *build_tree(struct fw_card *card, static struct fw_node *build_tree(struct fw_card *card, const u32 *sid, int self_id_count,
u32 *sid, int self_id_count) unsigned int generation)
{ {
struct self_id_sequence_enumerator enumerator = {
.cursor = sid,
.quadlet_count = self_id_count,
};
struct fw_node *node, *child, *local_node, *irm_node; struct fw_node *node, *child, *local_node, *irm_node;
struct list_head stack, *h; struct list_head stack;
u32 *next_sid, *end, q; int phy_id, stack_depth;
int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
int gap_count; int gap_count;
bool beta_repeaters_present; bool beta_repeaters_present;
...@@ -183,24 +112,56 @@ static struct fw_node *build_tree(struct fw_card *card, ...@@ -183,24 +112,56 @@ static struct fw_node *build_tree(struct fw_card *card,
node = NULL; node = NULL;
INIT_LIST_HEAD(&stack); INIT_LIST_HEAD(&stack);
stack_depth = 0; stack_depth = 0;
end = sid + self_id_count;
phy_id = 0; phy_id = 0;
irm_node = NULL; irm_node = NULL;
gap_count = SELF_ID_GAP_COUNT(*sid); gap_count = phy_packet_self_id_zero_get_gap_count(*sid);
beta_repeaters_present = false; beta_repeaters_present = false;
while (sid < end) { while (enumerator.quadlet_count > 0) {
next_sid = count_ports(sid, &port_count, &child_port_count); unsigned int child_port_count = 0;
unsigned int total_port_count = 0;
unsigned int parent_count = 0;
unsigned int quadlet_count;
const u32 *self_id_sequence;
unsigned int port_capacity;
enum phy_packet_self_id_port_status port_status;
unsigned int port_index;
struct list_head *h;
int i;
self_id_sequence = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
if (IS_ERR(self_id_sequence)) {
if (PTR_ERR(self_id_sequence) != -ENODATA) {
fw_err(card, "inconsistent extended self IDs: %ld\n",
PTR_ERR(self_id_sequence));
return NULL;
}
break;
}
if (next_sid == NULL) { port_capacity = self_id_sequence_get_port_capacity(quadlet_count);
fw_err(card, "inconsistent extended self IDs\n"); trace_self_id_sequence(card->index, self_id_sequence, quadlet_count, generation);
return NULL;
for (port_index = 0; port_index < port_capacity; ++port_index) {
port_status = self_id_sequence_get_port_status(self_id_sequence, quadlet_count,
port_index);
switch (port_status) {
case PHY_PACKET_SELF_ID_PORT_STATUS_CHILD:
++child_port_count;
fallthrough;
case PHY_PACKET_SELF_ID_PORT_STATUS_PARENT:
case PHY_PACKET_SELF_ID_PORT_STATUS_NCONN:
++total_port_count;
fallthrough;
case PHY_PACKET_SELF_ID_PORT_STATUS_NONE:
default:
break;
}
} }
q = *sid; if (phy_id != phy_packet_self_id_get_phy_id(self_id_sequence[0])) {
if (phy_id != SELF_ID_PHY_ID(q)) {
fw_err(card, "PHY ID mismatch in self ID: %d != %d\n", fw_err(card, "PHY ID mismatch in self ID: %d != %d\n",
phy_id, SELF_ID_PHY_ID(q)); phy_id, phy_packet_self_id_get_phy_id(self_id_sequence[0]));
return NULL; return NULL;
} }
...@@ -221,7 +182,7 @@ static struct fw_node *build_tree(struct fw_card *card, ...@@ -221,7 +182,7 @@ static struct fw_node *build_tree(struct fw_card *card,
*/ */
child = fw_node(h); child = fw_node(h);
node = fw_node_create(q, port_count, card->color); node = fw_node_create(self_id_sequence[0], total_port_count, card->color);
if (node == NULL) { if (node == NULL) {
fw_err(card, "out of memory while building topology\n"); fw_err(card, "out of memory while building topology\n");
return NULL; return NULL;
...@@ -230,48 +191,40 @@ static struct fw_node *build_tree(struct fw_card *card, ...@@ -230,48 +191,40 @@ static struct fw_node *build_tree(struct fw_card *card,
if (phy_id == (card->node_id & 0x3f)) if (phy_id == (card->node_id & 0x3f))
local_node = node; local_node = node;
if (SELF_ID_CONTENDER(q)) if (phy_packet_self_id_zero_get_contender(self_id_sequence[0]))
irm_node = node; irm_node = node;
parent_count = 0; for (port_index = 0; port_index < total_port_count; ++port_index) {
port_status = self_id_sequence_get_port_status(self_id_sequence, quadlet_count,
for (i = 0; i < port_count; i++) { port_index);
switch (get_port_type(sid, i)) { switch (port_status) {
case SELFID_PORT_PARENT: case PHY_PACKET_SELF_ID_PORT_STATUS_PARENT:
/* // Who's your daddy? We dont know the parent node at this time, so
* Who's your daddy? We dont know the // we temporarily abuse node->color for remembering the entry in
* parent node at this time, so we // the node->ports array where the parent node should be. Later,
* temporarily abuse node->color for // when we handle the parent node, we fix up the reference.
* remembering the entry in the ++parent_count;
* node->ports array where the parent
* node should be. Later, when we
* handle the parent node, we fix up
* the reference.
*/
parent_count++;
node->color = i; node->color = i;
break; break;
case SELFID_PORT_CHILD: case PHY_PACKET_SELF_ID_PORT_STATUS_CHILD:
node->ports[i] = child; node->ports[port_index] = child;
/* // Fix up parent reference for this child node.
* Fix up parent reference for this
* child node.
*/
child->ports[child->color] = node; child->ports[child->color] = node;
child->color = card->color; child->color = card->color;
child = fw_node(child->link.next); child = fw_node(child->link.next);
break; break;
case PHY_PACKET_SELF_ID_PORT_STATUS_NCONN:
case PHY_PACKET_SELF_ID_PORT_STATUS_NONE:
default:
break;
} }
} }
/* // Check that the node reports exactly one parent port, except for the root, which
* Check that the node reports exactly one parent // of course should have no parents.
* port, except for the root, which of course should if ((enumerator.quadlet_count == 0 && parent_count != 0) ||
* have no parents. (enumerator.quadlet_count > 0 && parent_count != 1)) {
*/
if ((next_sid == end && parent_count != 0) ||
(next_sid < end && parent_count != 1)) {
fw_err(card, "parent port inconsistency for node %d: " fw_err(card, "parent port inconsistency for node %d: "
"parent_count=%d\n", phy_id, parent_count); "parent_count=%d\n", phy_id, parent_count);
return NULL; return NULL;
...@@ -282,20 +235,16 @@ static struct fw_node *build_tree(struct fw_card *card, ...@@ -282,20 +235,16 @@ static struct fw_node *build_tree(struct fw_card *card,
list_add_tail(&node->link, &stack); list_add_tail(&node->link, &stack);
stack_depth += 1 - child_port_count; stack_depth += 1 - child_port_count;
if (node->phy_speed == SCODE_BETA && if (node->phy_speed == SCODE_BETA && parent_count + child_port_count > 1)
parent_count + child_port_count > 1)
beta_repeaters_present = true; beta_repeaters_present = true;
/* // If PHYs report different gap counts, set an invalid count which will force a gap
* If PHYs report different gap counts, set an invalid count // count reconfiguration and a reset.
* which will force a gap count reconfiguration and a reset. if (phy_packet_self_id_zero_get_gap_count(self_id_sequence[0]) != gap_count)
*/
if (SELF_ID_GAP_COUNT(q) != gap_count)
gap_count = 0; gap_count = 0;
update_hop_count(node); update_hop_count(node);
sid = next_sid;
phy_id++; phy_id++;
} }
...@@ -536,7 +485,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, ...@@ -536,7 +485,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
card->bm_abdicate = bm_abdicate; card->bm_abdicate = bm_abdicate;
fw_schedule_bm_work(card, 0); fw_schedule_bm_work(card, 0);
local_node = build_tree(card, self_ids, self_id_count); local_node = build_tree(card, self_ids, self_id_count, generation);
update_topology_map(card, self_ids, self_id_count); update_topology_map(card, self_ids, self_id_count);
......
// SPDX-License-Identifier: GPL-2.0-or-later // SPDX-License-Identifier: GPL-2.0-or-later
// Copyright (c) 2024 Takashi Sakamoto // Copyright (c) 2024 Takashi Sakamoto
#include <linux/types.h>
#include <linux/err.h>
#include "packet-header-definitions.h"
#include "phy-packet-definitions.h"
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
#include <trace/events/firewire.h> #include <trace/events/firewire.h>
#ifdef TRACEPOINTS_ENABLED
EXPORT_TRACEPOINT_SYMBOL_GPL(isoc_inbound_single_completions);
EXPORT_TRACEPOINT_SYMBOL_GPL(isoc_inbound_multiple_completions);
EXPORT_TRACEPOINT_SYMBOL_GPL(isoc_outbound_completions);
#endif
...@@ -29,20 +29,13 @@ ...@@ -29,20 +29,13 @@
#include <asm/byteorder.h> #include <asm/byteorder.h>
#include "core.h" #include "core.h"
#include <trace/events/firewire.h>
#include "packet-header-definitions.h" #include "packet-header-definitions.h"
#include "phy-packet-definitions.h"
#include <trace/events/firewire.h>
#define HEADER_DESTINATION_IS_BROADCAST(header) \ #define HEADER_DESTINATION_IS_BROADCAST(header) \
((async_header_get_destination(header) & 0x3f) == 0x3f) ((async_header_get_destination(header) & 0x3f) == 0x3f)
#define PHY_PACKET_CONFIG 0x0
#define PHY_PACKET_LINK_ON 0x1
#define PHY_PACKET_SELF_ID 0x2
#define PHY_CONFIG_GAP_COUNT(gap_count) (((gap_count) << 16) | (1 << 22))
#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
#define PHY_IDENTIFIER(id) ((id) << 30)
/* returns 0 if the split timeout handler is already running */ /* returns 0 if the split timeout handler is already running */
static int try_cancel_split_timeout(struct fw_transaction *t) static int try_cancel_split_timeout(struct fw_transaction *t)
{ {
...@@ -481,10 +474,14 @@ void fw_send_phy_config(struct fw_card *card, ...@@ -481,10 +474,14 @@ void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count) int node_id, int generation, int gap_count)
{ {
long timeout = DIV_ROUND_UP(HZ, 10); long timeout = DIV_ROUND_UP(HZ, 10);
u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG); u32 data = 0;
phy_packet_set_packet_identifier(&data, PHY_PACKET_PACKET_IDENTIFIER_PHY_CONFIG);
if (node_id != FW_PHY_CONFIG_NO_NODE_ID) if (node_id != FW_PHY_CONFIG_NO_NODE_ID) {
data |= PHY_CONFIG_ROOT_ID(node_id); phy_packet_phy_config_set_root_id(&data, node_id);
phy_packet_phy_config_set_force_root_node(&data, true);
}
if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) { if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) {
gap_count = card->driver->read_phy_reg(card, 1); gap_count = card->driver->read_phy_reg(card, 1);
...@@ -495,7 +492,8 @@ void fw_send_phy_config(struct fw_card *card, ...@@ -495,7 +492,8 @@ void fw_send_phy_config(struct fw_card *card,
if (gap_count == 63) if (gap_count == 63)
return; return;
} }
data |= PHY_CONFIG_GAP_COUNT(gap_count); phy_packet_phy_config_set_gap_count(&data, gap_count);
phy_packet_phy_config_set_gap_count_optimization(&data, true);
mutex_lock(&phy_config_mutex); mutex_lock(&phy_config_mutex);
......
// SPDX-License-Identifier: GPL-2.0-or-later
//
// ohci-serdes-test.c - An application of Kunit to check serialization/deserialization of data in
// buffers and registers defined in 1394 OHCI specification.
//
// Copyright (c) 2024 Takashi Sakamoto
#include <kunit/test.h>
#include "ohci.h"
static void test_self_id_count_register_deserialization(struct kunit *test)
{
	// Sample SelfIDCount register value: error bit set, generation 0x3d,
	// size field 0x165 quadlets.
	const u32 reg = 0x803d0594;

	bool has_error = ohci1394_self_id_count_is_error(reg);
	u8 gen = ohci1394_self_id_count_get_generation(reg);
	u32 quadlet_count = ohci1394_self_id_count_get_size(reg);

	KUNIT_EXPECT_TRUE(test, has_error);
	KUNIT_EXPECT_EQ(test, 0x3d, gen);
	KUNIT_EXPECT_EQ(test, 0x165, quadlet_count);
}
static void test_self_id_receive_buffer_deserialization(struct kunit *test)
{
	// Sample Self-ID receive buffer; the deserializers under test only
	// inspect the first quadlet.
	const u32 buffer[] = {
		0x0006f38b,
		0x807fcc56,
		0x7f8033a9,
		0x8145cc5e,
		0x7eba33a1,
	};

	u8 gen = ohci1394_self_id_receive_q0_get_generation(buffer[0]);
	u16 time = ohci1394_self_id_receive_q0_get_timestamp(buffer[0]);

	KUNIT_EXPECT_EQ(test, 0x6, gen);
	KUNIT_EXPECT_EQ(test, 0xf38b, time);
}
// Table of test cases executed by the KUnit framework.
static struct kunit_case ohci_serdes_test_cases[] = {
	KUNIT_CASE(test_self_id_count_register_deserialization),
	KUNIT_CASE(test_self_id_receive_buffer_deserialization),
	{}
};

// Suite registration for the 1394 OHCI serialization/deserialization tests.
static struct kunit_suite ohci_serdes_test_suite = {
	.name = "firewire-ohci-serdes",
	.test_cases = ohci_serdes_test_cases,
};

kunit_test_suite(ohci_serdes_test_suite);
MODULE_DESCRIPTION("FireWire buffers and registers serialization/deserialization unit test suite");
MODULE_LICENSE("GPL");
This diff is collapsed.
...@@ -31,7 +31,6 @@ ...@@ -31,7 +31,6 @@
#define OHCI1394_HCControl_softReset 0x00010000 #define OHCI1394_HCControl_softReset 0x00010000
#define OHCI1394_SelfIDBuffer 0x064 #define OHCI1394_SelfIDBuffer 0x064
#define OHCI1394_SelfIDCount 0x068 #define OHCI1394_SelfIDCount 0x068
#define OHCI1394_SelfIDCount_selfIDError 0x80000000
#define OHCI1394_IRMultiChanMaskHiSet 0x070 #define OHCI1394_IRMultiChanMaskHiSet 0x070
#define OHCI1394_IRMultiChanMaskHiClear 0x074 #define OHCI1394_IRMultiChanMaskHiClear 0x074
#define OHCI1394_IRMultiChanMaskLoSet 0x078 #define OHCI1394_IRMultiChanMaskLoSet 0x078
...@@ -156,4 +155,46 @@ ...@@ -156,4 +155,46 @@
#define OHCI1394_phy_tcode 0xe #define OHCI1394_phy_tcode 0xe
// Self-ID DMA.
// Bit fields of the SelfIDCount register.
#define OHCI1394_SelfIDCount_selfIDError_MASK		0x80000000
#define OHCI1394_SelfIDCount_selfIDError_SHIFT		31
#define OHCI1394_SelfIDCount_selfIDGeneration_MASK	0x00ff0000
#define OHCI1394_SelfIDCount_selfIDGeneration_SHIFT	16
#define OHCI1394_SelfIDCount_selfIDSize_MASK		0x000007fc
#define OHCI1394_SelfIDCount_selfIDSize_SHIFT		2
// Whether the selfIDError bit is set in the given SelfIDCount register value.
static inline bool ohci1394_self_id_count_is_error(u32 value)
{
	u32 field = value & OHCI1394_SelfIDCount_selfIDError_MASK;

	return (field >> OHCI1394_SelfIDCount_selfIDError_SHIFT) != 0;
}
// Extract the 8-bit selfIDGeneration field from a SelfIDCount register value.
static inline u8 ohci1394_self_id_count_get_generation(u32 value)
{
	u32 field = value & OHCI1394_SelfIDCount_selfIDGeneration_MASK;

	return field >> OHCI1394_SelfIDCount_selfIDGeneration_SHIFT;
}
// In 1394 OHCI specification, the maximum size of self ID stream is 504 quadlets
// (= 63 devices * 4 self ID packets * 2 quadlets). The selfIDSize field accommodates it and its
// additional first quadlet, since the field is 9 bits (0x1ff = 511).
// Extract the 9-bit selfIDSize field, expressed in quadlets.
static inline u32 ohci1394_self_id_count_get_size(u32 value)
{
	u32 field = value & OHCI1394_SelfIDCount_selfIDSize_MASK;

	return field >> OHCI1394_SelfIDCount_selfIDSize_SHIFT;
}
// Bit fields of the first quadlet put into the Self-ID receive buffer.
#define OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_MASK	0x00ff0000
#define OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_SHIFT	16
#define OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_MASK	0x0000ffff
#define OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_SHIFT	0
// Extract the 8-bit generation field from quadlet 0 of the receive buffer.
static inline u8 ohci1394_self_id_receive_q0_get_generation(u32 quadlet0)
{
	u32 field = quadlet0 & OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_MASK;

	return field >> OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_SHIFT;
}
// Extract the 16-bit timestamp field from quadlet 0 of the receive buffer.
static inline u16 ohci1394_self_id_receive_q0_get_timestamp(u32 quadlet0)
{
	u32 field = quadlet0 & OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_MASK;

	return field >> OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_SHIFT;
}
#endif /* _FIREWIRE_OHCI_H */ #endif /* _FIREWIRE_OHCI_H */
...@@ -7,6 +7,8 @@ ...@@ -7,6 +7,8 @@
#ifndef _FIREWIRE_PACKET_HEADER_DEFINITIONS_H #ifndef _FIREWIRE_PACKET_HEADER_DEFINITIONS_H
#define _FIREWIRE_PACKET_HEADER_DEFINITIONS_H #define _FIREWIRE_PACKET_HEADER_DEFINITIONS_H
#include <linux/types.h>
#define ASYNC_HEADER_QUADLET_COUNT 4 #define ASYNC_HEADER_QUADLET_COUNT 4
#define ASYNC_HEADER_Q0_DESTINATION_SHIFT 16 #define ASYNC_HEADER_Q0_DESTINATION_SHIFT 16
......
This diff is collapsed.
This diff is collapsed.
// SPDX-License-Identifier: GPL-2.0-or-later
//
// self-id-sequence-helper-test.c - An application of Kunit to test helpers of self ID sequence.
//
// Copyright (c) 2024 Takashi Sakamoto
#include <kunit/test.h>
#include "phy-packet-definitions.h"
static void test_self_id_sequence_enumerator_valid(struct kunit *test)
{
	// Four sequences of 1, 2, 3, and 1 quadlet(s). The LSB of each
	// quadlet is the "more packets" bit chaining extended quadlets to
	// their leading one.
	static const u32 valid_sequences[] = {
		0x00000000,
		0x00000001, 0x00800000,
		0x00000001, 0x00800001, 0x00900000,
		0x00000000,
	};
	struct self_id_sequence_enumerator enumerator;
	const u32 *entry;
	unsigned int quadlet_count;

	enumerator.cursor = valid_sequences;
	enumerator.quadlet_count = ARRAY_SIZE(valid_sequences);

	// First sequence: a single quadlet.
	entry = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
	KUNIT_EXPECT_PTR_EQ(test, entry, &valid_sequences[0]);
	KUNIT_EXPECT_EQ(test, quadlet_count, 1);
	KUNIT_EXPECT_EQ(test, enumerator.quadlet_count, 6);

	// Second sequence: two quadlets.
	entry = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
	KUNIT_EXPECT_PTR_EQ(test, entry, &valid_sequences[1]);
	KUNIT_EXPECT_EQ(test, quadlet_count, 2);
	KUNIT_EXPECT_EQ(test, enumerator.quadlet_count, 4);

	// Third sequence: three quadlets.
	entry = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
	KUNIT_EXPECT_PTR_EQ(test, entry, &valid_sequences[3]);
	KUNIT_EXPECT_EQ(test, quadlet_count, 3);
	KUNIT_EXPECT_EQ(test, enumerator.quadlet_count, 1);

	// Fourth sequence: a single quadlet, exhausting the buffer.
	entry = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
	KUNIT_EXPECT_PTR_EQ(test, entry, &valid_sequences[6]);
	KUNIT_EXPECT_EQ(test, quadlet_count, 1);
	KUNIT_EXPECT_EQ(test, enumerator.quadlet_count, 0);

	// A further call reports that no data remains.
	entry = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
	KUNIT_EXPECT_EQ(test, PTR_ERR(entry), -ENODATA);
}
static void test_self_id_sequence_enumerator_invalid(struct kunit *test)
{
static const u32 invalid_sequences[] = {
0x00000001,
};
struct self_id_sequence_enumerator enumerator;
const u32 *entry;
unsigned int count;
enumerator.cursor = invalid_sequences;
enumerator.quadlet_count = ARRAY_SIZE(invalid_sequences);
entry = self_id_sequence_enumerator_next(&enumerator, &count);
KUNIT_EXPECT_EQ(test, PTR_ERR(entry), -EPROTO);
}
// Check that reading port status fields out of a reference self ID sequence
// and writing them back into a blank sequence reproduces the reference, and
// that an out-of-range port index reads back as NONE.
static void test_self_id_sequence_get_port_status(struct kunit *test)
{
	static const u32 expected[] = {
		0x000000e5,
		0x00839e79,
		0x0091e79d,
		0x00a279e4,
	};
	// Blank sequence with the same chaining bits as `expected`; the loop
	// below fills in the port status fields.
	u32 quadlets[] = {
		0x00000001,
		0x00800001,
		0x00900001,
		0x00a00000,
	};
	enum phy_packet_self_id_port_status port_status[28];
	unsigned int port_capacity;
	unsigned int port_index;

	KUNIT_ASSERT_EQ(test, ARRAY_SIZE(expected), ARRAY_SIZE(quadlets));

	// With an extra port.
	port_capacity = self_id_sequence_get_port_capacity(ARRAY_SIZE(expected)) + 1;
	KUNIT_ASSERT_EQ(test, port_capacity, ARRAY_SIZE(port_status));

	for (port_index = 0; port_index < port_capacity; ++port_index) {
		port_status[port_index] =
			self_id_sequence_get_port_status(expected, ARRAY_SIZE(expected), port_index);
		self_id_sequence_set_port_status(quadlets, ARRAY_SIZE(quadlets), port_index,
						 port_status[port_index]);
	}

	// Self ID zero.
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[0]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[1]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[2]);

	// Self ID one.
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[3]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[4]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[5]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[6]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[7]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[8]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[9]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[10]);

	// Self ID two.
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[11]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[12]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[13]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[14]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[15]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[16]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[17]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[18]);

	// Self ID three.
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[19]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[20]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[21]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[22]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[23]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[24]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[25]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[26]);

	// Out of order.
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[27]);

	KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected));
}
// Table of KUnit cases for the self-ID sequence helper functions;
// terminated by the empty sentinel entry.
static struct kunit_case self_id_sequence_helper_test_cases[] = {
KUNIT_CASE(test_self_id_sequence_enumerator_valid),
KUNIT_CASE(test_self_id_sequence_enumerator_invalid),
KUNIT_CASE(test_self_id_sequence_get_port_status),
{}
};
// KUnit suite descriptor, registered below via kunit_test_suite().
static struct kunit_suite self_id_sequence_helper_test_suite = {
.name = "self-id-sequence-helper",
.test_cases = self_id_sequence_helper_test_cases,
};
kunit_test_suite(self_id_sequence_helper_test_suite);
MODULE_DESCRIPTION("Unit test suite for helpers of self ID sequence");
MODULE_LICENSE("GPL");
This diff is collapsed.
// SPDX-License-Identifier: GPL-2.0-or-later
// Copyright (c) 2024 Takashi Sakamoto
#undef TRACE_SYSTEM
#define TRACE_SYSTEM firewire_ohci
#if !defined(_FIREWIRE_OHCI_TRACE_EVENT_H) || defined(TRACE_HEADER_MULTI_READ)
#define _FIREWIRE_OHCI_TRACE_EVENT_H
#include <linux/tracepoint.h>
// Some macros and helper functions are defined in 'drivers/firewire/ohci.c'.
// Tracepoint for interrupt events of the 1394 OHCI controller; each set
// bit of the event mask is printed by its OHCI1394_* symbolic name.
// NOTE(review): per the subsystem changelog this is emitted in hardIRQ
// context — confirm against the caller in ohci.c.
TRACE_EVENT(irqs,
TP_PROTO(unsigned int card_index, u32 events),
TP_ARGS(card_index, events),
TP_STRUCT__entry(
// Index of the card; stored as u8 to keep the record compact.
__field(u8, card_index)
// Raw bitmask of interrupt events being handled.
__field(u32, events)
),
TP_fast_assign(
__entry->card_index = card_index;
__entry->events = events;
),
TP_printk(
"card_index=%u events=%s",
__entry->card_index,
// Render each set bit by its symbolic interrupt event name,
// joined with '|'.
__print_flags(__entry->events, "|",
{ OHCI1394_selfIDComplete, "selfIDComplete" },
{ OHCI1394_RQPkt, "RQPkt" },
{ OHCI1394_RSPkt, "RSPkt" },
{ OHCI1394_reqTxComplete, "reqTxComplete" },
{ OHCI1394_respTxComplete, "respTxComplete" },
{ OHCI1394_isochRx, "isochRx" },
{ OHCI1394_isochTx, "isochTx" },
{ OHCI1394_postedWriteErr, "postedWriteErr" },
{ OHCI1394_cycleTooLong, "cycleTooLong" },
{ OHCI1394_cycle64Seconds, "cycle64Seconds" },
{ OHCI1394_cycleInconsistent, "cycleInconsistent" },
{ OHCI1394_regAccessFail, "regAccessFail" },
{ OHCI1394_unrecoverableError, "unrecoverableError" },
{ OHCI1394_busReset, "busReset" }
)
)
);
// Number of bytes per IEEE 1394 quadlet.
#define QUADLET_SIZE 4

// Whether the selfIDError flag is set in a SelfIDCount register value.
#define SELF_ID_COUNT_IS_ERROR(reg) \
(!!(((reg) & OHCI1394_SelfIDCount_selfIDError_MASK) >> OHCI1394_SelfIDCount_selfIDError_SHIFT))

// Bus generation field of a SelfIDCount register value.
#define SELF_ID_COUNT_GET_GENERATION(reg) \
(((reg) & OHCI1394_SelfIDCount_selfIDGeneration_MASK) >> OHCI1394_SelfIDCount_selfIDGeneration_SHIFT)

// Generation field of quadlet 0 of the self-ID receive buffer.
#define SELF_ID_RECEIVE_Q0_GET_GENERATION(quadlet) \
(((quadlet) & OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_MASK) >> OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_SHIFT)

// Timestamp field of quadlet 0 of the self-ID receive buffer.
#define SELF_ID_RECEIVE_Q0_GET_TIMESTAMP(quadlet) \
(((quadlet) & OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_MASK) >> OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_SHIFT)
// Tracepoint for completion of Self-ID DMA: captures the SelfIDCount
// register value and a CPU-order copy of the self-ID receive buffer.
TRACE_EVENT(self_id_complete,
TP_PROTO(unsigned int card_index, u32 reg, const __le32 *self_id_receive, bool has_be_header_quirk),
TP_ARGS(card_index, reg, self_id_receive, has_be_header_quirk),
TP_STRUCT__entry(
__field(u8, card_index)
// Value of the SelfIDCount register at completion.
__field(u32, reg)
// Quadlet copy of the receive buffer; its length is derived from the
// size field of the SelfIDCount register value.
__dynamic_array(u32, self_id_receive, ohci1394_self_id_count_get_size(reg))
),
TP_fast_assign(
__entry->card_index = card_index;
__entry->reg = reg;
{
u32 *ptr = __get_dynamic_array(self_id_receive);
int i;
// cond_le32_to_cpu() accounts for controllers with the big-endian
// header quirk when converting each quadlet to CPU byte order.
for (i = 0; i < __get_dynamic_array_len(self_id_receive) / QUADLET_SIZE; ++i)
ptr[i] = cond_le32_to_cpu(self_id_receive[i], has_be_header_quirk);
}
),
TP_printk(
"card_index=%u is_error=%s generation_at_bus_reset=%u generation_at_completion=%u timestamp=0x%04x packet_data=%s",
__entry->card_index,
SELF_ID_COUNT_IS_ERROR(__entry->reg) ? "true" : "false",
SELF_ID_COUNT_GET_GENERATION(__entry->reg),
// Quadlet 0 carries the generation and timestamp of the receive
// operation; the self-ID packets proper start at quadlet 1.
SELF_ID_RECEIVE_Q0_GET_GENERATION(((const u32 *)__get_dynamic_array(self_id_receive))[0]),
SELF_ID_RECEIVE_Q0_GET_TIMESTAMP(((const u32 *)__get_dynamic_array(self_id_receive))[0]),
__print_array(((const u32 *)__get_dynamic_array(self_id_receive)) + 1,
(__get_dynamic_array_len(self_id_receive) / QUADLET_SIZE) - 1, QUADLET_SIZE)
)
);
#undef SELF_ID_COUNT_IS_ERROR
#undef SELF_ID_COUNT_GET_GENERATION
#undef SELF_ID_RECEIVE_Q0_GET_GENERATION
#undef SELF_ID_RECEIVE_Q0_GET_TIMESTAMP
#undef QUADLET_SIZE
#endif // _FIREWIRE_OHCI_TRACE_EVENT_H
#include <trace/define_trace.h>
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment