Commit 8326f5e1 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'firewire-updates-6.11' of...

Merge tag 'firewire-updates-6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394

Pull firewire updates from Takashi Sakamoto:
 "There are many lines of changes for FireWire subsystem, but there is
  practically no functional change.

  Most of the changes are for code refactoring, some KUnit tests for
  the newly added helper functions, and new tracepoint events for both
  the core functions and the 1394 OHCI driver.

  The tracepoint events now cover the verbose logging enabled by the
  debug parameter of the firewire-ohci kernel module. The parameter
  will be removed at some point in the future, thus it is now deprecated"

* tag 'firewire-updates-6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394: (32 commits)
  firewire: core: move copy_port_status() helper function to TP_fast_assign() block
  Revert "firewire: ohci: use common macro to interpret be32 data in le32 buffer"
  firewire: ohci: add tracepoints event for data of Self-ID DMA
  firewire: ohci: use inline functions to operate data of self-ID DMA
  firewire: ohci: add static inline functions to deserialize for Self-ID DMA operation
  firewire: ohci: use static function to handle endian issue on PowerPC platform
  firewire: ohci: use common macro to interpret be32 data in le32 buffer
  firewire: core: Fix spelling mistakes in tracepoint messages
  firewire: ohci: add tracepoints event for hardIRQ event
  firewire: ohci: add support for Linux kernel tracepoints
  firewire: core: add tracepoints events for completions of packets in isochronous context
  firewire: core: add tracepoints events for queueing packets of isochronous context
  firewire: core: add tracepoints events for flushing completions of isochronous context
  firewire: core: add tracepoints events for flushing of isochronous context
  firewire: core: add tracepoints events for starting/stopping of isochronous context
  firewire: core: add tracepoints events for setting channels of multichannel context
  firewire: core: add tracepoints events for allocation/deallocation of isochronous context
  firewire: core: undefine macros after use in tracepoints events
  firewire: core: record card index in tracepoints event for self ID sequence
  firewire: core: use inline helper functions to serialize phy config packet
  ...
parents 13a78715 06dcc4c9
......@@ -4,3 +4,5 @@ CONFIG_FIREWIRE=y
CONFIG_FIREWIRE_KUNIT_UAPI_TEST=y
CONFIG_FIREWIRE_KUNIT_DEVICE_ATTRIBUTE_TEST=y
CONFIG_FIREWIRE_KUNIT_PACKET_SERDES_TEST=y
CONFIG_FIREWIRE_KUNIT_SELF_ID_SEQUENCE_HELPER_TEST=y
CONFIG_FIREWIRE_KUNIT_OHCI_SERDES_TEST=y
......@@ -66,6 +66,21 @@ config FIREWIRE_KUNIT_PACKET_SERDES_TEST
For more information on KUnit and unit tests in general, refer
to the KUnit documentation in Documentation/dev-tools/kunit/.
config FIREWIRE_KUNIT_SELF_ID_SEQUENCE_HELPER_TEST
tristate "KUnit tests for helpers of self ID sequence" if !KUNIT_ALL_TESTS
depends on FIREWIRE && KUNIT
default KUNIT_ALL_TESTS
help
This builds the KUnit tests for helpers of self ID sequence.
KUnit tests run during boot and output the results to the debug
log in TAP format (https://testanything.org/). Only useful for
kernel devs running the KUnit test harness and is not for inclusion
into a production build.
For more information on KUnit and unit tests in general, refer
to the KUnit documentation in Documentation/dev-tools/kunit/.
config FIREWIRE_OHCI
tristate "OHCI-1394 controllers"
depends on PCI && FIREWIRE && MMU
......@@ -77,6 +92,22 @@ config FIREWIRE_OHCI
To compile this driver as a module, say M here: The module will be
called firewire-ohci.
config FIREWIRE_KUNIT_OHCI_SERDES_TEST
tristate "KUnit tests for serialization/deserialization of data in buffers/registers" if !KUNIT_ALL_TESTS
depends on FIREWIRE && KUNIT
default KUNIT_ALL_TESTS
help
This builds the KUnit tests to check serialization and deserialization
of data in buffers and registers defined in 1394 OHCI specification.
KUnit tests run during boot and output the results to the debug
log in TAP format (https://testanything.org/). Only useful for
kernel devs running the KUnit test harness and is not for inclusion
into a production build.
For more information on KUnit and unit tests in general, refer
to the KUnit documentation in Documentation/dev-tools/kunit/.
config FIREWIRE_SBP2
tristate "Storage devices (SBP-2 protocol)"
depends on FIREWIRE && SCSI
......
......@@ -18,3 +18,5 @@ obj-$(CONFIG_PROVIDE_OHCI1394_DMA_INIT) += init_ohci1394_dma.o
obj-$(CONFIG_FIREWIRE_KUNIT_UAPI_TEST) += uapi-test.o
obj-$(CONFIG_FIREWIRE_KUNIT_PACKET_SERDES_TEST) += packet-serdes-test.o
obj-$(CONFIG_FIREWIRE_KUNIT_SELF_ID_SEQUENCE_HELPER_TEST) += self-id-sequence-helper-test.o
obj-$(CONFIG_FIREWIRE_KUNIT_OHCI_SERDES_TEST) += ohci-serdes-test.o
......@@ -22,6 +22,8 @@
#include "core.h"
#include <trace/events/firewire.h>
/*
* Isochronous DMA context management
*/
......@@ -148,12 +150,20 @@ struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
ctx->callback.sc = callback;
ctx->callback_data = callback_data;
trace_isoc_outbound_allocate(ctx, channel, speed);
trace_isoc_inbound_single_allocate(ctx, channel, header_size);
trace_isoc_inbound_multiple_allocate(ctx);
return ctx;
}
EXPORT_SYMBOL(fw_iso_context_create);
void fw_iso_context_destroy(struct fw_iso_context *ctx)
{
trace_isoc_outbound_destroy(ctx);
trace_isoc_inbound_single_destroy(ctx);
trace_isoc_inbound_multiple_destroy(ctx);
ctx->card->driver->free_iso_context(ctx);
}
EXPORT_SYMBOL(fw_iso_context_destroy);
......@@ -161,12 +171,18 @@ EXPORT_SYMBOL(fw_iso_context_destroy);
int fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags)
{
trace_isoc_outbound_start(ctx, cycle);
trace_isoc_inbound_single_start(ctx, cycle, sync, tags);
trace_isoc_inbound_multiple_start(ctx, cycle, sync, tags);
return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
}
EXPORT_SYMBOL(fw_iso_context_start);
int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels)
{
trace_isoc_inbound_multiple_channels(ctx, *channels);
return ctx->card->driver->set_iso_channels(ctx, channels);
}
......@@ -175,24 +191,40 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_buffer *buffer,
unsigned long payload)
{
trace_isoc_outbound_queue(ctx, payload, packet);
trace_isoc_inbound_single_queue(ctx, payload, packet);
trace_isoc_inbound_multiple_queue(ctx, payload, packet);
return ctx->card->driver->queue_iso(ctx, packet, buffer, payload);
}
EXPORT_SYMBOL(fw_iso_context_queue);
void fw_iso_context_queue_flush(struct fw_iso_context *ctx)
{
trace_isoc_outbound_flush(ctx);
trace_isoc_inbound_single_flush(ctx);
trace_isoc_inbound_multiple_flush(ctx);
ctx->card->driver->flush_queue_iso(ctx);
}
EXPORT_SYMBOL(fw_iso_context_queue_flush);
int fw_iso_context_flush_completions(struct fw_iso_context *ctx)
{
trace_isoc_outbound_flush_completions(ctx);
trace_isoc_inbound_single_flush_completions(ctx);
trace_isoc_inbound_multiple_flush_completions(ctx);
return ctx->card->driver->flush_iso_completions(ctx);
}
EXPORT_SYMBOL(fw_iso_context_flush_completions);
int fw_iso_context_stop(struct fw_iso_context *ctx)
{
trace_isoc_outbound_stop(ctx);
trace_isoc_inbound_single_stop(ctx);
trace_isoc_inbound_multiple_stop(ctx);
return ctx->card->driver->stop_iso(ctx);
}
EXPORT_SYMBOL(fw_iso_context_stop);
......
......@@ -20,84 +20,9 @@
#include <asm/byteorder.h>
#include "core.h"
#include "phy-packet-definitions.h"
#include <trace/events/firewire.h>
#define SELF_ID_PHY_ID(q) (((q) >> 24) & 0x3f)
#define SELF_ID_EXTENDED(q) (((q) >> 23) & 0x01)
#define SELF_ID_LINK_ON(q) (((q) >> 22) & 0x01)
#define SELF_ID_GAP_COUNT(q) (((q) >> 16) & 0x3f)
#define SELF_ID_PHY_SPEED(q) (((q) >> 14) & 0x03)
#define SELF_ID_CONTENDER(q) (((q) >> 11) & 0x01)
#define SELF_ID_PHY_INITIATOR(q) (((q) >> 1) & 0x01)
#define SELF_ID_MORE_PACKETS(q) (((q) >> 0) & 0x01)
#define SELF_ID_EXT_SEQUENCE(q) (((q) >> 20) & 0x07)
#define SELFID_PORT_CHILD 0x3
#define SELFID_PORT_PARENT 0x2
#define SELFID_PORT_NCONN 0x1
#define SELFID_PORT_NONE 0x0
/*
 * Parse one self ID sequence starting at @sid and count the ports it
 * describes.  @total_port_count receives the number of port status fields
 * that denote an actual port (child, parent, or not connected);
 * @child_port_count receives the subset connected to child nodes.
 * Returns a pointer to the quadlet following the sequence, or NULL when
 * the trailing extended self ID quadlets are inconsistent.
 */
static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
{
	u32 q;
	int port_type, shift, seq;

	*total_port_count = 0;
	*child_port_count = 0;

	/*
	 * Port status fields are two bits wide.  The first quadlet carries
	 * three of them, read at shifts 6, 4 and 2; each extended quadlet
	 * carries eight, read at shifts 16 down to 2.
	 */
	shift = 6;
	q = *sid;
	seq = 0;

	while (1) {
		port_type = (q >> shift) & 0x03;
		switch (port_type) {
		case SELFID_PORT_CHILD:
			(*child_port_count)++;
			fallthrough;
		case SELFID_PORT_PARENT:
		case SELFID_PORT_NCONN:
			/* Parent and unconnected ports count as ports too. */
			(*total_port_count)++;
			fallthrough;
		case SELFID_PORT_NONE:
			break;
		}

		shift -= 2;
		if (shift == 0) {
			/* All port fields of this quadlet are consumed. */
			if (!SELF_ID_MORE_PACKETS(q))
				return sid + 1;

			shift = 16;
			sid++;
			q = *sid;

			/*
			 * Check that the extra packets actually are
			 * extended self ID packets and that the
			 * sequence numbers in the extended self ID
			 * packets increase as expected.
			 */
			if (!SELF_ID_EXTENDED(q) ||
			    seq != SELF_ID_EXT_SEQUENCE(q))
				return NULL;

			seq++;
		}
	}
}
/*
 * Look up the two-bit status field of port @port_index within the self ID
 * sequence starting at @sid.  The first quadlet holds three port fields and
 * every following extended quadlet holds eight, hence the +5 bias that maps
 * port indices onto an even eight-fields-per-quadlet grid.
 */
static int get_port_type(u32 *sid, int port_index)
{
	int slot = port_index + 5;

	return (sid[slot / 8] >> (16 - (slot % 8) * 2)) & 0x03;
}
static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
{
struct fw_node *node;
......@@ -107,10 +32,11 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
return NULL;
node->color = color;
node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
node->link_on = SELF_ID_LINK_ON(sid);
node->phy_speed = SELF_ID_PHY_SPEED(sid);
node->initiated_reset = SELF_ID_PHY_INITIATOR(sid);
node->node_id = LOCAL_BUS | phy_packet_self_id_get_phy_id(sid);
node->link_on = phy_packet_self_id_zero_get_link_active(sid);
// NOTE: Only two bits, thus only for SCODE_100, SCODE_200, SCODE_400, and SCODE_BETA.
node->phy_speed = phy_packet_self_id_zero_get_scode(sid);
node->initiated_reset = phy_packet_self_id_zero_get_initiated_reset(sid);
node->port_count = port_count;
refcount_set(&node->ref_count, 1);
......@@ -169,13 +95,16 @@ static inline struct fw_node *fw_node(struct list_head *l)
* internally consistent. On success this function returns the
* fw_node corresponding to the local card otherwise NULL.
*/
static struct fw_node *build_tree(struct fw_card *card,
u32 *sid, int self_id_count)
static struct fw_node *build_tree(struct fw_card *card, const u32 *sid, int self_id_count,
unsigned int generation)
{
struct self_id_sequence_enumerator enumerator = {
.cursor = sid,
.quadlet_count = self_id_count,
};
struct fw_node *node, *child, *local_node, *irm_node;
struct list_head stack, *h;
u32 *next_sid, *end, q;
int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
struct list_head stack;
int phy_id, stack_depth;
int gap_count;
bool beta_repeaters_present;
......@@ -183,24 +112,56 @@ static struct fw_node *build_tree(struct fw_card *card,
node = NULL;
INIT_LIST_HEAD(&stack);
stack_depth = 0;
end = sid + self_id_count;
phy_id = 0;
irm_node = NULL;
gap_count = SELF_ID_GAP_COUNT(*sid);
gap_count = phy_packet_self_id_zero_get_gap_count(*sid);
beta_repeaters_present = false;
while (sid < end) {
next_sid = count_ports(sid, &port_count, &child_port_count);
while (enumerator.quadlet_count > 0) {
unsigned int child_port_count = 0;
unsigned int total_port_count = 0;
unsigned int parent_count = 0;
unsigned int quadlet_count;
const u32 *self_id_sequence;
unsigned int port_capacity;
enum phy_packet_self_id_port_status port_status;
unsigned int port_index;
struct list_head *h;
int i;
if (next_sid == NULL) {
fw_err(card, "inconsistent extended self IDs\n");
self_id_sequence = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
if (IS_ERR(self_id_sequence)) {
if (PTR_ERR(self_id_sequence) != -ENODATA) {
fw_err(card, "inconsistent extended self IDs: %ld\n",
PTR_ERR(self_id_sequence));
return NULL;
}
break;
}
q = *sid;
if (phy_id != SELF_ID_PHY_ID(q)) {
port_capacity = self_id_sequence_get_port_capacity(quadlet_count);
trace_self_id_sequence(card->index, self_id_sequence, quadlet_count, generation);
for (port_index = 0; port_index < port_capacity; ++port_index) {
port_status = self_id_sequence_get_port_status(self_id_sequence, quadlet_count,
port_index);
switch (port_status) {
case PHY_PACKET_SELF_ID_PORT_STATUS_CHILD:
++child_port_count;
fallthrough;
case PHY_PACKET_SELF_ID_PORT_STATUS_PARENT:
case PHY_PACKET_SELF_ID_PORT_STATUS_NCONN:
++total_port_count;
fallthrough;
case PHY_PACKET_SELF_ID_PORT_STATUS_NONE:
default:
break;
}
}
if (phy_id != phy_packet_self_id_get_phy_id(self_id_sequence[0])) {
fw_err(card, "PHY ID mismatch in self ID: %d != %d\n",
phy_id, SELF_ID_PHY_ID(q));
phy_id, phy_packet_self_id_get_phy_id(self_id_sequence[0]));
return NULL;
}
......@@ -221,7 +182,7 @@ static struct fw_node *build_tree(struct fw_card *card,
*/
child = fw_node(h);
node = fw_node_create(q, port_count, card->color);
node = fw_node_create(self_id_sequence[0], total_port_count, card->color);
if (node == NULL) {
fw_err(card, "out of memory while building topology\n");
return NULL;
......@@ -230,48 +191,40 @@ static struct fw_node *build_tree(struct fw_card *card,
if (phy_id == (card->node_id & 0x3f))
local_node = node;
if (SELF_ID_CONTENDER(q))
if (phy_packet_self_id_zero_get_contender(self_id_sequence[0]))
irm_node = node;
parent_count = 0;
for (i = 0; i < port_count; i++) {
switch (get_port_type(sid, i)) {
case SELFID_PORT_PARENT:
/*
* Who's your daddy? We dont know the
* parent node at this time, so we
* temporarily abuse node->color for
* remembering the entry in the
* node->ports array where the parent
* node should be. Later, when we
* handle the parent node, we fix up
* the reference.
*/
parent_count++;
for (port_index = 0; port_index < total_port_count; ++port_index) {
port_status = self_id_sequence_get_port_status(self_id_sequence, quadlet_count,
port_index);
switch (port_status) {
case PHY_PACKET_SELF_ID_PORT_STATUS_PARENT:
// Who's your daddy? We dont know the parent node at this time, so
// we temporarily abuse node->color for remembering the entry in
// the node->ports array where the parent node should be. Later,
// when we handle the parent node, we fix up the reference.
++parent_count;
node->color = i;
break;
case SELFID_PORT_CHILD:
node->ports[i] = child;
/*
* Fix up parent reference for this
* child node.
*/
case PHY_PACKET_SELF_ID_PORT_STATUS_CHILD:
node->ports[port_index] = child;
// Fix up parent reference for this child node.
child->ports[child->color] = node;
child->color = card->color;
child = fw_node(child->link.next);
break;
case PHY_PACKET_SELF_ID_PORT_STATUS_NCONN:
case PHY_PACKET_SELF_ID_PORT_STATUS_NONE:
default:
break;
}
}
/*
* Check that the node reports exactly one parent
* port, except for the root, which of course should
* have no parents.
*/
if ((next_sid == end && parent_count != 0) ||
(next_sid < end && parent_count != 1)) {
// Check that the node reports exactly one parent port, except for the root, which
// of course should have no parents.
if ((enumerator.quadlet_count == 0 && parent_count != 0) ||
(enumerator.quadlet_count > 0 && parent_count != 1)) {
fw_err(card, "parent port inconsistency for node %d: "
"parent_count=%d\n", phy_id, parent_count);
return NULL;
......@@ -282,20 +235,16 @@ static struct fw_node *build_tree(struct fw_card *card,
list_add_tail(&node->link, &stack);
stack_depth += 1 - child_port_count;
if (node->phy_speed == SCODE_BETA &&
parent_count + child_port_count > 1)
if (node->phy_speed == SCODE_BETA && parent_count + child_port_count > 1)
beta_repeaters_present = true;
/*
* If PHYs report different gap counts, set an invalid count
* which will force a gap count reconfiguration and a reset.
*/
if (SELF_ID_GAP_COUNT(q) != gap_count)
// If PHYs report different gap counts, set an invalid count which will force a gap
// count reconfiguration and a reset.
if (phy_packet_self_id_zero_get_gap_count(self_id_sequence[0]) != gap_count)
gap_count = 0;
update_hop_count(node);
sid = next_sid;
phy_id++;
}
......@@ -536,7 +485,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
card->bm_abdicate = bm_abdicate;
fw_schedule_bm_work(card, 0);
local_node = build_tree(card, self_ids, self_id_count);
local_node = build_tree(card, self_ids, self_id_count, generation);
update_topology_map(card, self_ids, self_id_count);
......
// SPDX-License-Identifier: GPL-2.0-or-later
// Copyright (c) 2024 Takashi Sakamoto
#include <linux/types.h>
#include <linux/err.h>
#include "packet-header-definitions.h"
#include "phy-packet-definitions.h"
#define CREATE_TRACE_POINTS
#include <trace/events/firewire.h>
#ifdef TRACEPOINTS_ENABLED
EXPORT_TRACEPOINT_SYMBOL_GPL(isoc_inbound_single_completions);
EXPORT_TRACEPOINT_SYMBOL_GPL(isoc_inbound_multiple_completions);
EXPORT_TRACEPOINT_SYMBOL_GPL(isoc_outbound_completions);
#endif
......@@ -29,20 +29,13 @@
#include <asm/byteorder.h>
#include "core.h"
#include <trace/events/firewire.h>
#include "packet-header-definitions.h"
#include "phy-packet-definitions.h"
#include <trace/events/firewire.h>
#define HEADER_DESTINATION_IS_BROADCAST(header) \
((async_header_get_destination(header) & 0x3f) == 0x3f)
#define PHY_PACKET_CONFIG 0x0
#define PHY_PACKET_LINK_ON 0x1
#define PHY_PACKET_SELF_ID 0x2
#define PHY_CONFIG_GAP_COUNT(gap_count) (((gap_count) << 16) | (1 << 22))
#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
#define PHY_IDENTIFIER(id) ((id) << 30)
/* returns 0 if the split timeout handler is already running */
static int try_cancel_split_timeout(struct fw_transaction *t)
{
......@@ -481,10 +474,14 @@ void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count)
{
long timeout = DIV_ROUND_UP(HZ, 10);
u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG);
u32 data = 0;
phy_packet_set_packet_identifier(&data, PHY_PACKET_PACKET_IDENTIFIER_PHY_CONFIG);
if (node_id != FW_PHY_CONFIG_NO_NODE_ID)
data |= PHY_CONFIG_ROOT_ID(node_id);
if (node_id != FW_PHY_CONFIG_NO_NODE_ID) {
phy_packet_phy_config_set_root_id(&data, node_id);
phy_packet_phy_config_set_force_root_node(&data, true);
}
if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) {
gap_count = card->driver->read_phy_reg(card, 1);
......@@ -495,7 +492,8 @@ void fw_send_phy_config(struct fw_card *card,
if (gap_count == 63)
return;
}
data |= PHY_CONFIG_GAP_COUNT(gap_count);
phy_packet_phy_config_set_gap_count(&data, gap_count);
phy_packet_phy_config_set_gap_count_optimization(&data, true);
mutex_lock(&phy_config_mutex);
......
// SPDX-License-Identifier: GPL-2.0-or-later
//
// ohci-serdes-test.c - An application of Kunit to check serialization/deserialization of data in
// buffers and registers defined in 1394 OHCI specification.
//
// Copyright (c) 2024 Takashi Sakamoto
#include <kunit/test.h>
#include "ohci.h"
// Decode a fixed SelfIDCount register image and check each extracted field.
static void test_self_id_count_register_deserialization(struct kunit *test)
{
	const u32 reg_value = 0x803d0594;
	u32 size = ohci1394_self_id_count_get_size(reg_value);
	u8 generation = ohci1394_self_id_count_get_generation(reg_value);
	bool is_error = ohci1394_self_id_count_is_error(reg_value);

	KUNIT_EXPECT_TRUE(test, is_error);
	KUNIT_EXPECT_EQ(test, 0x3d, generation);
	KUNIT_EXPECT_EQ(test, 0x165, size);
}
// Decode the first quadlet of a captured self-ID receive buffer and check
// the generation and timestamp fields.
static void test_self_id_receive_buffer_deserialization(struct kunit *test)
{
	static const u32 quadlets[] = {
		0x0006f38b,
		0x807fcc56,
		0x7f8033a9,
		0x8145cc5e,
		0x7eba33a1,
	};
	u16 timestamp = ohci1394_self_id_receive_q0_get_timestamp(quadlets[0]);
	u8 generation = ohci1394_self_id_receive_q0_get_generation(quadlets[0]);

	KUNIT_EXPECT_EQ(test, 0x6, generation);
	KUNIT_EXPECT_EQ(test, 0xf38b, timestamp);
}
// Table of test cases executed by the suite; the empty entry terminates it.
static struct kunit_case ohci_serdes_test_cases[] = {
	KUNIT_CASE(test_self_id_count_register_deserialization),
	KUNIT_CASE(test_self_id_receive_buffer_deserialization),
	{}
};

// Suite descriptor registered with the KUnit framework below.
static struct kunit_suite ohci_serdes_test_suite = {
	.name = "firewire-ohci-serdes",
	.test_cases = ohci_serdes_test_cases,
};

kunit_test_suite(ohci_serdes_test_suite);

MODULE_DESCRIPTION("FireWire buffers and registers serialization/deserialization unit test suite");
MODULE_LICENSE("GPL");
......@@ -41,6 +41,14 @@
#include "core.h"
#include "ohci.h"
#include "packet-header-definitions.h"
#include "phy-packet-definitions.h"
#include <trace/events/firewire.h>
static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk);
#define CREATE_TRACE_POINTS
#include <trace/events/firewire_ohci.h>
#define ohci_info(ohci, f, args...) dev_info(ohci->card.device, f, ##args)
#define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args)
......@@ -437,23 +445,25 @@ static void log_irqs(struct fw_ohci *ohci, u32 evt)
? " ?" : "");
}
static const char *speed[] = {
static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
{
static const char *const speed[] = {
[0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
};
static const char *power[] = {
};
static const char *const power[] = {
[0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
[4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
};
static const char port[] = { '.', '-', 'p', 'c', };
static char _p(u32 *s, int shift)
{
return port[*s >> shift & 3];
}
static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
{
u32 *s;
};
static const char port[] = {
[PHY_PACKET_SELF_ID_PORT_STATUS_NONE] = '.',
[PHY_PACKET_SELF_ID_PORT_STATUS_NCONN] = '-',
[PHY_PACKET_SELF_ID_PORT_STATUS_PARENT] = 'p',
[PHY_PACKET_SELF_ID_PORT_STATUS_CHILD] = 'c',
};
struct self_id_sequence_enumerator enumerator = {
.cursor = ohci->self_id_buffer,
.quadlet_count = self_id_count,
};
if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
return;
......@@ -461,20 +471,46 @@ static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n",
self_id_count, generation, ohci->node_id);
for (s = ohci->self_id_buffer; self_id_count--; ++s)
if ((*s & 1 << 23) == 0)
while (enumerator.quadlet_count > 0) {
unsigned int quadlet_count;
unsigned int port_index;
const u32 *s;
int i;
s = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
if (IS_ERR(s))
break;
ohci_notice(ohci,
"selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n",
*s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
*s,
phy_packet_self_id_get_phy_id(*s),
port[self_id_sequence_get_port_status(s, quadlet_count, 0)],
port[self_id_sequence_get_port_status(s, quadlet_count, 1)],
port[self_id_sequence_get_port_status(s, quadlet_count, 2)],
speed[*s >> 14 & 3], *s >> 16 & 63,
power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
*s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
else
port_index = 3;
for (i = 1; i < quadlet_count; ++i) {
ohci_notice(ohci,
"selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
*s, *s >> 24 & 63,
_p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
_p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2));
s[i],
phy_packet_self_id_get_phy_id(s[i]),
port[self_id_sequence_get_port_status(s, quadlet_count, port_index)],
port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 1)],
port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 2)],
port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 3)],
port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 4)],
port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 5)],
port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 6)],
port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 7)]
);
port_index += 8;
}
}
}
static const char *evts[] = {
......@@ -841,10 +877,25 @@ static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
}
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
(ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk)
{
return has_be_header_quirk ? (__force __u32)value : le32_to_cpu(value);
}
static bool has_be_header_quirk(const struct fw_ohci *ohci)
{
return !!(ohci->quirks & QUIRK_BE_HEADERS);
}
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk __maybe_unused)
{
return le32_to_cpu(value);
}
static bool has_be_header_quirk(const struct fw_ohci *ohci)
{
return false;
}
#endif
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
......@@ -854,9 +905,9 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
u32 status, length, tcode;
int evt;
p.header[0] = cond_le32_to_cpu(buffer[0]);
p.header[1] = cond_le32_to_cpu(buffer[1]);
p.header[2] = cond_le32_to_cpu(buffer[2]);
p.header[0] = cond_le32_to_cpu(buffer[0], has_be_header_quirk(ohci));
p.header[1] = cond_le32_to_cpu(buffer[1], has_be_header_quirk(ohci));
p.header[2] = cond_le32_to_cpu(buffer[2], has_be_header_quirk(ohci));
tcode = async_header_get_tcode(p.header);
switch (tcode) {
......@@ -868,7 +919,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
break;
case TCODE_READ_BLOCK_REQUEST :
p.header[3] = cond_le32_to_cpu(buffer[3]);
p.header[3] = cond_le32_to_cpu(buffer[3], has_be_header_quirk(ohci));
p.header_length = 16;
p.payload_length = 0;
break;
......@@ -877,7 +928,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
case TCODE_READ_BLOCK_RESPONSE:
case TCODE_LOCK_REQUEST:
case TCODE_LOCK_RESPONSE:
p.header[3] = cond_le32_to_cpu(buffer[3]);
p.header[3] = cond_le32_to_cpu(buffer[3], has_be_header_quirk(ohci));
p.header_length = 16;
p.payload_length = async_header_get_data_length(p.header);
if (p.payload_length > MAX_ASYNC_PAYLOAD) {
......@@ -902,7 +953,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
/* FIXME: What to do about evt_* errors? */
length = (p.header_length + p.payload_length + 3) / 4;
status = cond_le32_to_cpu(buffer[length]);
status = cond_le32_to_cpu(buffer[length], has_be_header_quirk(ohci));
evt = (status >> 16) & 0x1f;
p.ack = evt - 16;
......@@ -1817,7 +1868,8 @@ static u32 update_bus_time(struct fw_ohci *ohci)
return ohci->bus_time | cycle_time_seconds;
}
static int get_status_for_port(struct fw_ohci *ohci, int port_index)
static int get_status_for_port(struct fw_ohci *ohci, int port_index,
enum phy_packet_self_id_port_status *status)
{
int reg;
......@@ -1831,33 +1883,44 @@ static int get_status_for_port(struct fw_ohci *ohci, int port_index)
switch (reg & 0x0f) {
case 0x06:
return 2; /* is child node (connected to parent node) */
// is child node (connected to parent node)
*status = PHY_PACKET_SELF_ID_PORT_STATUS_PARENT;
break;
case 0x0e:
return 3; /* is parent node (connected to child node) */
// is parent node (connected to child node)
*status = PHY_PACKET_SELF_ID_PORT_STATUS_CHILD;
break;
default:
// not connected
*status = PHY_PACKET_SELF_ID_PORT_STATUS_NCONN;
break;
}
return 1; /* not connected */
return 0;
}
static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
int self_id_count)
{
unsigned int left_phy_id = phy_packet_self_id_get_phy_id(self_id);
int i;
u32 entry;
for (i = 0; i < self_id_count; i++) {
entry = ohci->self_id_buffer[i];
if ((self_id & 0xff000000) == (entry & 0xff000000))
u32 entry = ohci->self_id_buffer[i];
unsigned int right_phy_id = phy_packet_self_id_get_phy_id(entry);
if (left_phy_id == right_phy_id)
return -1;
if ((self_id & 0xff000000) < (entry & 0xff000000))
if (left_phy_id < right_phy_id)
return i;
}
return i;
}
static int initiated_reset(struct fw_ohci *ohci)
static bool initiated_reset(struct fw_ohci *ohci)
{
int reg;
int ret = 0;
int ret = false;
mutex_lock(&ohci->phy_reg_mutex);
reg = write_phy_reg(ohci, 7, 0xe0); /* Select page 7 */
......@@ -1870,7 +1933,7 @@ static int initiated_reset(struct fw_ohci *ohci)
if (reg >= 0) {
if ((reg & 0x08) == 0x08) {
/* bit 3 indicates "initiated reset" */
ret = 0x2;
ret = true;
}
}
}
......@@ -1886,9 +1949,14 @@ static int initiated_reset(struct fw_ohci *ohci)
*/
static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
{
int reg, i, pos, status;
/* link active 1, speed 3, bridge 0, contender 1, more packets 0 */
u32 self_id = 0x8040c800;
int reg, i, pos;
u32 self_id = 0;
// link active 1, speed 3, bridge 0, contender 1, more packets 0.
phy_packet_set_packet_identifier(&self_id, PHY_PACKET_PACKET_IDENTIFIER_SELF_ID);
phy_packet_self_id_zero_set_link_active(&self_id, true);
phy_packet_self_id_zero_set_scode(&self_id, SCODE_800);
phy_packet_self_id_zero_set_contender(&self_id, true);
reg = reg_read(ohci, OHCI1394_NodeID);
if (!(reg & OHCI1394_NodeID_idValid)) {
......@@ -1896,26 +1964,30 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
"node ID not valid, new bus reset in progress\n");
return -EBUSY;
}
self_id |= ((reg & 0x3f) << 24); /* phy ID */
phy_packet_self_id_set_phy_id(&self_id, reg & 0x3f);
reg = ohci_read_phy_reg(&ohci->card, 4);
if (reg < 0)
return reg;
self_id |= ((reg & 0x07) << 8); /* power class */
phy_packet_self_id_zero_set_power_class(&self_id, reg & 0x07);
reg = ohci_read_phy_reg(&ohci->card, 1);
if (reg < 0)
return reg;
self_id |= ((reg & 0x3f) << 16); /* gap count */
phy_packet_self_id_zero_set_gap_count(&self_id, reg & 0x3f);
for (i = 0; i < 3; i++) {
status = get_status_for_port(ohci, i);
if (status < 0)
return status;
self_id |= ((status & 0x3) << (6 - (i * 2)));
enum phy_packet_self_id_port_status status;
int err;
err = get_status_for_port(ohci, i, &status);
if (err < 0)
return err;
self_id_sequence_set_port_status(&self_id, 1, i, status);
}
self_id |= initiated_reset(ohci);
phy_packet_self_id_zero_set_initiated_reset(&self_id, initiated_reset(ohci));
pos = get_self_id_pos(ohci, self_id, self_id_count);
if (pos >= 0) {
......@@ -1933,7 +2005,7 @@ static void bus_reset_work(struct work_struct *work)
struct fw_ohci *ohci =
container_of(work, struct fw_ohci, bus_reset_work);
int self_id_count, generation, new_generation, i, j;
u32 reg;
u32 reg, quadlet;
void *free_rom = NULL;
dma_addr_t free_rom_bus = 0;
bool is_new_root;
......@@ -1958,7 +2030,7 @@ static void bus_reset_work(struct work_struct *work)
ohci->is_root = is_new_root;
reg = reg_read(ohci, OHCI1394_SelfIDCount);
if (reg & OHCI1394_SelfIDCount_selfIDError) {
if (ohci1394_self_id_count_is_error(reg)) {
ohci_notice(ohci, "self ID receive error\n");
return;
}
......@@ -1968,19 +2040,20 @@ static void bus_reset_work(struct work_struct *work)
* the inverted quadlets and a header quadlet, we shift one
* bit extra to get the actual number of self IDs.
*/
self_id_count = (reg >> 3) & 0xff;
self_id_count = ohci1394_self_id_count_get_size(reg) >> 1;
if (self_id_count > 252) {
ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
return;
}
generation = (cond_le32_to_cpu(ohci->self_id[0]) >> 16) & 0xff;
quadlet = cond_le32_to_cpu(ohci->self_id[0], has_be_header_quirk(ohci));
generation = ohci1394_self_id_receive_q0_get_generation(quadlet);
rmb();
for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
u32 id = cond_le32_to_cpu(ohci->self_id[i]);
u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1]);
u32 id = cond_le32_to_cpu(ohci->self_id[i], has_be_header_quirk(ohci));
u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1], has_be_header_quirk(ohci));
if (id != ~id2) {
/*
......@@ -2032,7 +2105,8 @@ static void bus_reset_work(struct work_struct *work)
* of self IDs.
*/
new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
reg = reg_read(ohci, OHCI1394_SelfIDCount);
new_generation = ohci1394_self_id_count_get_generation(reg);
if (new_generation != generation) {
ohci_notice(ohci, "new bus reset, discarding self ids\n");
return;
......@@ -2130,13 +2204,21 @@ static irqreturn_t irq_handler(int irq, void *data)
*/
reg_write(ohci, OHCI1394_IntEventClear,
event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
trace_irqs(ohci->card.index, event);
log_irqs(ohci, event);
// The flag is masked again at bus_reset_work() scheduled by selfID event.
if (event & OHCI1394_busReset)
reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
if (event & OHCI1394_selfIDComplete)
if (event & OHCI1394_selfIDComplete) {
if (trace_self_id_complete_enabled()) {
u32 reg = reg_read(ohci, OHCI1394_SelfIDCount);
trace_self_id_complete(ohci->card.index, reg, ohci->self_id,
has_be_header_quirk(ohci));
}
queue_work(selfid_workqueue, &ohci->bus_reset_work);
}
if (event & OHCI1394_RQPkt)
tasklet_schedule(&ohci->ar_request_ctx.tasklet);
......@@ -2781,8 +2863,13 @@ static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
}
}
static void flush_iso_completions(struct iso_context *ctx)
static void flush_iso_completions(struct iso_context *ctx, enum fw_iso_context_completions_cause cause)
{
trace_isoc_inbound_single_completions(&ctx->base, ctx->last_timestamp, cause, ctx->header,
ctx->header_length);
trace_isoc_outbound_completions(&ctx->base, ctx->last_timestamp, cause, ctx->header,
ctx->header_length);
ctx->base.callback.sc(&ctx->base, ctx->last_timestamp,
ctx->header_length, ctx->header,
ctx->base.callback_data);
......@@ -2796,7 +2883,7 @@ static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) {
if (ctx->base.drop_overflow_headers)
return;
flush_iso_completions(ctx);
flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW);
}
ctx_hdr = ctx->header + ctx->header_length;
......@@ -2845,7 +2932,7 @@ static int handle_ir_packet_per_buffer(struct context *context,
copy_iso_headers(ctx, (u32 *) (last + 1));
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
flush_iso_completions(ctx);
flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ);
return 1;
}
......@@ -2880,6 +2967,9 @@ static int handle_ir_buffer_fill(struct context *context,
completed, DMA_FROM_DEVICE);
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
trace_isoc_inbound_multiple_completions(&ctx->base, completed,
FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ);
ctx->base.callback.mc(&ctx->base,
buffer_dma + completed,
ctx->base.callback_data);
......@@ -2896,6 +2986,9 @@ static void flush_ir_buffer_fill(struct iso_context *ctx)
ctx->mc_buffer_bus & ~PAGE_MASK,
ctx->mc_completed, DMA_FROM_DEVICE);
trace_isoc_inbound_multiple_completions(&ctx->base, ctx->mc_completed,
FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH);
ctx->base.callback.mc(&ctx->base,
ctx->mc_buffer_bus + ctx->mc_completed,
ctx->base.callback_data);
......@@ -2960,7 +3053,7 @@ static int handle_it_packet(struct context *context,
if (ctx->header_length + 4 > PAGE_SIZE) {
if (ctx->base.drop_overflow_headers)
return 1;
flush_iso_completions(ctx);
flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW);
}
ctx_hdr = ctx->header + ctx->header_length;
......@@ -2971,7 +3064,7 @@ static int handle_it_packet(struct context *context,
ctx->header_length += 4;
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
flush_iso_completions(ctx);
flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ);
return 1;
}
......@@ -3536,7 +3629,7 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
case FW_ISO_CONTEXT_TRANSMIT:
case FW_ISO_CONTEXT_RECEIVE:
if (ctx->header_length != 0)
flush_iso_completions(ctx);
flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH);
break;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
if (ctx->mc_completed != 0)
......
......@@ -31,7 +31,6 @@
#define OHCI1394_HCControl_softReset 0x00010000
#define OHCI1394_SelfIDBuffer 0x064
#define OHCI1394_SelfIDCount 0x068
#define OHCI1394_SelfIDCount_selfIDError 0x80000000
#define OHCI1394_IRMultiChanMaskHiSet 0x070
#define OHCI1394_IRMultiChanMaskHiClear 0x074
#define OHCI1394_IRMultiChanMaskLoSet 0x078
......@@ -156,4 +155,46 @@
#define OHCI1394_phy_tcode 0xe
// Self-ID DMA.
#define OHCI1394_SelfIDCount_selfIDError_MASK 0x80000000
#define OHCI1394_SelfIDCount_selfIDError_SHIFT 31
#define OHCI1394_SelfIDCount_selfIDGeneration_MASK 0x00ff0000
#define OHCI1394_SelfIDCount_selfIDGeneration_SHIFT 16
#define OHCI1394_SelfIDCount_selfIDSize_MASK 0x000007fc
#define OHCI1394_SelfIDCount_selfIDSize_SHIFT 2
// Whether the selfIDError flag of the SelfIDCount register is raised.
static inline bool ohci1394_self_id_count_is_error(u32 value)
{
	// A non-zero masked value means the flag bit is set; no shift is needed
	// for a boolean result.
	return (value & OHCI1394_SelfIDCount_selfIDError_MASK) != 0;
}
// Extract the selfIDGeneration field (bits 16..23) of the SelfIDCount register.
static inline u8 ohci1394_self_id_count_get_generation(u32 value)
{
	return (value & OHCI1394_SelfIDCount_selfIDGeneration_MASK) >> OHCI1394_SelfIDCount_selfIDGeneration_SHIFT;
}
// In 1394 OHCI specification, the maximum size of self ID stream is 504 quadlets
// (= 63 devices * 4 self ID packets * 2 quadlets). The selfIDSize field accommodates it and its
// additional first quadlet, since the field is 9 bits (0x1ff = 511).
//
// Extract the selfIDSize field (bits 2..10): the number of quadlets written to
// the self-ID buffer, including the additional first quadlet.
static inline u32 ohci1394_self_id_count_get_size(u32 value)
{
	return (value & OHCI1394_SelfIDCount_selfIDSize_MASK) >> OHCI1394_SelfIDCount_selfIDSize_SHIFT;
}
// Field layout of the first quadlet written by the Self-ID reception DMA.
#define OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_MASK 0x00ff0000
#define OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_SHIFT 16
#define OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_MASK 0x0000ffff
#define OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_SHIFT 0

// Extract the generation field from the first quadlet of the self-ID
// reception buffer.
static inline u8 ohci1394_self_id_receive_q0_get_generation(u32 quadlet0)
{
	return (quadlet0 & OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_MASK) >> OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_SHIFT;
}
// Extract the timestamp field (lower 16 bits) from the first quadlet of the
// self-ID reception buffer.
static inline u16 ohci1394_self_id_receive_q0_get_timestamp(u32 quadlet0)
{
	return (quadlet0 & OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_MASK) >> OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_SHIFT;
}
#endif /* _FIREWIRE_OHCI_H */
......@@ -7,6 +7,8 @@
#ifndef _FIREWIRE_PACKET_HEADER_DEFINITIONS_H
#define _FIREWIRE_PACKET_HEADER_DEFINITIONS_H
#include <linux/types.h>
#define ASYNC_HEADER_QUADLET_COUNT 4
#define ASYNC_HEADER_Q0_DESTINATION_SHIFT 16
......
......@@ -10,6 +10,7 @@
#include <linux/firewire-constants.h>
#include "packet-header-definitions.h"
#include "phy-packet-definitions.h"
static void serialize_async_header_common(u32 header[ASYNC_HEADER_QUADLET_COUNT],
unsigned int dst_id, unsigned int tlabel,
......@@ -187,6 +188,89 @@ static void deserialize_isoc_header(u32 header, unsigned int *data_length, unsig
*sy = isoc_header_get_sy(header);
}
// Pack the given field values of a self ID zero packet into *quadlet.
// Each setter masks its own field first, so previously stored fields of the
// quadlet are preserved.
static void serialize_phy_packet_self_id_zero(u32 *quadlet, unsigned int packet_identifier,
					      unsigned int phy_id, bool extended,
					      bool link_is_active, unsigned int gap_count,
					      unsigned int scode, bool is_contender,
					      unsigned int power_class, bool is_initiated_reset,
					      bool has_more_packets)
{
	phy_packet_set_packet_identifier(quadlet, packet_identifier);
	phy_packet_self_id_set_phy_id(quadlet, phy_id);
	phy_packet_self_id_set_extended(quadlet, extended);
	phy_packet_self_id_zero_set_link_active(quadlet, link_is_active);
	phy_packet_self_id_zero_set_gap_count(quadlet, gap_count);
	phy_packet_self_id_zero_set_scode(quadlet, scode);
	phy_packet_self_id_zero_set_contender(quadlet, is_contender);
	phy_packet_self_id_zero_set_power_class(quadlet, power_class);
	phy_packet_self_id_zero_set_initiated_reset(quadlet, is_initiated_reset);
	phy_packet_self_id_set_more_packets(quadlet, has_more_packets);
}
// Unpack every field of a self ID zero packet from the quadlet into the given
// out parameters.
static void deserialize_phy_packet_self_id_zero(u32 quadlet, unsigned int *packet_identifier,
						unsigned int *phy_id, bool *extended,
						bool *link_is_active, unsigned int *gap_count,
						unsigned int *scode, bool *is_contender,
						unsigned int *power_class,
						bool *is_initiated_reset, bool *has_more_packets)
{
	*packet_identifier = phy_packet_get_packet_identifier(quadlet);
	*phy_id = phy_packet_self_id_get_phy_id(quadlet);
	*extended = phy_packet_self_id_get_extended(quadlet);
	*link_is_active = phy_packet_self_id_zero_get_link_active(quadlet);
	*gap_count = phy_packet_self_id_zero_get_gap_count(quadlet);
	*scode = phy_packet_self_id_zero_get_scode(quadlet);
	*is_contender = phy_packet_self_id_zero_get_contender(quadlet);
	*power_class = phy_packet_self_id_zero_get_power_class(quadlet);
	*is_initiated_reset = phy_packet_self_id_zero_get_initiated_reset(quadlet);
	*has_more_packets = phy_packet_self_id_get_more_packets(quadlet);
}
// Pack the fields of an extended self ID packet into *quadlet; other bits of
// the quadlet are preserved by the setters.
static void serialize_phy_packet_self_id_extended(u32 *quadlet, unsigned int packet_identifier,
						  unsigned int phy_id, bool extended,
						  unsigned int sequence, bool has_more_packets)
{
	phy_packet_set_packet_identifier(quadlet, packet_identifier);
	phy_packet_self_id_set_phy_id(quadlet, phy_id);
	phy_packet_self_id_set_extended(quadlet, extended);
	phy_packet_self_id_extended_set_sequence(quadlet, sequence);
	phy_packet_self_id_set_more_packets(quadlet, has_more_packets);
}
// Unpack the fields of an extended self ID packet into the out parameters.
static void deserialize_phy_packet_self_id_extended(u32 quadlet, unsigned int *packet_identifier,
						    unsigned int *phy_id, bool *extended,
						    unsigned int *sequence, bool *has_more_packets)
{
	*packet_identifier = phy_packet_get_packet_identifier(quadlet);
	*phy_id = phy_packet_self_id_get_phy_id(quadlet);
	*extended = phy_packet_self_id_get_extended(quadlet);
	*sequence = phy_packet_self_id_extended_get_sequence(quadlet);
	*has_more_packets = phy_packet_self_id_get_more_packets(quadlet);
}
// Pack the fields of a PHY configuration packet into *quadlet; other bits of
// the quadlet are preserved by the setters.
static void serialize_phy_packet_phy_config(u32 *quadlet, unsigned int packet_identifier,
					    unsigned int root_id, bool has_force_root_node,
					    bool has_gap_count_optimization, unsigned int gap_count)
{
	phy_packet_set_packet_identifier(quadlet, packet_identifier);
	phy_packet_phy_config_set_root_id(quadlet, root_id);
	phy_packet_phy_config_set_force_root_node(quadlet, has_force_root_node);
	phy_packet_phy_config_set_gap_count_optimization(quadlet, has_gap_count_optimization);
	phy_packet_phy_config_set_gap_count(quadlet, gap_count);
}
// Unpack the fields of a PHY configuration packet into the out parameters.
static void deserialize_phy_packet_phy_config(u32 quadlet, unsigned int *packet_identifier,
					      unsigned int *root_id, bool *has_force_root_node,
					      bool *has_gap_count_optimization,
					      unsigned int *gap_count)
{
	*packet_identifier = phy_packet_get_packet_identifier(quadlet);
	*root_id = phy_packet_phy_config_get_root_id(quadlet);
	*has_force_root_node = phy_packet_phy_config_get_force_root_node(quadlet);
	*has_gap_count_optimization = phy_packet_phy_config_get_gap_count_optimization(quadlet);
	*gap_count = phy_packet_phy_config_get_gap_count(quadlet);
}
static void test_async_header_write_quadlet_request(struct kunit *test)
{
static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = {
......@@ -559,6 +643,251 @@ static void test_isoc_header(struct kunit *test)
KUNIT_EXPECT_EQ(test, header, expected);
}
// Round trip of a single self ID zero packet captured from a 1-port PHY:
// deserialize the quadlet, verify every field, serialize the fields back, then
// exercise the port status accessors and compare against the captured data.
static void test_phy_packet_self_id_zero_case0(struct kunit *test)
{
	// TSB41AB1/2 with 1 port.
	const u32 expected[] = {0x80458c80};
	u32 quadlets[] = {0};

	unsigned int packet_identifier;
	unsigned int phy_id;
	bool extended;
	bool link_is_active;
	unsigned int gap_count;
	unsigned int scode;
	bool is_contender;
	unsigned int power_class;
	enum phy_packet_self_id_port_status port_status[3];
	bool is_initiated_reset;
	bool has_more_packets;
	unsigned int port_index;

	deserialize_phy_packet_self_id_zero(expected[0], &packet_identifier, &phy_id, &extended,
					    &link_is_active, &gap_count, &scode, &is_contender,
					    &power_class, &is_initiated_reset, &has_more_packets);

	KUNIT_EXPECT_EQ(test, PHY_PACKET_PACKET_IDENTIFIER_SELF_ID, packet_identifier);
	KUNIT_EXPECT_EQ(test, 0, phy_id);
	KUNIT_EXPECT_FALSE(test, extended);
	KUNIT_EXPECT_TRUE(test, link_is_active);
	KUNIT_EXPECT_EQ(test, 0x05, gap_count);
	KUNIT_EXPECT_EQ(test, SCODE_400, scode);
	KUNIT_EXPECT_TRUE(test, is_contender);
	KUNIT_EXPECT_EQ(test, 0x4, power_class);
	KUNIT_EXPECT_FALSE(test, is_initiated_reset);
	KUNIT_EXPECT_FALSE(test, has_more_packets);

	serialize_phy_packet_self_id_zero(quadlets, packet_identifier, phy_id, extended,
					  link_is_active, gap_count, scode, is_contender,
					  power_class, is_initiated_reset, has_more_packets);

	for (port_index = 0; port_index < ARRAY_SIZE(port_status); ++port_index) {
		port_status[port_index] =
			self_id_sequence_get_port_status(expected, ARRAY_SIZE(expected), port_index);
	}

	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[0]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[1]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[2]);

	for (port_index = 0; port_index < ARRAY_SIZE(port_status); ++port_index) {
		self_id_sequence_set_port_status(quadlets, ARRAY_SIZE(quadlets), port_index,
						 port_status[port_index]);
	}

	KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected));
}
// Same round trip as case0, but for a 3-port PHY sample whose quadlet carries
// different gap count, speed, and initiated-reset values.
static void test_phy_packet_self_id_zero_case1(struct kunit *test)
{
	// XIO2213 and TSB81BA3E with 3 ports.
	const u32 expected[] = {0x817fcc5e};
	u32 quadlets[] = {0};

	unsigned int packet_identifier;
	unsigned int phy_id;
	bool extended;
	bool link_is_active;
	unsigned int gap_count;
	unsigned int scode;
	bool is_contender;
	unsigned int power_class;
	enum phy_packet_self_id_port_status port_status[3];
	bool is_initiated_reset;
	bool has_more_packets;
	unsigned int port_index;

	deserialize_phy_packet_self_id_zero(expected[0], &packet_identifier, &phy_id, &extended,
					    &link_is_active, &gap_count, &scode, &is_contender,
					    &power_class, &is_initiated_reset, &has_more_packets);

	KUNIT_EXPECT_EQ(test, PHY_PACKET_PACKET_IDENTIFIER_SELF_ID, packet_identifier);
	KUNIT_EXPECT_EQ(test, 1, phy_id);
	KUNIT_EXPECT_FALSE(test, extended);
	KUNIT_EXPECT_TRUE(test, link_is_active);
	KUNIT_EXPECT_EQ(test, 0x3f, gap_count);
	KUNIT_EXPECT_EQ(test, SCODE_800, scode);
	KUNIT_EXPECT_TRUE(test, is_contender);
	KUNIT_EXPECT_EQ(test, 0x4, power_class);
	KUNIT_EXPECT_TRUE(test, is_initiated_reset);
	KUNIT_EXPECT_FALSE(test, has_more_packets);

	serialize_phy_packet_self_id_zero(quadlets, packet_identifier, phy_id, extended,
					  link_is_active, gap_count, scode, is_contender,
					  power_class, is_initiated_reset, has_more_packets);

	for (port_index = 0; port_index < ARRAY_SIZE(port_status); ++port_index) {
		port_status[port_index] =
			self_id_sequence_get_port_status(expected, ARRAY_SIZE(expected), port_index);
	}

	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[0]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[1]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[2]);

	for (port_index = 0; port_index < ARRAY_SIZE(port_status); ++port_index) {
		self_id_sequence_set_port_status(quadlets, ARRAY_SIZE(quadlets), port_index,
						 port_status[port_index]);
	}

	KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected));
}
// Round trip of a two-quadlet self ID sequence (zero packet plus one extended
// packet) from a 6-port PHY, including the port status fields that span both
// quadlets.
static void test_phy_packet_self_id_zero_and_one(struct kunit *test)
{
	// TSB41LV06A with 6 ports.
	const u32 expected[] = {
		0x803f8459,
		0x80815000,
	};
	u32 quadlets[] = {0, 0};

	unsigned int packet_identifier;
	unsigned int phy_id;
	bool extended;
	bool link_is_active;
	unsigned int gap_count;
	unsigned int scode;
	bool is_contender;
	unsigned int power_class;
	enum phy_packet_self_id_port_status port_status[11];
	bool is_initiated_reset;
	bool has_more_packets;
	unsigned int sequence;
	unsigned int port_index;

	deserialize_phy_packet_self_id_zero(expected[0], &packet_identifier, &phy_id, &extended,
					    &link_is_active, &gap_count, &scode, &is_contender,
					    &power_class, &is_initiated_reset, &has_more_packets);

	KUNIT_EXPECT_EQ(test, PHY_PACKET_PACKET_IDENTIFIER_SELF_ID, packet_identifier);
	KUNIT_EXPECT_EQ(test, 0, phy_id);
	KUNIT_EXPECT_FALSE(test, extended);
	KUNIT_EXPECT_FALSE(test, link_is_active);
	KUNIT_EXPECT_EQ(test, 0x3f, gap_count);
	KUNIT_EXPECT_EQ(test, SCODE_400, scode);
	KUNIT_EXPECT_FALSE(test, is_contender);
	KUNIT_EXPECT_EQ(test, 0x4, power_class);
	KUNIT_EXPECT_FALSE(test, is_initiated_reset);
	// The zero packet announces a continuation (the extended packet below).
	KUNIT_EXPECT_TRUE(test, has_more_packets);

	serialize_phy_packet_self_id_zero(quadlets, packet_identifier, phy_id, extended,
					  link_is_active, gap_count, scode, is_contender,
					  power_class, is_initiated_reset, has_more_packets);

	deserialize_phy_packet_self_id_extended(expected[1], &packet_identifier, &phy_id, &extended,
						&sequence, &has_more_packets);

	KUNIT_EXPECT_EQ(test, PHY_PACKET_PACKET_IDENTIFIER_SELF_ID, packet_identifier);
	KUNIT_EXPECT_EQ(test, 0, phy_id);
	KUNIT_EXPECT_TRUE(test, extended);
	KUNIT_EXPECT_EQ(test, 0, sequence);
	KUNIT_EXPECT_FALSE(test, has_more_packets);

	serialize_phy_packet_self_id_extended(&quadlets[1], packet_identifier, phy_id, extended,
					      sequence, has_more_packets);

	for (port_index = 0; port_index < ARRAY_SIZE(port_status); ++port_index) {
		port_status[port_index] =
			self_id_sequence_get_port_status(expected, ARRAY_SIZE(expected), port_index);
	}

	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[0]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[1]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[2]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[3]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[4]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[5]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[6]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[7]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[8]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[9]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[10]);

	for (port_index = 0; port_index < ARRAY_SIZE(port_status); ++port_index) {
		self_id_sequence_set_port_status(quadlets, ARRAY_SIZE(quadlets), port_index,
						 port_status[port_index]);
	}

	KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected));
}
// Round trip of a PHY configuration packet that only sets the force-root-node
// flag (R bit) for root ID 2.
static void test_phy_packet_phy_config_force_root_node(struct kunit *test)
{
	const u32 expected = 0x02800000;
	u32 quadlet = 0;

	unsigned int packet_identifier;
	unsigned int root_id;
	bool has_force_root_node;
	bool has_gap_count_optimization;
	unsigned int gap_count;

	deserialize_phy_packet_phy_config(expected, &packet_identifier, &root_id,
					  &has_force_root_node, &has_gap_count_optimization,
					  &gap_count);

	KUNIT_EXPECT_EQ(test, PHY_PACKET_PACKET_IDENTIFIER_PHY_CONFIG, packet_identifier);
	KUNIT_EXPECT_EQ(test, 0x02, root_id);
	KUNIT_EXPECT_TRUE(test, has_force_root_node);
	KUNIT_EXPECT_FALSE(test, has_gap_count_optimization);
	KUNIT_EXPECT_EQ(test, 0, gap_count);

	serialize_phy_packet_phy_config(&quadlet, packet_identifier, root_id, has_force_root_node,
					has_gap_count_optimization, gap_count);

	KUNIT_EXPECT_EQ(test, quadlet, expected);
}
// Round trip of a PHY configuration packet that only sets the gap-count
// optimization flag (T bit) with gap count 0x0f for root ID 3.
static void test_phy_packet_phy_config_gap_count_optimization(struct kunit *test)
{
	const u32 expected = 0x034f0000;
	u32 quadlet = 0;

	unsigned int packet_identifier;
	unsigned int root_id;
	bool has_force_root_node;
	bool has_gap_count_optimization;
	unsigned int gap_count;

	deserialize_phy_packet_phy_config(expected, &packet_identifier, &root_id,
					  &has_force_root_node, &has_gap_count_optimization,
					  &gap_count);

	KUNIT_EXPECT_EQ(test, PHY_PACKET_PACKET_IDENTIFIER_PHY_CONFIG, packet_identifier);
	KUNIT_EXPECT_EQ(test, 0x03, root_id);
	KUNIT_EXPECT_FALSE(test, has_force_root_node);
	KUNIT_EXPECT_TRUE(test, has_gap_count_optimization);
	KUNIT_EXPECT_EQ(test, 0x0f, gap_count);

	serialize_phy_packet_phy_config(&quadlet, packet_identifier, root_id, has_force_root_node,
					has_gap_count_optimization, gap_count);

	KUNIT_EXPECT_EQ(test, quadlet, expected);
}
static struct kunit_case packet_serdes_test_cases[] = {
KUNIT_CASE(test_async_header_write_quadlet_request),
KUNIT_CASE(test_async_header_write_block_request),
......@@ -570,6 +899,11 @@ static struct kunit_case packet_serdes_test_cases[] = {
KUNIT_CASE(test_async_header_lock_request),
KUNIT_CASE(test_async_header_lock_response),
KUNIT_CASE(test_isoc_header),
KUNIT_CASE(test_phy_packet_self_id_zero_case0),
KUNIT_CASE(test_phy_packet_self_id_zero_case1),
KUNIT_CASE(test_phy_packet_self_id_zero_and_one),
KUNIT_CASE(test_phy_packet_phy_config_force_root_node),
KUNIT_CASE(test_phy_packet_phy_config_gap_count_optimization),
{}
};
......
// SPDX-License-Identifier: GPL-2.0-or-later
//
// phy-packet-definitions.h - The definitions of phy packet for IEEE 1394.
//
// Copyright (c) 2024 Takashi Sakamoto
#ifndef _FIREWIRE_PHY_PACKET_DEFINITIONS_H
#define _FIREWIRE_PHY_PACKET_DEFINITIONS_H
// The packet identifier occupies the top two bits of every phy packet quadlet.
#define PACKET_IDENTIFIER_MASK 0xc0000000
#define PACKET_IDENTIFIER_SHIFT 30

// Extract the 2-bit packet identifier common to all phy packets.
static inline unsigned int phy_packet_get_packet_identifier(u32 quadlet)
{
	return (quadlet & PACKET_IDENTIFIER_MASK) >> PACKET_IDENTIFIER_SHIFT;
}

// Store the packet identifier; other bits of the quadlet are preserved.
static inline void phy_packet_set_packet_identifier(u32 *quadlet, unsigned int packet_identifier)
{
	*quadlet &= ~PACKET_IDENTIFIER_MASK;
	*quadlet |= (packet_identifier << PACKET_IDENTIFIER_SHIFT) & PACKET_IDENTIFIER_MASK;
}
#define PHY_PACKET_PACKET_IDENTIFIER_PHY_CONFIG 0

// Field layout of the PHY configuration packet.
#define PHY_CONFIG_ROOT_ID_MASK 0x3f000000
#define PHY_CONFIG_ROOT_ID_SHIFT 24
#define PHY_CONFIG_FORCE_ROOT_NODE_MASK 0x00800000
#define PHY_CONFIG_FORCE_ROOT_NODE_SHIFT 23
#define PHY_CONFIG_GAP_COUNT_OPTIMIZATION_MASK 0x00400000
#define PHY_CONFIG_GAP_COUNT_OPTIMIZATION_SHIFT 22
#define PHY_CONFIG_GAP_COUNT_MASK 0x003f0000
#define PHY_CONFIG_GAP_COUNT_SHIFT 16

// Accessors for the fields of a PHY configuration packet. Every setter clears
// its own field first, so the other bits of the quadlet are preserved.

static inline unsigned int phy_packet_phy_config_get_root_id(u32 quadlet)
{
	return (quadlet & PHY_CONFIG_ROOT_ID_MASK) >> PHY_CONFIG_ROOT_ID_SHIFT;
}

static inline void phy_packet_phy_config_set_root_id(u32 *quadlet, unsigned int root_id)
{
	*quadlet &= ~PHY_CONFIG_ROOT_ID_MASK;
	*quadlet |= (root_id << PHY_CONFIG_ROOT_ID_SHIFT) & PHY_CONFIG_ROOT_ID_MASK;
}

static inline bool phy_packet_phy_config_get_force_root_node(u32 quadlet)
{
	return (quadlet & PHY_CONFIG_FORCE_ROOT_NODE_MASK) >> PHY_CONFIG_FORCE_ROOT_NODE_SHIFT;
}

static inline void phy_packet_phy_config_set_force_root_node(u32 *quadlet, bool has_force_root_node)
{
	*quadlet &= ~PHY_CONFIG_FORCE_ROOT_NODE_MASK;
	*quadlet |= (has_force_root_node << PHY_CONFIG_FORCE_ROOT_NODE_SHIFT) & PHY_CONFIG_FORCE_ROOT_NODE_MASK;
}

static inline bool phy_packet_phy_config_get_gap_count_optimization(u32 quadlet)
{
	return (quadlet & PHY_CONFIG_GAP_COUNT_OPTIMIZATION_MASK) >> PHY_CONFIG_GAP_COUNT_OPTIMIZATION_SHIFT;
}

static inline void phy_packet_phy_config_set_gap_count_optimization(u32 *quadlet, bool has_gap_count_optimization)
{
	*quadlet &= ~PHY_CONFIG_GAP_COUNT_OPTIMIZATION_MASK;
	*quadlet |= (has_gap_count_optimization << PHY_CONFIG_GAP_COUNT_OPTIMIZATION_SHIFT) & PHY_CONFIG_GAP_COUNT_OPTIMIZATION_MASK;
}

static inline unsigned int phy_packet_phy_config_get_gap_count(u32 quadlet)
{
	return (quadlet & PHY_CONFIG_GAP_COUNT_MASK) >> PHY_CONFIG_GAP_COUNT_SHIFT;
}

static inline void phy_packet_phy_config_set_gap_count(u32 *quadlet, unsigned int gap_count)
{
	*quadlet &= ~PHY_CONFIG_GAP_COUNT_MASK;
	*quadlet |= (gap_count << PHY_CONFIG_GAP_COUNT_SHIFT) & PHY_CONFIG_GAP_COUNT_MASK;
}
#define PHY_PACKET_PACKET_IDENTIFIER_SELF_ID 2

// Field layout common to the zero and extended forms of a self ID packet.
#define SELF_ID_PHY_ID_MASK 0x3f000000
#define SELF_ID_PHY_ID_SHIFT 24
#define SELF_ID_EXTENDED_MASK 0x00800000
#define SELF_ID_EXTENDED_SHIFT 23
#define SELF_ID_MORE_PACKETS_MASK 0x00000001
#define SELF_ID_MORE_PACKETS_SHIFT 0

// Fields specific to the zero (first) quadlet of a self ID sequence.
#define SELF_ID_ZERO_LINK_ACTIVE_MASK 0x00400000
#define SELF_ID_ZERO_LINK_ACTIVE_SHIFT 22
#define SELF_ID_ZERO_GAP_COUNT_MASK 0x003f0000
#define SELF_ID_ZERO_GAP_COUNT_SHIFT 16
#define SELF_ID_ZERO_SCODE_MASK 0x0000c000
#define SELF_ID_ZERO_SCODE_SHIFT 14
#define SELF_ID_ZERO_CONTENDER_MASK 0x00000800
#define SELF_ID_ZERO_CONTENDER_SHIFT 11
#define SELF_ID_ZERO_POWER_CLASS_MASK 0x00000700
#define SELF_ID_ZERO_POWER_CLASS_SHIFT 8
#define SELF_ID_ZERO_INITIATED_RESET_MASK 0x00000002
#define SELF_ID_ZERO_INITIATED_RESET_SHIFT 1

// Fields specific to extended (continuation) self ID quadlets.
#define SELF_ID_EXTENDED_SEQUENCE_MASK 0x00700000
#define SELF_ID_EXTENDED_SEQUENCE_SHIFT 20

#define SELF_ID_PORT_STATUS_MASK 0x3

#define SELF_ID_SEQUENCE_MAXIMUM_QUADLET_COUNT 4

// Accessors for the fields shared by both self ID packet forms. Every setter
// clears its own field first, so the other bits of the quadlet are preserved.

static inline unsigned int phy_packet_self_id_get_phy_id(u32 quadlet)
{
	return (quadlet & SELF_ID_PHY_ID_MASK) >> SELF_ID_PHY_ID_SHIFT;
}

static inline void phy_packet_self_id_set_phy_id(u32 *quadlet, unsigned int phy_id)
{
	*quadlet &= ~SELF_ID_PHY_ID_MASK;
	*quadlet |= (phy_id << SELF_ID_PHY_ID_SHIFT) & SELF_ID_PHY_ID_MASK;
}

static inline bool phy_packet_self_id_get_extended(u32 quadlet)
{
	return (quadlet & SELF_ID_EXTENDED_MASK) >> SELF_ID_EXTENDED_SHIFT;
}

static inline void phy_packet_self_id_set_extended(u32 *quadlet, bool extended)
{
	*quadlet &= ~SELF_ID_EXTENDED_MASK;
	*quadlet |= (extended << SELF_ID_EXTENDED_SHIFT) & SELF_ID_EXTENDED_MASK;
}
// Accessors for the fields specific to the zero (first) quadlet of a self ID
// sequence. Every setter clears its own field first, so the other bits of the
// quadlet are preserved.

static inline bool phy_packet_self_id_zero_get_link_active(u32 quadlet)
{
	return (quadlet & SELF_ID_ZERO_LINK_ACTIVE_MASK) >> SELF_ID_ZERO_LINK_ACTIVE_SHIFT;
}

static inline void phy_packet_self_id_zero_set_link_active(u32 *quadlet, bool is_active)
{
	*quadlet &= ~SELF_ID_ZERO_LINK_ACTIVE_MASK;
	*quadlet |= (is_active << SELF_ID_ZERO_LINK_ACTIVE_SHIFT) & SELF_ID_ZERO_LINK_ACTIVE_MASK;
}

static inline unsigned int phy_packet_self_id_zero_get_gap_count(u32 quadlet)
{
	return (quadlet & SELF_ID_ZERO_GAP_COUNT_MASK) >> SELF_ID_ZERO_GAP_COUNT_SHIFT;
}

static inline void phy_packet_self_id_zero_set_gap_count(u32 *quadlet, unsigned int gap_count)
{
	*quadlet &= ~SELF_ID_ZERO_GAP_COUNT_MASK;
	*quadlet |= (gap_count << SELF_ID_ZERO_GAP_COUNT_SHIFT) & SELF_ID_ZERO_GAP_COUNT_MASK;
}

static inline unsigned int phy_packet_self_id_zero_get_scode(u32 quadlet)
{
	return (quadlet & SELF_ID_ZERO_SCODE_MASK) >> SELF_ID_ZERO_SCODE_SHIFT;
}

static inline void phy_packet_self_id_zero_set_scode(u32 *quadlet, unsigned int speed)
{
	*quadlet &= ~SELF_ID_ZERO_SCODE_MASK;
	*quadlet |= (speed << SELF_ID_ZERO_SCODE_SHIFT) & SELF_ID_ZERO_SCODE_MASK;
}

static inline bool phy_packet_self_id_zero_get_contender(u32 quadlet)
{
	return (quadlet & SELF_ID_ZERO_CONTENDER_MASK) >> SELF_ID_ZERO_CONTENDER_SHIFT;
}

static inline void phy_packet_self_id_zero_set_contender(u32 *quadlet, bool is_contender)
{
	*quadlet &= ~SELF_ID_ZERO_CONTENDER_MASK;
	*quadlet |= (is_contender << SELF_ID_ZERO_CONTENDER_SHIFT) & SELF_ID_ZERO_CONTENDER_MASK;
}

static inline unsigned int phy_packet_self_id_zero_get_power_class(u32 quadlet)
{
	return (quadlet & SELF_ID_ZERO_POWER_CLASS_MASK) >> SELF_ID_ZERO_POWER_CLASS_SHIFT;
}

static inline void phy_packet_self_id_zero_set_power_class(u32 *quadlet, unsigned int power_class)
{
	*quadlet &= ~SELF_ID_ZERO_POWER_CLASS_MASK;
	*quadlet |= (power_class << SELF_ID_ZERO_POWER_CLASS_SHIFT) & SELF_ID_ZERO_POWER_CLASS_MASK;
}

static inline bool phy_packet_self_id_zero_get_initiated_reset(u32 quadlet)
{
	return (quadlet & SELF_ID_ZERO_INITIATED_RESET_MASK) >> SELF_ID_ZERO_INITIATED_RESET_SHIFT;
}

static inline void phy_packet_self_id_zero_set_initiated_reset(u32 *quadlet, bool is_initiated_reset)
{
	*quadlet &= ~SELF_ID_ZERO_INITIATED_RESET_MASK;
	*quadlet |= (is_initiated_reset << SELF_ID_ZERO_INITIATED_RESET_SHIFT) & SELF_ID_ZERO_INITIATED_RESET_MASK;
}
// Whether further self ID packets follow in the same sequence.
static inline bool phy_packet_self_id_get_more_packets(u32 quadlet)
{
	return (quadlet & SELF_ID_MORE_PACKETS_MASK) >> SELF_ID_MORE_PACKETS_SHIFT;
}

// Store the more-packets flag; other bits of the quadlet are preserved.
static inline void phy_packet_self_id_set_more_packets(u32 *quadlet, bool is_more_packets)
{
	*quadlet &= ~SELF_ID_MORE_PACKETS_MASK;
	*quadlet |= (is_more_packets << SELF_ID_MORE_PACKETS_SHIFT) & SELF_ID_MORE_PACKETS_MASK;
}

// Extract the sequence number of an extended self ID quadlet.
static inline unsigned int phy_packet_self_id_extended_get_sequence(u32 quadlet)
{
	return (quadlet & SELF_ID_EXTENDED_SEQUENCE_MASK) >> SELF_ID_EXTENDED_SEQUENCE_SHIFT;
}
// Store the sequence number of an extended self ID quadlet; other bits of the
// quadlet are preserved.
static inline void phy_packet_self_id_extended_set_sequence(u32 *quadlet, unsigned int sequence)
{
	*quadlet &= ~SELF_ID_EXTENDED_SEQUENCE_MASK;
	// Shift by the sequence field position (SELF_ID_EXTENDED_SEQUENCE_SHIFT,
	// 20), not by the extended-flag position (SELF_ID_EXTENDED_SHIFT, 23).
	// With the wrong shift, (sequence << 23) & 0x00700000 was always zero,
	// so a non-zero sequence number could never be serialized.
	*quadlet |= (sequence << SELF_ID_EXTENDED_SEQUENCE_SHIFT) & SELF_ID_EXTENDED_SEQUENCE_MASK;
}
// Cursor over a buffer holding concatenated self ID sequences.
struct self_id_sequence_enumerator {
	const u32 *cursor;		// Next quadlet to consume.
	unsigned int quadlet_count;	// Quadlets remaining in the buffer.
};

// Return a pointer to the next self ID sequence and store its quadlet count
// into *quadlet_count, advancing the enumerator past it.
//
// Returns ERR_PTR(-ENODATA) when the buffer is exhausted, or ERR_PTR(-EPROTO)
// when the sequence is malformed: it overruns the buffer or the maximum of
// SELF_ID_SEQUENCE_MAXIMUM_QUADLET_COUNT quadlets, a continuation quadlet
// lacks the extended flag, or a sequence number is out of order.
static inline const u32 *self_id_sequence_enumerator_next(
		struct self_id_sequence_enumerator *enumerator, unsigned int *quadlet_count)
{
	const u32 *self_id_sequence, *cursor;
	u32 quadlet;
	unsigned int count;
	unsigned int sequence;

	if (enumerator->cursor == NULL || enumerator->quadlet_count == 0)
		return ERR_PTR(-ENODATA);

	cursor = enumerator->cursor;
	count = 1;

	quadlet = *cursor;
	sequence = 0;
	// Walk the continuation quadlets announced by the more-packets flag.
	while (phy_packet_self_id_get_more_packets(quadlet)) {
		if (count >= enumerator->quadlet_count ||
		    count >= SELF_ID_SEQUENCE_MAXIMUM_QUADLET_COUNT)
			return ERR_PTR(-EPROTO);
		++cursor;
		++count;
		quadlet = *cursor;

		// Each continuation must be an extended self ID whose sequence
		// number counts up from zero.
		if (!phy_packet_self_id_get_extended(quadlet) ||
		    sequence != phy_packet_self_id_extended_get_sequence(quadlet))
			return ERR_PTR(-EPROTO);
		++sequence;
	}

	*quadlet_count = count;
	self_id_sequence = enumerator->cursor;

	enumerator->cursor += count;
	enumerator->quadlet_count -= count;

	return self_id_sequence;
}
// Connection status encoded in the 2-bit port fields of self ID packets.
enum phy_packet_self_id_port_status {
	PHY_PACKET_SELF_ID_PORT_STATUS_NONE = 0,
	PHY_PACKET_SELF_ID_PORT_STATUS_NCONN = 1,
	PHY_PACKET_SELF_ID_PORT_STATUS_PARENT = 2,
	PHY_PACKET_SELF_ID_PORT_STATUS_CHILD = 3,
};

// Number of port entries a sequence of the given quadlet count can encode:
// 3 in the zero quadlet plus 8 per extended quadlet, i.e. 8 * count - 5.
static inline unsigned int self_id_sequence_get_port_capacity(unsigned int quadlet_count)
{
	return quadlet_count * 8 - 5;
}
// Read the 2-bit status of the given port from a self ID sequence. A port
// index beyond the sequence (or beyond the maximum sequence length) reads as
// PHY_PACKET_SELF_ID_PORT_STATUS_NONE.
static inline enum phy_packet_self_id_port_status self_id_sequence_get_port_status(
	const u32 *self_id_sequence, unsigned int quadlet_count, unsigned int port_index)
{
	unsigned int index, shift;

	// Map the port index to its quadlet and bit position: the zero quadlet
	// holds 3 port fields, each extended quadlet 8 more, 2 bits apiece.
	index = (port_index + 5) / 8;
	shift = 16 - ((port_index + 5) % 8) * 2;

	if (index < quadlet_count && index < SELF_ID_SEQUENCE_MAXIMUM_QUADLET_COUNT)
		return (self_id_sequence[index] >> shift) & SELF_ID_PORT_STATUS_MASK;

	return PHY_PACKET_SELF_ID_PORT_STATUS_NONE;
}
// Write the 2-bit status of the given port into a self ID sequence; the
// counterpart of self_id_sequence_get_port_status(). An out-of-range port
// index is silently ignored.
static inline void self_id_sequence_set_port_status(u32 *self_id_sequence, unsigned int quadlet_count,
						    unsigned int port_index,
						    enum phy_packet_self_id_port_status status)
{
	unsigned int index, shift;

	// Same port-to-bit mapping as self_id_sequence_get_port_status().
	index = (port_index + 5) / 8;
	shift = 16 - ((port_index + 5) % 8) * 2;

	// Also bound by the maximum sequence length, mirroring the getter, so
	// that a port index past the specification's 4-quadlet limit never
	// writes into the buffer even if the caller passes a larger count.
	if (index < quadlet_count && index < SELF_ID_SEQUENCE_MAXIMUM_QUADLET_COUNT) {
		self_id_sequence[index] &= ~(SELF_ID_PORT_STATUS_MASK << shift);
		self_id_sequence[index] |= status << shift;
	}
}
#endif // _FIREWIRE_PHY_PACKET_DEFINITIONS_H
// SPDX-License-Identifier: GPL-2.0-or-later
//
// self-id-sequence-helper-test.c - An application of Kunit to test helpers of self ID sequence.
//
// Copyright (c) 2024 Takashi Sakamoto
#include <kunit/test.h>
#include "phy-packet-definitions.h"
// Enumerate a buffer holding four well-formed self ID sequences of lengths
// 1, 2, 3, and 1 quadlets; check each returned pointer, its length, and the
// remaining count, then verify exhaustion reports -ENODATA.
static void test_self_id_sequence_enumerator_valid(struct kunit *test)
{
	static const u32 valid_sequences[] = {
		0x00000000,
		0x00000001, 0x00800000,
		0x00000001, 0x00800001, 0x00900000,
		0x00000000,
	};
	struct self_id_sequence_enumerator enumerator;
	const u32 *entry;
	unsigned int quadlet_count;

	enumerator.cursor = valid_sequences;
	enumerator.quadlet_count = ARRAY_SIZE(valid_sequences);

	entry = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
	KUNIT_EXPECT_PTR_EQ(test, entry, &valid_sequences[0]);
	KUNIT_EXPECT_EQ(test, quadlet_count, 1);
	KUNIT_EXPECT_EQ(test, enumerator.quadlet_count, 6);

	entry = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
	KUNIT_EXPECT_PTR_EQ(test, entry, &valid_sequences[1]);
	KUNIT_EXPECT_EQ(test, quadlet_count, 2);
	KUNIT_EXPECT_EQ(test, enumerator.quadlet_count, 4);

	entry = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
	KUNIT_EXPECT_PTR_EQ(test, entry, &valid_sequences[3]);
	KUNIT_EXPECT_EQ(test, quadlet_count, 3);
	KUNIT_EXPECT_EQ(test, enumerator.quadlet_count, 1);

	entry = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
	KUNIT_EXPECT_PTR_EQ(test, entry, &valid_sequences[6]);
	KUNIT_EXPECT_EQ(test, quadlet_count, 1);
	KUNIT_EXPECT_EQ(test, enumerator.quadlet_count, 0);

	entry = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
	KUNIT_EXPECT_EQ(test, PTR_ERR(entry), -ENODATA);
}
// A sequence whose zero quadlet announces a continuation that is not present
// in the buffer must be rejected with -EPROTO.
static void test_self_id_sequence_enumerator_invalid(struct kunit *test)
{
	static const u32 invalid_sequences[] = {
		0x00000001,
	};
	struct self_id_sequence_enumerator enumerator;
	const u32 *entry;
	unsigned int count;

	enumerator.cursor = invalid_sequences;
	enumerator.quadlet_count = ARRAY_SIZE(invalid_sequences);

	entry = self_id_sequence_enumerator_next(&enumerator, &count);
	KUNIT_EXPECT_EQ(test, PTR_ERR(entry), -EPROTO);
}
// Verify decoding and re-encoding of the two-bit port status fields across a
// self-ID sequence of four quadlets (self ID zero plus three extended
// quadlets): every status read from 'expected' is written back into
// 'quadlets', which must then match 'expected' byte for byte.
static void test_self_id_sequence_get_port_status(struct kunit *test)
{
	// Reference sequence with all port status fields populated.
	static const u32 expected[] = {
		0x000000e5,
		0x00839e79,
		0x0091e79d,
		0x00a279e4,
	};
	// Same sequence with the port status fields cleared; filled in by the
	// set helper below and compared against 'expected'.
	u32 quadlets[] = {
		0x00000001,
		0x00800001,
		0x00900001,
		0x00a00000,
	};
	enum phy_packet_self_id_port_status port_status[28];
	unsigned int port_capacity;
	unsigned int port_index;

	KUNIT_ASSERT_EQ(test, ARRAY_SIZE(expected), ARRAY_SIZE(quadlets));

	// With an extra port, to exercise the out-of-range case below.
	port_capacity = self_id_sequence_get_port_capacity(ARRAY_SIZE(expected)) + 1;
	KUNIT_ASSERT_EQ(test, port_capacity, ARRAY_SIZE(port_status));

	for (port_index = 0; port_index < port_capacity; ++port_index) {
		port_status[port_index] =
			self_id_sequence_get_port_status(expected, ARRAY_SIZE(expected), port_index);
		self_id_sequence_set_port_status(quadlets, ARRAY_SIZE(quadlets), port_index,
						 port_status[port_index]);
	}

	// Self ID zero.
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[0]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[1]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[2]);

	// Self ID one.
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[3]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[4]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[5]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[6]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[7]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[8]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[9]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[10]);

	// Self ID two.
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[11]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[12]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[13]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[14]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[15]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[16]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[17]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[18]);

	// Self ID three.
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[19]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[20]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[21]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[22]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[23]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[24]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[25]);
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[26]);

	// Out of order.
	KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[27]);

	KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected));
}
// Table of the test cases exercised by this suite.
static struct kunit_case self_id_sequence_helper_test_cases[] = {
	KUNIT_CASE(test_self_id_sequence_enumerator_valid),
	KUNIT_CASE(test_self_id_sequence_enumerator_invalid),
	KUNIT_CASE(test_self_id_sequence_get_port_status),
	{}
};

// Suite definition; kunit_test_suite() registers it with the KUnit framework.
static struct kunit_suite self_id_sequence_helper_test_suite = {
	.name = "self-id-sequence-helper",
	.test_cases = self_id_sequence_helper_test_cases,
};
kunit_test_suite(self_id_sequence_helper_test_suite);

MODULE_DESCRIPTION("Unit test suite for helpers of self ID sequence");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0-or-later
// Copyright (c) 2024 Takashi Sakamoto
#undef TRACE_SYSTEM
#define TRACE_SYSTEM firewire
#if !defined(_FIREWIRE_TRACE_EVENT_H) || defined(TRACE_HEADER_MULTI_READ)
......@@ -11,7 +12,7 @@
#include <linux/firewire-constants.h>
#include "../../../drivers/firewire/packet-header-definitions.h"
// Some macros are defined in 'drivers/firewire/packet-header-definitions.h'.
// The content of TP_printk field is preprocessed, then put to the module binary.
#define ASYNC_HEADER_GET_DESTINATION(header) \
......@@ -366,6 +367,544 @@ TRACE_EVENT(bus_reset_handle,
)
);
// Some macros are defined in 'drivers/firewire/phy-packet-definitions.h'.
// The content of TP_printk field is preprocessed, then put to the module binary.

// Field accessors for the first quadlet (self ID zero) of a self-ID
// sequence. Plain macros (not inline functions) so that they can be
// expanded inside the TP_printk() format processing below; #undef'd again
// right after the event definition.
#define PHY_PACKET_SELF_ID_GET_PHY_ID(quads) \
	((((const u32 *)quads)[0] & SELF_ID_PHY_ID_MASK) >> SELF_ID_PHY_ID_SHIFT)

#define PHY_PACKET_SELF_ID_GET_LINK_ACTIVE(quads) \
	((((const u32 *)quads)[0] & SELF_ID_ZERO_LINK_ACTIVE_MASK) >> SELF_ID_ZERO_LINK_ACTIVE_SHIFT)

#define PHY_PACKET_SELF_ID_GET_GAP_COUNT(quads) \
	((((const u32 *)quads)[0] & SELF_ID_ZERO_GAP_COUNT_MASK) >> SELF_ID_ZERO_GAP_COUNT_SHIFT)

#define PHY_PACKET_SELF_ID_GET_SCODE(quads) \
	((((const u32 *)quads)[0] & SELF_ID_ZERO_SCODE_MASK) >> SELF_ID_ZERO_SCODE_SHIFT)

#define PHY_PACKET_SELF_ID_GET_CONTENDER(quads) \
	((((const u32 *)quads)[0] & SELF_ID_ZERO_CONTENDER_MASK) >> SELF_ID_ZERO_CONTENDER_SHIFT)

#define PHY_PACKET_SELF_ID_GET_POWER_CLASS(quads) \
	((((const u32 *)quads)[0] & SELF_ID_ZERO_POWER_CLASS_MASK) >> SELF_ID_ZERO_POWER_CLASS_SHIFT)

#define PHY_PACKET_SELF_ID_GET_INITIATED_RESET(quads) \
	((((const u32 *)quads)[0] & SELF_ID_ZERO_INITIATED_RESET_MASK) >> SELF_ID_ZERO_INITIATED_RESET_SHIFT)

// Traces one self-ID sequence of a node (self ID zero plus any extended
// quadlets), together with the decoded per-port connection status.
TRACE_EVENT(self_id_sequence,
	TP_PROTO(unsigned int card_index, const u32 *self_id_sequence, unsigned int quadlet_count, unsigned int generation),
	TP_ARGS(card_index, self_id_sequence, quadlet_count, generation),
	TP_STRUCT__entry(
		__field(u8, card_index)
		__field(u8, generation)
		// One status byte per port which the sequence can describe.
		__dynamic_array(u8, port_status, self_id_sequence_get_port_capacity(quadlet_count))
		// Raw copy of the quadlets of the sequence.
		__dynamic_array(u32, self_id_sequence, quadlet_count)
	),
	TP_fast_assign(
		__entry->card_index = card_index;
		__entry->generation = generation;
		{
			u8 *port_status = __get_dynamic_array(port_status);
			unsigned int port_index;

			// Decode the status of every port up front so that
			// TP_printk() can dump it as a plain byte array. For a
			// u8 array the byte length equals the element count, so
			// __get_dynamic_array_len() is a valid loop bound here.
			for (port_index = 0; port_index < __get_dynamic_array_len(port_status); ++port_index) {
				port_status[port_index] =
					self_id_sequence_get_port_status(self_id_sequence,
								quadlet_count, port_index);
			}
		}
		memcpy(__get_dynamic_array(self_id_sequence), self_id_sequence,
					__get_dynamic_array_len(self_id_sequence));
	),
	TP_printk(
		"card_index=%u generation=%u phy_id=0x%02x link_active=%s gap_count=%u scode=%u contender=%s power_class=%u initiated_reset=%s port_status=%s self_id_sequence=%s",
		__entry->card_index,
		__entry->generation,
		PHY_PACKET_SELF_ID_GET_PHY_ID(__get_dynamic_array(self_id_sequence)),
		PHY_PACKET_SELF_ID_GET_LINK_ACTIVE(__get_dynamic_array(self_id_sequence)) ? "true" : "false",
		PHY_PACKET_SELF_ID_GET_GAP_COUNT(__get_dynamic_array(self_id_sequence)),
		PHY_PACKET_SELF_ID_GET_SCODE(__get_dynamic_array(self_id_sequence)),
		PHY_PACKET_SELF_ID_GET_CONTENDER(__get_dynamic_array(self_id_sequence)) ? "true" : "false",
		PHY_PACKET_SELF_ID_GET_POWER_CLASS(__get_dynamic_array(self_id_sequence)),
		PHY_PACKET_SELF_ID_GET_INITIATED_RESET(__get_dynamic_array(self_id_sequence)) ? "true" : "false",
		__print_array(__get_dynamic_array(port_status), __get_dynamic_array_len(port_status), 1),
		__print_array(__get_dynamic_array(self_id_sequence),
			      __get_dynamic_array_len(self_id_sequence) / QUADLET_SIZE, QUADLET_SIZE)
	)
);

// The accessors above are local to this event only.
#undef PHY_PACKET_SELF_ID_GET_PHY_ID
#undef PHY_PACKET_SELF_ID_GET_LINK_ACTIVE
#undef PHY_PACKET_SELF_ID_GET_GAP_COUNT
#undef PHY_PACKET_SELF_ID_GET_SCODE
#undef PHY_PACKET_SELF_ID_GET_CONTENDER
#undef PHY_PACKET_SELF_ID_GET_POWER_CLASS
#undef PHY_PACKET_SELF_ID_GET_INITIATED_RESET
// Logged on allocation of an isochronous context for transmission. The
// context pointer is recorded as an opaque id so that later events for the
// same context can be correlated.
TRACE_EVENT_CONDITION(isoc_outbound_allocate,
	TP_PROTO(const struct fw_iso_context *ctx, unsigned int channel, unsigned int scode),
	TP_ARGS(ctx, channel, scode),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT),
	TP_STRUCT__entry(
		__field(u64, context)
		__field(u8, card_index)
		__field(u8, channel)
		__field(u8, scode)
	),
	TP_fast_assign(
		__entry->context = (uintptr_t)ctx;
		__entry->card_index = ctx->card->index;
		__entry->channel = channel;
		__entry->scode = scode;
	),
	TP_printk(
		"context=0x%llx card_index=%u channel=%u scode=%u",
		__entry->context,
		__entry->card_index,
		__entry->channel,
		__entry->scode
	)
);

// Logged on allocation of an isochronous context for single-channel
// reception; records the channel and the per-packet header size.
TRACE_EVENT_CONDITION(isoc_inbound_single_allocate,
	TP_PROTO(const struct fw_iso_context *ctx, unsigned int channel, unsigned int header_size),
	TP_ARGS(ctx, channel, header_size),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE),
	TP_STRUCT__entry(
		__field(u64, context)
		__field(u8, card_index)
		__field(u8, channel)
		__field(u8, header_size)
	),
	TP_fast_assign(
		__entry->context = (uintptr_t)ctx;
		__entry->card_index = ctx->card->index;
		__entry->channel = channel;
		__entry->header_size = header_size;
	),
	TP_printk(
		"context=0x%llx card_index=%u channel=%u header_size=%u",
		__entry->context,
		__entry->card_index,
		__entry->channel,
		__entry->header_size
	)
);

// Logged on allocation of an isochronous context for multichannel
// reception. The channels are not known at allocation time; they are traced
// separately by the isoc_inbound_multiple_channels event.
TRACE_EVENT_CONDITION(isoc_inbound_multiple_allocate,
	TP_PROTO(const struct fw_iso_context *ctx),
	TP_ARGS(ctx),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL),
	TP_STRUCT__entry(
		__field(u64, context)
		__field(u8, card_index)
	),
	TP_fast_assign(
		__entry->context = (uintptr_t)ctx;
		__entry->card_index = ctx->card->index;
	),
	TP_printk(
		"context=0x%llx card_index=%u",
		__entry->context,
		__entry->card_index
	)
);
// Common layout for the context-destruction events; only the opaque context
// id and the card index are recorded.
DECLARE_EVENT_CLASS(isoc_destroy_template,
	TP_PROTO(const struct fw_iso_context *ctx),
	TP_ARGS(ctx),
	TP_STRUCT__entry(
		__field(u64, context)
		__field(u8, card_index)
	),
	TP_fast_assign(
		__entry->context = (uintptr_t)ctx;
		__entry->card_index = ctx->card->index;
	),
	TP_printk(
		"context=0x%llx card_index=%u",
		__entry->context,
		__entry->card_index
	)
);
// NOTE: the terminating ';' above was missing; added for consistency with
// the other DECLARE_EVENT_CLASS() uses in this file (e.g. isoc_flush_template).

// One destroy event per context type, selected by TP_CONDITION().
DEFINE_EVENT_CONDITION(isoc_destroy_template, isoc_outbound_destroy,
	TP_PROTO(const struct fw_iso_context *ctx),
	TP_ARGS(ctx),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT)
);

DEFINE_EVENT_CONDITION(isoc_destroy_template, isoc_inbound_single_destroy,
	TP_PROTO(const struct fw_iso_context *ctx),
	TP_ARGS(ctx),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE)
);

DEFINE_EVENT_CONDITION(isoc_destroy_template, isoc_inbound_multiple_destroy,
	TP_PROTO(const struct fw_iso_context *ctx),
	TP_ARGS(ctx),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
);
// Logged when the channels of a multichannel receive context are set; the
// 64 possible isochronous channels are recorded as a bitmask.
TRACE_EVENT(isoc_inbound_multiple_channels,
	TP_PROTO(const struct fw_iso_context *ctx, u64 channels),
	TP_ARGS(ctx, channels),
	TP_STRUCT__entry(
		__field(u64, context)
		__field(u8, card_index)
		__field(u64, channels)
	),
	TP_fast_assign(
		__entry->context = (uintptr_t)ctx;
		__entry->card_index = ctx->card->index;
		__entry->channels = channels;
	),
	TP_printk(
		"context=0x%llx card_index=%u channels=0x%016llx",
		__entry->context,
		__entry->card_index,
		__entry->channels
	)
);
// Logged when an isochronous transmit context starts. A negative
// 'cycle_match' argument means no cycle match was requested; in that case
// cycle_match is recorded as false and the cycle field is forced to zero.
TRACE_EVENT_CONDITION(isoc_outbound_start,
	TP_PROTO(const struct fw_iso_context *ctx, int cycle_match),
	TP_ARGS(ctx, cycle_match),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT),
	TP_STRUCT__entry(
		__field(u64, context)
		__field(u8, card_index)
		__field(bool, cycle_match)
		__field(u16, cycle)
	),
	TP_fast_assign(
		__entry->context = (uintptr_t)ctx;
		__entry->card_index = ctx->card->index;
		__entry->cycle_match = cycle_match < 0 ? false : true;
		// Only meaningful when cycle_match is true.
		__entry->cycle = __entry->cycle_match ? (u16)cycle_match : 0;
	),
	TP_printk(
		"context=0x%llx card_index=%u cycle_match=%s cycle=0x%04x",
		__entry->context,
		__entry->card_index,
		__entry->cycle_match ? "true" : "false",
		__entry->cycle
	)
);

// Common layout for the start events of both receive context types; besides
// the optional cycle match, the sync code and the matched tag bits are
// recorded.
DECLARE_EVENT_CLASS(isoc_inbound_start_template,
	TP_PROTO(const struct fw_iso_context *ctx, int cycle_match, unsigned int sync, unsigned int tags),
	TP_ARGS(ctx, cycle_match, sync, tags),
	TP_STRUCT__entry(
		__field(u64, context)
		__field(u8, card_index)
		__field(bool, cycle_match)
		__field(u16, cycle)
		__field(u8, sync)
		__field(u8, tags)
	),
	TP_fast_assign(
		__entry->context = (uintptr_t)ctx;
		__entry->card_index = ctx->card->index;
		__entry->cycle_match = cycle_match < 0 ? false : true;
		__entry->cycle = __entry->cycle_match ? (u16)cycle_match : 0;
		__entry->sync = sync;
		__entry->tags = tags;
	),
	TP_printk(
		"context=0x%llx card_index=%u cycle_match=%s cycle=0x%04x sync=%u tags=%s",
		__entry->context,
		__entry->card_index,
		__entry->cycle_match ? "true" : "false",
		__entry->cycle,
		__entry->sync,
		// Prints the matched tags as e.g. "0|1".
		__print_flags(__entry->tags, "|",
			{ FW_ISO_CONTEXT_MATCH_TAG0, "0" },
			{ FW_ISO_CONTEXT_MATCH_TAG1, "1" },
			{ FW_ISO_CONTEXT_MATCH_TAG2, "2" },
			{ FW_ISO_CONTEXT_MATCH_TAG3, "3" }
		)
	)
);

DEFINE_EVENT_CONDITION(isoc_inbound_start_template, isoc_inbound_single_start,
	TP_PROTO(const struct fw_iso_context *ctx, int cycle_match, unsigned int sync, unsigned int tags),
	TP_ARGS(ctx, cycle_match, sync, tags),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE)
);

DEFINE_EVENT_CONDITION(isoc_inbound_start_template, isoc_inbound_multiple_start,
	TP_PROTO(const struct fw_iso_context *ctx, int cycle_match, unsigned int sync, unsigned int tags),
	TP_ARGS(ctx, cycle_match, sync, tags),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
);
// Common layout for the context-stop events; only the opaque context id and
// the card index are recorded.
DECLARE_EVENT_CLASS(isoc_stop_template,
	TP_PROTO(const struct fw_iso_context *ctx),
	TP_ARGS(ctx),
	TP_STRUCT__entry(
		__field(u64, context)
		__field(u8, card_index)
	),
	TP_fast_assign(
		__entry->context = (uintptr_t)ctx;
		__entry->card_index = ctx->card->index;
	),
	TP_printk(
		"context=0x%llx card_index=%u",
		__entry->context,
		__entry->card_index
	)
);
// NOTE: the terminating ';' above was missing; added for consistency with
// the other DECLARE_EVENT_CLASS() uses in this file (e.g. isoc_flush_template).

// One stop event per context type, selected by TP_CONDITION().
DEFINE_EVENT_CONDITION(isoc_stop_template, isoc_outbound_stop,
	TP_PROTO(const struct fw_iso_context *ctx),
	TP_ARGS(ctx),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT)
);

DEFINE_EVENT_CONDITION(isoc_stop_template, isoc_inbound_single_stop,
	TP_PROTO(const struct fw_iso_context *ctx),
	TP_ARGS(ctx),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE)
);

DEFINE_EVENT_CONDITION(isoc_stop_template, isoc_inbound_multiple_stop,
	TP_PROTO(const struct fw_iso_context *ctx),
	TP_ARGS(ctx),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
);
// Common layout for the flush events of isochronous contexts; only the
// opaque context id and the card index are recorded.
DECLARE_EVENT_CLASS(isoc_flush_template,
	TP_PROTO(const struct fw_iso_context *ctx),
	TP_ARGS(ctx),
	TP_STRUCT__entry(
		__field(u64, context)
		__field(u8, card_index)
	),
	TP_fast_assign(
		__entry->context = (uintptr_t)ctx;
		__entry->card_index = ctx->card->index;
	),
	TP_printk(
		"context=0x%llx card_index=%u",
		__entry->context,
		__entry->card_index
	)
);

DEFINE_EVENT_CONDITION(isoc_flush_template, isoc_outbound_flush,
	TP_PROTO(const struct fw_iso_context *ctx),
	TP_ARGS(ctx),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT)
);

DEFINE_EVENT_CONDITION(isoc_flush_template, isoc_inbound_single_flush,
	TP_PROTO(const struct fw_iso_context *ctx),
	TP_ARGS(ctx),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE)
);

DEFINE_EVENT_CONDITION(isoc_flush_template, isoc_inbound_multiple_flush,
	TP_PROTO(const struct fw_iso_context *ctx),
	TP_ARGS(ctx),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
);

// Common layout for the flush-completions events. Structurally identical to
// isoc_flush_template, but kept as a separate class so the two operations
// remain distinguishable in trace output.
DECLARE_EVENT_CLASS(isoc_flush_completions_template,
	TP_PROTO(const struct fw_iso_context *ctx),
	TP_ARGS(ctx),
	TP_STRUCT__entry(
		__field(u64, context)
		__field(u8, card_index)
	),
	TP_fast_assign(
		__entry->context = (uintptr_t)ctx;
		__entry->card_index = ctx->card->index;
	),
	TP_printk(
		"context=0x%llx card_index=%u",
		__entry->context,
		__entry->card_index
	)
);

DEFINE_EVENT_CONDITION(isoc_flush_completions_template, isoc_outbound_flush_completions,
	TP_PROTO(const struct fw_iso_context *ctx),
	TP_ARGS(ctx),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT)
);

DEFINE_EVENT_CONDITION(isoc_flush_completions_template, isoc_inbound_single_flush_completions,
	TP_PROTO(const struct fw_iso_context *ctx),
	TP_ARGS(ctx),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE)
);

DEFINE_EVENT_CONDITION(isoc_flush_completions_template, isoc_inbound_multiple_flush_completions,
	TP_PROTO(const struct fw_iso_context *ctx),
	TP_ARGS(ctx),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
);
// Shared TP_STRUCT__entry()/TP_fast_assign() bodies for the three queueing
// events below. Expressed as helper macros (and #undef'd right after use)
// so that each event can still provide its own TP_printk() output.
#define TP_STRUCT__entry_iso_packet(ctx, buffer_offset, packet) \
	TP_STRUCT__entry( \
		__field(u64, context) \
		__field(u8, card_index) \
		__field(u32, buffer_offset) \
		__field(bool, interrupt) \
		__field(bool, skip) \
		__field(u8, sy) \
		__field(u8, tag) \
		__dynamic_array(u32, header, packet->header_length / QUADLET_SIZE) \
	)

#define TP_fast_assign_iso_packet(ctx, buffer_offset, packet) \
	TP_fast_assign( \
		__entry->context = (uintptr_t)ctx; \
		__entry->card_index = ctx->card->index; \
		__entry->buffer_offset = buffer_offset; \
		__entry->interrupt = packet->interrupt; \
		__entry->skip = packet->skip; \
		__entry->sy = packet->sy; \
		__entry->tag = packet->tag; \
		memcpy(__get_dynamic_array(header), packet->header, \
		       __get_dynamic_array_len(header)); \
	)

// Logged when a packet is queued to an isochronous transmit context; dumps
// the full packet parameters including the header quadlets.
TRACE_EVENT_CONDITION(isoc_outbound_queue,
	TP_PROTO(const struct fw_iso_context *ctx, unsigned long buffer_offset, const struct fw_iso_packet *packet),
	TP_ARGS(ctx, buffer_offset, packet),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT),
	TP_STRUCT__entry_iso_packet(ctx, buffer_offset, packet),
	TP_fast_assign_iso_packet(ctx, buffer_offset, packet),
	TP_printk(
		"context=0x%llx card_index=%u buffer_offset=0x%x interrupt=%s skip=%s sy=%d tag=%u header=%s",
		__entry->context,
		__entry->card_index,
		__entry->buffer_offset,
		__entry->interrupt ? "true" : "false",
		__entry->skip ? "true" : "false",
		__entry->sy,
		__entry->tag,
		__print_array(__get_dynamic_array(header),
			      __get_dynamic_array_len(header) / QUADLET_SIZE, QUADLET_SIZE)
	)
);

// Logged when a buffer chunk is queued to a single-channel receive context.
// All iso-packet fields are recorded, but only the ones relevant to
// reception are printed.
TRACE_EVENT_CONDITION(isoc_inbound_single_queue,
	TP_PROTO(const struct fw_iso_context *ctx, unsigned long buffer_offset, const struct fw_iso_packet *packet),
	TP_ARGS(ctx, buffer_offset, packet),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE),
	TP_STRUCT__entry_iso_packet(ctx, buffer_offset, packet),
	TP_fast_assign_iso_packet(ctx, buffer_offset, packet),
	TP_printk(
		"context=0x%llx card_index=%u buffer_offset=0x%x interrupt=%s skip=%s",
		__entry->context,
		__entry->card_index,
		__entry->buffer_offset,
		__entry->interrupt ? "true" : "false",
		__entry->skip ? "true" : "false"
	)
);

// Logged when a buffer chunk is queued to a multichannel receive context;
// prints an even smaller subset of the recorded fields.
TRACE_EVENT_CONDITION(isoc_inbound_multiple_queue,
	TP_PROTO(const struct fw_iso_context *ctx, unsigned long buffer_offset, const struct fw_iso_packet *packet),
	TP_ARGS(ctx, buffer_offset, packet),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL),
	TP_STRUCT__entry_iso_packet(ctx, buffer_offset, packet),
	TP_fast_assign_iso_packet(ctx, buffer_offset, packet),
	TP_printk(
		"context=0x%llx card_index=%u buffer_offset=0x%x interrupt=%s",
		__entry->context,
		__entry->card_index,
		__entry->buffer_offset,
		__entry->interrupt ? "true" : "false"
	)
);

// The helper macros above are local to the three queueing events.
#undef TP_STRUCT__entry_iso_packet
#undef TP_fast_assign_iso_packet
// Cause of completion processing for isochronous contexts. Guarded by
// #ifndef because this header may be expanded more than once (see
// TRACE_HEADER_MULTI_READ above) and the enum/macro must not be redefined.
#ifndef show_cause
enum fw_iso_context_completions_cause {
	FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH = 0,
	FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ,
	FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW,
};
// Renders the cause as a human-readable token in trace output.
#define show_cause(cause) \
	__print_symbolic(cause, \
		{ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH, "FLUSH" }, \
		{ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ, "IRQ" }, \
		{ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW, "HEADER_OVERFLOW" } \
	)
#endif
// Common layout for completion events of single-channel contexts; records
// the cycle timestamp, the cause of processing, and the raw header quadlets
// of the completed packet.
DECLARE_EVENT_CLASS(isoc_single_completions_template,
	TP_PROTO(const struct fw_iso_context *ctx, u16 timestamp, enum fw_iso_context_completions_cause cause, const u32 *header, unsigned int header_length),
	TP_ARGS(ctx, timestamp, cause, header, header_length),
	TP_STRUCT__entry(
		__field(u64, context)
		__field(u8, card_index)
		__field(u16, timestamp)
		__field(u8, cause)
		// header_length is in bytes; stored as quadlets.
		__dynamic_array(u32, header, header_length / QUADLET_SIZE)
	),
	TP_fast_assign(
		__entry->context = (uintptr_t)ctx;
		__entry->card_index = ctx->card->index;
		__entry->timestamp = timestamp;
		__entry->cause = cause;
		memcpy(__get_dynamic_array(header), header, __get_dynamic_array_len(header));
	),
	TP_printk(
		"context=0x%llx card_index=%u timestamp=0x%04x cause=%s header=%s",
		__entry->context,
		__entry->card_index,
		__entry->timestamp,
		show_cause(__entry->cause),
		__print_array(__get_dynamic_array(header),
			      __get_dynamic_array_len(header) / QUADLET_SIZE, QUADLET_SIZE)
	)
);
// NOTE: the terminating ';' above was missing; added for consistency with
// the other DECLARE_EVENT_CLASS() uses in this file (e.g. isoc_flush_template).

DEFINE_EVENT_CONDITION(isoc_single_completions_template, isoc_outbound_completions,
	TP_PROTO(const struct fw_iso_context *ctx, u16 timestamp, enum fw_iso_context_completions_cause cause, const u32 *header, unsigned int header_length),
	TP_ARGS(ctx, timestamp, cause, header, header_length),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT)
);

DEFINE_EVENT_CONDITION(isoc_single_completions_template, isoc_inbound_single_completions,
	TP_PROTO(const struct fw_iso_context *ctx, u16 timestamp, enum fw_iso_context_completions_cause cause, const u32 *header, unsigned int header_length),
	TP_ARGS(ctx, timestamp, cause, header, header_length),
	TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE)
);
// Logged for completion processing of a multichannel receive context;
// records how many chunks completed and the cause of processing.
TRACE_EVENT(isoc_inbound_multiple_completions,
	TP_PROTO(const struct fw_iso_context *ctx, unsigned int completed, enum fw_iso_context_completions_cause cause),
	TP_ARGS(ctx, completed, cause),
	TP_STRUCT__entry(
		__field(u64, context)
		__field(u8, card_index)
		__field(u16, completed)
		__field(u8, cause)
	),
	TP_fast_assign(
		__entry->context = (uintptr_t)ctx;
		__entry->card_index = ctx->card->index;
		__entry->completed = completed;
		__entry->cause = cause;
	),
	TP_printk(
		"context=0x%llx card_index=%u completed=%u cause=%s",
		__entry->context,
		__entry->card_index,
		__entry->completed,
		show_cause(__entry->cause)
	)
);
#undef QUADLET_SIZE
#endif // _FIREWIRE_TRACE_EVENT_H
......
// SPDX-License-Identifier: GPL-2.0-or-later
// Copyright (c) 2024 Takashi Sakamoto
#undef TRACE_SYSTEM
#define TRACE_SYSTEM firewire_ohci
#if !defined(_FIREWIRE_OHCI_TRACE_EVENT_H) || defined(TRACE_HEADER_MULTI_READ)
#define _FIREWIRE_OHCI_TRACE_EVENT_H
#include <linux/tracepoint.h>
// Some macros and helper functions are defined in 'drivers/firewire/ohci.c'.
// Logged from the hardIRQ handler of the 1394 OHCI driver with the asserted
// interrupt event bits (presumably the IntEvent register value — confirm in
// drivers/firewire/ohci.c).
TRACE_EVENT(irqs,
	TP_PROTO(unsigned int card_index, u32 events),
	TP_ARGS(card_index, events),
	TP_STRUCT__entry(
		__field(u8, card_index)
		__field(u32, events)
	),
	TP_fast_assign(
		__entry->card_index = card_index;
		__entry->events = events;
	),
	TP_printk(
		"card_index=%u events=%s",
		__entry->card_index,
		// Decode each interrupt event bit to its OHCI 1394 name.
		__print_flags(__entry->events, "|",
			{ OHCI1394_selfIDComplete,	"selfIDComplete" },
			{ OHCI1394_RQPkt,		"RQPkt" },
			{ OHCI1394_RSPkt,		"RSPkt" },
			{ OHCI1394_reqTxComplete,	"reqTxComplete" },
			{ OHCI1394_respTxComplete,	"respTxComplete" },
			{ OHCI1394_isochRx,		"isochRx" },
			{ OHCI1394_isochTx,		"isochTx" },
			{ OHCI1394_postedWriteErr,	"postedWriteErr" },
			{ OHCI1394_cycleTooLong,	"cycleTooLong" },
			{ OHCI1394_cycle64Seconds,	"cycle64Seconds" },
			{ OHCI1394_cycleInconsistent,	"cycleInconsistent" },
			{ OHCI1394_regAccessFail,	"regAccessFail" },
			{ OHCI1394_unrecoverableError,	"unrecoverableError" },
			{ OHCI1394_busReset,		"busReset" }
		)
	)
);
#define QUADLET_SIZE 4

// Accessors for the SelfIDCount register value and for the first quadlet of
// the self-ID receive buffer. Plain macros so they can be expanded inside
// TP_printk() below; #undef'd again after the event definition.
#define SELF_ID_COUNT_IS_ERROR(reg) \
	(!!(((reg) & OHCI1394_SelfIDCount_selfIDError_MASK) >> OHCI1394_SelfIDCount_selfIDError_SHIFT))

#define SELF_ID_COUNT_GET_GENERATION(reg) \
	(((reg) & OHCI1394_SelfIDCount_selfIDGeneration_MASK) >> OHCI1394_SelfIDCount_selfIDGeneration_SHIFT)

#define SELF_ID_RECEIVE_Q0_GET_GENERATION(quadlet) \
	(((quadlet) & OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_MASK) >> OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_SHIFT)

#define SELF_ID_RECEIVE_Q0_GET_TIMESTAMP(quadlet) \
	(((quadlet) & OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_MASK) >> OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_SHIFT)

// Logged at completion of self-ID DMA; records the SelfIDCount register
// value and a CPU-endian copy of the self-ID receive buffer.
TRACE_EVENT(self_id_complete,
	TP_PROTO(unsigned int card_index, u32 reg, const __le32 *self_id_receive, bool has_be_header_quirk),
	TP_ARGS(card_index, reg, self_id_receive, has_be_header_quirk),
	TP_STRUCT__entry(
		__field(u8, card_index)
		__field(u32, reg)
		__dynamic_array(u32, self_id_receive, ohci1394_self_id_count_get_size(reg))
	),
	TP_fast_assign(
		__entry->card_index = card_index;
		__entry->reg = reg;
		{
			u32 *ptr = __get_dynamic_array(self_id_receive);
			int i;

			// cond_le32_to_cpu() honours the quirk of controllers
			// which deliver the quadlets in big endian.
			for (i = 0; i < __get_dynamic_array_len(self_id_receive) / QUADLET_SIZE; ++i)
				ptr[i] = cond_le32_to_cpu(self_id_receive[i], has_be_header_quirk);
		}
	),
	TP_printk(
		"card_index=%u is_error=%s generation_at_bus_reset=%u generation_at_completion=%u timestamp=0x%04x packet_data=%s",
		__entry->card_index,
		SELF_ID_COUNT_IS_ERROR(__entry->reg) ? "true" : "false",
		SELF_ID_COUNT_GET_GENERATION(__entry->reg),
		SELF_ID_RECEIVE_Q0_GET_GENERATION(((const u32 *)__get_dynamic_array(self_id_receive))[0]),
		SELF_ID_RECEIVE_Q0_GET_TIMESTAMP(((const u32 *)__get_dynamic_array(self_id_receive))[0]),
		// The first quadlet carries generation/timestamp; the self-ID
		// packet data starts at the second quadlet.
		__print_array(((const u32 *)__get_dynamic_array(self_id_receive)) + 1,
			      (__get_dynamic_array_len(self_id_receive) / QUADLET_SIZE) - 1, QUADLET_SIZE)
	)
);

// The accessors above are local to this event only.
#undef SELF_ID_COUNT_IS_ERROR
#undef SELF_ID_COUNT_GET_GENERATION
#undef SELF_ID_RECEIVE_Q0_GET_GENERATION
#undef SELF_ID_RECEIVE_Q0_GET_TIMESTAMP
#undef QUADLET_SIZE
#endif // _FIREWIRE_OHCI_TRACE_EVENT_H
#include <trace/define_trace.h>
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment