// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2016-2020 Arm Limited
// CMN-600 Coherent Mesh Network PMU driver

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sort.h>

/* Common register stuff */
#define CMN_NODE_INFO			0x0000
#define CMN_NI_NODE_TYPE		GENMASK_ULL(15, 0)
#define CMN_NI_NODE_ID			GENMASK_ULL(31, 16)
#define CMN_NI_LOGICAL_ID		GENMASK_ULL(47, 32)

#define CMN_NODEID_DEVID(reg)		((reg) & 3)
#define CMN_NODEID_EXT_DEVID(reg)	((reg) & 1)
#define CMN_NODEID_PID(reg)		(((reg) >> 2) & 1)
#define CMN_NODEID_EXT_PID(reg)		(((reg) >> 1) & 3)
#define CMN_NODEID_1x1_PID(reg)		(((reg) >> 2) & 7)
#define CMN_NODEID_X(reg, bits)		((reg) >> (3 + (bits)))
#define CMN_NODEID_Y(reg, bits)		(((reg) >> 3) & ((1U << (bits)) - 1))

#define CMN_CHILD_INFO			0x0080
#define CMN_CI_CHILD_COUNT		GENMASK_ULL(15, 0)
#define CMN_CI_CHILD_PTR_OFFSET		GENMASK_ULL(31, 16)

#define CMN_CHILD_NODE_ADDR		GENMASK(29, 0)
#define CMN_CHILD_NODE_EXTERNAL		BIT(31)

#define CMN_MAX_DIMENSION		12
#define CMN_MAX_XPS			(CMN_MAX_DIMENSION * CMN_MAX_DIMENSION)
#define CMN_MAX_DTMS			(CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4)

/* The CFG node has various info besides the discovery tree */
#define CMN_CFGM_PERIPH_ID_01		0x0008
#define CMN_CFGM_PID0_PART_0		GENMASK_ULL(7, 0)
#define CMN_CFGM_PID1_PART_1		GENMASK_ULL(35, 32)
#define CMN_CFGM_PERIPH_ID_23		0x0010
#define CMN_CFGM_PID2_REVISION		GENMASK_ULL(7, 4)

#define CMN_CFGM_INFO_GLOBAL		0x900
#define CMN_INFO_MULTIPLE_DTM_EN	BIT_ULL(63)
#define CMN_INFO_RSP_VC_NUM		GENMASK_ULL(53, 52)
#define CMN_INFO_DAT_VC_NUM		GENMASK_ULL(51, 50)

#define CMN_CFGM_INFO_GLOBAL_1		0x908
#define CMN_INFO_SNP_VC_NUM		GENMASK_ULL(3, 2)
#define CMN_INFO_REQ_VC_NUM		GENMASK_ULL(1, 0)

/* XPs also have some local topology info which has uses too */
#define CMN_MXP__CONNECT_INFO(p)	(0x0008 + 8 * (p))
#define CMN__CONNECT_INFO_DEVICE_TYPE	GENMASK_ULL(4, 0)

#define CMN_MAX_PORTS			6
#define CI700_CONNECT_INFO_P2_5_OFFSET	0x10

/* PMU registers occupy the 3rd 4KB page of each node's region */
#define CMN_PMU_OFFSET			0x2000

/* For most nodes, this is all there is */
#define CMN_PMU_EVENT_SEL		0x000
#define CMN__PMU_CBUSY_SNTHROTTLE_SEL	GENMASK_ULL(44, 42)
#define CMN__PMU_SN_HOME_SEL		GENMASK_ULL(40, 39)
#define CMN__PMU_HBT_LBT_SEL		GENMASK_ULL(38, 37)
#define CMN__PMU_CLASS_OCCUP_ID		GENMASK_ULL(36, 35)
/* Technically this is 4 bits wide on DNs, but we only use 2 there anyway */
#define CMN__PMU_OCCUP1_ID		GENMASK_ULL(34, 32)

/* HN-Ps are weird... */
#define CMN_HNP_PMU_EVENT_SEL		0x008

/* DTMs live in the PMU space of XP registers */
#define CMN_DTM_WPn(n)			(0x1A0 + (n) * 0x18)
#define CMN_DTM_WPn_CONFIG(n)		(CMN_DTM_WPn(n) + 0x00)
#define CMN_DTM_WPn_CONFIG_WP_CHN_NUM	GENMASK_ULL(20, 19)
#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL2	GENMASK_ULL(18, 17)
#define CMN_DTM_WPn_CONFIG_WP_COMBINE	BIT(9)
#define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE	BIT(8)
#define CMN600_WPn_CONFIG_WP_COMBINE	BIT(6)
#define CMN600_WPn_CONFIG_WP_EXCLUSIVE	BIT(5)
#define CMN_DTM_WPn_CONFIG_WP_GRP	GENMASK_ULL(5, 4)
#define CMN_DTM_WPn_CONFIG_WP_CHN_SEL	GENMASK_ULL(3, 1)
#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL	BIT(0)
#define CMN_DTM_WPn_VAL(n)		(CMN_DTM_WPn(n) + 0x08)
#define CMN_DTM_WPn_MASK(n)		(CMN_DTM_WPn(n) + 0x10)

#define CMN_DTM_PMU_CONFIG		0x210
#define CMN__PMEVCNT0_INPUT_SEL		GENMASK_ULL(37, 32)
#define CMN__PMEVCNT0_INPUT_SEL_WP	0x00
#define CMN__PMEVCNT0_INPUT_SEL_XP	0x04
#define CMN__PMEVCNT0_INPUT_SEL_DEV	0x10
#define CMN__PMEVCNT0_GLOBAL_NUM	GENMASK_ULL(18, 16)
#define CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(n)	((n) * 4)
#define CMN__PMEVCNT_PAIRED(n)		BIT(4 + (n))
#define CMN__PMEVCNT23_COMBINED		BIT(2)
#define CMN__PMEVCNT01_COMBINED		BIT(1)
#define CMN_DTM_PMU_CONFIG_PMU_EN	BIT(0)

#define CMN_DTM_PMEVCNT			0x220

#define CMN_DTM_PMEVCNTSR		0x240

#define CMN650_DTM_UNIT_INFO		0x0910
#define CMN_DTM_UNIT_INFO		0x0960
#define CMN_DTM_UNIT_INFO_DTC_DOMAIN	GENMASK_ULL(1, 0)

#define CMN_DTM_NUM_COUNTERS		4
/* Want more local counters? Why not replicate the whole DTM! Ugh... */
#define CMN_DTM_OFFSET(n)		((n) * 0x200)

/* The DTC node is where the magic happens */
#define CMN_DT_DTC_CTL			0x0a00
#define CMN_DT_DTC_CTL_DT_EN		BIT(0)

/* DTC counters are paired in 64-bit registers on a 16-byte stride. Yuck */
#define _CMN_DT_CNT_REG(n)		((((n) / 2) * 4 + (n) % 2) * 4)
#define CMN_DT_PMEVCNT(n)		(CMN_PMU_OFFSET + _CMN_DT_CNT_REG(n))
#define CMN_DT_PMCCNTR			(CMN_PMU_OFFSET + 0x40)

#define CMN_DT_PMEVCNTSR(n)		(CMN_PMU_OFFSET + 0x50 + _CMN_DT_CNT_REG(n))
#define CMN_DT_PMCCNTRSR		(CMN_PMU_OFFSET + 0x90)

#define CMN_DT_PMCR			(CMN_PMU_OFFSET + 0x100)
#define CMN_DT_PMCR_PMU_EN		BIT(0)
#define CMN_DT_PMCR_CNTR_RST		BIT(5)
#define CMN_DT_PMCR_OVFL_INTR_EN	BIT(6)

#define CMN_DT_PMOVSR			(CMN_PMU_OFFSET + 0x118)
#define CMN_DT_PMOVSR_CLR		(CMN_PMU_OFFSET + 0x120)

#define CMN_DT_PMSSR			(CMN_PMU_OFFSET + 0x128)
#define CMN_DT_PMSSR_SS_STATUS(n)	BIT(n)

#define CMN_DT_PMSRR			(CMN_PMU_OFFSET + 0x130)
#define CMN_DT_PMSRR_SS_REQ		BIT(0)

#define CMN_DT_NUM_COUNTERS		8
#define CMN_MAX_DTCS			4

/*
 * Even in the worst case a DTC counter can't wrap in fewer than 2^42 cycles,
 * so throwing away one bit to make overflow handling easy is no big deal.
 */
#define CMN_COUNTER_INIT		0x80000000
/* Similarly for the 40-bit cycle counter */
#define CMN_CC_INIT			0x8000000000ULL


/* Event attributes */
#define CMN_CONFIG_TYPE			GENMASK_ULL(15, 0)
#define CMN_CONFIG_EVENTID		GENMASK_ULL(26, 16)
#define CMN_CONFIG_OCCUPID		GENMASK_ULL(30, 27)
#define CMN_CONFIG_BYNODEID		BIT_ULL(31)
#define CMN_CONFIG_NODEID		GENMASK_ULL(47, 32)

#define CMN_EVENT_TYPE(event)		FIELD_GET(CMN_CONFIG_TYPE, (event)->attr.config)
#define CMN_EVENT_EVENTID(event)	FIELD_GET(CMN_CONFIG_EVENTID, (event)->attr.config)
#define CMN_EVENT_OCCUPID(event)	FIELD_GET(CMN_CONFIG_OCCUPID, (event)->attr.config)
#define CMN_EVENT_BYNODEID(event)	FIELD_GET(CMN_CONFIG_BYNODEID, (event)->attr.config)
#define CMN_EVENT_NODEID(event)		FIELD_GET(CMN_CONFIG_NODEID, (event)->attr.config)

#define CMN_CONFIG_WP_COMBINE		GENMASK_ULL(30, 27)
#define CMN_CONFIG_WP_DEV_SEL		GENMASK_ULL(50, 48)
#define CMN_CONFIG_WP_CHN_SEL		GENMASK_ULL(55, 51)
/* Note that we don't yet support the tertiary match group on newer IPs */
#define CMN_CONFIG_WP_GRP		BIT_ULL(56)
#define CMN_CONFIG_WP_EXCLUSIVE		BIT_ULL(57)
#define CMN_CONFIG1_WP_VAL		GENMASK_ULL(63, 0)
#define CMN_CONFIG2_WP_MASK		GENMASK_ULL(63, 0)

#define CMN_EVENT_WP_COMBINE(event)	FIELD_GET(CMN_CONFIG_WP_COMBINE, (event)->attr.config)
#define CMN_EVENT_WP_DEV_SEL(event)	FIELD_GET(CMN_CONFIG_WP_DEV_SEL, (event)->attr.config)
#define CMN_EVENT_WP_CHN_SEL(event)	FIELD_GET(CMN_CONFIG_WP_CHN_SEL, (event)->attr.config)
#define CMN_EVENT_WP_GRP(event)		FIELD_GET(CMN_CONFIG_WP_GRP, (event)->attr.config)
#define CMN_EVENT_WP_EXCLUSIVE(event)	FIELD_GET(CMN_CONFIG_WP_EXCLUSIVE, (event)->attr.config)
#define CMN_EVENT_WP_VAL(event)		FIELD_GET(CMN_CONFIG1_WP_VAL, (event)->attr.config1)
#define CMN_EVENT_WP_MASK(event)	FIELD_GET(CMN_CONFIG2_WP_MASK, (event)->attr.config2)

/* Made-up event IDs for watchpoint direction */
#define CMN_WP_UP			0
#define CMN_WP_DOWN			2


/* Internal values for encoding event support */
enum cmn_model {
	CMN600 = 1,
	CMN650 = 2,
	CMN700 = 4,
	CI700 = 8,
	/* ...and then we can use bitmap tricks for commonality */
	CMN_ANY = -1,
	NOT_CMN600 = -2,
	CMN_650ON = CMN650 | CMN700,
};

/* Actual part numbers and revision IDs defined by the hardware */
enum cmn_part {
	PART_CMN600 = 0x434,
	PART_CMN650 = 0x436,
	PART_CMN700 = 0x43c,
	PART_CI700 = 0x43a,
};

/* CMN-600 r0px shouldn't exist in silicon, thankfully */
enum cmn_revision {
	REV_CMN600_R1P0,
	REV_CMN600_R1P1,
	REV_CMN600_R1P2,
	REV_CMN600_R1P3,
	REV_CMN600_R2P0,
	REV_CMN600_R3P0,
	REV_CMN600_R3P1,
	REV_CMN650_R0P0 = 0,
	REV_CMN650_R1P0,
	REV_CMN650_R1P1,
	REV_CMN650_R2P0,
	REV_CMN650_R1P2,
	REV_CMN700_R0P0 = 0,
	REV_CMN700_R1P0,
	REV_CMN700_R2P0,
	REV_CMN700_R3P0,
	REV_CI700_R0P0 = 0,
	REV_CI700_R1P0,
	REV_CI700_R2P0,
};

enum cmn_node_type {
	CMN_TYPE_INVALID,
	CMN_TYPE_DVM,
	CMN_TYPE_CFG,
	CMN_TYPE_DTC,
	CMN_TYPE_HNI,
	CMN_TYPE_HNF,
	CMN_TYPE_XP,
	CMN_TYPE_SBSX,
	CMN_TYPE_MPAM_S,
	CMN_TYPE_MPAM_NS,
	CMN_TYPE_RNI,
	CMN_TYPE_RND = 0xd,
	CMN_TYPE_RNSAM = 0xf,
	CMN_TYPE_MTSX,
	CMN_TYPE_HNP,
	CMN_TYPE_CXRA = 0x100,
	CMN_TYPE_CXHA,
	CMN_TYPE_CXLA,
	CMN_TYPE_CCRA,
	CMN_TYPE_CCHA,
	CMN_TYPE_CCLA,
	CMN_TYPE_CCLA_RNI,
	CMN_TYPE_HNS = 0x200,
	CMN_TYPE_HNS_MPAM_S,
	CMN_TYPE_HNS_MPAM_NS,
	/* Not a real node type */
	CMN_TYPE_WP = 0x7770
};

enum cmn_filter_select {
	SEL_NONE = -1,
	SEL_OCCUP1ID,
	SEL_CLASS_OCCUP_ID,
	SEL_CBUSY_SNTHROTTLE_SEL,
	SEL_HBT_LBT_SEL,
	SEL_SN_HOME_SEL,
	SEL_MAX
};

struct arm_cmn_node {
	void __iomem *pmu_base;
	u16 id, logid;
	enum cmn_node_type type;

	u8 dtm;
	s8 dtc;
	/* DN/HN-F/CXHA */
	struct {
		u8 val : 4;
		u8 count : 4;
	} occupid[SEL_MAX];
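	/* Per-counter event IDs, overlaying the value of this node's event-select register */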
	union {
		u8 event[4];
		__le32 event_sel;
		u16 event_w[4];
		__le64 event_sel_w;
	};
};

struct arm_cmn_dtm {
	void __iomem *base;
	u32 pmu_config_low;
	union {
		u8 input_sel[4];
		__le32 pmu_config_high;
	};
	s8 wp_event[4];
};

struct arm_cmn_dtc {
	void __iomem *base;
	int irq;
	int irq_friend;
	bool cc_active;

	struct perf_event *counters[CMN_DT_NUM_COUNTERS];
	struct perf_event *cycles;
};

#define CMN_STATE_DISABLED	BIT(0)
#define CMN_STATE_TXN		BIT(1)

struct arm_cmn {
	struct device *dev;
	void __iomem *base;
	unsigned int state;

	enum cmn_revision rev;
	enum cmn_part part;
	u8 mesh_x;
	u8 mesh_y;
	u16 num_xps;
	u16 num_dns;
	bool multi_dtm;
	u8 ports_used;
	struct {
		unsigned int rsp_vc_num : 2;
		unsigned int dat_vc_num : 2;
		unsigned int snp_vc_num : 2;
		unsigned int req_vc_num : 2;
	};

	struct arm_cmn_node *xps;
	struct arm_cmn_node *dns;

	struct arm_cmn_dtm *dtms;
	struct arm_cmn_dtc *dtc;
	unsigned int num_dtcs;

	int cpu;
	struct hlist_node cpuhp_node;

	struct pmu pmu;
	struct dentry *debug;
};

#define to_cmn(p)	container_of(p, struct arm_cmn, pmu)

static int arm_cmn_hp_state;

struct arm_cmn_nodeid {
	u8 x;
	u8 y;
	u8 port;
	u8 dev;
};

static int arm_cmn_xyidbits(const struct arm_cmn *cmn)
{
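	/* Both coordinate fields are wide enough for the larger dimension, with a minimum of 2 bits */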
	return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1) | 2);
}

static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn *cmn, u16 id)
{
	struct arm_cmn_nodeid nid;

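	/* A 1x1 mesh has no X/Y fields, leaving room in the node ID for a wider port number */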
	if (cmn->num_xps == 1) {
		nid.x = 0;
		nid.y = 0;
		nid.port = CMN_NODEID_1x1_PID(id);
		nid.dev = CMN_NODEID_DEVID(id);
	} else {
		int bits = arm_cmn_xyidbits(cmn);

		nid.x = CMN_NODEID_X(id, bits);
		nid.y = CMN_NODEID_Y(id, bits);
		if (cmn->ports_used & 0xc) {
			nid.port = CMN_NODEID_EXT_PID(id);
			nid.dev = CMN_NODEID_EXT_DEVID(id);
		} else {
			nid.port = CMN_NODEID_PID(id);
			nid.dev = CMN_NODEID_DEVID(id);
		}
	}
	return nid;
}

static struct arm_cmn_node *arm_cmn_node_to_xp(const struct arm_cmn *cmn,
					       const struct arm_cmn_node *dn)
{
	struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
	int xp_idx = cmn->mesh_x * nid.y + nid.x;

	return cmn->xps + xp_idx;
}

static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
					 enum cmn_node_type type)
{
	struct arm_cmn_node *dn;

	for (dn = cmn->dns; dn->type; dn++)
		if (dn->type == type)
			return dn;
	return NULL;
}

static enum cmn_model arm_cmn_model(const struct arm_cmn *cmn)
{
	switch (cmn->part) {
	case PART_CMN600:
		return CMN600;
	case PART_CMN650:
		return CMN650;
	case PART_CMN700:
		return CMN700;
	case PART_CI700:
		return CI700;
	default:
		return 0;
	}
}

static u32 arm_cmn_device_connect_info(const struct arm_cmn *cmn,
				       const struct arm_cmn_node *xp, int port)
{
	int offset = CMN_MXP__CONNECT_INFO(port);

	if (port >= 2) {
		if (cmn->part == PART_CMN600 || cmn->part == PART_CMN650)
			return 0;
		/*
		 * CI-700 may have extra ports, but still has the
		 * mesh_port_connect_info registers in the way.
		 */
		if (cmn->part == PART_CI700)
			offset += CI700_CONNECT_INFO_P2_5_OFFSET;
	}

	return readl_relaxed(xp->pmu_base - CMN_PMU_OFFSET + offset);
}

static struct dentry *arm_cmn_debugfs;

#ifdef CONFIG_DEBUG_FS
static const char *arm_cmn_device_type(u8 type)
{
	switch (FIELD_GET(CMN__CONNECT_INFO_DEVICE_TYPE, type)) {
		case 0x00: return "        |";
		case 0x01: return "  RN-I  |";
		case 0x02: return "  RN-D  |";
		case 0x04: return " RN-F_B |";
		case 0x05: return "RN-F_B_E|";
		case 0x06: return " RN-F_A |";
		case 0x07: return "RN-F_A_E|";
		case 0x08: return "  HN-T  |";
		case 0x09: return "  HN-I  |";
		case 0x0a: return "  HN-D  |";
		case 0x0b: return "  HN-P  |";
		case 0x0c: return "  SN-F  |";
		case 0x0d: return "  SBSX  |";
		case 0x0e: return "  HN-F  |";
		case 0x0f: return " SN-F_E |";
		case 0x10: return " SN-F_D |";
		case 0x11: return "  CXHA  |";
		case 0x12: return "  CXRA  |";
		case 0x13: return "  CXRH  |";
		case 0x14: return " RN-F_D |";
		case 0x15: return "RN-F_D_E|";
		case 0x16: return " RN-F_C |";
		case 0x17: return "RN-F_C_E|";
		case 0x18: return " RN-F_E |";
		case 0x19: return "RN-F_E_E|";
		case 0x1c: return "  MTSX  |";
		case 0x1d: return "  HN-V  |";
		case 0x1e: return "  CCG   |";
		default:   return "  ????  |";
	}
}

static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d)
{
	struct arm_cmn *cmn = s->private;
	struct arm_cmn_node *dn;

	for (dn = cmn->dns; dn->type; dn++) {
		struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);

		if (dn->type == CMN_TYPE_XP)
			continue;
		/* Ignore the extra components that will overlap on some ports */
		if (dn->type < CMN_TYPE_HNI)
			continue;

		if (nid.x != x || nid.y != y || nid.port != p || nid.dev != d)
			continue;

		seq_printf(s, "   #%-2d  |", dn->logid);
		return;
	}
	seq_puts(s, "        |");
}

static int arm_cmn_map_show(struct seq_file *s, void *data)
{
	struct arm_cmn *cmn = s->private;
	int x, y, p, pmax = fls(cmn->ports_used);

	seq_puts(s, "     X");
	for (x = 0; x < cmn->mesh_x; x++)
		seq_printf(s, "    %d    ", x);
	seq_puts(s, "\nY P D+");
	y = cmn->mesh_y;
	while (y--) {
		int xp_base = cmn->mesh_x * y;
		u8 port[CMN_MAX_PORTS][CMN_MAX_DIMENSION];

		for (x = 0; x < cmn->mesh_x; x++)
			seq_puts(s, "--------+");

		seq_printf(s, "\n%d    |", y);
		for (x = 0; x < cmn->mesh_x; x++) {
			struct arm_cmn_node *xp = cmn->xps + xp_base + x;

			for (p = 0; p < CMN_MAX_PORTS; p++)
				port[p][x] = arm_cmn_device_connect_info(cmn, xp, p);
			seq_printf(s, " XP #%-2d |", xp_base + x);
		}

		seq_puts(s, "\n     |");
		for (x = 0; x < cmn->mesh_x; x++) {
			s8 dtc = cmn->xps[xp_base + x].dtc;

			if (dtc < 0)
				seq_puts(s, " DTC ?? |");
			else
				seq_printf(s, " DTC %d  |", dtc);
		}
		seq_puts(s, "\n     |");
		for (x = 0; x < cmn->mesh_x; x++)
			seq_puts(s, "........|");

		for (p = 0; p < pmax; p++) {
			seq_printf(s, "\n  %d  |", p);
			for (x = 0; x < cmn->mesh_x; x++)
				seq_puts(s, arm_cmn_device_type(port[p][x]));
			seq_puts(s, "\n    0|");
			for (x = 0; x < cmn->mesh_x; x++)
				arm_cmn_show_logid(s, x, y, p, 0);
			seq_puts(s, "\n    1|");
			for (x = 0; x < cmn->mesh_x; x++)
				arm_cmn_show_logid(s, x, y, p, 1);
		}
		seq_puts(s, "\n-----+");
	}
	for (x = 0; x < cmn->mesh_x; x++)
		seq_puts(s, "--------+");
	seq_puts(s, "\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(arm_cmn_map);

static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id)
{
	const char *name = "map";

	if (id > 0)
		name = devm_kasprintf(cmn->dev, GFP_KERNEL, "map_%d", id);
	if (!name)
		return;

	cmn->debug = debugfs_create_file(name, 0444, arm_cmn_debugfs, cmn, &arm_cmn_map_fops);
}
#else
static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id) {}
#endif

struct arm_cmn_hw_event {
	struct arm_cmn_node *dn;
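	/* 2-bit DTM counter index per target node, packed via arm_cmn_{set,get}_index() */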
	u64 dtm_idx[4];
	s8 dtc_idx[CMN_MAX_DTCS];
	u8 num_dns;
	u8 dtm_offset;
	bool wide_sel;
	enum cmn_filter_select filter_sel;
};

#define for_each_hw_dn(hw, dn, i) \
	for (i = 0, dn = hw->dn; i < hw->num_dns; i++, dn++)

/* @i is the DTC number, @idx is the counter index on that DTC */
#define for_each_hw_dtc_idx(hw, i, idx) \
	for (int i = 0, idx; i < CMN_MAX_DTCS; i++) if ((idx = hw->dtc_idx[i]) >= 0)

static struct arm_cmn_hw_event *to_cmn_hw(struct perf_event *event)
{
	BUILD_BUG_ON(sizeof(struct arm_cmn_hw_event) > offsetof(struct hw_perf_event, target));
	return (struct arm_cmn_hw_event *)&event->hw;
}

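/* Pack/unpack 2-bit values, 32 to each u64 of a dtm_idx array */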
static void arm_cmn_set_index(u64 x[], unsigned int pos, unsigned int val)
{
	x[pos / 32] |= (u64)val << ((pos % 32) * 2);
}

static unsigned int arm_cmn_get_index(u64 x[], unsigned int pos)
{
	return (x[pos / 32] >> ((pos % 32) * 2)) & 3;
}

struct arm_cmn_event_attr {
	struct device_attribute attr;
	enum cmn_model model;
	enum cmn_node_type type;
	enum cmn_filter_select fsel;
	u16 eventid;
	u8 occupid;
};

struct arm_cmn_format_attr {
	struct device_attribute attr;
	u64 field;
	int config;
};

#define _CMN_EVENT_ATTR(_model, _name, _type, _eventid, _occupid, _fsel)\
	(&((struct arm_cmn_event_attr[]) {{				\
		.attr = __ATTR(_name, 0444, arm_cmn_event_show, NULL),	\
		.model = _model,					\
		.type = _type,						\
		.eventid = _eventid,					\
		.occupid = _occupid,					\
		.fsel = _fsel,						\
	}})[0].attr.attr)
#define CMN_EVENT_ATTR(_model, _name, _type, _eventid)			\
	_CMN_EVENT_ATTR(_model, _name, _type, _eventid, 0, SEL_NONE)

static ssize_t arm_cmn_event_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct arm_cmn_event_attr *eattr;

	eattr = container_of(attr, typeof(*eattr), attr);

	if (eattr->type == CMN_TYPE_DTC)
		return sysfs_emit(buf, "type=0x%x\n", eattr->type);

	if (eattr->type == CMN_TYPE_WP)
		return sysfs_emit(buf,
				  "type=0x%x,eventid=0x%x,wp_dev_sel=?,wp_chn_sel=?,wp_grp=?,wp_val=?,wp_mask=?\n",
				  eattr->type, eattr->eventid);

	if (eattr->fsel > SEL_NONE)
		return sysfs_emit(buf, "type=0x%x,eventid=0x%x,occupid=0x%x\n",
				  eattr->type, eattr->eventid, eattr->occupid);

	return sysfs_emit(buf, "type=0x%x,eventid=0x%x\n", eattr->type,
			  eattr->eventid);
}

static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
					     struct attribute *attr,
					     int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));
	struct arm_cmn_event_attr *eattr;
	enum cmn_node_type type;
	u16 eventid;

	eattr = container_of(attr, typeof(*eattr), attr.attr);

	if (!(eattr->model & arm_cmn_model(cmn)))
		return 0;

	type = eattr->type;
	eventid = eattr->eventid;

	/* Watchpoints aren't nodes, so avoid confusion */
	if (type == CMN_TYPE_WP)
		return attr->mode;

	/* Hide XP events for unused interfaces/channels */
	if (type == CMN_TYPE_XP) {
		unsigned int intf = (eventid >> 2) & 7;
		unsigned int chan = eventid >> 5;

		if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3)))
			return 0;

		if (chan == 4 && cmn->part == PART_CMN600)
			return 0;

		if ((chan == 5 && cmn->rsp_vc_num < 2) ||
		    (chan == 6 && cmn->dat_vc_num < 2) ||
		    (chan == 7 && cmn->snp_vc_num < 2) ||
		    (chan == 8 && cmn->req_vc_num < 2))
			return 0;
	}

	/* Revision-specific differences */
	if (cmn->part == PART_CMN600) {
		if (cmn->rev < REV_CMN600_R1P3) {
			if (type == CMN_TYPE_CXRA && eventid > 0x10)
				return 0;
		}
		if (cmn->rev < REV_CMN600_R1P2) {
			if (type == CMN_TYPE_HNF && eventid == 0x1b)
				return 0;
			if (type == CMN_TYPE_CXRA || type == CMN_TYPE_CXHA)
				return 0;
		}
	} else if (cmn->part == PART_CMN650) {
		if (cmn->rev < REV_CMN650_R2P0 || cmn->rev == REV_CMN650_R1P2) {
			if (type == CMN_TYPE_HNF && eventid > 0x22)
				return 0;
			if (type == CMN_TYPE_SBSX && eventid == 0x17)
				return 0;
			if (type == CMN_TYPE_RNI && eventid > 0x10)
				return 0;
		}
	} else if (cmn->part == PART_CMN700) {
		if (cmn->rev < REV_CMN700_R2P0) {
			if (type == CMN_TYPE_HNF && eventid > 0x2c)
				return 0;
			if (type == CMN_TYPE_CCHA && eventid > 0x74)
				return 0;
			if (type == CMN_TYPE_CCLA && eventid > 0x27)
				return 0;
		}
		if (cmn->rev < REV_CMN700_R1P0) {
			if (type == CMN_TYPE_HNF && eventid > 0x2b)
				return 0;
		}
	}

	if (!arm_cmn_node(cmn, type))
		return 0;

	return attr->mode;
}

#define _CMN_EVENT_DVM(_model, _name, _event, _occup, _fsel)	\
	_CMN_EVENT_ATTR(_model, dn_##_name, CMN_TYPE_DVM, _event, _occup, _fsel)
#define CMN_EVENT_DTC(_name)					\
	CMN_EVENT_ATTR(CMN_ANY, dtc_##_name, CMN_TYPE_DTC, 0)
#define CMN_EVENT_HNF(_model, _name, _event)			\
	CMN_EVENT_ATTR(_model, hnf_##_name, CMN_TYPE_HNF, _event)
#define CMN_EVENT_HNI(_name, _event)				\
	CMN_EVENT_ATTR(CMN_ANY, hni_##_name, CMN_TYPE_HNI, _event)
#define CMN_EVENT_HNP(_name, _event)				\
	CMN_EVENT_ATTR(CMN_ANY, hnp_##_name, CMN_TYPE_HNP, _event)
#define __CMN_EVENT_XP(_name, _event)				\
	CMN_EVENT_ATTR(CMN_ANY, mxp_##_name, CMN_TYPE_XP, _event)
#define CMN_EVENT_SBSX(_model, _name, _event)			\
	CMN_EVENT_ATTR(_model, sbsx_##_name, CMN_TYPE_SBSX, _event)
#define CMN_EVENT_RNID(_model, _name, _event)			\
	CMN_EVENT_ATTR(_model, rnid_##_name, CMN_TYPE_RNI, _event)
#define CMN_EVENT_MTSX(_name, _event)				\
	CMN_EVENT_ATTR(CMN_ANY, mtsx_##_name, CMN_TYPE_MTSX, _event)
#define CMN_EVENT_CXRA(_model, _name, _event)				\
	CMN_EVENT_ATTR(_model, cxra_##_name, CMN_TYPE_CXRA, _event)
#define CMN_EVENT_CXHA(_name, _event)				\
	CMN_EVENT_ATTR(CMN_ANY, cxha_##_name, CMN_TYPE_CXHA, _event)
#define CMN_EVENT_CCRA(_name, _event)				\
	CMN_EVENT_ATTR(CMN_ANY, ccra_##_name, CMN_TYPE_CCRA, _event)
#define CMN_EVENT_CCHA(_name, _event)				\
	CMN_EVENT_ATTR(CMN_ANY, ccha_##_name, CMN_TYPE_CCHA, _event)
#define CMN_EVENT_CCLA(_name, _event)				\
	CMN_EVENT_ATTR(CMN_ANY, ccla_##_name, CMN_TYPE_CCLA, _event)
#define CMN_EVENT_CCLA_RNI(_name, _event)				\
	CMN_EVENT_ATTR(CMN_ANY, ccla_rni_##_name, CMN_TYPE_CCLA_RNI, _event)
#define CMN_EVENT_HNS(_name, _event)				\
	CMN_EVENT_ATTR(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event)

#define CMN_EVENT_DVM(_model, _name, _event)			\
	_CMN_EVENT_DVM(_model, _name, _event, 0, SEL_NONE)
#define CMN_EVENT_DVM_OCC(_model, _name, _event)			\
	_CMN_EVENT_DVM(_model, _name##_all, _event, 0, SEL_OCCUP1ID),	\
	_CMN_EVENT_DVM(_model, _name##_dvmop, _event, 1, SEL_OCCUP1ID),	\
	_CMN_EVENT_DVM(_model, _name##_dvmsync, _event, 2, SEL_OCCUP1ID)

#define CMN_EVENT_HN_OCC(_model, _name, _type, _event)		\
	_CMN_EVENT_ATTR(_model, _name##_all, _type, _event, 0, SEL_OCCUP1ID), \
	_CMN_EVENT_ATTR(_model, _name##_read, _type, _event, 1, SEL_OCCUP1ID), \
	_CMN_EVENT_ATTR(_model, _name##_write, _type, _event, 2, SEL_OCCUP1ID), \
	_CMN_EVENT_ATTR(_model, _name##_atomic, _type, _event, 3, SEL_OCCUP1ID), \
	_CMN_EVENT_ATTR(_model, _name##_stash, _type, _event, 4, SEL_OCCUP1ID)
#define CMN_EVENT_HN_CLS(_model, _name, _type, _event)			\
	_CMN_EVENT_ATTR(_model, _name##_class0, _type, _event, 0, SEL_CLASS_OCCUP_ID), \
	_CMN_EVENT_ATTR(_model, _name##_class1, _type, _event, 1, SEL_CLASS_OCCUP_ID), \
	_CMN_EVENT_ATTR(_model, _name##_class2, _type, _event, 2, SEL_CLASS_OCCUP_ID), \
	_CMN_EVENT_ATTR(_model, _name##_class3, _type, _event, 3, SEL_CLASS_OCCUP_ID)
#define CMN_EVENT_HN_SNT(_model, _name, _type, _event)			\
	_CMN_EVENT_ATTR(_model, _name##_all, _type, _event, 0, SEL_CBUSY_SNTHROTTLE_SEL), \
	_CMN_EVENT_ATTR(_model, _name##_group0_read, _type, _event, 1, SEL_CBUSY_SNTHROTTLE_SEL), \
	_CMN_EVENT_ATTR(_model, _name##_group0_write, _type, _event, 2, SEL_CBUSY_SNTHROTTLE_SEL), \
	_CMN_EVENT_ATTR(_model, _name##_group1_read, _type, _event, 3, SEL_CBUSY_SNTHROTTLE_SEL), \
	_CMN_EVENT_ATTR(_model, _name##_group1_write, _type, _event, 4, SEL_CBUSY_SNTHROTTLE_SEL), \
	_CMN_EVENT_ATTR(_model, _name##_read, _type, _event, 5, SEL_CBUSY_SNTHROTTLE_SEL), \
	_CMN_EVENT_ATTR(_model, _name##_write, _type, _event, 6, SEL_CBUSY_SNTHROTTLE_SEL)

#define CMN_EVENT_HNF_OCC(_model, _name, _event)			\
	CMN_EVENT_HN_OCC(_model, hnf_##_name, CMN_TYPE_HNF, _event)
#define CMN_EVENT_HNF_CLS(_model, _name, _event)			\
	CMN_EVENT_HN_CLS(_model, hnf_##_name, CMN_TYPE_HNF, _event)
#define CMN_EVENT_HNF_SNT(_model, _name, _event)			\
	CMN_EVENT_HN_SNT(_model, hnf_##_name, CMN_TYPE_HNF, _event)

#define CMN_EVENT_HNS_OCC(_name, _event)				\
	CMN_EVENT_HN_OCC(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event),	\
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_rxsnp, CMN_TYPE_HNS, _event, 5, SEL_OCCUP1ID), \
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_lbt, CMN_TYPE_HNS, _event, 6, SEL_OCCUP1ID), \
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_hbt, CMN_TYPE_HNS, _event, 7, SEL_OCCUP1ID)
#define CMN_EVENT_HNS_CLS(_name, _event)				\
	CMN_EVENT_HN_CLS(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event)
#define CMN_EVENT_HNS_SNT(_name, _event)				\
	CMN_EVENT_HN_SNT(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event)
#define CMN_EVENT_HNS_HBT(_name, _event)				\
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_all, CMN_TYPE_HNS, _event, 0, SEL_HBT_LBT_SEL), \
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_hbt, CMN_TYPE_HNS, _event, 1, SEL_HBT_LBT_SEL), \
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_lbt, CMN_TYPE_HNS, _event, 2, SEL_HBT_LBT_SEL)
#define CMN_EVENT_HNS_SNH(_name, _event)				\
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_all, CMN_TYPE_HNS, _event, 0, SEL_SN_HOME_SEL), \
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_sn, CMN_TYPE_HNS, _event, 1, SEL_SN_HOME_SEL), \
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_home, CMN_TYPE_HNS, _event, 2, SEL_SN_HOME_SEL)

#define _CMN_EVENT_XP_MESH(_name, _event)			\
	__CMN_EVENT_XP(e_##_name, (_event) | (0 << 2)),		\
	__CMN_EVENT_XP(w_##_name, (_event) | (1 << 2)),		\
	__CMN_EVENT_XP(n_##_name, (_event) | (2 << 2)),		\
	__CMN_EVENT_XP(s_##_name, (_event) | (3 << 2))

#define _CMN_EVENT_XP_PORT(_name, _event)			\
	__CMN_EVENT_XP(p0_##_name, (_event) | (4 << 2)),	\
844 845 846
	__CMN_EVENT_XP(p1_##_name, (_event) | (5 << 2)),	\
	__CMN_EVENT_XP(p2_##_name, (_event) | (6 << 2)),	\
	__CMN_EVENT_XP(p3_##_name, (_event) | (7 << 2))

#define _CMN_EVENT_XP(_name, _event)				\
	_CMN_EVENT_XP_MESH(_name, _event),			\
	_CMN_EVENT_XP_PORT(_name, _event)

/* Good thing there are only 3 fundamental XP events... */
#define CMN_EVENT_XP(_name, _event)				\
	_CMN_EVENT_XP(req_##_name, (_event) | (0 << 5)),	\
	_CMN_EVENT_XP(rsp_##_name, (_event) | (1 << 5)),	\
	_CMN_EVENT_XP(snp_##_name, (_event) | (2 << 5)),	\
	_CMN_EVENT_XP(dat_##_name, (_event) | (3 << 5)),	\
	_CMN_EVENT_XP(pub_##_name, (_event) | (4 << 5)),	\
	_CMN_EVENT_XP(rsp2_##_name, (_event) | (5 << 5)),	\
	_CMN_EVENT_XP(dat2_##_name, (_event) | (6 << 5)),	\
	_CMN_EVENT_XP(snp2_##_name, (_event) | (7 << 5)),	\
	_CMN_EVENT_XP(req2_##_name, (_event) | (8 << 5))

#define CMN_EVENT_XP_DAT(_name, _event)				\
	_CMN_EVENT_XP_PORT(dat_##_name, (_event) | (3 << 5)),	\
	_CMN_EVENT_XP_PORT(dat2_##_name, (_event) | (6 << 5))


static struct attribute *arm_cmn_event_attrs[] = {
	CMN_EVENT_DTC(cycles),

	/*
	 * DVM node events conflict with HN-I events in the equivalent PMU
	 * slot, but our lazy short-cut of using the DTM counter index for
	 * the PMU index as well happens to avoid that by construction.
	 */
	CMN_EVENT_DVM(CMN600, rxreq_dvmop,		0x01),
	CMN_EVENT_DVM(CMN600, rxreq_dvmsync,		0x02),
	CMN_EVENT_DVM(CMN600, rxreq_dvmop_vmid_filtered, 0x03),
	CMN_EVENT_DVM(CMN600, rxreq_retried,		0x04),
	CMN_EVENT_DVM_OCC(CMN600, rxreq_trk_occupancy,	0x05),
	CMN_EVENT_DVM(NOT_CMN600, dvmop_tlbi,		0x01),
	CMN_EVENT_DVM(NOT_CMN600, dvmop_bpi,		0x02),
	CMN_EVENT_DVM(NOT_CMN600, dvmop_pici,		0x03),
	CMN_EVENT_DVM(NOT_CMN600, dvmop_vici,		0x04),
	CMN_EVENT_DVM(NOT_CMN600, dvmsync,		0x05),
	CMN_EVENT_DVM(NOT_CMN600, vmid_filtered,	0x06),
	CMN_EVENT_DVM(NOT_CMN600, rndop_filtered,	0x07),
	CMN_EVENT_DVM(NOT_CMN600, retry,		0x08),
	CMN_EVENT_DVM(NOT_CMN600, txsnp_flitv,		0x09),
	CMN_EVENT_DVM(NOT_CMN600, txsnp_stall,		0x0a),
	CMN_EVENT_DVM(NOT_CMN600, trkfull,		0x0b),
	CMN_EVENT_DVM_OCC(NOT_CMN600, trk_occupancy,	0x0c),
	CMN_EVENT_DVM_OCC(CMN700, trk_occupancy_cxha,	0x0d),
	CMN_EVENT_DVM_OCC(CMN700, trk_occupancy_pdn,	0x0e),
	CMN_EVENT_DVM(CMN700, trk_alloc,		0x0f),
	CMN_EVENT_DVM(CMN700, trk_cxha_alloc,		0x10),
	CMN_EVENT_DVM(CMN700, trk_pdn_alloc,		0x11),
	CMN_EVENT_DVM(CMN700, txsnp_stall_limit,	0x12),
	CMN_EVENT_DVM(CMN700, rxsnp_stall_starv,	0x13),
	CMN_EVENT_DVM(CMN700, txsnp_sync_stall_op,	0x14),

	CMN_EVENT_HNF(CMN_ANY, cache_miss,		0x01),
	CMN_EVENT_HNF(CMN_ANY, slc_sf_cache_access,	0x02),
	CMN_EVENT_HNF(CMN_ANY, cache_fill,		0x03),
	CMN_EVENT_HNF(CMN_ANY, pocq_retry,		0x04),
	CMN_EVENT_HNF(CMN_ANY, pocq_reqs_recvd,		0x05),
	CMN_EVENT_HNF(CMN_ANY, sf_hit,			0x06),
	CMN_EVENT_HNF(CMN_ANY, sf_evictions,		0x07),
	CMN_EVENT_HNF(CMN_ANY, dir_snoops_sent,		0x08),
	CMN_EVENT_HNF(CMN_ANY, brd_snoops_sent,		0x09),
	CMN_EVENT_HNF(CMN_ANY, slc_eviction,		0x0a),
	CMN_EVENT_HNF(CMN_ANY, slc_fill_invalid_way,	0x0b),
	CMN_EVENT_HNF(CMN_ANY, mc_retries,		0x0c),
	CMN_EVENT_HNF(CMN_ANY, mc_reqs,			0x0d),
	CMN_EVENT_HNF(CMN_ANY, qos_hh_retry,		0x0e),
	CMN_EVENT_HNF_OCC(CMN_ANY, qos_pocq_occupancy,	0x0f),
	CMN_EVENT_HNF(CMN_ANY, pocq_addrhaz,		0x10),
	CMN_EVENT_HNF(CMN_ANY, pocq_atomic_addrhaz,	0x11),
	CMN_EVENT_HNF(CMN_ANY, ld_st_swp_adq_full,	0x12),
	CMN_EVENT_HNF(CMN_ANY, cmp_adq_full,		0x13),
	CMN_EVENT_HNF(CMN_ANY, txdat_stall,		0x14),
	CMN_EVENT_HNF(CMN_ANY, txrsp_stall,		0x15),
	CMN_EVENT_HNF(CMN_ANY, seq_full,		0x16),
	CMN_EVENT_HNF(CMN_ANY, seq_hit,			0x17),
	CMN_EVENT_HNF(CMN_ANY, snp_sent,		0x18),
	CMN_EVENT_HNF(CMN_ANY, sfbi_dir_snp_sent,	0x19),
	CMN_EVENT_HNF(CMN_ANY, sfbi_brd_snp_sent,	0x1a),
	CMN_EVENT_HNF(CMN_ANY, snp_sent_untrk,		0x1b),
	CMN_EVENT_HNF(CMN_ANY, intv_dirty,		0x1c),
	CMN_EVENT_HNF(CMN_ANY, stash_snp_sent,		0x1d),
	CMN_EVENT_HNF(CMN_ANY, stash_data_pull,		0x1e),
	CMN_EVENT_HNF(CMN_ANY, snp_fwded,		0x1f),
	CMN_EVENT_HNF(NOT_CMN600, atomic_fwd,		0x20),
	CMN_EVENT_HNF(NOT_CMN600, mpam_hardlim,		0x21),
	CMN_EVENT_HNF(NOT_CMN600, mpam_softlim,		0x22),
	CMN_EVENT_HNF(CMN_650ON, snp_sent_cluster,	0x23),
	CMN_EVENT_HNF(CMN_650ON, sf_imprecise_evict,	0x24),
	CMN_EVENT_HNF(CMN_650ON, sf_evict_shared_line,	0x25),
	CMN_EVENT_HNF_CLS(CMN700, pocq_class_occup,	0x26),
	CMN_EVENT_HNF_CLS(CMN700, pocq_class_retry,	0x27),
	CMN_EVENT_HNF_CLS(CMN700, class_mc_reqs,	0x28),
	CMN_EVENT_HNF_CLS(CMN700, class_cgnt_cmin,	0x29),
	CMN_EVENT_HNF_SNT(CMN700, sn_throttle,		0x2a),
	CMN_EVENT_HNF_SNT(CMN700, sn_throttle_min,	0x2b),
	CMN_EVENT_HNF(CMN700, sf_precise_to_imprecise,	0x2c),
	CMN_EVENT_HNF(CMN700, snp_intv_cln,		0x2d),
	CMN_EVENT_HNF(CMN700, nc_excl,			0x2e),
	CMN_EVENT_HNF(CMN700, excl_mon_ovfl,		0x2f),

	CMN_EVENT_HNI(rrt_rd_occ_cnt_ovfl,		0x20),
	CMN_EVENT_HNI(rrt_wr_occ_cnt_ovfl,		0x21),
	CMN_EVENT_HNI(rdt_rd_occ_cnt_ovfl,		0x22),
	CMN_EVENT_HNI(rdt_wr_occ_cnt_ovfl,		0x23),
	CMN_EVENT_HNI(wdb_occ_cnt_ovfl,			0x24),
	CMN_EVENT_HNI(rrt_rd_alloc,			0x25),
	CMN_EVENT_HNI(rrt_wr_alloc,			0x26),
	CMN_EVENT_HNI(rdt_rd_alloc,			0x27),
	CMN_EVENT_HNI(rdt_wr_alloc,			0x28),
	CMN_EVENT_HNI(wdb_alloc,			0x29),
	CMN_EVENT_HNI(txrsp_retryack,			0x2a),
	CMN_EVENT_HNI(arvalid_no_arready,		0x2b),
	CMN_EVENT_HNI(arready_no_arvalid,		0x2c),
	CMN_EVENT_HNI(awvalid_no_awready,		0x2d),
	CMN_EVENT_HNI(awready_no_awvalid,		0x2e),
	CMN_EVENT_HNI(wvalid_no_wready,			0x2f),
	CMN_EVENT_HNI(txdat_stall,			0x30),
	CMN_EVENT_HNI(nonpcie_serialization,		0x31),
	CMN_EVENT_HNI(pcie_serialization,		0x32),

	/*
	 * HN-P events squat on top of the HN-I similarly to DVM events, except
	 * for being crammed into the same physical node as well. And of course
	 * where would the fun be if the same events were in the same order...
	 */
	CMN_EVENT_HNP(rrt_wr_occ_cnt_ovfl,		0x01),
	CMN_EVENT_HNP(rdt_wr_occ_cnt_ovfl,		0x02),
	CMN_EVENT_HNP(wdb_occ_cnt_ovfl,			0x03),
	CMN_EVENT_HNP(rrt_wr_alloc,			0x04),
	CMN_EVENT_HNP(rdt_wr_alloc,			0x05),
	CMN_EVENT_HNP(wdb_alloc,			0x06),
	CMN_EVENT_HNP(awvalid_no_awready,		0x07),
	CMN_EVENT_HNP(awready_no_awvalid,		0x08),
	CMN_EVENT_HNP(wvalid_no_wready,			0x09),
	CMN_EVENT_HNP(rrt_rd_occ_cnt_ovfl,		0x11),
	CMN_EVENT_HNP(rdt_rd_occ_cnt_ovfl,		0x12),
	CMN_EVENT_HNP(rrt_rd_alloc,			0x13),
	CMN_EVENT_HNP(rdt_rd_alloc,			0x14),
	CMN_EVENT_HNP(arvalid_no_arready,		0x15),
	CMN_EVENT_HNP(arready_no_arvalid,		0x16),

	CMN_EVENT_XP(txflit_valid,			0x01),
	CMN_EVENT_XP(txflit_stall,			0x02),
	CMN_EVENT_XP_DAT(partial_dat_flit,		0x03),
	/* We treat watchpoints as a special made-up class of XP events */
	CMN_EVENT_ATTR(CMN_ANY, watchpoint_up, CMN_TYPE_WP, CMN_WP_UP),
	CMN_EVENT_ATTR(CMN_ANY, watchpoint_down, CMN_TYPE_WP, CMN_WP_DOWN),

	CMN_EVENT_SBSX(CMN_ANY, rd_req,			0x01),
	CMN_EVENT_SBSX(CMN_ANY, wr_req,			0x02),
	CMN_EVENT_SBSX(CMN_ANY, cmo_req,		0x03),
	CMN_EVENT_SBSX(CMN_ANY, txrsp_retryack,		0x04),
	CMN_EVENT_SBSX(CMN_ANY, txdat_flitv,		0x05),
	CMN_EVENT_SBSX(CMN_ANY, txrsp_flitv,		0x06),
	CMN_EVENT_SBSX(CMN_ANY, rd_req_trkr_occ_cnt_ovfl, 0x11),
	CMN_EVENT_SBSX(CMN_ANY, wr_req_trkr_occ_cnt_ovfl, 0x12),
	CMN_EVENT_SBSX(CMN_ANY, cmo_req_trkr_occ_cnt_ovfl, 0x13),
	CMN_EVENT_SBSX(CMN_ANY, wdb_occ_cnt_ovfl,	0x14),
	CMN_EVENT_SBSX(CMN_ANY, rd_axi_trkr_occ_cnt_ovfl, 0x15),
	CMN_EVENT_SBSX(CMN_ANY, cmo_axi_trkr_occ_cnt_ovfl, 0x16),
	CMN_EVENT_SBSX(NOT_CMN600, rdb_occ_cnt_ovfl,	0x17),
	CMN_EVENT_SBSX(CMN_ANY, arvalid_no_arready,	0x21),
	CMN_EVENT_SBSX(CMN_ANY, awvalid_no_awready,	0x22),
	CMN_EVENT_SBSX(CMN_ANY, wvalid_no_wready,	0x23),
	CMN_EVENT_SBSX(CMN_ANY, txdat_stall,		0x24),
	CMN_EVENT_SBSX(CMN_ANY, txrsp_stall,		0x25),

	CMN_EVENT_RNID(CMN_ANY, s0_rdata_beats,		0x01),
	CMN_EVENT_RNID(CMN_ANY, s1_rdata_beats,		0x02),
	CMN_EVENT_RNID(CMN_ANY, s2_rdata_beats,		0x03),
	CMN_EVENT_RNID(CMN_ANY, rxdat_flits,		0x04),
	CMN_EVENT_RNID(CMN_ANY, txdat_flits,		0x05),
	CMN_EVENT_RNID(CMN_ANY, txreq_flits_total,	0x06),
	CMN_EVENT_RNID(CMN_ANY, txreq_flits_retried,	0x07),
	CMN_EVENT_RNID(CMN_ANY, rrt_occ_ovfl,		0x08),
	CMN_EVENT_RNID(CMN_ANY, wrt_occ_ovfl,		0x09),
	CMN_EVENT_RNID(CMN_ANY, txreq_flits_replayed,	0x0a),
	CMN_EVENT_RNID(CMN_ANY, wrcancel_sent,		0x0b),
	CMN_EVENT_RNID(CMN_ANY, s0_wdata_beats,		0x0c),
	CMN_EVENT_RNID(CMN_ANY, s1_wdata_beats,		0x0d),
	CMN_EVENT_RNID(CMN_ANY, s2_wdata_beats,		0x0e),
	CMN_EVENT_RNID(CMN_ANY, rrt_alloc,		0x0f),
	CMN_EVENT_RNID(CMN_ANY, wrt_alloc,		0x10),
	CMN_EVENT_RNID(CMN600, rdb_unord,		0x11),
	CMN_EVENT_RNID(CMN600, rdb_replay,		0x12),
	CMN_EVENT_RNID(CMN600, rdb_hybrid,		0x13),
	CMN_EVENT_RNID(CMN600, rdb_ord,			0x14),
	CMN_EVENT_RNID(NOT_CMN600, padb_occ_ovfl,	0x11),
	CMN_EVENT_RNID(NOT_CMN600, rpdb_occ_ovfl,	0x12),
	CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice1, 0x13),
	CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice2, 0x14),
	CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice3, 0x15),
	CMN_EVENT_RNID(NOT_CMN600, wrt_throttled,	0x16),
	CMN_EVENT_RNID(CMN700, ldb_full,		0x17),
	CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice0, 0x18),
	CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice1, 0x19),
	CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice2, 0x1a),
	CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice3, 0x1b),
	CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice0, 0x1c),
	CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice1, 0x1d),
	CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice2, 0x1e),
	CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice3, 0x1f),
	CMN_EVENT_RNID(CMN700, rrt_burst_alloc,		0x20),
	CMN_EVENT_RNID(CMN700, awid_hash,		0x21),
	CMN_EVENT_RNID(CMN700, atomic_alloc,		0x22),
	CMN_EVENT_RNID(CMN700, atomic_occ_ovfl,		0x23),

	CMN_EVENT_MTSX(tc_lookup,			0x01),
	CMN_EVENT_MTSX(tc_fill,				0x02),
	CMN_EVENT_MTSX(tc_miss,				0x03),
	CMN_EVENT_MTSX(tdb_forward,			0x04),
	CMN_EVENT_MTSX(tcq_hazard,			0x05),
	CMN_EVENT_MTSX(tcq_rd_alloc,			0x06),
	CMN_EVENT_MTSX(tcq_wr_alloc,			0x07),
	CMN_EVENT_MTSX(tcq_cmo_alloc,			0x08),
	CMN_EVENT_MTSX(axi_rd_req,			0x09),
	CMN_EVENT_MTSX(axi_wr_req,			0x0a),
	CMN_EVENT_MTSX(tcq_occ_cnt_ovfl,		0x0b),
	CMN_EVENT_MTSX(tdb_occ_cnt_ovfl,		0x0c),

	CMN_EVENT_CXRA(CMN_ANY, rht_occ,		0x01),
	CMN_EVENT_CXRA(CMN_ANY, sht_occ,		0x02),
	CMN_EVENT_CXRA(CMN_ANY, rdb_occ,		0x03),
	CMN_EVENT_CXRA(CMN_ANY, wdb_occ,		0x04),
	CMN_EVENT_CXRA(CMN_ANY, ssb_occ,		0x05),
	CMN_EVENT_CXRA(CMN_ANY, snp_bcasts,		0x06),
	CMN_EVENT_CXRA(CMN_ANY, req_chains,		0x07),
	CMN_EVENT_CXRA(CMN_ANY, req_chain_avglen,	0x08),
	CMN_EVENT_CXRA(CMN_ANY, chirsp_stalls,		0x09),
	CMN_EVENT_CXRA(CMN_ANY, chidat_stalls,		0x0a),
	CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link0, 0x0b),
	CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link1, 0x0c),
	CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link2, 0x0d),
	CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link0, 0x0e),
	CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link1, 0x0f),
	CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link2, 0x10),
	CMN_EVENT_CXRA(CMN_ANY, external_chirsp_stalls,	0x11),
	CMN_EVENT_CXRA(CMN_ANY, external_chidat_stalls,	0x12),
	CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link0, 0x13),
	CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link1, 0x14),
	CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link2, 0x15),

	CMN_EVENT_CXHA(rddatbyp,			0x21),
	CMN_EVENT_CXHA(chirsp_up_stall,			0x22),
	CMN_EVENT_CXHA(chidat_up_stall,			0x23),
	CMN_EVENT_CXHA(snppcrd_link0_stall,		0x24),
	CMN_EVENT_CXHA(snppcrd_link1_stall,		0x25),
	CMN_EVENT_CXHA(snppcrd_link2_stall,		0x26),
	CMN_EVENT_CXHA(reqtrk_occ,			0x27),
	CMN_EVENT_CXHA(rdb_occ,				0x28),
	CMN_EVENT_CXHA(rdbyp_occ,			0x29),
	CMN_EVENT_CXHA(wdb_occ,				0x2a),
	CMN_EVENT_CXHA(snptrk_occ,			0x2b),
	CMN_EVENT_CXHA(sdb_occ,				0x2c),
	CMN_EVENT_CXHA(snphaz_occ,			0x2d),

	CMN_EVENT_CCRA(rht_occ,				0x41),
	CMN_EVENT_CCRA(sht_occ,				0x42),
	CMN_EVENT_CCRA(rdb_occ,				0x43),
	CMN_EVENT_CCRA(wdb_occ,				0x44),
	CMN_EVENT_CCRA(ssb_occ,				0x45),
	CMN_EVENT_CCRA(snp_bcasts,			0x46),
	CMN_EVENT_CCRA(req_chains,			0x47),
	CMN_EVENT_CCRA(req_chain_avglen,		0x48),
	CMN_EVENT_CCRA(chirsp_stalls,			0x49),
	CMN_EVENT_CCRA(chidat_stalls,			0x4a),
	CMN_EVENT_CCRA(cxreq_pcrd_stalls_link0,		0x4b),
	CMN_EVENT_CCRA(cxreq_pcrd_stalls_link1,		0x4c),
	CMN_EVENT_CCRA(cxreq_pcrd_stalls_link2,		0x4d),
	CMN_EVENT_CCRA(cxdat_pcrd_stalls_link0,		0x4e),
	CMN_EVENT_CCRA(cxdat_pcrd_stalls_link1,		0x4f),
	CMN_EVENT_CCRA(cxdat_pcrd_stalls_link2,		0x50),
	CMN_EVENT_CCRA(external_chirsp_stalls,		0x51),
	CMN_EVENT_CCRA(external_chidat_stalls,		0x52),
	CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link0,	0x53),
	CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link1,	0x54),
	CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link2,	0x55),
	CMN_EVENT_CCRA(rht_alloc,			0x56),
	CMN_EVENT_CCRA(sht_alloc,			0x57),
	CMN_EVENT_CCRA(rdb_alloc,			0x58),
	CMN_EVENT_CCRA(wdb_alloc,			0x59),
	CMN_EVENT_CCRA(ssb_alloc,			0x5a),

	CMN_EVENT_CCHA(rddatbyp,			0x61),
	CMN_EVENT_CCHA(chirsp_up_stall,			0x62),
	CMN_EVENT_CCHA(chidat_up_stall,			0x63),
	CMN_EVENT_CCHA(snppcrd_link0_stall,		0x64),
	CMN_EVENT_CCHA(snppcrd_link1_stall,		0x65),
	CMN_EVENT_CCHA(snppcrd_link2_stall,		0x66),
	CMN_EVENT_CCHA(reqtrk_occ,			0x67),
	CMN_EVENT_CCHA(rdb_occ,				0x68),
	CMN_EVENT_CCHA(rdbyp_occ,			0x69),
	CMN_EVENT_CCHA(wdb_occ,				0x6a),
	CMN_EVENT_CCHA(snptrk_occ,			0x6b),
	CMN_EVENT_CCHA(sdb_occ,				0x6c),
	CMN_EVENT_CCHA(snphaz_occ,			0x6d),
	CMN_EVENT_CCHA(reqtrk_alloc,			0x6e),
	CMN_EVENT_CCHA(rdb_alloc,			0x6f),
	CMN_EVENT_CCHA(rdbyp_alloc,			0x70),
	CMN_EVENT_CCHA(wdb_alloc,			0x71),
	CMN_EVENT_CCHA(snptrk_alloc,			0x72),
	CMN_EVENT_CCHA(sdb_alloc,			0x73),
	CMN_EVENT_CCHA(snphaz_alloc,			0x74),
	CMN_EVENT_CCHA(pb_rhu_req_occ,			0x75),
	CMN_EVENT_CCHA(pb_rhu_req_alloc,		0x76),
	CMN_EVENT_CCHA(pb_rhu_pcie_req_occ,		0x77),
	CMN_EVENT_CCHA(pb_rhu_pcie_req_alloc,		0x78),
	CMN_EVENT_CCHA(pb_pcie_wr_req_occ,		0x79),
	CMN_EVENT_CCHA(pb_pcie_wr_req_alloc,		0x7a),
	CMN_EVENT_CCHA(pb_pcie_reg_req_occ,		0x7b),
	CMN_EVENT_CCHA(pb_pcie_reg_req_alloc,		0x7c),
	CMN_EVENT_CCHA(pb_pcie_rsvd_req_occ,		0x7d),
	CMN_EVENT_CCHA(pb_pcie_rsvd_req_alloc,		0x7e),
	CMN_EVENT_CCHA(pb_rhu_dat_occ,			0x7f),
	CMN_EVENT_CCHA(pb_rhu_dat_alloc,		0x80),
	CMN_EVENT_CCHA(pb_rhu_pcie_dat_occ,		0x81),
	CMN_EVENT_CCHA(pb_rhu_pcie_dat_alloc,		0x82),
	CMN_EVENT_CCHA(pb_pcie_wr_dat_occ,		0x83),
	CMN_EVENT_CCHA(pb_pcie_wr_dat_alloc,		0x84),

	CMN_EVENT_CCLA(rx_cxs,				0x21),
	CMN_EVENT_CCLA(tx_cxs,				0x22),
	CMN_EVENT_CCLA(rx_cxs_avg_size,			0x23),
	CMN_EVENT_CCLA(tx_cxs_avg_size,			0x24),
	CMN_EVENT_CCLA(tx_cxs_lcrd_backpressure,	0x25),
	CMN_EVENT_CCLA(link_crdbuf_occ,			0x26),
	CMN_EVENT_CCLA(link_crdbuf_alloc,		0x27),
	CMN_EVENT_CCLA(pfwd_rcvr_cxs,			0x28),
	CMN_EVENT_CCLA(pfwd_sndr_num_flits,		0x29),
	CMN_EVENT_CCLA(pfwd_sndr_stalls_static_crd,	0x2a),
	CMN_EVENT_CCLA(pfwd_sndr_stalls_dynamic_crd,	0x2b),

	CMN_EVENT_HNS_HBT(cache_miss,			0x01),
	CMN_EVENT_HNS_HBT(slc_sf_cache_access,		0x02),
	CMN_EVENT_HNS_HBT(cache_fill,			0x03),
	CMN_EVENT_HNS_HBT(pocq_retry,			0x04),
	CMN_EVENT_HNS_HBT(pocq_reqs_recvd,		0x05),
	CMN_EVENT_HNS_HBT(sf_hit,			0x06),
	CMN_EVENT_HNS_HBT(sf_evictions,			0x07),
	CMN_EVENT_HNS(dir_snoops_sent,			0x08),
	CMN_EVENT_HNS(brd_snoops_sent,			0x09),
	CMN_EVENT_HNS_HBT(slc_eviction,			0x0a),
	CMN_EVENT_HNS_HBT(slc_fill_invalid_way,		0x0b),
	CMN_EVENT_HNS(mc_retries_local,			0x0c),
	CMN_EVENT_HNS_SNH(mc_reqs_local,		0x0d),
	CMN_EVENT_HNS(qos_hh_retry,			0x0e),
	CMN_EVENT_HNS_OCC(qos_pocq_occupancy,		0x0f),
	CMN_EVENT_HNS(pocq_addrhaz,			0x10),
	CMN_EVENT_HNS(pocq_atomic_addrhaz,		0x11),
	CMN_EVENT_HNS(ld_st_swp_adq_full,		0x12),
	CMN_EVENT_HNS(cmp_adq_full,			0x13),
	CMN_EVENT_HNS(txdat_stall,			0x14),
	CMN_EVENT_HNS(txrsp_stall,			0x15),
	CMN_EVENT_HNS(seq_full,				0x16),
	CMN_EVENT_HNS(seq_hit,				0x17),
	CMN_EVENT_HNS(snp_sent,				0x18),
	CMN_EVENT_HNS(sfbi_dir_snp_sent,		0x19),
	CMN_EVENT_HNS(sfbi_brd_snp_sent,		0x1a),
	CMN_EVENT_HNS(intv_dirty,			0x1c),
	CMN_EVENT_HNS(stash_snp_sent,			0x1d),
	CMN_EVENT_HNS(stash_data_pull,			0x1e),
	CMN_EVENT_HNS(snp_fwded,			0x1f),
	CMN_EVENT_HNS(atomic_fwd,			0x20),
	CMN_EVENT_HNS(mpam_hardlim,			0x21),
	CMN_EVENT_HNS(mpam_softlim,			0x22),
	CMN_EVENT_HNS(snp_sent_cluster,			0x23),
	CMN_EVENT_HNS(sf_imprecise_evict,		0x24),
	CMN_EVENT_HNS(sf_evict_shared_line,		0x25),
	CMN_EVENT_HNS_CLS(pocq_class_occup,		0x26),
	CMN_EVENT_HNS_CLS(pocq_class_retry,		0x27),
	CMN_EVENT_HNS_CLS(class_mc_reqs_local,		0x28),
	CMN_EVENT_HNS_CLS(class_cgnt_cmin,		0x29),
	CMN_EVENT_HNS_SNT(sn_throttle,			0x2a),
	CMN_EVENT_HNS_SNT(sn_throttle_min,		0x2b),
	CMN_EVENT_HNS(sf_precise_to_imprecise,		0x2c),
	CMN_EVENT_HNS(snp_intv_cln,			0x2d),
	CMN_EVENT_HNS(nc_excl,				0x2e),
	CMN_EVENT_HNS(excl_mon_ovfl,			0x2f),
	CMN_EVENT_HNS(snp_req_recvd,			0x30),
	CMN_EVENT_HNS(snp_req_byp_pocq,			0x31),
	CMN_EVENT_HNS(dir_ccgha_snp_sent,		0x32),
	CMN_EVENT_HNS(brd_ccgha_snp_sent,		0x33),
	CMN_EVENT_HNS(ccgha_snp_stall,			0x34),
	CMN_EVENT_HNS(lbt_req_hardlim,			0x35),
	CMN_EVENT_HNS(hbt_req_hardlim,			0x36),
	CMN_EVENT_HNS(sf_reupdate,			0x37),
	CMN_EVENT_HNS(excl_sf_imprecise,		0x38),
	CMN_EVENT_HNS(snp_pocq_addrhaz,			0x39),
	CMN_EVENT_HNS(mc_retries_remote,		0x3a),
	CMN_EVENT_HNS_SNH(mc_reqs_remote,		0x3b),
	CMN_EVENT_HNS_CLS(class_mc_reqs_remote,		0x3c),

	NULL
};

static const struct attribute_group arm_cmn_event_attrs_group = {
	.name = "events",
	.attrs = arm_cmn_event_attrs,
	.is_visible = arm_cmn_event_attr_is_visible,
};

static ssize_t arm_cmn_format_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct arm_cmn_format_attr *fmt = container_of(attr, typeof(*fmt), attr);
	int lo = __ffs(fmt->field), hi = __fls(fmt->field);

	if (lo == hi)
		return sysfs_emit(buf, "config:%d\n", lo);

	if (!fmt->config)
		return sysfs_emit(buf, "config:%d-%d\n", lo, hi);

	return sysfs_emit(buf, "config%d:%d-%d\n", fmt->config, lo, hi);
}

#define _CMN_FORMAT_ATTR(_name, _cfg, _fld)				\
	(&((struct arm_cmn_format_attr[]) {{				\
		.attr = __ATTR(_name, 0444, arm_cmn_format_show, NULL),	\
		.config = _cfg,						\
		.field = _fld,						\
	}})[0].attr.attr)
#define CMN_FORMAT_ATTR(_name, _fld)	_CMN_FORMAT_ATTR(_name, 0, _fld)

static struct attribute *arm_cmn_format_attrs[] = {
	CMN_FORMAT_ATTR(type, CMN_CONFIG_TYPE),
	CMN_FORMAT_ATTR(eventid, CMN_CONFIG_EVENTID),
	CMN_FORMAT_ATTR(occupid, CMN_CONFIG_OCCUPID),
	CMN_FORMAT_ATTR(bynodeid, CMN_CONFIG_BYNODEID),
	CMN_FORMAT_ATTR(nodeid, CMN_CONFIG_NODEID),

	CMN_FORMAT_ATTR(wp_dev_sel, CMN_CONFIG_WP_DEV_SEL),
	CMN_FORMAT_ATTR(wp_chn_sel, CMN_CONFIG_WP_CHN_SEL),
	CMN_FORMAT_ATTR(wp_grp, CMN_CONFIG_WP_GRP),
	CMN_FORMAT_ATTR(wp_exclusive, CMN_CONFIG_WP_EXCLUSIVE),
	CMN_FORMAT_ATTR(wp_combine, CMN_CONFIG_WP_COMBINE),

	_CMN_FORMAT_ATTR(wp_val, 1, CMN_CONFIG1_WP_VAL),
	_CMN_FORMAT_ATTR(wp_mask, 2, CMN_CONFIG2_WP_MASK),

	NULL
};

static const struct attribute_group arm_cmn_format_attrs_group = {
	.name = "format",
	.attrs = arm_cmn_format_attrs,
};

static ssize_t arm_cmn_cpumask_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(cmn->cpu));
}

static struct device_attribute arm_cmn_cpumask_attr =
		__ATTR(cpumask, 0444, arm_cmn_cpumask_show, NULL);

static ssize_t arm_cmn_identifier_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));

	return sysfs_emit(buf, "%03x%02x\n", cmn->part, cmn->rev);
}

static struct device_attribute arm_cmn_identifier_attr =
		__ATTR(identifier, 0444, arm_cmn_identifier_show, NULL);

static struct attribute *arm_cmn_other_attrs[] = {
	&arm_cmn_cpumask_attr.attr,
	&arm_cmn_identifier_attr.attr,
	NULL,
};

static const struct attribute_group arm_cmn_other_attrs_group = {
	.attrs = arm_cmn_other_attrs,
};

static const struct attribute_group *arm_cmn_attr_groups[] = {
	&arm_cmn_event_attrs_group,
	&arm_cmn_format_attrs_group,
	&arm_cmn_other_attrs_group,
	NULL
};

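/* Watchpoint index 0-3: direction (up = 0, down = 2) plus the match group */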
static int arm_cmn_wp_idx(struct perf_event *event)
{
	return CMN_EVENT_EVENTID(event) + CMN_EVENT_WP_GRP(event);
}

static u32 arm_cmn_wp_config(struct perf_event *event)
{
	u32 config;
	u32 dev = CMN_EVENT_WP_DEV_SEL(event);
	u32 chn = CMN_EVENT_WP_CHN_SEL(event);
	u32 grp = CMN_EVENT_WP_GRP(event);
	u32 exc = CMN_EVENT_WP_EXCLUSIVE(event);
	u32 combine = CMN_EVENT_WP_COMBINE(event);
	bool is_cmn600 = to_cmn(event->pmu)->part == PART_CMN600;

	config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) |
		 FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) |
		 FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_GRP, grp) |
		 FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL2, dev >> 1);
	if (exc)
		config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_EXCLUSIVE :
				      CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE;
	if (combine && !grp)
		config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_COMBINE :
				      CMN_DTM_WPn_CONFIG_WP_COMBINE;
	return config;
}

static void arm_cmn_set_state(struct arm_cmn *cmn, u32 state)
{
	if (!cmn->state)
		writel_relaxed(0, cmn->dtc[0].base + CMN_DT_PMCR);
	cmn->state |= state;
}

static void arm_cmn_clear_state(struct arm_cmn *cmn, u32 state)
{
	cmn->state &= ~state;
	if (!cmn->state)
		writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN,
			       cmn->dtc[0].base + CMN_DT_PMCR);
}

static void arm_cmn_pmu_enable(struct pmu *pmu)
{
	arm_cmn_clear_state(to_cmn(pmu), CMN_STATE_DISABLED);
}

static void arm_cmn_pmu_disable(struct pmu *pmu)
{
	arm_cmn_set_state(to_cmn(pmu), CMN_STATE_DISABLED);
}

static u64 arm_cmn_read_dtm(struct arm_cmn *cmn, struct arm_cmn_hw_event *hw,
			    bool snapshot)
{
	struct arm_cmn_dtm *dtm = NULL;
	struct arm_cmn_node *dn;
	unsigned int i, offset, dtm_idx;
	u64 reg, count = 0;

	offset = snapshot ? CMN_DTM_PMEVCNTSR : CMN_DTM_PMEVCNT;
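	/*
	 * DTM counters are 16-bit fields packed four to a 64-bit register,
	 * so read each DTM once and pick out the relevant slots.
	 */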
	for_each_hw_dn(hw, dn, i) {
		if (dtm != &cmn->dtms[dn->dtm]) {
			dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset;
			reg = readq_relaxed(dtm->base + offset);
		}
		dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
		count += (u16)(reg >> (dtm_idx * 16));
	}
	return count;
}

static u64 arm_cmn_read_cc(struct arm_cmn_dtc *dtc)
{
	u64 val = readq_relaxed(dtc->base + CMN_DT_PMCCNTR);

	writeq_relaxed(CMN_CC_INIT, dtc->base + CMN_DT_PMCCNTR);
	return (val - CMN_CC_INIT) & ((CMN_CC_INIT << 1) - 1);
}

static u32 arm_cmn_read_counter(struct arm_cmn_dtc *dtc, int idx)
{
	u32 val, pmevcnt = CMN_DT_PMEVCNT(idx);

	val = readl_relaxed(dtc->base + pmevcnt);
	writel_relaxed(CMN_COUNTER_INIT, dtc->base + pmevcnt);
	return val - CMN_COUNTER_INIT;
}

static void arm_cmn_init_counter(struct perf_event *event)
{
	struct arm_cmn *cmn = to_cmn(event->pmu);
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	u64 count;

	for_each_hw_dtc_idx(hw, i, idx) {
		writel_relaxed(CMN_COUNTER_INIT, cmn->dtc[i].base + CMN_DT_PMEVCNT(idx));
		cmn->dtc[i].counters[idx] = event;
	}

	count = arm_cmn_read_dtm(cmn, hw, false);
	local64_set(&event->hw.prev_count, count);
}

static void arm_cmn_event_read(struct perf_event *event)
{
	struct arm_cmn *cmn = to_cmn(event->pmu);
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	u64 delta, new, prev;
	unsigned long flags;

	if (CMN_EVENT_TYPE(event) == CMN_TYPE_DTC) {
		delta = arm_cmn_read_cc(cmn->dtc + hw->dtc_idx[0]);
		local64_add(delta, &event->count);
		return;
	}
	new = arm_cmn_read_dtm(cmn, hw, false);
	prev = local64_xchg(&event->hw.prev_count, new);

	delta = new - prev;

	local_irq_save(flags);
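	/*
	 * The DTC counters accumulate overflows of the 16-bit DTM counters,
	 * so they contribute to the total from bit 16 upwards.
	 */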
	for_each_hw_dtc_idx(hw, i, idx) {
		new = arm_cmn_read_counter(cmn->dtc + i, idx);
		delta += new << 16;
	}
	local_irq_restore(flags);
	local64_add(delta, &event->count);
}

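/*
 * Filter settings live in shared per-node registers, so refcount each
 * programmed value; concurrent events may only coexist on a node when
 * they agree on the filter.
 */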
static int arm_cmn_set_event_sel_hi(struct arm_cmn_node *dn,
				    enum cmn_filter_select fsel, u8 occupid)
{
	u64 reg;

	if (fsel == SEL_NONE)
		return 0;

	if (!dn->occupid[fsel].count) {
		dn->occupid[fsel].val = occupid;
		reg = FIELD_PREP(CMN__PMU_CBUSY_SNTHROTTLE_SEL,
				 dn->occupid[SEL_CBUSY_SNTHROTTLE_SEL].val) |
		      FIELD_PREP(CMN__PMU_SN_HOME_SEL,
				 dn->occupid[SEL_SN_HOME_SEL].val) |
		      FIELD_PREP(CMN__PMU_HBT_LBT_SEL,
				 dn->occupid[SEL_HBT_LBT_SEL].val) |
		      FIELD_PREP(CMN__PMU_CLASS_OCCUP_ID,
				 dn->occupid[SEL_CLASS_OCCUP_ID].val) |
		      FIELD_PREP(CMN__PMU_OCCUP1_ID,
				 dn->occupid[SEL_OCCUP1ID].val);
		writel_relaxed(reg >> 32, dn->pmu_base + CMN_PMU_EVENT_SEL + 4);
	} else if (dn->occupid[fsel].val != occupid) {
		return -EBUSY;
	}
	dn->occupid[fsel].count++;
	return 0;
}

static void arm_cmn_set_event_sel_lo(struct arm_cmn_node *dn, int dtm_idx,
				     int eventid, bool wide_sel)
{
	if (wide_sel) {
		dn->event_w[dtm_idx] = eventid;
		writeq_relaxed(le64_to_cpu(dn->event_sel_w), dn->pmu_base + CMN_PMU_EVENT_SEL);
	} else {
		dn->event[dtm_idx] = eventid;
		writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL);
	}
}

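/*
 * Starting a watchpoint event programs the configured value/mask pair into
 * the DTM; stopping it writes mask 0 with an all-ones compare value, which
 * presumably can never match real flit data, rather than using any
 * dedicated disable bit. Ordinary events are started and stopped simply by
 * writing and clearing their PMU_EVENT_SEL field.
 */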
static void arm_cmn_event_start(struct perf_event *event, int flags)
{
	struct arm_cmn *cmn = to_cmn(event->pmu);
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	struct arm_cmn_node *dn;
	enum cmn_node_type type = CMN_EVENT_TYPE(event);
	int i;

	if (type == CMN_TYPE_DTC) {
		i = hw->dtc_idx[0];
		writeq_relaxed(CMN_CC_INIT, cmn->dtc[i].base + CMN_DT_PMCCNTR);
		cmn->dtc[i].cc_active = true;
	} else if (type == CMN_TYPE_WP) {
		int wp_idx = arm_cmn_wp_idx(event);
		u64 val = CMN_EVENT_WP_VAL(event);
		u64 mask = CMN_EVENT_WP_MASK(event);

		for_each_hw_dn(hw, dn, i) {
			void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset);

			writeq_relaxed(val, base + CMN_DTM_WPn_VAL(wp_idx));
			writeq_relaxed(mask, base + CMN_DTM_WPn_MASK(wp_idx));
		}
	} else for_each_hw_dn(hw, dn, i) {
		int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);

		arm_cmn_set_event_sel_lo(dn, dtm_idx, CMN_EVENT_EVENTID(event),
					 hw->wide_sel);
	}
}

static void arm_cmn_event_stop(struct perf_event *event, int flags)
{
	struct arm_cmn *cmn = to_cmn(event->pmu);
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	struct arm_cmn_node *dn;
	enum cmn_node_type type = CMN_EVENT_TYPE(event);
	int i;

	if (type == CMN_TYPE_DTC) {
		i = hw->dtc_idx[0];
		cmn->dtc[i].cc_active = false;
	} else if (type == CMN_TYPE_WP) {
		int wp_idx = arm_cmn_wp_idx(event);

		for_each_hw_dn(hw, dn, i) {
			void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset);

			writeq_relaxed(0, base + CMN_DTM_WPn_MASK(wp_idx));
			writeq_relaxed(~0ULL, base + CMN_DTM_WPn_VAL(wp_idx));
		}
	} else for_each_hw_dn(hw, dn, i) {
		int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);

		arm_cmn_set_event_sel_lo(dn, dtm_idx, 0, hw->wide_sel);
	}

	arm_cmn_event_read(event);
}

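/*
 * Group validation works by dry-running the whole group into the scratch
 * tallies below: count the would-be users of every DTC and DTM counter and
 * remember each claimed occupancy filter and watchpoint slot, then check
 * that the new event still fits. Values are stored +1 so that zero means
 * "unused".
 */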
struct arm_cmn_val {
	u8 dtm_count[CMN_MAX_DTMS];
	u8 occupid[CMN_MAX_DTMS][SEL_MAX];
	u8 wp[CMN_MAX_DTMS][4];
	int dtc_count[CMN_MAX_DTCS];
	bool cycles;
};

static void arm_cmn_val_add_event(struct arm_cmn *cmn, struct arm_cmn_val *val,
				  struct perf_event *event)
{
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	struct arm_cmn_node *dn;
	enum cmn_node_type type;
	int i;

	if (is_software_event(event))
		return;

	type = CMN_EVENT_TYPE(event);
	if (type == CMN_TYPE_DTC) {
		val->cycles = true;
		return;
	}

	for_each_hw_dtc_idx(hw, dtc, idx)
		val->dtc_count[dtc]++;

	for_each_hw_dn(hw, dn, i) {
		int wp_idx, dtm = dn->dtm, sel = hw->filter_sel;

		val->dtm_count[dtm]++;

		if (sel > SEL_NONE)
			val->occupid[dtm][sel] = CMN_EVENT_OCCUPID(event) + 1;

		if (type != CMN_TYPE_WP)
			continue;

		wp_idx = arm_cmn_wp_idx(event);
		val->wp[dtm][wp_idx] = CMN_EVENT_WP_COMBINE(event) + 1;
	}
}

static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event)
{
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	struct arm_cmn_node *dn;
	struct perf_event *sibling, *leader = event->group_leader;
	enum cmn_node_type type;
	struct arm_cmn_val *val;
	int i, ret = -EINVAL;

	if (leader == event)
		return 0;

	if (event->pmu != leader->pmu && !is_software_event(leader))
		return -EINVAL;

	val = kzalloc(sizeof(*val), GFP_KERNEL);
	if (!val)
		return -ENOMEM;

	arm_cmn_val_add_event(cmn, val, leader);
	for_each_sibling_event(sibling, leader)
		arm_cmn_val_add_event(cmn, val, sibling);

	type = CMN_EVENT_TYPE(event);
	if (type == CMN_TYPE_DTC) {
		ret = val->cycles ? -EINVAL : 0;
		goto done;
	}

	for (i = 0; i < CMN_MAX_DTCS; i++)
		if (val->dtc_count[i] == CMN_DT_NUM_COUNTERS)
			goto done;

	for_each_hw_dn(hw, dn, i) {
		int wp_idx, wp_cmb, dtm = dn->dtm, sel = hw->filter_sel;

		if (val->dtm_count[dtm] == CMN_DTM_NUM_COUNTERS)
			goto done;

		if (sel > SEL_NONE && val->occupid[dtm][sel] &&
		    val->occupid[dtm][sel] != CMN_EVENT_OCCUPID(event) + 1)
			goto done;

		if (type != CMN_TYPE_WP)
			continue;

		wp_idx = arm_cmn_wp_idx(event);
		if (val->wp[dtm][wp_idx])
			goto done;

		wp_cmb = val->wp[dtm][wp_idx ^ 1];
		if (wp_cmb && wp_cmb != CMN_EVENT_WP_COMBINE(event) + 1)
			goto done;
	}

	ret = 0;
done:
	kfree(val);
	return ret;
}

static enum cmn_filter_select arm_cmn_filter_sel(const struct arm_cmn *cmn,
						 enum cmn_node_type type,
						 unsigned int eventid)
{
	struct arm_cmn_event_attr *e;
	enum cmn_model model = arm_cmn_model(cmn);

	for (int i = 0; i < ARRAY_SIZE(arm_cmn_event_attrs) - 1; i++) {
		e = container_of(arm_cmn_event_attrs[i], typeof(*e), attr.attr);
		if (e->model & model && e->type == type && e->eventid == eventid)
			return e->fsel;
	}
	return SEL_NONE;
}

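/*
 * Example usage, assuming this instance registered as "arm_cmn_0" and a
 * hypothetical HN-F node at nodeid 0x68 (event and field names come from
 * the attr groups defined earlier; the encodings here are illustrative):
 *
 *   perf stat -e arm_cmn_0/dtc_cycles/ -a sleep 1
 *   perf stat -e arm_cmn_0/hnf_pocq_reqs_recvd,bynodeid=1,nodeid=0x68/ -a sleep 1
 */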
static int arm_cmn_event_init(struct perf_event *event)
{
	struct arm_cmn *cmn = to_cmn(event->pmu);
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	struct arm_cmn_node *dn;
	enum cmn_node_type type;
	bool bynodeid;
	u16 nodeid, eventid;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	event->cpu = cmn->cpu;
	if (event->cpu < 0)
		return -EINVAL;

	type = CMN_EVENT_TYPE(event);
	/* DTC events (i.e. cycles) already have everything they need */
	if (type == CMN_TYPE_DTC)
		return arm_cmn_validate_group(cmn, event);

	eventid = CMN_EVENT_EVENTID(event);
	/* For watchpoints we need the actual XP node here */
	if (type == CMN_TYPE_WP) {
		type = CMN_TYPE_XP;
		/* ...and we need a "real" direction */
		if (eventid != CMN_WP_UP && eventid != CMN_WP_DOWN)
			return -EINVAL;
		/* ...but the DTM may depend on which port we're watching */
		if (cmn->multi_dtm)
			hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2;
	} else if (type == CMN_TYPE_XP && cmn->part == PART_CMN700) {
		hw->wide_sel = true;
	}

	/* This is sufficiently annoying to recalculate, so cache it */
	hw->filter_sel = arm_cmn_filter_sel(cmn, type, eventid);

	bynodeid = CMN_EVENT_BYNODEID(event);
	nodeid = CMN_EVENT_NODEID(event);

	hw->dn = arm_cmn_node(cmn, type);
	if (!hw->dn)
		return -EINVAL;

	memset(hw->dtc_idx, -1, sizeof(hw->dtc_idx));
	for (dn = hw->dn; dn->type == type; dn++) {
		if (bynodeid && dn->id != nodeid) {
			hw->dn++;
			continue;
		}
		hw->num_dns++;
		if (dn->dtc < 0)
			memset(hw->dtc_idx, 0, cmn->num_dtcs);
		else
			hw->dtc_idx[dn->dtc] = 0;

		if (bynodeid)
			break;
	}

	if (!hw->num_dns) {
		struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, nodeid);

		dev_dbg(cmn->dev, "invalid node 0x%x (%d,%d,%d,%d) type 0x%x\n",
			nodeid, nid.x, nid.y, nid.port, nid.dev, type);
		return -EINVAL;
	}

	return arm_cmn_validate_group(cmn, event);
}

static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event,
				int i)
{
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	enum cmn_node_type type = CMN_EVENT_TYPE(event);

	while (i--) {
		struct arm_cmn_dtm *dtm = &cmn->dtms[hw->dn[i].dtm] + hw->dtm_offset;
		unsigned int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);

		if (type == CMN_TYPE_WP)
			dtm->wp_event[arm_cmn_wp_idx(event)] = -1;

		if (hw->filter_sel > SEL_NONE)
			hw->dn[i].occupid[hw->filter_sel].count--;

		dtm->pmu_config_low &= ~CMN__PMEVCNT_PAIRED(dtm_idx);
		writel_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG);
	}
	memset(hw->dtm_idx, 0, sizeof(hw->dtm_idx));

	for_each_hw_dtc_idx(hw, j, idx)
		cmn->dtc[j].counters[idx] = NULL;
}

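/*
 * Counter allocation is two-level: claim a 32-bit global counter in each
 * relevant DTC first, then a 16-bit local counter in each node's DTM, wire
 * the node's output to it via input_sel, and pair the local counter with
 * the global one through the GLOBAL_NUM field. Any failure unwinds the
 * DTMs claimed so far via arm_cmn_event_clear().
 */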
static int arm_cmn_event_add(struct perf_event *event, int flags)
{
	struct arm_cmn *cmn = to_cmn(event->pmu);
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	struct arm_cmn_node *dn;
	enum cmn_node_type type = CMN_EVENT_TYPE(event);
	unsigned int input_sel, i = 0;

	if (type == CMN_TYPE_DTC) {
		while (cmn->dtc[i].cycles)
			if (++i == cmn->num_dtcs)
				return -ENOSPC;

		cmn->dtc[i].cycles = event;
		hw->dtc_idx[0] = i;

		if (flags & PERF_EF_START)
			arm_cmn_event_start(event, 0);
		return 0;
	}

	/* Grab the global counters first... */
	for_each_hw_dtc_idx(hw, j, idx) {
		if (cmn->part == PART_CMN600 && j > 0) {
			idx = hw->dtc_idx[0];
		} else {
			idx = 0;
			while (cmn->dtc[j].counters[idx])
				if (++idx == CMN_DT_NUM_COUNTERS)
					goto free_dtms;
		}
		hw->dtc_idx[j] = idx;
	}

	/* ...then the local counters to feed them */
	for_each_hw_dn(hw, dn, i) {
		struct arm_cmn_dtm *dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset;
		unsigned int dtm_idx, shift, d = max_t(int, dn->dtc, 0);
		u64 reg;

		dtm_idx = 0;
		while (dtm->pmu_config_low & CMN__PMEVCNT_PAIRED(dtm_idx))
			if (++dtm_idx == CMN_DTM_NUM_COUNTERS)
				goto free_dtms;

		if (type == CMN_TYPE_XP) {
			input_sel = CMN__PMEVCNT0_INPUT_SEL_XP + dtm_idx;
		} else if (type == CMN_TYPE_WP) {
			int tmp, wp_idx = arm_cmn_wp_idx(event);
			u32 cfg = arm_cmn_wp_config(event);

			if (dtm->wp_event[wp_idx] >= 0)
				goto free_dtms;

			tmp = dtm->wp_event[wp_idx ^ 1];
			if (tmp >= 0 && CMN_EVENT_WP_COMBINE(event) !=
					CMN_EVENT_WP_COMBINE(cmn->dtc[d].counters[tmp]))
				goto free_dtms;

			input_sel = CMN__PMEVCNT0_INPUT_SEL_WP + wp_idx;
			dtm->wp_event[wp_idx] = hw->dtc_idx[d];
			writel_relaxed(cfg, dtm->base + CMN_DTM_WPn_CONFIG(wp_idx));
		} else {
			struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);

			if (cmn->multi_dtm)
				nid.port %= 2;

			input_sel = CMN__PMEVCNT0_INPUT_SEL_DEV + dtm_idx +
				    (nid.port << 4) + (nid.dev << 2);

			if (arm_cmn_set_event_sel_hi(dn, hw->filter_sel, CMN_EVENT_OCCUPID(event)))
				goto free_dtms;
		}

		arm_cmn_set_index(hw->dtm_idx, i, dtm_idx);

		dtm->input_sel[dtm_idx] = input_sel;
		shift = CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(dtm_idx);
		dtm->pmu_config_low &= ~(CMN__PMEVCNT0_GLOBAL_NUM << shift);
		dtm->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, hw->dtc_idx[d]) << shift;
		dtm->pmu_config_low |= CMN__PMEVCNT_PAIRED(dtm_idx);
		reg = (u64)le32_to_cpu(dtm->pmu_config_high) << 32 | dtm->pmu_config_low;
		writeq_relaxed(reg, dtm->base + CMN_DTM_PMU_CONFIG);
	}

	/* Go go go! */
	arm_cmn_init_counter(event);

	if (flags & PERF_EF_START)
		arm_cmn_event_start(event, 0);

	return 0;

free_dtms:
	arm_cmn_event_clear(cmn, event, i);
	return -ENOSPC;
}

static void arm_cmn_event_del(struct perf_event *event, int flags)
{
	struct arm_cmn *cmn = to_cmn(event->pmu);
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	enum cmn_node_type type = CMN_EVENT_TYPE(event);

	arm_cmn_event_stop(event, PERF_EF_UPDATE);

	if (type == CMN_TYPE_DTC)
		cmn->dtc[hw->dtc_idx[0]].cycles = NULL;
	else
		arm_cmn_event_clear(cmn, event, hw->num_dns);
}

/*
 * We stop the PMU for both add and read, to avoid skew across DTM counters.
 * In theory we could use snapshots to read without stopping, but then it
 * becomes a lot trickier to deal with overflow and racing against interrupts,
 * plus it seems they don't work properly on some hardware anyway :(
 */
static void arm_cmn_start_txn(struct pmu *pmu, unsigned int flags)
{
	arm_cmn_set_state(to_cmn(pmu), CMN_STATE_TXN);
}

static void arm_cmn_end_txn(struct pmu *pmu)
{
	arm_cmn_clear_state(to_cmn(pmu), CMN_STATE_TXN);
}

static int arm_cmn_commit_txn(struct pmu *pmu)
{
	arm_cmn_end_txn(pmu);
	return 0;
}

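/*
 * The PMU is tied to a single CPU at a time: the perf context and all DTC
 * IRQ affinities move together. The hotplug callbacks below prefer a CPU
 * on the CMN's own NUMA node, falling back to any online CPU.
 */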
static void arm_cmn_migrate(struct arm_cmn *cmn, unsigned int cpu)
{
	unsigned int i;

	perf_pmu_migrate_context(&cmn->pmu, cmn->cpu, cpu);
	for (i = 0; i < cmn->num_dtcs; i++)
		irq_set_affinity(cmn->dtc[i].irq, cpumask_of(cpu));
	cmn->cpu = cpu;
}

static int arm_cmn_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
{
	struct arm_cmn *cmn;
	int node;

	cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node);
	node = dev_to_node(cmn->dev);
	if (node != NUMA_NO_NODE && cpu_to_node(cmn->cpu) != node && cpu_to_node(cpu) == node)
		arm_cmn_migrate(cmn, cpu);
	return 0;
}

static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
{
	struct arm_cmn *cmn;
	unsigned int target;
	int node;
	cpumask_t mask;

	cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node);
	if (cpu != cmn->cpu)
		return 0;

	node = dev_to_node(cmn->dev);
	if (cpumask_and(&mask, cpumask_of_node(node), cpu_online_mask) &&
	    cpumask_andnot(&mask, &mask, cpumask_of(cpu)))
		target = cpumask_any(&mask);
	else
		target = cpumask_any_but(cpu_online_mask, cpu);
	if (target < nr_cpu_ids)
		arm_cmn_migrate(cmn, target);
	return 0;
}

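/*
 * On overflow, each set PMOVSR bit corresponds to one global counter, and
 * that counter's accumulated value is folded into the event at its 2^16
 * weighting, exactly as in arm_cmn_event_read(). The top status bit covers
 * the cycle counter. DTCs sharing one IRQ line are chained via irq_friend
 * offsets so a single handler walks them all.
 */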
static irqreturn_t arm_cmn_handle_irq(int irq, void *dev_id)
{
	struct arm_cmn_dtc *dtc = dev_id;
	irqreturn_t ret = IRQ_NONE;

	for (;;) {
		u32 status = readl_relaxed(dtc->base + CMN_DT_PMOVSR);
		u64 delta;
		int i;

		for (i = 0; i < CMN_DT_NUM_COUNTERS; i++) {
			if (status & (1U << i)) {
				ret = IRQ_HANDLED;
				if (WARN_ON(!dtc->counters[i]))
					continue;
				delta = (u64)arm_cmn_read_counter(dtc, i) << 16;
				local64_add(delta, &dtc->counters[i]->count);
			}
		}

		if (status & (1U << CMN_DT_NUM_COUNTERS)) {
			ret = IRQ_HANDLED;
			if (dtc->cc_active && !WARN_ON(!dtc->cycles)) {
				delta = arm_cmn_read_cc(dtc);
				local64_add(delta, &dtc->cycles->count);
			}
		}

		writel_relaxed(status, dtc->base + CMN_DT_PMOVSR_CLR);

		if (!dtc->irq_friend)
			return ret;
		dtc += dtc->irq_friend;
	}
}

/* We can reasonably accommodate DTCs of the same CMN sharing IRQs */
static int arm_cmn_init_irqs(struct arm_cmn *cmn)
{
	int i, j, irq, err;

	for (i = 0; i < cmn->num_dtcs; i++) {
		irq = cmn->dtc[i].irq;
		for (j = i; j--; ) {
			if (cmn->dtc[j].irq == irq) {
				cmn->dtc[j].irq_friend = i - j;
				goto next;
			}
		}
		err = devm_request_irq(cmn->dev, irq, arm_cmn_handle_irq,
				       IRQF_NOBALANCING | IRQF_NO_THREAD,
				       dev_name(cmn->dev), &cmn->dtc[i]);
		if (err)
			return err;

		err = irq_set_affinity(irq, cpumask_of(cmn->cpu));
		if (err)
			return err;
	next:
		; /* isn't C great? */
	}
	return 0;
}

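/*
 * DTMs come up with the PMU enabled but all counters unpaired and all four
 * watchpoints disarmed (mask 0, compare value all-ones), matching the
 * "stopped" state that arm_cmn_event_stop() restores.
 */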
static void arm_cmn_init_dtm(struct arm_cmn_dtm *dtm, struct arm_cmn_node *xp, int idx)
{
	int i;

	dtm->base = xp->pmu_base + CMN_DTM_OFFSET(idx);
	dtm->pmu_config_low = CMN_DTM_PMU_CONFIG_PMU_EN;
	writeq_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG);
	for (i = 0; i < 4; i++) {
		dtm->wp_event[i] = -1;
		writeq_relaxed(0, dtm->base + CMN_DTM_WPn_MASK(i));
		writeq_relaxed(~0ULL, dtm->base + CMN_DTM_WPn_VAL(i));
	}
}

static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int idx)
{
	struct arm_cmn_dtc *dtc = cmn->dtc + idx;

	dtc->base = dn->pmu_base - CMN_PMU_OFFSET;
	dtc->irq = platform_get_irq(to_platform_device(cmn->dev), idx);
	if (dtc->irq < 0)
		return dtc->irq;

	writel_relaxed(CMN_DT_DTC_CTL_DT_EN, dtc->base + CMN_DT_DTC_CTL);
	writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR);
	writeq_relaxed(0, dtc->base + CMN_DT_PMCCNTR);
	writel_relaxed(0x1ff, dtc->base + CMN_DT_PMOVSR_CLR);

	return 0;
}

static int arm_cmn_node_cmp(const void *a, const void *b)
{
	const struct arm_cmn_node *dna = a, *dnb = b;
	int cmp;

	cmp = dna->type - dnb->type;
	if (!cmp)
		cmp = dna->logid - dnb->logid;
	return cmp;
}

static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
{
	struct arm_cmn_node *dn, *xp;
	int dtc_idx = 0;

	cmn->dtc = devm_kcalloc(cmn->dev, cmn->num_dtcs, sizeof(cmn->dtc[0]), GFP_KERNEL);
	if (!cmn->dtc)
		return -ENOMEM;

	sort(cmn->dns, cmn->num_dns, sizeof(cmn->dns[0]), arm_cmn_node_cmp, NULL);

	cmn->xps = arm_cmn_node(cmn, CMN_TYPE_XP);

	if (cmn->part == PART_CMN600 && cmn->num_dtcs > 1) {
		/* We do at least know that a DTC's XP must be in that DTC's domain */
		dn = arm_cmn_node(cmn, CMN_TYPE_DTC);
		for (int i = 0; i < cmn->num_dtcs; i++)
			arm_cmn_node_to_xp(cmn, dn + i)->dtc = i;
	}

	for (dn = cmn->dns; dn->type; dn++) {
		if (dn->type == CMN_TYPE_XP)
			continue;

		xp = arm_cmn_node_to_xp(cmn, dn);
		dn->dtc = xp->dtc;
		dn->dtm = xp->dtm;
		if (cmn->multi_dtm)
			dn->dtm += arm_cmn_nid(cmn, dn->id).port / 2;

		if (dn->type == CMN_TYPE_DTC) {
			int err = arm_cmn_init_dtc(cmn, dn, dtc_idx++);

			if (err)
				return err;
		}

		/* To the PMU, RN-Ds don't add anything over RN-Is, so smoosh them together */
		if (dn->type == CMN_TYPE_RND)
			dn->type = CMN_TYPE_RNI;

		/* We split the RN-I off already, so let the CCLA part match CCLA events */
		if (dn->type == CMN_TYPE_CCLA_RNI)
			dn->type = CMN_TYPE_CCLA;
	}

	arm_cmn_set_state(cmn, CMN_STATE_DISABLED);

	return 0;
}

static unsigned int arm_cmn_dtc_domain(struct arm_cmn *cmn, void __iomem *xp_region)
{
	int offset = CMN_DTM_UNIT_INFO;

	if (cmn->part == PART_CMN650 || cmn->part == PART_CI700)
		offset = CMN650_DTM_UNIT_INFO;

	return FIELD_GET(CMN_DTM_UNIT_INFO_DTC_DOMAIN, readl_relaxed(xp_region + offset));
}

static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_cmn_node *node)
{
	int level;
	u64 reg = readq_relaxed(cmn->base + offset + CMN_NODE_INFO);

	node->type = FIELD_GET(CMN_NI_NODE_TYPE, reg);
	node->id = FIELD_GET(CMN_NI_NODE_ID, reg);
	node->logid = FIELD_GET(CMN_NI_LOGICAL_ID, reg);

	node->pmu_base = cmn->base + offset + CMN_PMU_OFFSET;

	if (node->type == CMN_TYPE_CFG)
		level = 0;
	else if (node->type == CMN_TYPE_XP)
		level = 1;
	else
		level = 2;

	dev_dbg(cmn->dev, "node%*c%#06hx%*ctype:%-#6x id:%-4hd off:%#x\n",
			(level * 2) + 1, ' ', node->id, 5 - (level * 2), ' ',
			node->type, node->logid, offset);
}

static enum cmn_node_type arm_cmn_subtype(enum cmn_node_type type)
{
	switch (type) {
	case CMN_TYPE_HNP:
		return CMN_TYPE_HNI;
	case CMN_TYPE_CCLA_RNI:
		return CMN_TYPE_RNI;
	default:
		return CMN_TYPE_INVALID;
	}
}

static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
{
	void __iomem *cfg_region;
	struct arm_cmn_node cfg, *dn;
	struct arm_cmn_dtm *dtm;
	enum cmn_part part;
	u16 child_count, child_poff;
	u32 xp_offset[CMN_MAX_XPS];
	u64 reg;
	int i, j;
	size_t sz;

	arm_cmn_init_node_info(cmn, rgn_offset, &cfg);
	if (cfg.type != CMN_TYPE_CFG)
		return -ENODEV;

	cfg_region = cmn->base + rgn_offset;

	reg = readq_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_01);
	part = FIELD_GET(CMN_CFGM_PID0_PART_0, reg);
	part |= FIELD_GET(CMN_CFGM_PID1_PART_1, reg) << 8;
	if (cmn->part && cmn->part != part)
		dev_warn(cmn->dev,
			 "Firmware binding mismatch: expected part number 0x%x, found 0x%x\n",
			 cmn->part, part);
	cmn->part = part;
	if (!arm_cmn_model(cmn))
		dev_warn(cmn->dev, "Unknown part number: 0x%x\n", part);

	reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_23);
	cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg);

	reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL);
	cmn->multi_dtm = reg & CMN_INFO_MULTIPLE_DTM_EN;
	cmn->rsp_vc_num = FIELD_GET(CMN_INFO_RSP_VC_NUM, reg);
	cmn->dat_vc_num = FIELD_GET(CMN_INFO_DAT_VC_NUM, reg);

	reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL_1);
	cmn->snp_vc_num = FIELD_GET(CMN_INFO_SNP_VC_NUM, reg);
	cmn->req_vc_num = FIELD_GET(CMN_INFO_REQ_VC_NUM, reg);

	reg = readq_relaxed(cfg_region + CMN_CHILD_INFO);
	child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg);
	child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg);

	cmn->num_xps = child_count;
	cmn->num_dns = cmn->num_xps;

	/* Pass 1: visit the XPs, enumerate their children */
	for (i = 0; i < cmn->num_xps; i++) {
		reg = readq_relaxed(cfg_region + child_poff + i * 8);
		xp_offset[i] = reg & CMN_CHILD_NODE_ADDR;

		reg = readq_relaxed(cmn->base + xp_offset[i] + CMN_CHILD_INFO);
		cmn->num_dns += FIELD_GET(CMN_CI_CHILD_COUNT, reg);
	}

	/*
	 * Some nodes effectively have two separate types, which we'll handle
	 * by creating one of each internally. For a (very) safe initial upper
	 * bound, account for double the number of non-XP nodes.
	 */
	dn = devm_kcalloc(cmn->dev, cmn->num_dns * 2 - cmn->num_xps,
			  sizeof(*dn), GFP_KERNEL);
	if (!dn)
		return -ENOMEM;

	/* Initial safe upper bound on DTMs for any possible mesh layout */
	i = cmn->num_xps;
	if (cmn->multi_dtm)
		i += cmn->num_xps + 1;
	dtm = devm_kcalloc(cmn->dev, i, sizeof(*dtm), GFP_KERNEL);
	if (!dtm)
		return -ENOMEM;

	/* Pass 2: now we can actually populate the nodes */
	cmn->dns = dn;
	cmn->dtms = dtm;
	for (i = 0; i < cmn->num_xps; i++) {
		void __iomem *xp_region = cmn->base + xp_offset[i];
		struct arm_cmn_node *xp = dn++;
		unsigned int xp_ports = 0;

		arm_cmn_init_node_info(cmn, xp_offset[i], xp);
		/*
		 * Thanks to the order in which XP logical IDs seem to be
		 * assigned, we can handily infer the mesh X dimension by
		 * looking out for the XP at (0,1) without needing to know
		 * the exact node ID format, which we can later derive.
		 */
		if (xp->id == (1 << 3))
			cmn->mesh_x = xp->logid;

		if (cmn->part == PART_CMN600)
			xp->dtc = -1;
		else
			xp->dtc = arm_cmn_dtc_domain(cmn, xp_region);

		xp->dtm = dtm - cmn->dtms;
		arm_cmn_init_dtm(dtm++, xp, 0);
		/*
		 * Keeping track of connected ports will let us filter out
		 * unnecessary XP events easily. We can also reliably infer the
		 * "extra device ports" configuration for the node ID format
		 * from this, since in that case we will see at least one XP
		 * with port 2 connected, for the HN-D.
		 */
		for (int p = 0; p < CMN_MAX_PORTS; p++)
			if (arm_cmn_device_connect_info(cmn, xp, p))
				xp_ports |= BIT(p);

		if (cmn->multi_dtm && (xp_ports & 0xc))
			arm_cmn_init_dtm(dtm++, xp, 1);
		if (cmn->multi_dtm && (xp_ports & 0x30))
			arm_cmn_init_dtm(dtm++, xp, 2);

		cmn->ports_used |= xp_ports;

		reg = readq_relaxed(xp_region + CMN_CHILD_INFO);
		child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg);
		child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg);

		for (j = 0; j < child_count; j++) {
			reg = readq_relaxed(xp_region + child_poff + j * 8);
			/*
			 * Don't even try to touch anything external, since in general
			 * we haven't a clue how to power up arbitrary CHI requesters.
			 * As of CMN-600r1 these could only be RN-SAMs or CXLAs,
			 * neither of which have any PMU events anyway.
			 * (Actually, CXLAs do seem to have grown some events in r1p2,
			 * but they don't go to regular XP DTMs, and they depend on
			 * secure configuration which we can't easily deal with)
			 */
			if (reg & CMN_CHILD_NODE_EXTERNAL) {
				dev_dbg(cmn->dev, "ignoring external node %llx\n", reg);
				continue;
			}

			arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, dn);

			switch (dn->type) {
			case CMN_TYPE_DTC:
				cmn->num_dtcs++;
				dn++;
				break;
			/* These guys have PMU events */
			case CMN_TYPE_DVM:
			case CMN_TYPE_HNI:
			case CMN_TYPE_HNF:
			case CMN_TYPE_SBSX:
			case CMN_TYPE_RNI:
			case CMN_TYPE_RND:
			case CMN_TYPE_MTSX:
			case CMN_TYPE_CXRA:
			case CMN_TYPE_CXHA:
			case CMN_TYPE_CCRA:
			case CMN_TYPE_CCHA:
			case CMN_TYPE_CCLA:
			case CMN_TYPE_HNS:
				dn++;
				break;
			/* Nothing to see here */
			case CMN_TYPE_MPAM_S:
			case CMN_TYPE_MPAM_NS:
			case CMN_TYPE_RNSAM:
			case CMN_TYPE_CXLA:
			case CMN_TYPE_HNS_MPAM_S:
			case CMN_TYPE_HNS_MPAM_NS:
				break;
			/*
			 * Split "optimised" combination nodes into separate
			 * types for the different event sets. Offsetting the
			 * base address lets us handle the second pmu_event_sel
			 * register via the normal mechanism later.
			 */
			case CMN_TYPE_HNP:
			case CMN_TYPE_CCLA_RNI:
				dn[1] = dn[0];
				dn[0].pmu_base += CMN_HNP_PMU_EVENT_SEL;
				dn[1].type = arm_cmn_subtype(dn->type);
				dn += 2;
				break;
			/* Something has gone horribly wrong */
			default:
				dev_err(cmn->dev, "invalid device node type: 0x%x\n", dn->type);
				return -ENODEV;
			}
		}
	}

	/* Correct for any nodes we added or skipped */
	cmn->num_dns = dn - cmn->dns;

	/* Cheeky +1 to help terminate pointer-based iteration later */
	sz = (void *)(dn + 1) - (void *)cmn->dns;
	dn = devm_krealloc(cmn->dev, cmn->dns, sz, GFP_KERNEL);
	if (dn)
		cmn->dns = dn;

	sz = (void *)dtm - (void *)cmn->dtms;
	dtm = devm_krealloc(cmn->dev, cmn->dtms, sz, GFP_KERNEL);
	if (dtm)
		cmn->dtms = dtm;

	/*
	 * If mesh_x wasn't set during discovery then we never saw
	 * an XP at (0,1), thus we must have an Nx1 configuration.
	 */
	if (!cmn->mesh_x)
		cmn->mesh_x = cmn->num_xps;
	cmn->mesh_y = cmn->num_xps / cmn->mesh_x;

	/* 1x1 config plays havoc with XP event encodings */
	if (cmn->num_xps == 1)
		dev_warn(cmn->dev, "1x1 config not fully supported, translate XP events manually\n");

	dev_dbg(cmn->dev, "periph_id part 0x%03x revision %d\n", cmn->part, cmn->rev);
	reg = cmn->ports_used;
	dev_dbg(cmn->dev, "mesh %dx%d, ID width %d, ports %6pbl%s\n",
		cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn), &reg,
		cmn->multi_dtm ? ", multi-DTM" : "");

	return 0;
}

static int arm_cmn600_acpi_probe(struct platform_device *pdev, struct arm_cmn *cmn)
{
	struct resource *cfg, *root;

	cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!cfg)
		return -EINVAL;

	root = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!root)
		return -EINVAL;

	if (!resource_contains(cfg, root))
		swap(cfg, root);
	/*
	 * Note that devm_ioremap_resource() is dumb and won't let the platform
	 * device claim cfg when the ACPI companion device has already claimed
	 * root within it. But since they *are* already both claimed in the
	 * appropriate name, we don't really need to do it again here anyway.
	 */
	cmn->base = devm_ioremap(cmn->dev, cfg->start, resource_size(cfg));
	if (!cmn->base)
		return -ENOMEM;

	return root->start - cfg->start;
}

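/*
 * Both CMN-600 probe flavours resolve the same thing: the offset of the
 * root (configuration) node within the register region, returned directly
 * on success or as a negative errno. ACPI derives it from the two memory
 * resources; DT reads it from the "arm,root-node" property.
 */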
static int arm_cmn600_of_probe(struct device_node *np)
{
	u32 rootnode;

	return of_property_read_u32(np, "arm,root-node", &rootnode) ?: rootnode;
}

static int arm_cmn_probe(struct platform_device *pdev)
{
	struct arm_cmn *cmn;
	const char *name;
	static atomic_t id;
	int err, rootnode, this_id;

	cmn = devm_kzalloc(&pdev->dev, sizeof(*cmn), GFP_KERNEL);
	if (!cmn)
		return -ENOMEM;

	cmn->dev = &pdev->dev;
	cmn->part = (unsigned long)device_get_match_data(cmn->dev);
	platform_set_drvdata(pdev, cmn);

	if (cmn->part == PART_CMN600 && has_acpi_companion(cmn->dev)) {
		rootnode = arm_cmn600_acpi_probe(pdev, cmn);
	} else {
		rootnode = 0;
		cmn->base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(cmn->base))
			return PTR_ERR(cmn->base);
		if (cmn->part == PART_CMN600)
			rootnode = arm_cmn600_of_probe(pdev->dev.of_node);
	}
	if (rootnode < 0)
		return rootnode;

	err = arm_cmn_discover(cmn, rootnode);
	if (err)
		return err;

	err = arm_cmn_init_dtcs(cmn);
	if (err)
		return err;

	err = arm_cmn_init_irqs(cmn);
	if (err)
		return err;

	cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev));
	cmn->pmu = (struct pmu) {
		.module = THIS_MODULE,
		.attr_groups = arm_cmn_attr_groups,
		.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
		.task_ctx_nr = perf_invalid_context,
		.pmu_enable = arm_cmn_pmu_enable,
		.pmu_disable = arm_cmn_pmu_disable,
		.event_init = arm_cmn_event_init,
		.add = arm_cmn_event_add,
		.del = arm_cmn_event_del,
		.start = arm_cmn_event_start,
		.stop = arm_cmn_event_stop,
		.read = arm_cmn_event_read,
		.start_txn = arm_cmn_start_txn,
		.commit_txn = arm_cmn_commit_txn,
		.cancel_txn = arm_cmn_end_txn,
	};

	this_id = atomic_fetch_inc(&id);
	name = devm_kasprintf(cmn->dev, GFP_KERNEL, "arm_cmn_%d", this_id);
	if (!name)
		return -ENOMEM;

	err = cpuhp_state_add_instance(arm_cmn_hp_state, &cmn->cpuhp_node);
	if (err)
		return err;

	err = perf_pmu_register(&cmn->pmu, name, -1);
	if (err)
		cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node);
	else
		arm_cmn_debugfs_init(cmn, this_id);

	return err;
}

static int arm_cmn_remove(struct platform_device *pdev)
{
	struct arm_cmn *cmn = platform_get_drvdata(pdev);

	writel_relaxed(0, cmn->dtc[0].base + CMN_DT_DTC_CTL);

	perf_pmu_unregister(&cmn->pmu);
	cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node);
	debugfs_remove(cmn->debug);
	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id arm_cmn_of_match[] = {
	{ .compatible = "arm,cmn-600", .data = (void *)PART_CMN600 },
	{ .compatible = "arm,cmn-650" },
	{ .compatible = "arm,cmn-700" },
	{ .compatible = "arm,ci-700" },
	{}
};
MODULE_DEVICE_TABLE(of, arm_cmn_of_match);
#endif

#ifdef CONFIG_ACPI
static const struct acpi_device_id arm_cmn_acpi_match[] = {
	{ "ARMHC600", PART_CMN600 },
	{ "ARMHC650" },
	{ "ARMHC700" },
	{}
};
MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match);
#endif

static struct platform_driver arm_cmn_driver = {
	.driver = {
		.name = "arm-cmn",
		.of_match_table = of_match_ptr(arm_cmn_of_match),
		.acpi_match_table = ACPI_PTR(arm_cmn_acpi_match),
	},
	.probe = arm_cmn_probe,
	.remove = arm_cmn_remove,
};

static int __init arm_cmn_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/arm/cmn:online",
				      arm_cmn_pmu_online_cpu,
				      arm_cmn_pmu_offline_cpu);
	if (ret < 0)
		return ret;

	arm_cmn_hp_state = ret;
	arm_cmn_debugfs = debugfs_create_dir("arm-cmn", NULL);

	ret = platform_driver_register(&arm_cmn_driver);
	if (ret) {
		cpuhp_remove_multi_state(arm_cmn_hp_state);
		debugfs_remove(arm_cmn_debugfs);
	}
	return ret;
	return ret;
}

static void __exit arm_cmn_exit(void)
{
	platform_driver_unregister(&arm_cmn_driver);
	cpuhp_remove_multi_state(arm_cmn_hp_state);
	debugfs_remove(arm_cmn_debugfs);
}

module_init(arm_cmn_init);
module_exit(arm_cmn_exit);

MODULE_AUTHOR("Robin Murphy <robin.murphy@arm.com>");
MODULE_DESCRIPTION("Arm CMN-600 PMU driver");
MODULE_LICENSE("GPL v2");