/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

static void pmb_unmap_entry(struct pmb_entry *);

static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

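/*
 * Claim the first free slot in the PMB entry bitmap. Returns the slot
 * number on success, or -ENOSPC if every hardware entry is in use.
 */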
static int pmb_alloc_entry(void)
{
	unsigned int pos;

repeat:
	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);

	/* find_first_zero_bit() returns NR_PMB_ENTRIES when the map is full */
	if (unlikely(pos >= NR_PMB_ENTRIES))
		return -ENOSPC;

	if (test_and_set_bit(pos, pmb_map))
		goto repeat;

	return pos;
}

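/*
 * Allocate and initialize a software PMB entry. A specific hardware
 * slot can be requested via @entry; passing PMB_NO_ENTRY picks the
 * first free slot instead.
 */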
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	int pos;

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (pos < 0)
			return ERR_PTR(pos);
	} else {
		if (test_and_set_bit(entry, pmb_map))
			return ERR_PTR(-ENOSPC);
		pos = entry;
	}

	pmbe = &pmb_entry_list[pos];
	if (!pmbe)
		return ERR_PTR(-ENOMEM);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;
	pmbe->size	= 0;

	return pmbe;
}

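/*
 * Return an entry's slot to the allocator and mark it unused.
 */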
static void pmb_free(struct pmb_entry *pmbe)
{
	clear_bit(pmbe->entry, pmb_map);
	pmbe->entry = PMB_NO_ENTRY;
}

/*
 * Must be run uncached.
 */
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	jump_to_uncached();

	__raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));

#ifdef CONFIG_CACHE_WRITETHROUGH
	/*
	 * When we are in 32-bit address extended mode, CCR.CB becomes
	 * invalid, so care must be taken to manually adjust cacheable
	 * translations.
	 */
	if (likely(pmbe->flags & PMB_C))
		pmbe->flags |= PMB_WT;
#endif

	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));

	back_to_cached();
}

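/*
 * Invalidate a hardware entry by clearing the V bit in both the
 * address and data arrays. Like set_pmb_entry(), this must run
 * uncached.
 */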
static void clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	jump_to_uncached();

	/* Clear V-bit */
	addr = mk_pmb_addr(entry);
	__raw_writel(__raw_readl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	__raw_writel(__raw_readl(addr) & ~PMB_V, addr);

	back_to_cached();
}

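/*
 * Mapping sizes supported by the PMB, largest first so that
 * pmb_remap() always tries the biggest entry that still fits.
 */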
static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

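/*
 * Map @size bytes at physical address @phys to virtual address @vaddr
 * using as few PMB entries as possible, linking the entries together
 * for later tear-down. Returns the number of bytes actually mapped,
 * or a negative error code.
 */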
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long wanted;
	int pmb_flags, i;
	long err;
	u64 flags;

	flags = pgprot_val(prot);

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			err = PTR_ERR(pmbe);
			goto out;
		}

		set_pmb_entry(pmbe);

		phys	+= pmb_sizes[i].size;
		vaddr	+= pmb_sizes[i].size;
		size	-= pmb_sizes[i].size;

		pmbe->size = pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;
	}

	if (size >= 0x1000000)
		goto again;

	return wanted - size;

out:
	pmb_unmap_entry(pmbp);

	return err;
}

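/*
 * Tear down the mapping that starts at virtual address @addr, if one
 * has been established.
 */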
void pmb_unmap(unsigned long addr)
{
	struct pmb_entry *pmbe;
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == addr) {
				pmb_unmap_entry(pmbe);
				break;
			}
		}
	}
}

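/*
 * Walk the chain of linked entries starting at @pmbe, invalidating
 * each hardware slot and returning it to the allocator.
 */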
static void pmb_unmap_entry(struct pmb_entry *pmbe)
{
	if (unlikely(!pmbe))
		return;

	if (!test_bit(pmbe->entry, pmb_map)) {
		WARN_ON(1);
		return;
	}

	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe);
}

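/*
 * Only PPNs that fall within the kernel's own physical memory window
 * are considered valid for preservation.
 */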
static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

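/*
 * Import the boot-time PMB state: register every valid, in-range
 * hardware mapping in pmb_entry_list and invalidate everything else.
 */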
static int pmb_synchronize_mappings(void)
{
	unsigned int applied = 0;
	struct pmb_entry *pmbp = NULL;
	int i, j;

	pr_info("PMB: boot mappings:\n");

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			__raw_writel(addr_val & ~PMB_V, addr);
			__raw_writel(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
#if defined(CONFIG_CACHE_WRITETHROUGH)
			data_val |= PMB_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
			data_val &= ~PMB_WT;
#else
			data_val &= ~(PMB_C | PMB_WT);
#endif
			__raw_writel(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		/*
		 * Compare the previous entry against the current one to
		 * see if the entries span a contiguous mapping. If so,
		 * setup the entry links accordingly.
		 */
		if (pmbp && ((pmbe->vpn == (pmbp->vpn + pmbp->size)) &&
			     (pmbe->ppn == (pmbp->ppn + pmbp->size))))
			pmbp->link = pmbe;

		pmbp = pmbe;

		pr_info("\t0x%08lx -> 0x%08lx [ %ldMB %scached ]\n",
			vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, pmbe->size >> 20,
			(data_val & PMB_C) ? "" : "un");

		applied++;
	}

	return (applied == 0);
}

int pmb_init(void)
{
	int ret;

	jump_to_uncached();

	/*
	 * Sync our software copy of the PMB mappings with those in
	 * hardware. The mappings in the hardware PMB were either set up
	 * by the bootloader or very early on by the kernel.
	 */
	ret = pmb_synchronize_mappings();
	if (unlikely(ret == 0)) {
		back_to_cached();
		return 0;
	}

	__raw_writel(0, PMB_IRMCR);

	/* Flush out the TLB */
	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);

	back_to_cached();

	return 0;
}

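/*
 * PASCR.SE selects 32-bit address extended mode; when it is clear the
 * CPU is still running with the legacy 29-bit physical address map.
 */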
bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

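/*
 * debugfs dump of the raw hardware PMB state, one line per entry.
 */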
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
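/*
 * Re-program every allocated software entry into the hardware PMB when
 * resuming from hibernation.
 */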
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}
	}
	prev_state = state;
	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif