Commit ac1b01b0 authored by Patrik Jakobsson

drm/gma500: Give MMU code its own header file

Signed-off-by: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
parent 64a4aff2
@@ -18,6 +18,7 @@
#include <drm/drmP.h>
#include "psb_drv.h"
#include "psb_reg.h"
#include "mmu.h"
/*
* Code for the SGX MMU:
@@ -47,50 +48,6 @@
* but on average it should be fast.
*/
struct psb_mmu_driver {
/* protects driver- and pd structures. Always take in read mode
* before taking the page table spinlock.
*/
struct rw_semaphore sem;
/* protects page tables, directory tables and pt tables.
* and pt structures.
*/
spinlock_t lock;
atomic_t needs_tlbflush;
atomic_t *msvdx_mmu_invaldc;
struct psb_mmu_pd *default_pd;
uint32_t bif_ctrl;
int has_clflush;
int clflush_add;
unsigned long clflush_mask;
struct drm_device *dev;
};
struct psb_mmu_pd;
struct psb_mmu_pt {
struct psb_mmu_pd *pd;
uint32_t index;
uint32_t count;
struct page *p;
uint32_t *v;
};
struct psb_mmu_pd {
struct psb_mmu_driver *driver;
int hw_context;
struct psb_mmu_pt **tables;
struct page *p;
struct page *dummy_pt;
struct page *dummy_page;
uint32_t pd_mask;
uint32_t invalid_pde;
uint32_t invalid_pte;
};
static inline uint32_t psb_mmu_pt_index(uint32_t offset)
{
return (offset >> PSB_PTE_SHIFT) & 0x3FF;
...
/**************************************************************************
* Copyright (c) 2007-2011, Intel Corporation.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
**************************************************************************/
#ifndef __MMU_H
#define __MMU_H
struct psb_mmu_driver {
/* protects driver- and pd structures. Always take in read mode
* before taking the page table spinlock.
*/
struct rw_semaphore sem;
/* protects page tables, directory tables and pt tables.
* and pt structures.
*/
spinlock_t lock;
atomic_t needs_tlbflush;
atomic_t *msvdx_mmu_invaldc;
struct psb_mmu_pd *default_pd;
uint32_t bif_ctrl;
int has_clflush;
int clflush_add;
unsigned long clflush_mask;
struct drm_device *dev;
};
struct psb_mmu_pd;
struct psb_mmu_pt {
struct psb_mmu_pd *pd;
uint32_t index;
uint32_t count;
struct page *p;
uint32_t *v;
};
struct psb_mmu_pd {
struct psb_mmu_driver *driver;
int hw_context;
struct psb_mmu_pt **tables;
struct page *p;
struct page *dummy_pt;
struct page *dummy_page;
uint32_t pd_mask;
uint32_t invalid_pde;
uint32_t invalid_pte;
};
extern struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
int trap_pagefaults,
int invalid_type,
atomic_t *msvdx_mmu_invaldc);
extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
*driver);
extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
int trap_pagefaults,
int invalid_type);
extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
extern void psb_mmu_flush(struct psb_mmu_driver *driver);
extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
unsigned long address,
uint32_t num_pages);
extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
uint32_t start_pfn,
unsigned long address,
uint32_t num_pages, int type);
extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
unsigned long *pfn);
extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
unsigned long address, uint32_t num_pages,
uint32_t desired_tile_stride,
uint32_t hw_tile_stride, int type);
extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
unsigned long address, uint32_t num_pages,
uint32_t desired_tile_stride,
uint32_t hw_tile_stride);
#endif
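
For orientation, here is a minimal sketch of a file that consumes the new header. Only the psb_mmu_* signatures are taken from mmu.h above; the helper name, the MMU virtual address, the zero tile strides, the type value and the error handling are assumptions made for illustration, not code from this commit.

#include "psb_drv.h"
#include "mmu.h"

/* Hypothetical consumer of mmu.h: bring up the MMU driver and map a page
 * array through the default page directory. Signatures follow the
 * declarations above; names and parameter choices are illustrative only. */
static int example_mmu_map(struct drm_device *dev, struct page **pages,
			   uint32_t num_pages, unsigned long mmu_address)
{
	struct psb_mmu_driver *driver;
	struct psb_mmu_pd *pd;
	int ret;

	/* Assumed parameters: no pagefault trapping, invalid type 0, no
	 * MSVDX invalidation flag; a NULL return is treated as failure. */
	driver = psb_mmu_driver_init(dev, 0, 0, NULL);
	if (!driver)
		return -ENOMEM;

	pd = psb_mmu_get_default_pd(driver);

	/* Assumed linear mapping: tile strides and type set to 0. */
	ret = psb_mmu_insert_pages(pd, pages, mmu_address, num_pages, 0, 0, 0);
	if (ret) {
		psb_mmu_driver_takedown(driver);
		return ret;
	}

	return 0;
}

With the declarations centralised in mmu.h, callers such as psb_drv.h no longer need their own forward declaration of struct psb_mmu_driver or a private copy of the prototypes; they simply include the header, as the hunks below show.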
@@ -33,6 +33,7 @@
#include "power.h"
#include "opregion.h"
#include "oaktrail.h"
#include "mmu.h"
/* Append new drm mode definition here, align with libdrm definition */
#define DRM_MODE_SCALE_NO_SCALE 2
@@ -713,8 +714,6 @@ struct psb_ops {
struct psb_mmu_driver;
extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
extern int drm_pick_crtcs(struct drm_device *dev);
@@ -723,48 +722,6 @@ static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
return (struct drm_psb_private *) dev->dev_private;
}
/*
* MMU stuff.
*/
extern struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
int trap_pagefaults,
int invalid_type,
atomic_t *msvdx_mmu_invaldc);
extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
*driver);
extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
uint32_t gtt_start, uint32_t gtt_pages);
extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
int trap_pagefaults,
int invalid_type);
extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
extern void psb_mmu_flush(struct psb_mmu_driver *driver);
extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
unsigned long address,
uint32_t num_pages);
extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
uint32_t start_pfn,
unsigned long address,
uint32_t num_pages, int type);
extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
unsigned long *pfn);
/*
* Enable / disable MMU for different requestors.
*/
extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
unsigned long address, uint32_t num_pages,
uint32_t desired_tile_stride,
uint32_t hw_tile_stride, int type);
extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
unsigned long address, uint32_t num_pages,
uint32_t desired_tile_stride,
uint32_t hw_tile_stride);
/*
*psb_irq.c
*/
...