Commit 55004938 authored by Dave Airlie

Merge branch 'gma500-next' of git://github.com/patjak/drm-gma500 into drm-next

Summary of what's included:

- SGX MMU support
- SGX IRQ handling (page faults and blitter fences; see the sketch after this list)
- Minor Cedarview and Poulsbo unification
- Work queue for ASLE interrupt work
- Various cleanups, style fixes and removal of dead code
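
For orientation, a condensed sketch (not the literal kernel code) of how the new pieces fit together in the GTT pin/unpin paths touched below. The helper names sketch_pin()/sketch_unpin() are illustrative only; everything they call comes from the gma500 changes in this merge:

static int sketch_pin(struct drm_psb_private *dev_priv, struct gtt_range *gt)
{
	u32 gpu_base = dev_priv->gtt.gatt_start;

	/* Mirror the GTT mapping into the SGX MMU page tables */
	return psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
				    gt->pages, gpu_base + gt->offset,
				    gt->npage, 0, 0, PSB_MMU_CACHED_MEMORY);
}

static int sketch_unpin(struct drm_psb_private *dev_priv, struct gtt_range *gt)
{
	u32 gpu_base = dev_priv->gtt.gatt_start;
	int ret;

	/* The blitter must be idle before the mapping may go away */
	ret = gma_blt_wait_idle(dev_priv);
	if (ret)
		return ret;

	psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
			     gpu_base + gt->offset, gt->npage, 0, 0);
	return 0;
}

The real code does this while holding dev_priv->gtt_mutex, which is what prevents new blits from being issued while the unmap is in progress.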

* 'gma500-next' of git://github.com/patjak/drm-gma500:
  drm/gma500: remove stub .open/postclose
  drm/gma500: Code cleanup - inline documentation
  drm/gma500: Code cleanup - style fixes
  drm/gma500: Code cleanup - removal of centralized exiting of function
  drm/gma500/cdv: Cedarview display cleanups
  drm/gma500: Unify encoder mode fixup
  drm/gma500: Unify _get_core_freq for cdv and psb
  drm/gma500: Move asle interrupt work into a work task
  drm/gma500: Remove dead code
  drm/gma500: Add backing type and base align to psb_gem_create()
  drm/gma500: Remove unused ioctls
  drm/gma500: Always trap MMU page faults
  drm/gma500: Hook up the MMU
  drm/gma500: Add first piece of blitter code
  drm/gma500: Give MMU code it's own header file
  drm/gma500: Add support for SGX interrupts
  drm/gma500: Make SGX MMU driver actually do something
parents e84c20af 75144097
@@ -13,9 +13,11 @@ gma500_gfx-y += \
intel_i2c.o \
intel_gmbus.o \
mmu.o \
blitter.o \
power.o \
psb_drv.o \
gma_display.o \
gma_device.o \
psb_intel_display.o \
psb_intel_lvds.o \
psb_intel_modes.o \
...
/*
* Copyright (c) 2014, Patrik Jakobsson
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* Authors: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
*/
#include "psb_drv.h"
#include "blitter.h"
#include "psb_reg.h"
/* Wait for the blitter to be completely idle */
int gma_blt_wait_idle(struct drm_psb_private *dev_priv)
{
unsigned long stop = jiffies + HZ;
int busy = 1;
/* NOP for Cedarview */
if (IS_CDV(dev_priv->dev))
return 0;
/* First do a quick check */
if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
return 0;
do {
busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
} while (busy && !time_after_eq(jiffies, stop));
if (busy)
return -EBUSY;
do {
busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
_PSB_C2B_STATUS_BUSY) != 0);
} while (busy && !time_after_eq(jiffies, stop));
/* If still busy, we probably have a hang */
return (busy) ? -EBUSY : 0;
}
/*
* Copyright (c) 2014, Patrik Jakobsson
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* Authors: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
*/
#ifndef __BLITTER_H
#define __BLITTER_H
extern int gma_blt_wait_idle(struct drm_psb_private *dev_priv);
#endif
@@ -26,6 +26,7 @@
#include "psb_intel_reg.h"
#include "intel_bios.h"
#include "cdv_device.h"
#include "gma_device.h"
#define VGA_SR_INDEX 0x3c4
#define VGA_SR_DATA 0x3c5
@@ -426,43 +427,6 @@ static int cdv_power_up(struct drm_device *dev)
return 0;
}
/* FIXME ? - shared with Poulsbo */
static void cdv_get_core_freq(struct drm_device *dev)
{
uint32_t clock;
struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
struct drm_psb_private *dev_priv = dev->dev_private;
pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
pci_read_config_dword(pci_root, 0xD4, &clock);
pci_dev_put(pci_root);
switch (clock & 0x07) {
case 0:
dev_priv->core_freq = 100;
break;
case 1:
dev_priv->core_freq = 133;
break;
case 2:
dev_priv->core_freq = 150;
break;
case 3:
dev_priv->core_freq = 178;
break;
case 4:
dev_priv->core_freq = 200;
break;
case 5:
case 6:
case 7:
dev_priv->core_freq = 266;
break;
default:
dev_priv->core_freq = 0;
}
}
static void cdv_hotplug_work_func(struct work_struct *work)
{
struct drm_psb_private *dev_priv = container_of(work, struct drm_psb_private,
@@ -618,7 +582,7 @@ static int cdv_chip_setup(struct drm_device *dev)
if (pci_enable_msi(dev->pdev))
dev_warn(dev->dev, "Enabling MSI failed!\n");
dev_priv->regmap = cdv_regmap;
-cdv_get_core_freq(dev);
+gma_get_core_freq(dev);
psb_intel_opregion_init(dev);
psb_intel_init_bios(dev);
cdv_hotplug_enable(dev, false);
...
@@ -81,13 +81,6 @@ static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
static bool cdv_intel_crt_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -224,7 +217,7 @@ static int cdv_intel_crt_set_property(struct drm_connector *connector,
static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
.dpms = cdv_intel_crt_dpms,
-.mode_fixup = cdv_intel_crt_mode_fixup,
+.mode_fixup = gma_encoder_mode_fixup,
.prepare = gma_encoder_prepare,
.commit = gma_encoder_commit,
.mode_set = cdv_intel_crt_mode_set,
...
@@ -412,8 +412,11 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
int refclk,
struct gma_clock_t *best_clock)
{
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct gma_clock_t clock;
if (refclk == 27000) {
switch (refclk) {
case 27000:
if (target < 200000) {
clock.p1 = 2;
clock.p2 = 10;
@@ -427,7 +430,9 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
clock.m1 = 0;
clock.m2 = 98;
}
-} else if (refclk == 100000) {
+break;
case 100000:
if (target < 200000) {
clock.p1 = 2;
clock.p2 = 10;
@@ -441,12 +446,13 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
clock.m1 = 0;
clock.m2 = 133;
}
-} else
+break;
default:
return false;
-clock.m = clock.m2 + 2;
+}
clock.p = clock.p1 * clock.p2;
-clock.vco = (refclk * clock.m) / clock.n;
+gma_crtc->clock_funcs->clock(refclk, &clock);
clock.dot = clock.vco / clock.p;
memcpy(best_clock, &clock, sizeof(struct gma_clock_t));
return true;
}
@@ -468,49 +474,6 @@ static bool cdv_intel_pipe_enabled(struct drm_device *dev, int pipe)
return true;
}
static bool cdv_intel_single_pipe_active (struct drm_device *dev)
{
uint32_t pipe_enabled = 0;
if (cdv_intel_pipe_enabled(dev, 0))
pipe_enabled |= FIFO_PIPEA;
if (cdv_intel_pipe_enabled(dev, 1))
pipe_enabled |= FIFO_PIPEB;
DRM_DEBUG_KMS("pipe enabled %x\n", pipe_enabled);
if (pipe_enabled == FIFO_PIPEA || pipe_enabled == FIFO_PIPEB)
return true;
else
return false;
}
static bool is_pipeb_lvds(struct drm_device *dev, struct drm_crtc *crtc)
{
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
if (gma_crtc->pipe != 1)
return false;
list_for_each_entry(connector, &mode_config->connector_list, head) {
struct gma_encoder *gma_encoder =
gma_attached_encoder(connector);
if (!connector->encoder
|| connector->encoder->crtc != crtc)
continue;
if (gma_encoder->type == INTEL_OUTPUT_LVDS)
return true;
}
return false;
}
void cdv_disable_sr(struct drm_device *dev)
{
if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) {
@@ -535,8 +498,10 @@ void cdv_disable_sr(struct drm_device *dev)
void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
-if (cdv_intel_single_pipe_active(dev)) {
+/* Is only one pipe enabled? */
if (cdv_intel_pipe_enabled(dev, 0) ^ cdv_intel_pipe_enabled(dev, 1)) {
u32 fw;
fw = REG_READ(DSPFW1);
@@ -557,7 +522,9 @@ void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
/* ignore FW4 */
-if (is_pipeb_lvds(dev, crtc)) {
+/* Is pipe b lvds ? */
if (gma_crtc->pipe == 1 &&
gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
REG_WRITE(DSPFW5, 0x00040330);
} else {
fw = (3 << DSP_PLANE_B_FIFO_WM1_SHIFT) |
...
@@ -89,13 +89,6 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
REG_READ(hdmi_priv->hdmi_reg);
}
static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -262,7 +255,7 @@ static void cdv_hdmi_destroy(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
.dpms = cdv_hdmi_dpms,
-.mode_fixup = cdv_hdmi_mode_fixup,
+.mode_fixup = gma_encoder_mode_fixup,
.prepare = gma_encoder_prepare,
.mode_set = cdv_hdmi_mode_set,
.commit = gma_encoder_commit,
...
@@ -319,7 +319,7 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
{
struct gtt_range *backing;
/* Begin by trying to use stolen memory backing */
-backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
+backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE);
if (backing) {
drm_gem_private_object_init(dev, &backing->gem, aligned_size);
return backing;
...
@@ -62,9 +62,6 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
int ret = 0;
struct drm_gem_object *obj;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
mutex_lock(&dev->struct_mutex);
/* GEM does all our handle to object mapping */
@@ -98,8 +95,8 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
* it so that userspace can speak about it. This does the core work
* for the various methods that do/will create GEM objects for things
*/
-static int psb_gem_create(struct drm_file *file,
+int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
-struct drm_device *dev, uint64_t size, uint32_t *handlep)
+u32 *handlep, int stolen, u32 align)
{
struct gtt_range *r;
int ret;
@@ -109,7 +106,7 @@ static int psb_gem_create(struct drm_file *file,
/* Allocate our object - for now a direct gtt range which is not
stolen memory backed */
-r = psb_gtt_alloc_range(dev, size, "gem", 0);
+r = psb_gtt_alloc_range(dev, size, "gem", 0, PAGE_SIZE);
if (r == NULL) {
dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
return -ENOSPC;
@@ -153,7 +150,8 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
{
args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
args->size = args->pitch * args->height;
-return psb_gem_create(file, dev, args->size, &args->handle);
+return psb_gem_create(file, dev, args->size, &args->handle, 0,
PAGE_SIZE);
}
/**
@@ -229,47 +227,3 @@ int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
}
static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
int size, u32 *handle)
{
struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
if (gtt == NULL)
return -ENOMEM;
drm_gem_private_object_init(dev, &gtt->gem, size);
if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
return 0;
drm_gem_object_release(&gtt->gem);
psb_gtt_free_range(dev, gtt);
return -ENOMEM;
}
/*
* GEM interfaces for our specific client
*/
int psb_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_psb_gem_create *args = data;
int ret;
if (args->flags & GMA_GEM_CREATE_STOLEN) {
ret = psb_gem_create_stolen(file, dev, args->size,
&args->handle);
if (ret == 0)
return 0;
/* Fall throguh */
args->flags &= ~GMA_GEM_CREATE_STOLEN;
}
return psb_gem_create(file, dev, args->size, &args->handle);
}
int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_psb_gem_mmap *args = data;
return dev->driver->dumb_map_offset(file, dev,
args->handle, &args->offset);
}
/**************************************************************************
* Copyright (c) 2014 Patrik Jakobsson
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
**************************************************************************/
#ifndef _GEM_H
#define _GEM_H
extern int psb_gem_create(struct drm_file *file, struct drm_device *dev,
u64 size, u32 *handlep, int stolen, u32 align);
#endif
/**************************************************************************
* Copyright (c) 2011, Intel Corporation.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
**************************************************************************/
#include <drm/drmP.h>
#include "psb_drv.h"
void gma_get_core_freq(struct drm_device *dev)
{
uint32_t clock;
struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
struct drm_psb_private *dev_priv = dev->dev_private;
/*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
/*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
pci_read_config_dword(pci_root, 0xD4, &clock);
pci_dev_put(pci_root);
switch (clock & 0x07) {
case 0:
dev_priv->core_freq = 100;
break;
case 1:
dev_priv->core_freq = 133;
break;
case 2:
dev_priv->core_freq = 150;
break;
case 3:
dev_priv->core_freq = 178;
break;
case 4:
dev_priv->core_freq = 200;
break;
case 5:
case 6:
case 7:
dev_priv->core_freq = 266;
break;
default:
dev_priv->core_freq = 0;
}
}
/**************************************************************************
* Copyright (c) 2011, Intel Corporation.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
**************************************************************************/
#ifndef _GMA_DEVICE_H
#define _GMA_DEVICE_H
extern void gma_get_core_freq(struct drm_device *dev);
#endif
@@ -485,6 +485,13 @@ int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
return 0;
}
bool gma_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
...
@@ -90,6 +90,9 @@ extern void gma_crtc_restore(struct drm_crtc *crtc);
extern void gma_encoder_prepare(struct drm_encoder *encoder);
extern void gma_encoder_commit(struct drm_encoder *encoder);
extern void gma_encoder_destroy(struct drm_encoder *encoder);
extern bool gma_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
/* Common clock related functions */
extern const struct gma_limit_t *gma_limit(struct drm_crtc *crtc, int refclk);
...
@@ -22,6 +22,7 @@
#include <drm/drmP.h>
#include <linux/shmem_fs.h>
#include "psb_drv.h"
#include "blitter.h"
/*
@@ -105,11 +106,13 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
/* Write our page entries into the GTT itself */
for (i = r->roll; i < r->npage; i++) {
-pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
PSB_MMU_CACHED_MEMORY);
iowrite32(pte, gtt_slot++);
}
for (i = 0; i < r->roll; i++) {
-pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
PSB_MMU_CACHED_MEMORY);
iowrite32(pte, gtt_slot++);
}
/* Make sure all the entries are set before we return */
@@ -127,7 +130,7 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
* page table entries with the dummy page. This is protected via the gtt
* mutex which the caller must hold.
*/
-static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
+void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
struct drm_psb_private *dev_priv = dev->dev_private;
u32 __iomem *gtt_slot;
@@ -137,7 +140,8 @@ static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
WARN_ON(r->stolen);
gtt_slot = psb_gtt_entry(dev, r);
-pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);
+pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page),
PSB_MMU_CACHED_MEMORY);
for (i = 0; i < r->npage; i++)
iowrite32(pte, gtt_slot++);
@@ -176,11 +180,13 @@ void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
gtt_slot = psb_gtt_entry(dev, r);
for (i = r->roll; i < r->npage; i++) {
-pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
PSB_MMU_CACHED_MEMORY);
iowrite32(pte, gtt_slot++);
}
for (i = 0; i < r->roll; i++) {
-pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
PSB_MMU_CACHED_MEMORY);
iowrite32(pte, gtt_slot++);
}
ioread32(gtt_slot - 1);
@@ -240,6 +246,7 @@ int psb_gtt_pin(struct gtt_range *gt)
int ret = 0;
struct drm_device *dev = gt->gem.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
u32 gpu_base = dev_priv->gtt.gatt_start;
mutex_lock(&dev_priv->gtt_mutex);
@@ -252,6 +259,9 @@ int psb_gtt_pin(struct gtt_range *gt)
psb_gtt_detach_pages(gt);
goto out;
}
psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
gt->pages, (gpu_base + gt->offset),
gt->npage, 0, 0, PSB_MMU_CACHED_MEMORY);
}
gt->in_gart++;
out:
@@ -274,16 +284,30 @@ void psb_gtt_unpin(struct gtt_range *gt)
{
struct drm_device *dev = gt->gem.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
u32 gpu_base = dev_priv->gtt.gatt_start;
int ret;
/* While holding the gtt_mutex no new blits can be initiated */
mutex_lock(&dev_priv->gtt_mutex);
/* Wait for any possible usage of the memory to be finished */
ret = gma_blt_wait_idle(dev_priv);
if (ret) {
DRM_ERROR("Failed to idle the blitter, unpin failed!");
goto out;
}
WARN_ON(!gt->in_gart);
gt->in_gart--;
if (gt->in_gart == 0 && gt->stolen == 0) {
psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
(gpu_base + gt->offset), gt->npage, 0, 0);
psb_gtt_remove(dev, gt);
psb_gtt_detach_pages(gt);
}
out:
mutex_unlock(&dev_priv->gtt_mutex);
}
@@ -306,7 +330,7 @@ void psb_gtt_unpin(struct gtt_range *gt)
* as in use.
*/
struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
-const char *name, int backed)
+const char *name, int backed, u32 align)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct gtt_range *gt;
@@ -334,7 +358,7 @@ struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
/* Ensure this is set for non GEM objects */
gt->gem.dev = dev;
ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
-len, start, end, PAGE_SIZE, NULL, NULL);
+len, start, end, align, NULL, NULL);
if (ret == 0) {
gt->offset = gt->resource.start - r->start;
return gt;
@@ -497,6 +521,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
if (!resume)
dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base,
stolen_size);
if (!dev_priv->vram_addr) {
dev_err(dev->dev, "Failure to map stolen base.\n");
ret = -ENOMEM;
@@ -512,7 +537,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
num_pages, pfn_base << PAGE_SHIFT, 0);
for (i = 0; i < num_pages; ++i) {
-pte = psb_gtt_mask_pte(pfn_base + i, 0);
+pte = psb_gtt_mask_pte(pfn_base + i, PSB_MMU_CACHED_MEMORY);
iowrite32(pte, dev_priv->gtt_map + i);
}
@@ -521,7 +546,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
*/
pfn_base = page_to_pfn(dev_priv->scratch_page);
-pte = psb_gtt_mask_pte(pfn_base, 0);
+pte = psb_gtt_mask_pte(pfn_base, PSB_MMU_CACHED_MEMORY);
for (; i < gtt_pages; ++i)
iowrite32(pte, dev_priv->gtt_map + i);
...
@@ -53,7 +53,8 @@ struct gtt_range {
};
extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
-const char *name, int backed);
+const char *name, int backed,
u32 align);
extern void psb_gtt_kref_put(struct gtt_range *gt);
extern void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt);
extern int psb_gtt_pin(struct gtt_range *gt);
...
@@ -18,6 +18,7 @@
#include <drm/drmP.h>
#include "psb_drv.h"
#include "psb_reg.h"
#include "mmu.h"
/*
* Code for the SGX MMU:
@@ -47,51 +48,6 @@
* but on average it should be fast.
*/
struct psb_mmu_driver {
/* protects driver- and pd structures. Always take in read mode
* before taking the page table spinlock.
*/
struct rw_semaphore sem;
/* protects page tables, directory tables and pt tables.
* and pt structures.
*/
spinlock_t lock;
atomic_t needs_tlbflush;
uint8_t __iomem *register_map;
struct psb_mmu_pd *default_pd;
/*uint32_t bif_ctrl;*/
int has_clflush;
int clflush_add;
unsigned long clflush_mask;
struct drm_psb_private *dev_priv;
};
struct psb_mmu_pd;
struct psb_mmu_pt {
struct psb_mmu_pd *pd;
uint32_t index;
uint32_t count;
struct page *p;
uint32_t *v;
};
struct psb_mmu_pd {
struct psb_mmu_driver *driver;
int hw_context;
struct psb_mmu_pt **tables;
struct page *p;
struct page *dummy_pt;
struct page *dummy_page;
uint32_t pd_mask;
uint32_t invalid_pde;
uint32_t invalid_pte;
};
static inline uint32_t psb_mmu_pt_index(uint32_t offset)
{
return (offset >> PSB_PTE_SHIFT) & 0x3FF;
@@ -102,13 +58,13 @@ static inline uint32_t psb_mmu_pd_index(uint32_t offset)
return offset >> PSB_PDE_SHIFT;
}
#if defined(CONFIG_X86)
static inline void psb_clflush(void *addr)
{
__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
}
-static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
void *addr)
{
if (!driver->has_clflush)
return;
@@ -117,62 +73,77 @@ static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
psb_clflush(addr);
mb();
}
#else
-static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page)
+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
-{
+{;
uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT;
uint32_t clflush_count = PAGE_SIZE / clflush_add;
int i;
uint8_t *clf;
clf = kmap_atomic(page);
mb();
for (i = 0; i < clflush_count; ++i) {
psb_clflush(clf);
clf += clflush_add;
}
mb();
kunmap_atomic(clf);
}
-static void psb_pages_clflush(struct psb_mmu_driver *driver,
+#endif
struct page *page[], unsigned long num_pages)
static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
{
-int i;
+struct drm_device *dev = driver->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
-if (!driver->has_clflush)
+if (atomic_read(&driver->needs_tlbflush) || force) {
-return ;
+uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
-for (i = 0; i < num_pages; i++)
+/* Make sure data cache is turned off before enabling it */
-psb_page_clflush(driver, *page++);
+wmb();
-}
+PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
(void)PSB_RSGX32(PSB_CR_BIF_CTRL);
-static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
+if (driver->msvdx_mmu_invaldc)
-int force)
+atomic_set(driver->msvdx_mmu_invaldc, 1);
-{
+}
atomic_set(&driver->needs_tlbflush, 0);
}
#if 0
static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
{
down_write(&driver->sem);
psb_mmu_flush_pd_locked(driver, force);
up_write(&driver->sem);
}
#endif
-void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot)
+void psb_mmu_flush(struct psb_mmu_driver *driver)
{
-if (rc_prot)
+struct drm_device *dev = driver->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
uint32_t val;
down_write(&driver->sem);
-if (rc_prot)
+val = PSB_RSGX32(PSB_CR_BIF_CTRL);
if (atomic_read(&driver->needs_tlbflush))
PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
else
PSB_WSGX32(val | _PSB_CB_CTRL_FLUSH, PSB_CR_BIF_CTRL);
/* Make sure data cache is turned off and MMU is flushed before
restoring bank interface control register */
wmb();
PSB_WSGX32(val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
PSB_CR_BIF_CTRL);
(void)PSB_RSGX32(PSB_CR_BIF_CTRL);
atomic_set(&driver->needs_tlbflush, 0);
if (driver->msvdx_mmu_invaldc)
atomic_set(driver->msvdx_mmu_invaldc, 1);
up_write(&driver->sem);
}
void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
-/*ttm_tt_cache_flush(&pd->p, 1);*/
+struct drm_device *dev = pd->driver->dev;
-psb_pages_clflush(pd->driver, &pd->p, 1);
+struct drm_psb_private *dev_priv = dev->dev_private;
uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
down_write(&pd->driver->sem);
PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
wmb();
psb_mmu_flush_pd_locked(pd->driver, 1);
pd->hw_context = hw_context;
@@ -183,7 +154,6 @@ void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
static inline unsigned long psb_pd_addr_end(unsigned long addr,
unsigned long end)
{
addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
return (addr < end) ? addr : end;
}
@@ -223,11 +193,9 @@ struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
goto out_err3;
if (!trap_pagefaults) {
-pd->invalid_pde =
+pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
invalid_type);
-pd->invalid_pte =
+pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
invalid_type);
} else {
pd->invalid_pde = 0;
@@ -279,12 +247,16 @@ static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
struct psb_mmu_driver *driver = pd->driver;
struct drm_device *dev = driver->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_mmu_pt *pt;
int i;
down_write(&driver->sem);
-if (pd->hw_context != -1)
+if (pd->hw_context != -1) {
PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
psb_mmu_flush_pd_locked(driver, 1);
}
/* Should take the spinlock here, but we don't need to do that
since we have the semaphore in write mode. */
@@ -331,7 +303,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
*ptes++ = pd->invalid_pte;
#if defined(CONFIG_X86)
if (pd->driver->has_clflush && pd->hw_context != -1) {
mb();
for (i = 0; i < clflush_count; ++i) {
@@ -340,7 +312,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
}
mb();
}
#endif
kunmap_atomic(v);
spin_unlock(lock);
@@ -351,7 +323,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
return pt;
}
-static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
+struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
unsigned long addr)
{
uint32_t index = psb_mmu_pd_index(addr);
@@ -383,7 +355,7 @@ static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
kunmap_atomic((void *) v);
if (pd->hw_context != -1) {
-psb_mmu_clflush(pd->driver, (void *) &v[index]);
+psb_mmu_clflush(pd->driver, (void *)&v[index]);
atomic_set(&pd->driver->needs_tlbflush, 1);
}
}
@@ -420,8 +392,7 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
pd->tables[pt->index] = NULL;
if (pd->hw_context != -1) {
-psb_mmu_clflush(pd->driver,
+psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
(void *) &v[pt->index]);
atomic_set(&pd->driver->needs_tlbflush, 1);
}
kunmap_atomic(pt->v);
@@ -432,8 +403,8 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
spin_unlock(&pd->driver->lock);
}
-static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
+static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
-unsigned long addr, uint32_t pte)
+uint32_t pte)
{
pt->v[psb_mmu_pt_index(addr)] = pte;
}
@@ -444,69 +415,50 @@ static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}
struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
uint32_t mmu_offset, uint32_t gtt_start,
uint32_t gtt_pages)
{
-uint32_t *v;
+struct psb_mmu_pd *pd;
uint32_t start = psb_mmu_pd_index(mmu_offset);
struct psb_mmu_driver *driver = pd->driver;
int num_pages = gtt_pages;
down_read(&driver->sem);
-spin_lock(&driver->lock);
+pd = driver->default_pd;
up_read(&driver->sem);
v = kmap_atomic(pd->p);
v += start;
while (gtt_pages--) {
*v++ = gtt_start | pd->pd_mask;
gtt_start += PAGE_SIZE;
}
/*ttm_tt_cache_flush(&pd->p, num_pages);*/
psb_pages_clflush(pd->driver, &pd->p, num_pages);
kunmap_atomic(v);
spin_unlock(&driver->lock);
if (pd->hw_context != -1)
atomic_set(&pd->driver->needs_tlbflush, 1);
-up_read(&pd->driver->sem);
+return pd;
psb_mmu_flush_pd(pd->driver, 0);
}
-struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
+/* Returns the physical address of the PD shared by sgx/msvdx */
uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
{
struct psb_mmu_pd *pd;
-/* down_read(&driver->sem); */
+pd = psb_mmu_get_default_pd(driver);
-pd = driver->default_pd;
+return page_to_pfn(pd->p) << PAGE_SHIFT;
/* up_read(&driver->sem); */
return pd;
}
void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
struct drm_device *dev = driver->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
psb_mmu_free_pagedir(driver->default_pd);
kfree(driver);
}
-struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
+struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
int trap_pagefaults,
int invalid_type,
-struct drm_psb_private *dev_priv)
+atomic_t *msvdx_mmu_invaldc)
{
struct psb_mmu_driver *driver;
struct drm_psb_private *dev_priv = dev->dev_private;
driver = kmalloc(sizeof(*driver), GFP_KERNEL);
if (!driver)
return NULL;
driver->dev_priv = dev_priv;
driver->dev = dev;
driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
invalid_type);
if (!driver->default_pd)
@@ -515,17 +467,24 @@ struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
spin_lock_init(&driver->lock);
init_rwsem(&driver->sem);
down_write(&driver->sem);
driver->register_map = registers;
atomic_set(&driver->needs_tlbflush, 1);
driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;
driver->bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
PSB_WSGX32(driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
PSB_CR_BIF_CTRL);
PSB_WSGX32(driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
PSB_CR_BIF_CTRL);
driver->has_clflush = 0;
#if defined(CONFIG_X86)
if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
uint32_t tfms, misc, cap0, cap4, clflush_size;
/*
-* clflush size is determined at kernel setup for x86_64
+* clflush size is determined at kernel setup for x86_64 but not
-* but not for i386. We have to do it here.
+* for i386. We have to do it here.
*/
cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
@@ -536,6 +495,7 @@ struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
driver->clflush_mask = driver->clflush_add - 1;
driver->clflush_mask = ~driver->clflush_mask;
}
#endif
up_write(&driver->sem);
return driver;
@@ -545,9 +505,9 @@ struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
return NULL;
}
-static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
+#if defined(CONFIG_X86)
-unsigned long address, uint32_t num_pages,
+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
-uint32_t desired_tile_stride,
+uint32_t num_pages, uint32_t desired_tile_stride,
uint32_t hw_tile_stride)
{
struct psb_mmu_pt *pt;
@@ -561,11 +521,8 @@ static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
unsigned long clflush_add = pd->driver->clflush_add;
unsigned long clflush_mask = pd->driver->clflush_mask;
-if (!pd->driver->has_clflush) {
+if (!pd->driver->has_clflush)
/*ttm_tt_cache_flush(&pd->p, num_pages);*/
psb_pages_clflush(pd->driver, &pd->p, num_pages);
return;
}
if (hw_tile_stride)
rows = num_pages / desired_tile_stride;
@@ -586,10 +543,8 @@ static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
if (!pt)
continue;
do {
-psb_clflush(&pt->v
+psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
-[psb_mmu_pt_index(addr)]);
+} while (addr += clflush_add,
} while (addr +=
clflush_add,
(addr & clflush_mask) < next);
psb_mmu_pt_unmap_unlock(pt);
@@ -598,6 +553,14 @@ static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
}
mb();
}
#else
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
uint32_t num_pages, uint32_t desired_tile_stride,
uint32_t hw_tile_stride)
{
drm_ttm_cache_flush();
}
#endif
void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
unsigned long address, uint32_t num_pages)
@@ -633,7 +596,7 @@ void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
up_read(&pd->driver->sem);
if (pd->hw_context != -1)
-psb_mmu_flush(pd->driver, 0);
+psb_mmu_flush(pd->driver);
return;
}
@@ -660,7 +623,7 @@ void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
add = desired_tile_stride << PAGE_SHIFT;
row_add = hw_tile_stride << PAGE_SHIFT;
-/* down_read(&pd->driver->sem); */
+down_read(&pd->driver->sem);
/* Make sure we only need to flush this processor's cache */
@@ -688,10 +651,10 @@ void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
psb_mmu_flush_ptes(pd, f_address, num_pages,
desired_tile_stride, hw_tile_stride);
-/* up_read(&pd->driver->sem); */
+up_read(&pd->driver->sem);
if (pd->hw_context != -1)
-psb_mmu_flush(pd->driver, 0);
+psb_mmu_flush(pd->driver);
}
int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
@@ -704,7 +667,7 @@ int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
unsigned long end;
unsigned long next;
unsigned long f_address = address;
-int ret = 0;
+int ret = -ENOMEM;
down_read(&pd->driver->sem);
@@ -726,6 +689,7 @@ int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
psb_mmu_pt_unmap_unlock(pt);
} while (addr = next, next != end);
ret = 0;
out:
if (pd->hw_context != -1)
@@ -734,15 +698,15 @@ int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
up_read(&pd->driver->sem);
if (pd->hw_context != -1)
-psb_mmu_flush(pd->driver, 1);
+psb_mmu_flush(pd->driver);
-return ret;
+return 0;
}
int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
unsigned long address, uint32_t num_pages,
-uint32_t desired_tile_stride,
+uint32_t desired_tile_stride, uint32_t hw_tile_stride,
-uint32_t hw_tile_stride, int type)
+int type)
{
struct psb_mmu_pt *pt;
uint32_t rows = 1;
@@ -754,7 +718,7 @@ int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
unsigned long add;
unsigned long row_add;
unsigned long f_address = address;
-int ret = 0;
+int ret = -ENOMEM;
if (hw_tile_stride) {
if (num_pages % desired_tile_stride != 0)
@@ -777,13 +741,10 @@ int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
do {
next = psb_pd_addr_end(addr, end);
pt = psb_mmu_pt_alloc_map_lock(pd, addr);
-if (!pt) {
+if (!pt)
ret = -ENOMEM;
goto out;
}
do {
-pte =
+pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
psb_mmu_mask_pte(page_to_pfn(*pages++),
type);
psb_mmu_set_pte(pt, addr, pte);
pt->count++;
@@ -794,6 +755,8 @@ int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
address += row_add;
}
ret = 0;
out:
if (pd->hw_context != -1)
psb_mmu_flush_ptes(pd, f_address, num_pages,
@@ -802,7 +765,7 @@ int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
up_read(&pd->driver->sem);
if (pd->hw_context != -1)
-psb_mmu_flush(pd->driver, 1);
+psb_mmu_flush(pd->driver);
return ret;
}
...
/**************************************************************************
* Copyright (c) 2007-2011, Intel Corporation.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
**************************************************************************/
#ifndef __MMU_H
#define __MMU_H
struct psb_mmu_driver {
/* protects driver- and pd structures. Always take in read mode
* before taking the page table spinlock.
*/
struct rw_semaphore sem;
/* protects page tables, directory tables and pt tables.
* and pt structures.
*/
spinlock_t lock;
atomic_t needs_tlbflush;
atomic_t *msvdx_mmu_invaldc;
struct psb_mmu_pd *default_pd;
uint32_t bif_ctrl;
int has_clflush;
int clflush_add;
unsigned long clflush_mask;
struct drm_device *dev;
};
struct psb_mmu_pd;
struct psb_mmu_pt {
struct psb_mmu_pd *pd;
uint32_t index;
uint32_t count;
struct page *p;
uint32_t *v;
};
struct psb_mmu_pd {
struct psb_mmu_driver *driver;
int hw_context;
struct psb_mmu_pt **tables;
struct page *p;
struct page *dummy_pt;
struct page *dummy_page;
uint32_t pd_mask;
uint32_t invalid_pde;
uint32_t invalid_pte;
};
extern struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
int trap_pagefaults,
int invalid_type,
atomic_t *msvdx_mmu_invaldc);
extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
*driver);
extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
int trap_pagefaults,
int invalid_type);
extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
extern void psb_mmu_flush(struct psb_mmu_driver *driver);
extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
unsigned long address,
uint32_t num_pages);
extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
uint32_t start_pfn,
unsigned long address,
uint32_t num_pages, int type);
extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
unsigned long *pfn);
extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
unsigned long address, uint32_t num_pages,
uint32_t desired_tile_stride,
uint32_t hw_tile_stride, int type);
extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
unsigned long address, uint32_t num_pages,
uint32_t desired_tile_stride,
uint32_t hw_tile_stride);
#endif
@@ -523,13 +523,6 @@ static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
static bool oaktrail_hdmi_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
static enum drm_connector_status
oaktrail_hdmi_detect(struct drm_connector *connector, bool force)
{
@@ -608,7 +601,7 @@ static void oaktrail_hdmi_destroy(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = {
.dpms = oaktrail_hdmi_dpms,
-.mode_fixup = oaktrail_hdmi_mode_fixup,
+.mode_fixup = gma_encoder_mode_fixup,
.prepare = gma_encoder_prepare,
.mode_set = oaktrail_hdmi_mode_set,
.commit = gma_encoder_commit,
...
@@ -173,10 +173,13 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
return 0;
}
-void psb_intel_opregion_asle_intr(struct drm_device *dev)
+static void psb_intel_opregion_asle_work(struct work_struct *work)
{
-struct drm_psb_private *dev_priv = dev->dev_private;
+struct psb_intel_opregion *opregion =
-struct opregion_asle *asle = dev_priv->opregion.asle;
+container_of(work, struct psb_intel_opregion, asle_work);
struct drm_psb_private *dev_priv =
container_of(opregion, struct drm_psb_private, opregion);
struct opregion_asle *asle = opregion->asle;
u32 asle_stat = 0;
u32 asle_req;
@@ -190,9 +193,18 @@ void psb_intel_opregion_asle_intr(struct drm_device *dev)
}
if (asle_req & ASLE_SET_BACKLIGHT)
-asle_stat |= asle_set_backlight(dev, asle->bclp);
+asle_stat |= asle_set_backlight(dev_priv->dev, asle->bclp);
asle->aslc = asle_stat;
}
void psb_intel_opregion_asle_intr(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
if (dev_priv->opregion.asle)
schedule_work(&dev_priv->opregion.asle_work);
} }
#define ASLE_ALS_EN (1<<0) #define ASLE_ALS_EN (1<<0)
...@@ -282,6 +294,8 @@ void psb_intel_opregion_fini(struct drm_device *dev) ...@@ -282,6 +294,8 @@ void psb_intel_opregion_fini(struct drm_device *dev)
unregister_acpi_notifier(&psb_intel_opregion_notifier); unregister_acpi_notifier(&psb_intel_opregion_notifier);
} }
cancel_work_sync(&opregion->asle_work);
/* just clear all opregion memory pointers now */ /* just clear all opregion memory pointers now */
iounmap(opregion->header); iounmap(opregion->header);
opregion->header = NULL; opregion->header = NULL;
...@@ -304,6 +318,9 @@ int psb_intel_opregion_setup(struct drm_device *dev) ...@@ -304,6 +318,9 @@ int psb_intel_opregion_setup(struct drm_device *dev)
DRM_DEBUG_DRIVER("ACPI Opregion not supported\n"); DRM_DEBUG_DRIVER("ACPI Opregion not supported\n");
return -ENOTSUPP; return -ENOTSUPP;
} }
INIT_WORK(&opregion->asle_work, psb_intel_opregion_asle_work);
DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy); DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy);
base = acpi_os_ioremap(opregion_phy, 8*1024); base = acpi_os_ioremap(opregion_phy, 8*1024);
if (!base) if (!base)
......
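
The opregion hunk above moves ASLE handling out of the interrupt path: the IRQ handler now only schedules a work item and the heavy lifting runs in process context. Condensed from the diff (the worker name here is illustrative):

/* Deferred ASLE work pattern as wired up above:
 *
 *   setup:     INIT_WORK(&opregion->asle_work, psb_intel_opregion_asle_work);
 *   IRQ path:  schedule_work(&opregion->asle_work);     (atomic-safe, cheap)
 *   teardown:  cancel_work_sync(&opregion->asle_work);  (before iounmap)
 *
 * The worker only receives the work_struct pointer, so it recovers the
 * private data by walking container_of() twice, exactly as in the diff:
 */
static void example_asle_work(struct work_struct *work)
{
	struct psb_intel_opregion *opregion =
		container_of(work, struct psb_intel_opregion, asle_work);
	struct drm_psb_private *dev_priv =
		container_of(opregion, struct drm_psb_private, opregion);

	/* ... process opregion->asle requests via dev_priv->dev ... */
}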
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include "psb_intel_reg.h" #include "psb_intel_reg.h"
#include "intel_bios.h" #include "intel_bios.h"
#include "psb_device.h" #include "psb_device.h"
#include "gma_device.h"
static int psb_output_init(struct drm_device *dev) static int psb_output_init(struct drm_device *dev)
{ {
...@@ -257,45 +258,6 @@ static int psb_power_up(struct drm_device *dev) ...@@ -257,45 +258,6 @@ static int psb_power_up(struct drm_device *dev)
return 0; return 0;
} }
static void psb_get_core_freq(struct drm_device *dev)
{
uint32_t clock;
struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
struct drm_psb_private *dev_priv = dev->dev_private;
/*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
/*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
pci_read_config_dword(pci_root, 0xD4, &clock);
pci_dev_put(pci_root);
switch (clock & 0x07) {
case 0:
dev_priv->core_freq = 100;
break;
case 1:
dev_priv->core_freq = 133;
break;
case 2:
dev_priv->core_freq = 150;
break;
case 3:
dev_priv->core_freq = 178;
break;
case 4:
dev_priv->core_freq = 200;
break;
case 5:
case 6:
case 7:
dev_priv->core_freq = 266;
break;
default:
dev_priv->core_freq = 0;
}
}
/* Poulsbo */ /* Poulsbo */
static const struct psb_offset psb_regmap[2] = { static const struct psb_offset psb_regmap[2] = {
{ {
...@@ -352,7 +314,7 @@ static int psb_chip_setup(struct drm_device *dev) ...@@ -352,7 +314,7 @@ static int psb_chip_setup(struct drm_device *dev)
{ {
struct drm_psb_private *dev_priv = dev->dev_private; struct drm_psb_private *dev_priv = dev->dev_private;
dev_priv->regmap = psb_regmap; dev_priv->regmap = psb_regmap;
psb_get_core_freq(dev); gma_get_core_freq(dev);
gma_intel_setup_gmbus(dev); gma_intel_setup_gmbus(dev);
psb_intel_opregion_init(dev); psb_intel_opregion_init(dev);
psb_intel_init_bios(dev); psb_intel_init_bios(dev);
......
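
psb_get_core_freq() disappears here because Cedarview needs the same decode; per the "Unify _get_core_freq for cdv and psb" patch it moves into the new gma_device.c as gma_get_core_freq(). Assuming the shared helper keeps the removed logic unchanged, it reads the clock fuse over the PCI message bus and decodes bits 2:0 into MHz:

/* Sketch of the shared helper (assumed to match the code removed above). */
void gma_get_core_freq(struct drm_device *dev)
{
	uint32_t clock;
	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
	struct drm_psb_private *dev_priv = dev->dev_private;

	/* Request the SKU/clock word through the message bus (D0/D4 pair) */
	pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
	pci_read_config_dword(pci_root, 0xD4, &clock);
	pci_dev_put(pci_root);

	switch (clock & 0x07) {
	case 0:	dev_priv->core_freq = 100; break;
	case 1:	dev_priv->core_freq = 133; break;
	case 2:	dev_priv->core_freq = 150; break;
	case 3:	dev_priv->core_freq = 178; break;
	case 4:	dev_priv->core_freq = 200; break;
	case 5:
	case 6:
	case 7:	dev_priv->core_freq = 266; break;
	default:
		dev_priv->core_freq = 0;
	}
}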
...@@ -21,7 +21,6 @@ ...@@ -21,7 +21,6 @@
#include <drm/drmP.h> #include <drm/drmP.h>
#include <drm/drm.h> #include <drm/drm.h>
#include <drm/gma_drm.h>
#include "psb_drv.h" #include "psb_drv.h"
#include "framebuffer.h" #include "framebuffer.h"
#include "psb_reg.h" #include "psb_reg.h"
...@@ -37,56 +36,65 @@ ...@@ -37,56 +36,65 @@
#include <acpi/video.h> #include <acpi/video.h>
#include <linux/module.h> #include <linux/module.h>
static int drm_psb_trap_pagefaults; static struct drm_driver driver;
static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
/*
* The table below contains a mapping of the PCI vendor ID and the PCI Device ID
* to the different groups of PowerVR 5-series chip designs
*
* 0x8086 = Intel Corporation
*
* PowerVR SGX535 - Poulsbo - Intel GMA 500, Intel Atom Z5xx
* PowerVR SGX535 - Moorestown - Intel GMA 600
* PowerVR SGX535 - Oaktrail - Intel GMA 600, Intel Atom Z6xx, E6xx
* PowerVR SGX540 - Medfield - Intel Atom Z2460
* PowerVR SGX544MP2 - Medfield -
* PowerVR SGX545 - Cedartrail - Intel GMA 3600, Intel Atom D2500, N2600
* PowerVR SGX545 - Cedartrail - Intel GMA 3650, Intel Atom D2550, D2700,
* N2800
*/
static DEFINE_PCI_DEVICE_TABLE(pciidlist) = { static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
{ 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops }, { 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
{ 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops }, { 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
#if defined(CONFIG_DRM_GMA600) #if defined(CONFIG_DRM_GMA600)
{ 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, { 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
{ 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, { 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
{ 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, { 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
{ 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, { 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
{ 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, { 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
{ 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, { 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
{ 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, { 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
{ 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, { 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
/* Atom E620 */ { 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
{ 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
#endif #endif
#if defined(CONFIG_DRM_MEDFIELD) #if defined(CONFIG_DRM_MEDFIELD)
{0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, { 0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
{0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, { 0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
{0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, { 0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
{0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, { 0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
{0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, { 0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
{0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, { 0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
{0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, { 0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
{0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, { 0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
#endif #endif
#if defined(CONFIG_DRM_GMA3600) #if defined(CONFIG_DRM_GMA3600)
{ 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, { 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, { 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, { 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0be3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, { 0x8086, 0x0be3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0be4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, { 0x8086, 0x0be4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, { 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, { 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, { 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0be8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, { 0x8086, 0x0be8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0be9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, { 0x8086, 0x0be9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0bea, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, { 0x8086, 0x0bea, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0beb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, { 0x8086, 0x0beb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0bec, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, { 0x8086, 0x0bec, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0bed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, { 0x8086, 0x0bed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0bee, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, { 0x8086, 0x0bee, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0bef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, { 0x8086, 0x0bef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
#endif #endif
{ 0, } { 0, }
}; };
...@@ -95,59 +103,10 @@ MODULE_DEVICE_TABLE(pci, pciidlist); ...@@ -95,59 +103,10 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
/* /*
* Standard IOCTLs. * Standard IOCTLs.
*/ */
#define DRM_IOCTL_GMA_ADB \
DRM_IOWR(DRM_GMA_ADB + DRM_COMMAND_BASE, uint32_t)
#define DRM_IOCTL_GMA_MODE_OPERATION \
DRM_IOWR(DRM_GMA_MODE_OPERATION + DRM_COMMAND_BASE, \
struct drm_psb_mode_operation_arg)
#define DRM_IOCTL_GMA_STOLEN_MEMORY \
DRM_IOWR(DRM_GMA_STOLEN_MEMORY + DRM_COMMAND_BASE, \
struct drm_psb_stolen_memory_arg)
#define DRM_IOCTL_GMA_GAMMA \
DRM_IOWR(DRM_GMA_GAMMA + DRM_COMMAND_BASE, \
struct drm_psb_dpst_lut_arg)
#define DRM_IOCTL_GMA_DPST_BL \
DRM_IOWR(DRM_GMA_DPST_BL + DRM_COMMAND_BASE, \
uint32_t)
#define DRM_IOCTL_GMA_GET_PIPE_FROM_CRTC_ID \
DRM_IOWR(DRM_GMA_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \
struct drm_psb_get_pipe_from_crtc_id_arg)
#define DRM_IOCTL_GMA_GEM_CREATE \
DRM_IOWR(DRM_GMA_GEM_CREATE + DRM_COMMAND_BASE, \
struct drm_psb_gem_create)
#define DRM_IOCTL_GMA_GEM_MMAP \
DRM_IOWR(DRM_GMA_GEM_MMAP + DRM_COMMAND_BASE, \
struct drm_psb_gem_mmap)
static int psb_adb_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
static int psb_gamma_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
static const struct drm_ioctl_desc psb_ioctls[] = { static const struct drm_ioctl_desc psb_ioctls[] = {
DRM_IOCTL_DEF_DRV(GMA_ADB, psb_adb_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(GMA_MODE_OPERATION, psb_mode_operation_ioctl,
DRM_AUTH),
DRM_IOCTL_DEF_DRV(GMA_STOLEN_MEMORY, psb_stolen_memory_ioctl,
DRM_AUTH),
DRM_IOCTL_DEF_DRV(GMA_GAMMA, psb_gamma_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(GMA_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(GMA_GET_PIPE_FROM_CRTC_ID,
psb_intel_get_pipe_from_crtc_id, 0),
DRM_IOCTL_DEF_DRV(GMA_GEM_CREATE, psb_gem_create_ioctl,
DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(GMA_GEM_MMAP, psb_gem_mmap_ioctl,
DRM_UNLOCKED | DRM_AUTH),
}; };
static void psb_lastclose(struct drm_device *dev) static void psb_driver_lastclose(struct drm_device *dev)
{ {
int ret; int ret;
struct drm_psb_private *dev_priv = dev->dev_private; struct drm_psb_private *dev_priv = dev->dev_private;
...@@ -169,19 +128,14 @@ static int psb_do_init(struct drm_device *dev) ...@@ -169,19 +128,14 @@ static int psb_do_init(struct drm_device *dev)
uint32_t stolen_gtt; uint32_t stolen_gtt;
int ret = -ENOMEM;
if (pg->mmu_gatt_start & 0x0FFFFFFF) { if (pg->mmu_gatt_start & 0x0FFFFFFF) {
dev_err(dev->dev, "Gatt must be 256M aligned. This is a bug.\n"); dev_err(dev->dev, "Gatt must be 256M aligned. This is a bug.\n");
ret = -EINVAL; return -EINVAL;
goto out_err;
} }
stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4; stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT; stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
stolen_gtt = stolen_gtt = (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
(stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
dev_priv->gatt_free_offset = pg->mmu_gatt_start + dev_priv->gatt_free_offset = pg->mmu_gatt_start +
(stolen_gtt << PAGE_SHIFT) * 1024; (stolen_gtt << PAGE_SHIFT) * 1024;
...@@ -192,23 +146,26 @@ static int psb_do_init(struct drm_device *dev) ...@@ -192,23 +146,26 @@ static int psb_do_init(struct drm_device *dev)
PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0); PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1); PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
PSB_RSGX32(PSB_CR_BIF_BANK1); PSB_RSGX32(PSB_CR_BIF_BANK1);
PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_MMU_ER_MASK,
/* Do not bypass any MMU access, let them pagefault instead */
PSB_WSGX32((PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_MMU_ER_MASK),
PSB_CR_BIF_CTRL); PSB_CR_BIF_CTRL);
PSB_RSGX32(PSB_CR_BIF_CTRL);
psb_spank(dev_priv); psb_spank(dev_priv);
/* mmu_gatt ?? */ /* mmu_gatt ?? */
PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE); PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE); /* Post */
return 0; return 0;
out_err:
return ret;
} }
static int psb_driver_unload(struct drm_device *dev) static int psb_driver_unload(struct drm_device *dev)
{ {
struct drm_psb_private *dev_priv = dev->dev_private; struct drm_psb_private *dev_priv = dev->dev_private;
/* Kill vblank etc here */ /* TODO: Kill vblank etc here */
if (dev_priv) { if (dev_priv) {
if (dev_priv->backlight_device) if (dev_priv->backlight_device)
...@@ -268,8 +225,7 @@ static int psb_driver_unload(struct drm_device *dev) ...@@ -268,8 +225,7 @@ static int psb_driver_unload(struct drm_device *dev)
return 0; return 0;
} }
static int psb_driver_load(struct drm_device *dev, unsigned long flags)
static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
{ {
struct drm_psb_private *dev_priv; struct drm_psb_private *dev_priv;
unsigned long resource_start, resource_len; unsigned long resource_start, resource_len;
...@@ -277,15 +233,19 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset) ...@@ -277,15 +233,19 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
int ret = -ENOMEM; int ret = -ENOMEM;
struct drm_connector *connector; struct drm_connector *connector;
struct gma_encoder *gma_encoder; struct gma_encoder *gma_encoder;
struct psb_gtt *pg;
/* allocating and initializing driver private data */
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (dev_priv == NULL) if (dev_priv == NULL)
return -ENOMEM; return -ENOMEM;
dev_priv->ops = (struct psb_ops *)chipset; dev_priv->ops = (struct psb_ops *)flags;
dev_priv->dev = dev; dev_priv->dev = dev;
dev->dev_private = (void *) dev_priv; dev->dev_private = (void *) dev_priv;
pg = &dev_priv->gtt;
pci_set_master(dev->pdev); pci_set_master(dev->pdev);
dev_priv->num_pipe = dev_priv->ops->pipes; dev_priv->num_pipe = dev_priv->ops->pipes;
...@@ -347,9 +307,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset) ...@@ -347,9 +307,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
if (ret) if (ret)
goto out_err; goto out_err;
dev_priv->mmu = psb_mmu_driver_init((void *)0, dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0);
drm_psb_trap_pagefaults, 0,
dev_priv);
if (!dev_priv->mmu) if (!dev_priv->mmu)
goto out_err; goto out_err;
...@@ -357,18 +315,27 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset) ...@@ -357,18 +315,27 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
if (!dev_priv->pf_pd) if (!dev_priv->pf_pd)
goto out_err; goto out_err;
psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
ret = psb_do_init(dev); ret = psb_do_init(dev);
if (ret) if (ret)
return ret; return ret;
/* Add stolen memory to SGX MMU */
down_read(&pg->sem);
ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu),
dev_priv->stolen_base >> PAGE_SHIFT,
pg->gatt_start,
pg->stolen_size >> PAGE_SHIFT, 0);
up_read(&pg->sem);
psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE); PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE); PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
acpi_video_register(); acpi_video_register();
/* Setup vertical blanking handling */
ret = drm_vblank_init(dev, dev_priv->num_pipe); ret = drm_vblank_init(dev, dev_priv->num_pipe);
if (ret) if (ret)
goto out_err; goto out_err;
...@@ -390,9 +357,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset) ...@@ -390,9 +357,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
drm_irq_install(dev); drm_irq_install(dev);
dev->vblank_disable_allowed = true; dev->vblank_disable_allowed = true;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
dev->driver->get_vblank_counter = psb_get_vblank_counter; dev->driver->get_vblank_counter = psb_get_vblank_counter;
psb_modeset_init(dev); psb_modeset_init(dev);
...@@ -416,11 +381,11 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset) ...@@ -416,11 +381,11 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
return ret; return ret;
psb_intel_opregion_enable_asle(dev); psb_intel_opregion_enable_asle(dev);
#if 0 #if 0
/*enable runtime pm at last*/ /* Enable runtime pm at last */
pm_runtime_enable(&dev->pdev->dev); pm_runtime_enable(&dev->pdev->dev);
pm_runtime_set_active(&dev->pdev->dev); pm_runtime_set_active(&dev->pdev->dev);
#endif #endif
/*Intel drm driver load is done, continue doing pvr load*/ /* Intel drm driver load is done, continue doing pvr load */
return 0; return 0;
out_err: out_err:
psb_driver_unload(dev); psb_driver_unload(dev);
...@@ -442,161 +407,6 @@ static inline void get_brightness(struct backlight_device *bd) ...@@ -442,161 +407,6 @@ static inline void get_brightness(struct backlight_device *bd)
#endif #endif
} }
static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_psb_private *dev_priv = psb_priv(dev);
uint32_t *arg = data;
dev_priv->blc_adj2 = *arg;
get_brightness(dev_priv->backlight_device);
return 0;
}
static int psb_adb_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_psb_private *dev_priv = psb_priv(dev);
uint32_t *arg = data;
dev_priv->blc_adj1 = *arg;
get_brightness(dev_priv->backlight_device);
return 0;
}
static int psb_gamma_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_psb_dpst_lut_arg *lut_arg = data;
struct drm_mode_object *obj;
struct drm_crtc *crtc;
struct drm_connector *connector;
struct gma_crtc *gma_crtc;
int i = 0;
int32_t obj_id;
obj_id = lut_arg->output_id;
obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
dev_dbg(dev->dev, "Invalid Connector object.\n");
return -ENOENT;
}
connector = obj_to_connector(obj);
crtc = connector->encoder->crtc;
gma_crtc = to_gma_crtc(crtc);
for (i = 0; i < 256; i++)
gma_crtc->lut_adj[i] = lut_arg->lut[i];
gma_crtc_load_lut(crtc);
return 0;
}
static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
uint32_t obj_id;
uint16_t op;
struct drm_mode_modeinfo *umode;
struct drm_display_mode *mode = NULL;
struct drm_psb_mode_operation_arg *arg;
struct drm_mode_object *obj;
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
int ret = 0;
int resp = MODE_OK;
arg = (struct drm_psb_mode_operation_arg *)data;
obj_id = arg->obj_id;
op = arg->operation;
switch (op) {
case PSB_MODE_OPERATION_MODE_VALID:
umode = &arg->mode;
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, obj_id,
DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
ret = -ENOENT;
goto mode_op_out;
}
connector = obj_to_connector(obj);
mode = drm_mode_create(dev);
if (!mode) {
ret = -ENOMEM;
goto mode_op_out;
}
/* drm_crtc_convert_umode(mode, umode); */
{
mode->clock = umode->clock;
mode->hdisplay = umode->hdisplay;
mode->hsync_start = umode->hsync_start;
mode->hsync_end = umode->hsync_end;
mode->htotal = umode->htotal;
mode->hskew = umode->hskew;
mode->vdisplay = umode->vdisplay;
mode->vsync_start = umode->vsync_start;
mode->vsync_end = umode->vsync_end;
mode->vtotal = umode->vtotal;
mode->vscan = umode->vscan;
mode->vrefresh = umode->vrefresh;
mode->flags = umode->flags;
mode->type = umode->type;
strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN);
mode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
}
connector_funcs = (struct drm_connector_helper_funcs *)
connector->helper_private;
if (connector_funcs->mode_valid) {
resp = connector_funcs->mode_valid(connector, mode);
arg->data = resp;
}
/*do some clean up work*/
if (mode)
drm_mode_destroy(dev, mode);
mode_op_out:
drm_modeset_unlock_all(dev);
return ret;
default:
dev_dbg(dev->dev, "Unsupported psb mode operation\n");
return -EOPNOTSUPP;
}
return 0;
}
static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_psb_private *dev_priv = psb_priv(dev);
struct drm_psb_stolen_memory_arg *arg = data;
arg->base = dev_priv->stolen_base;
arg->size = dev_priv->vram_stolen_size;
return 0;
}
static int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
{
return 0;
}
static void psb_driver_close(struct drm_device *dev, struct drm_file *priv)
{
}
static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd, static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg) unsigned long arg)
{ {
...@@ -614,15 +424,21 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd, ...@@ -614,15 +424,21 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
/* FIXME: do we need to wrap the other side of this */ /* FIXME: do we need to wrap the other side of this */
} }
/*
/* When a client dies: * When a client dies:
* - Check for and clean up flipped page state * - Check for and clean up flipped page state
*/ */
static void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv) static void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
{ {
} }
static void psb_remove(struct pci_dev *pdev) static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
return drm_get_pci_dev(pdev, ent, &driver);
}
static void psb_pci_remove(struct pci_dev *pdev)
{ {
struct drm_device *dev = pci_get_drvdata(pdev); struct drm_device *dev = pci_get_drvdata(pdev);
drm_put_dev(dev); drm_put_dev(dev);
...@@ -657,11 +473,12 @@ static const struct file_operations psb_gem_fops = { ...@@ -657,11 +473,12 @@ static const struct file_operations psb_gem_fops = {
static struct drm_driver driver = { static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
DRIVER_MODESET | DRIVER_GEM , DRIVER_MODESET | DRIVER_GEM,
.load = psb_driver_load, .load = psb_driver_load,
.unload = psb_driver_unload, .unload = psb_driver_unload,
.lastclose = psb_driver_lastclose,
.preclose = psb_driver_preclose,
.ioctls = psb_ioctls,
.num_ioctls = DRM_ARRAY_SIZE(psb_ioctls), .num_ioctls = DRM_ARRAY_SIZE(psb_ioctls),
.device_is_agp = psb_driver_device_is_agp, .device_is_agp = psb_driver_device_is_agp,
.irq_preinstall = psb_irq_preinstall, .irq_preinstall = psb_irq_preinstall,
...@@ -671,40 +488,31 @@ static struct drm_driver driver = { ...@@ -671,40 +488,31 @@ static struct drm_driver driver = {
.enable_vblank = psb_enable_vblank, .enable_vblank = psb_enable_vblank,
.disable_vblank = psb_disable_vblank, .disable_vblank = psb_disable_vblank,
.get_vblank_counter = psb_get_vblank_counter, .get_vblank_counter = psb_get_vblank_counter,
.lastclose = psb_lastclose,
.open = psb_driver_open,
.preclose = psb_driver_preclose,
.postclose = psb_driver_close,
.gem_free_object = psb_gem_free_object, .gem_free_object = psb_gem_free_object,
.gem_vm_ops = &psb_gem_vm_ops, .gem_vm_ops = &psb_gem_vm_ops,
.dumb_create = psb_gem_dumb_create, .dumb_create = psb_gem_dumb_create,
.dumb_map_offset = psb_gem_dumb_map_gtt, .dumb_map_offset = psb_gem_dumb_map_gtt,
.dumb_destroy = drm_gem_dumb_destroy, .dumb_destroy = drm_gem_dumb_destroy,
.ioctls = psb_ioctls,
.fops = &psb_gem_fops, .fops = &psb_gem_fops,
.name = DRIVER_NAME, .name = DRIVER_NAME,
.desc = DRIVER_DESC, .desc = DRIVER_DESC,
.date = PSB_DRM_DRIVER_DATE, .date = DRIVER_DATE,
.major = PSB_DRM_DRIVER_MAJOR, .major = DRIVER_MAJOR,
.minor = PSB_DRM_DRIVER_MINOR, .minor = DRIVER_MINOR,
.patchlevel = PSB_DRM_DRIVER_PATCHLEVEL .patchlevel = DRIVER_PATCHLEVEL
}; };
static struct pci_driver psb_pci_driver = { static struct pci_driver psb_pci_driver = {
.name = DRIVER_NAME, .name = DRIVER_NAME,
.id_table = pciidlist, .id_table = pciidlist,
.probe = psb_probe, .probe = psb_pci_probe,
.remove = psb_remove, .remove = psb_pci_remove,
.driver = { .driver.pm = &psb_pm_ops,
.pm = &psb_pm_ops,
}
}; };
static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
return drm_get_pci_dev(pdev, ent, &driver);
}
static int __init psb_init(void) static int __init psb_init(void)
{ {
return drm_pci_init(&driver, &psb_pci_driver); return drm_pci_init(&driver, &psb_pci_driver);
...@@ -718,6 +526,6 @@ static void __exit psb_exit(void) ...@@ -718,6 +526,6 @@ static void __exit psb_exit(void)
late_initcall(psb_init); late_initcall(psb_init);
module_exit(psb_exit); module_exit(psb_exit);
MODULE_AUTHOR("Alan Cox <alan@linux.intel.com> and others"); MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC); MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL"); MODULE_LICENSE(DRIVER_LICENSE);
...@@ -33,6 +33,18 @@ ...@@ -33,6 +33,18 @@
#include "power.h" #include "power.h"
#include "opregion.h" #include "opregion.h"
#include "oaktrail.h" #include "oaktrail.h"
#include "mmu.h"
#define DRIVER_AUTHOR "Alan Cox <alan@linux.intel.com> and others"
#define DRIVER_LICENSE "GPL"
#define DRIVER_NAME "gma500"
#define DRIVER_DESC "DRM driver for the Intel GMA500, GMA600, GMA3600, GMA3650"
#define DRIVER_DATE "20140314"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
/* Append new drm mode definition here, align with libdrm definition */ /* Append new drm mode definition here, align with libdrm definition */
#define DRM_MODE_SCALE_NO_SCALE 2 #define DRM_MODE_SCALE_NO_SCALE 2
...@@ -49,21 +61,7 @@ enum { ...@@ -49,21 +61,7 @@ enum {
#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130) #define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130)
#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0) #define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0)
/* /* Hardware offsets */
* Driver definitions
*/
#define DRIVER_NAME "gma500"
#define DRIVER_DESC "DRM driver for the Intel GMA500"
#define PSB_DRM_DRIVER_DATE "2011-06-06"
#define PSB_DRM_DRIVER_MAJOR 1
#define PSB_DRM_DRIVER_MINOR 0
#define PSB_DRM_DRIVER_PATCHLEVEL 0
/*
* Hardware offsets
*/
#define PSB_VDC_OFFSET 0x00000000 #define PSB_VDC_OFFSET 0x00000000
#define PSB_VDC_SIZE 0x000080000 #define PSB_VDC_SIZE 0x000080000
#define MRST_MMIO_SIZE 0x0000C0000 #define MRST_MMIO_SIZE 0x0000C0000
...@@ -71,16 +69,14 @@ enum { ...@@ -71,16 +69,14 @@ enum {
#define PSB_SGX_SIZE 0x8000 #define PSB_SGX_SIZE 0x8000
#define PSB_SGX_OFFSET 0x00040000 #define PSB_SGX_OFFSET 0x00040000
#define MRST_SGX_OFFSET 0x00080000 #define MRST_SGX_OFFSET 0x00080000
/*
* PCI resource identifiers /* PCI resource identifiers */
*/
#define PSB_MMIO_RESOURCE 0 #define PSB_MMIO_RESOURCE 0
#define PSB_AUX_RESOURCE 0 #define PSB_AUX_RESOURCE 0
#define PSB_GATT_RESOURCE 2 #define PSB_GATT_RESOURCE 2
#define PSB_GTT_RESOURCE 3 #define PSB_GTT_RESOURCE 3
/*
* PCI configuration /* PCI configuration */
*/
#define PSB_GMCH_CTRL 0x52 #define PSB_GMCH_CTRL 0x52
#define PSB_BSM 0x5C #define PSB_BSM 0x5C
#define _PSB_GMCH_ENABLED 0x4 #define _PSB_GMCH_ENABLED 0x4
...@@ -88,37 +84,29 @@ enum { ...@@ -88,37 +84,29 @@ enum {
#define _PSB_PGETBL_ENABLED 0x00000001 #define _PSB_PGETBL_ENABLED 0x00000001
#define PSB_SGX_2D_SLAVE_PORT 0x4000 #define PSB_SGX_2D_SLAVE_PORT 0x4000
/* To get rid of */ /* TODO: To get rid of */
#define PSB_TT_PRIV0_LIMIT (256*1024*1024) #define PSB_TT_PRIV0_LIMIT (256*1024*1024)
#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT) #define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
/* /* SGX side MMU definitions (these can probably go) */
* SGX side MMU definitions (these can probably go)
*/
/* /* Flags for external memory type field */
* Flags for external memory type field.
*/
#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */ #define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */ #define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */ #define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
/*
* PTE's and PDE's /* PTE's and PDE's */
*/
#define PSB_PDE_MASK 0x003FFFFF #define PSB_PDE_MASK 0x003FFFFF
#define PSB_PDE_SHIFT 22 #define PSB_PDE_SHIFT 22
#define PSB_PTE_SHIFT 12 #define PSB_PTE_SHIFT 12
/*
* Cache control /* Cache control */
*/
#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */ #define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
#define PSB_PTE_WO 0x0002 /* Write only */ #define PSB_PTE_WO 0x0002 /* Write only */
#define PSB_PTE_RO 0x0004 /* Read only */ #define PSB_PTE_RO 0x0004 /* Read only */
#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */ #define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
/* /* VDC registers and bits */
* VDC registers and bits
*/
#define PSB_MSVDX_CLOCKGATING 0x2064 #define PSB_MSVDX_CLOCKGATING 0x2064
#define PSB_TOPAZ_CLOCKGATING 0x2068 #define PSB_TOPAZ_CLOCKGATING 0x2068
#define PSB_HWSTAM 0x2098 #define PSB_HWSTAM 0x2098
...@@ -265,6 +253,7 @@ struct psb_intel_opregion { ...@@ -265,6 +253,7 @@ struct psb_intel_opregion {
struct opregion_asle *asle; struct opregion_asle *asle;
void *vbt; void *vbt;
u32 __iomem *lid_state; u32 __iomem *lid_state;
struct work_struct asle_work;
}; };
struct sdvo_device_mapping { struct sdvo_device_mapping {
...@@ -283,10 +272,7 @@ struct intel_gmbus { ...@@ -283,10 +272,7 @@ struct intel_gmbus {
u32 reg0; u32 reg0;
}; };
/* /* Register offset maps */
* Register offset maps
*/
struct psb_offset { struct psb_offset {
u32 fp0; u32 fp0;
u32 fp1; u32 fp1;
...@@ -320,9 +306,7 @@ struct psb_offset { ...@@ -320,9 +306,7 @@ struct psb_offset {
* update the register cache instead. * update the register cache instead.
*/ */
/* /* Common status for pipes */
* Common status for pipes.
*/
struct psb_pipe { struct psb_pipe {
u32 fp0; u32 fp0;
u32 fp1; u32 fp1;
...@@ -482,35 +466,24 @@ struct drm_psb_private { ...@@ -482,35 +466,24 @@ struct drm_psb_private {
struct psb_mmu_driver *mmu; struct psb_mmu_driver *mmu;
struct psb_mmu_pd *pf_pd; struct psb_mmu_pd *pf_pd;
/* /* Register base */
* Register base
*/
uint8_t __iomem *sgx_reg; uint8_t __iomem *sgx_reg;
uint8_t __iomem *vdc_reg; uint8_t __iomem *vdc_reg;
uint8_t __iomem *aux_reg; /* Auxillary vdc pipe regs */ uint8_t __iomem *aux_reg; /* Auxillary vdc pipe regs */
uint32_t gatt_free_offset; uint32_t gatt_free_offset;
/* /* Fencing / irq */
* Fencing / irq.
*/
uint32_t vdc_irq_mask; uint32_t vdc_irq_mask;
uint32_t pipestat[PSB_NUM_PIPE]; uint32_t pipestat[PSB_NUM_PIPE];
spinlock_t irqmask_lock; spinlock_t irqmask_lock;
/* /* Power */
* Power
*/
bool suspended; bool suspended;
bool display_power; bool display_power;
int display_count; int display_count;
/* /* Modesetting */
* Modesetting
*/
struct psb_intel_mode_device mode_dev; struct psb_intel_mode_device mode_dev;
bool modeset; /* true if we have done the mode_device setup */ bool modeset; /* true if we have done the mode_device setup */
...@@ -518,15 +491,10 @@ struct drm_psb_private { ...@@ -518,15 +491,10 @@ struct drm_psb_private {
struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE]; struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
uint32_t num_pipe; uint32_t num_pipe;
/* /* OSPM info (Power management base) (TODO: can go ?) */
* OSPM info (Power management base) (can go ?)
*/
uint32_t ospm_base; uint32_t ospm_base;
/* /* Sizes info */
* Sizes info
*/
u32 fuse_reg_value; u32 fuse_reg_value;
u32 video_device_fuse; u32 video_device_fuse;
...@@ -546,9 +514,7 @@ struct drm_psb_private { ...@@ -546,9 +514,7 @@ struct drm_psb_private {
struct drm_property *broadcast_rgb_property; struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property; struct drm_property *force_audio_property;
/* /* LVDS info */
* LVDS info
*/
int backlight_duty_cycle; /* restore backlight to this value */ int backlight_duty_cycle; /* restore backlight to this value */
bool panel_wants_dither; bool panel_wants_dither;
struct drm_display_mode *panel_fixed_mode; struct drm_display_mode *panel_fixed_mode;
...@@ -582,34 +548,23 @@ struct drm_psb_private { ...@@ -582,34 +548,23 @@ struct drm_psb_private {
/* Oaktrail HDMI state */ /* Oaktrail HDMI state */
struct oaktrail_hdmi_dev *hdmi_priv; struct oaktrail_hdmi_dev *hdmi_priv;
/* /* Register state */
* Register state
*/
struct psb_save_area regs; struct psb_save_area regs;
/* MSI reg save */ /* MSI reg save */
uint32_t msi_addr; uint32_t msi_addr;
uint32_t msi_data; uint32_t msi_data;
/* /* Hotplug handling */
* Hotplug handling
*/
struct work_struct hotplug_work; struct work_struct hotplug_work;
/* /* LID-Switch */
* LID-Switch
*/
spinlock_t lid_lock; spinlock_t lid_lock;
struct timer_list lid_timer; struct timer_list lid_timer;
struct psb_intel_opregion opregion; struct psb_intel_opregion opregion;
u32 lid_last_state; u32 lid_last_state;
/* /* Watchdog */
* Watchdog
*/
uint32_t apm_reg; uint32_t apm_reg;
uint16_t apm_base; uint16_t apm_base;
...@@ -629,9 +584,7 @@ struct drm_psb_private { ...@@ -629,9 +584,7 @@ struct drm_psb_private {
/* 2D acceleration */ /* 2D acceleration */
spinlock_t lock_2d; spinlock_t lock_2d;
/* /* Panel brightness */
* Panel brightness
*/
int brightness; int brightness;
int brightness_adjusted; int brightness_adjusted;
...@@ -664,10 +617,7 @@ struct drm_psb_private { ...@@ -664,10 +617,7 @@ struct drm_psb_private {
}; };
/* /* Operations for each board type */
* Operations for each board type
*/
struct psb_ops { struct psb_ops {
const char *name; const char *name;
unsigned int accel_2d:1; unsigned int accel_2d:1;
...@@ -713,8 +663,6 @@ struct psb_ops { ...@@ -713,8 +663,6 @@ struct psb_ops {
struct psb_mmu_driver;
extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int); extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
extern int drm_pick_crtcs(struct drm_device *dev); extern int drm_pick_crtcs(struct drm_device *dev);
...@@ -723,52 +671,7 @@ static inline struct drm_psb_private *psb_priv(struct drm_device *dev) ...@@ -723,52 +671,7 @@ static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
return (struct drm_psb_private *) dev->dev_private; return (struct drm_psb_private *) dev->dev_private;
} }
/* /* psb_irq.c */
* MMU stuff.
*/
extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
int trap_pagefaults,
int invalid_type,
struct drm_psb_private *dev_priv);
extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
*driver);
extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
uint32_t gtt_start, uint32_t gtt_pages);
extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
int trap_pagefaults,
int invalid_type);
extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
extern void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot);
extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
unsigned long address,
uint32_t num_pages);
extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
uint32_t start_pfn,
unsigned long address,
uint32_t num_pages, int type);
extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
unsigned long *pfn);
/*
* Enable / disable MMU for different requestors.
*/
extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
unsigned long address, uint32_t num_pages,
uint32_t desired_tile_stride,
uint32_t hw_tile_stride, int type);
extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
unsigned long address, uint32_t num_pages,
uint32_t desired_tile_stride,
uint32_t hw_tile_stride);
/*
*psb_irq.c
*/
extern irqreturn_t psb_irq_handler(int irq, void *arg); extern irqreturn_t psb_irq_handler(int irq, void *arg);
extern int psb_irq_enable_dpst(struct drm_device *dev); extern int psb_irq_enable_dpst(struct drm_device *dev);
extern int psb_irq_disable_dpst(struct drm_device *dev); extern int psb_irq_disable_dpst(struct drm_device *dev);
...@@ -791,24 +694,17 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask); ...@@ -791,24 +694,17 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc); extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
/* /* framebuffer.c */
* framebuffer.c
*/
extern int psbfb_probed(struct drm_device *dev); extern int psbfb_probed(struct drm_device *dev);
extern int psbfb_remove(struct drm_device *dev, extern int psbfb_remove(struct drm_device *dev,
struct drm_framebuffer *fb); struct drm_framebuffer *fb);
/* /* accel_2d.c */
* accel_2d.c
*/
extern void psbfb_copyarea(struct fb_info *info, extern void psbfb_copyarea(struct fb_info *info,
const struct fb_copyarea *region); const struct fb_copyarea *region);
extern int psbfb_sync(struct fb_info *info); extern int psbfb_sync(struct fb_info *info);
extern void psb_spank(struct drm_psb_private *dev_priv); extern void psb_spank(struct drm_psb_private *dev_priv);
/* /* psb_reset.c */
* psb_reset.c
*/
extern void psb_lid_timer_init(struct drm_psb_private *dev_priv); extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv); extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
extern void psb_print_pagefault(struct drm_psb_private *dev_priv); extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
...@@ -867,9 +763,7 @@ extern const struct psb_ops mdfld_chip_ops; ...@@ -867,9 +763,7 @@ extern const struct psb_ops mdfld_chip_ops;
/* cdv_device.c */ /* cdv_device.c */
extern const struct psb_ops cdv_chip_ops; extern const struct psb_ops cdv_chip_ops;
/* /* Debug print bits setting */
* Debug print bits setting
*/
#define PSB_D_GENERAL (1 << 0) #define PSB_D_GENERAL (1 << 0)
#define PSB_D_INIT (1 << 1) #define PSB_D_INIT (1 << 1)
#define PSB_D_IRQ (1 << 2) #define PSB_D_IRQ (1 << 2)
...@@ -885,10 +779,7 @@ extern const struct psb_ops cdv_chip_ops; ...@@ -885,10 +779,7 @@ extern const struct psb_ops cdv_chip_ops;
extern int drm_idle_check_interval; extern int drm_idle_check_interval;
/* /* Utilities */
* Utilities
*/
static inline u32 MRST_MSG_READ32(uint port, uint offset) static inline u32 MRST_MSG_READ32(uint port, uint offset)
{ {
int mcr = (0xD0<<24) | (port << 16) | (offset << 8); int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
......
...@@ -469,7 +469,8 @@ static void psb_intel_cursor_init(struct drm_device *dev, ...@@ -469,7 +469,8 @@ static void psb_intel_cursor_init(struct drm_device *dev,
/* Allocate 4 pages of stolen mem for a hardware cursor. That /* Allocate 4 pages of stolen mem for a hardware cursor. That
* is enough for the 64 x 64 ARGB cursors we support. * is enough for the 64 x 64 ARGB cursors we support.
*/ */
cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1); cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1,
PAGE_SIZE);
if (!cursor_gt) { if (!cursor_gt) {
gma_crtc->cursor_gt = NULL; gma_crtc->cursor_gt = NULL;
goto out; goto out;
...@@ -554,33 +555,6 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe, ...@@ -554,33 +555,6 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
gma_crtc->active = true; gma_crtc->active = true;
} }
int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
struct drm_mode_object *drmmode_obj;
struct gma_crtc *crtc;
if (!dev_priv) {
dev_err(dev->dev, "called with no initialization\n");
return -EINVAL;
}
drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!drmmode_obj) {
dev_err(dev->dev, "no such CRTC id\n");
return -ENOENT;
}
crtc = to_gma_crtc(obj_to_crtc(drmmode_obj));
pipe_from_crtc_id->pipe = crtc->pipe;
return 0;
}
struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
{ {
struct drm_crtc *crtc = NULL; struct drm_crtc *crtc = NULL;
......
...@@ -238,8 +238,6 @@ static inline struct gma_encoder *gma_attached_encoder( ...@@ -238,8 +238,6 @@ static inline struct gma_encoder *gma_attached_encoder(
extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc); struct drm_crtc *crtc);
extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
int pipe); int pipe);
extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
......
...@@ -200,11 +200,64 @@ static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat) ...@@ -200,11 +200,64 @@ static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
mid_pipe_event_handler(dev, 1); mid_pipe_event_handler(dev, 1);
} }
/*
* SGX interrupt handler
*/
static void psb_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
{
struct drm_psb_private *dev_priv = dev->dev_private;
u32 val, addr;
int error = false;
if (stat_1 & _PSB_CE_TWOD_COMPLETE)
val = PSB_RSGX32(PSB_CR_2D_BLIT_STATUS);
if (stat_2 & _PSB_CE2_BIF_REQUESTER_FAULT) {
val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
if (val) {
if (val & _PSB_CBI_STAT_PF_N_RW)
DRM_ERROR("SGX MMU page fault:");
else
DRM_ERROR("SGX MMU read / write protection fault:");
if (val & _PSB_CBI_STAT_FAULT_CACHE)
DRM_ERROR("\tCache requestor");
if (val & _PSB_CBI_STAT_FAULT_TA)
DRM_ERROR("\tTA requestor");
if (val & _PSB_CBI_STAT_FAULT_VDM)
DRM_ERROR("\tVDM requestor");
if (val & _PSB_CBI_STAT_FAULT_2D)
DRM_ERROR("\t2D requestor");
if (val & _PSB_CBI_STAT_FAULT_PBE)
DRM_ERROR("\tPBE requestor");
if (val & _PSB_CBI_STAT_FAULT_TSP)
DRM_ERROR("\tTSP requestor");
if (val & _PSB_CBI_STAT_FAULT_ISP)
DRM_ERROR("\tISP requestor");
if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
DRM_ERROR("\tUSSEPDS requestor");
if (val & _PSB_CBI_STAT_FAULT_HOST)
DRM_ERROR("\tHost requestor");
DRM_ERROR("\tMMU failing address is 0x%08x.\n",
(unsigned int)addr);
error = true;
}
}
/* Clear bits */
PSB_WSGX32(stat_1, PSB_CR_EVENT_HOST_CLEAR);
PSB_WSGX32(stat_2, PSB_CR_EVENT_HOST_CLEAR2);
PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR2);
}
irqreturn_t psb_irq_handler(int irq, void *arg) irqreturn_t psb_irq_handler(int irq, void *arg)
{ {
struct drm_device *dev = arg; struct drm_device *dev = arg;
struct drm_psb_private *dev_priv = dev->dev_private; struct drm_psb_private *dev_priv = dev->dev_private;
uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0; uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0;
u32 sgx_stat_1, sgx_stat_2;
int handled = 0; int handled = 0;
spin_lock(&dev_priv->irqmask_lock); spin_lock(&dev_priv->irqmask_lock);
...@@ -233,14 +286,9 @@ irqreturn_t psb_irq_handler(int irq, void *arg) ...@@ -233,14 +286,9 @@ irqreturn_t psb_irq_handler(int irq, void *arg)
} }
if (sgx_int) { if (sgx_int) {
/* Not expected - we have it masked, shut it up */ sgx_stat_1 = PSB_RSGX32(PSB_CR_EVENT_STATUS);
u32 s, s2; sgx_stat_2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
s = PSB_RSGX32(PSB_CR_EVENT_STATUS); psb_sgx_interrupt(dev, sgx_stat_1, sgx_stat_2);
s2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
PSB_WSGX32(s, PSB_CR_EVENT_HOST_CLEAR);
PSB_WSGX32(s2, PSB_CR_EVENT_HOST_CLEAR2);
/* if s & _PSB_CE_TWOD_COMPLETE we have 2D done but
we may as well poll even if we add that ! */
handled = 1; handled = 1;
} }
...@@ -269,8 +317,13 @@ void psb_irq_preinstall(struct drm_device *dev) ...@@ -269,8 +317,13 @@ void psb_irq_preinstall(struct drm_device *dev)
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
if (gma_power_is_on(dev)) if (gma_power_is_on(dev)) {
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
}
if (dev->vblank[0].enabled) if (dev->vblank[0].enabled)
dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG; dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
if (dev->vblank[1].enabled) if (dev->vblank[1].enabled)
...@@ -286,7 +339,7 @@ void psb_irq_preinstall(struct drm_device *dev) ...@@ -286,7 +339,7 @@ void psb_irq_preinstall(struct drm_device *dev)
/* Revisit this area - want per device masks ? */ /* Revisit this area - want per device masks ? */
if (dev_priv->ops->hotplug) if (dev_priv->ops->hotplug)
dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC; dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE; dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE | _PSB_IRQ_SGX_FLAG;
/* This register is safe even if display island is off */ /* This register is safe even if display island is off */
PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R); PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
...@@ -295,12 +348,16 @@ void psb_irq_preinstall(struct drm_device *dev) ...@@ -295,12 +348,16 @@ void psb_irq_preinstall(struct drm_device *dev)
int psb_irq_postinstall(struct drm_device *dev) int psb_irq_postinstall(struct drm_device *dev)
{ {
struct drm_psb_private *dev_priv = struct drm_psb_private *dev_priv = dev->dev_private;
(struct drm_psb_private *) dev->dev_private;
unsigned long irqflags; unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
/* Enable 2D and MMU fault interrupts */
PSB_WSGX32(_PSB_CE2_BIF_REQUESTER_FAULT, PSB_CR_EVENT_HOST_ENABLE2);
PSB_WSGX32(_PSB_CE_TWOD_COMPLETE, PSB_CR_EVENT_HOST_ENABLE);
PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); /* Post */
/* This register is safe even if display island is off */ /* This register is safe even if display island is off */
PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
......
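
With _PSB_IRQ_SGX_FLAG unmasked and the 2D-complete / BIF-fault events enabled above, the blitter helper added earlier in the series can be used to quiesce the 2D engine before the driver reworks mappings it may still be reading through. A hypothetical caller (the wrapper name is illustrative, not part of the patch set):

/* Hypothetical wrapper around the new helper in blitter.c; note that
 * gma_blt_wait_idle() is a NOP on Cedarview.
 */
static int example_quiesce_blitter(struct drm_psb_private *dev_priv)
{
	int ret = gma_blt_wait_idle(dev_priv);

	if (ret)
		dev_err(dev_priv->dev->dev,
			"2D blitter failed to go idle (%d)\n", ret);
	return ret;
}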
...@@ -19,73 +19,7 @@ ...@@ -19,73 +19,7 @@
* *
**************************************************************************/ **************************************************************************/
#ifndef _PSB_DRM_H_ #ifndef _GMA_DRM_H_
#define _PSB_DRM_H_ #define _GMA_DRM_H_
/*
* Manage the LUT for an output
*/
struct drm_psb_dpst_lut_arg {
uint8_t lut[256];
int output_id;
};
/*
* Validate modes
*/
struct drm_psb_mode_operation_arg {
u32 obj_id;
u16 operation;
struct drm_mode_modeinfo mode;
u64 data;
};
/*
* Query the stolen memory for smarter management of
* memory by the server
*/
struct drm_psb_stolen_memory_arg {
u32 base;
u32 size;
};
struct drm_psb_get_pipe_from_crtc_id_arg {
/** ID of CRTC being requested **/
u32 crtc_id;
/** pipe of requested CRTC **/
u32 pipe;
};
struct drm_psb_gem_create {
__u64 size;
__u32 handle;
__u32 flags;
#define GMA_GEM_CREATE_STOLEN 1 /* Stolen memory can be used */
};
struct drm_psb_gem_mmap {
__u32 handle;
__u32 pad;
/**
* Fake offset to use for subsequent mmap call
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 offset;
};
/* Controlling the kernel modesetting buffers */
#define DRM_GMA_GEM_CREATE 0x00 /* Create a GEM object */
#define DRM_GMA_GEM_MMAP 0x01 /* Map GEM memory */
#define DRM_GMA_STOLEN_MEMORY 0x02 /* Report stolen memory */
#define DRM_GMA_2D_OP 0x03 /* Will be merged later */
#define DRM_GMA_GAMMA 0x04 /* Set gamma table */
#define DRM_GMA_ADB 0x05 /* Get backlight */
#define DRM_GMA_DPST_BL 0x06 /* Set backlight */
#define DRM_GMA_MODE_OPERATION 0x07 /* Mode validation/DC set */
#define PSB_MODE_OPERATION_MODE_VALID 0x01
#define DRM_GMA_GET_PIPE_FROM_CRTC_ID 0x08 /* CRTC to physical pipe# */
#endif #endif
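
With the driver-private GEM and mode ioctls removed, gma_drm.h shrinks to an empty header and userspace allocates scanout memory through the generic dumb-buffer interface the driver already wires up (.dumb_create / .dumb_map_offset in psb_drv.c above). A minimal userspace sketch using standard libdrm calls; 'fd' is assumed to be an open gma500 DRM node and the program is built against libdrm (pkg-config libdrm):

#include <stdint.h>
#include <xf86drm.h>

/* Allocate a 64x64, 32bpp dumb buffer and fetch its fake mmap offset. */
static int example_alloc_dumb(int fd, uint64_t *offset_out)
{
	struct drm_mode_create_dumb create = {
		.height = 64, .width = 64, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };
	int ret;

	ret = drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
	if (ret)
		return ret;

	map.handle = create.handle;
	ret = drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
	if (ret == 0)
		*offset_out = map.offset;	/* use with mmap() on fd */
	return ret;
}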