Commit d2dbaaf6 authored by Dave Airlie's avatar Dave Airlie

Merge branch 'server-fixes' into drm-next

Merge the fixes for the server driver dirty update paths
* server-fixes:
  drm/cirrus: deal with bo reserve fail in dirty update path
  drm/ast: deal with bo reserve fail in dirty update path
  drm/mgag200: deal with bo reserve fail in dirty update path
parents 7e17fc0b f3b2bbdc
...@@ -241,6 +241,8 @@ struct ast_fbdev { ...@@ -241,6 +241,8 @@ struct ast_fbdev {
void *sysram; void *sysram;
int size; int size;
struct ttm_bo_kmap_obj mapping; struct ttm_bo_kmap_obj mapping;
int x1, y1, x2, y2; /* dirty rect */
spinlock_t dirty_lock;
}; };
#define to_ast_crtc(x) container_of(x, struct ast_crtc, base) #define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
......
...@@ -53,16 +53,52 @@ static void ast_dirty_update(struct ast_fbdev *afbdev, ...@@ -53,16 +53,52 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8; int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
int ret; int ret;
bool unmap = false; bool unmap = false;
bool store_for_later = false;
int x2, y2;
unsigned long flags;
obj = afbdev->afb.obj; obj = afbdev->afb.obj;
bo = gem_to_ast_bo(obj); bo = gem_to_ast_bo(obj);
/*
* try and reserve the BO, if we fail with busy
* then the BO is being moved and we should
* store up the damage until later.
*/
ret = ast_bo_reserve(bo, true); ret = ast_bo_reserve(bo, true);
if (ret) { if (ret) {
DRM_ERROR("failed to reserve fb bo\n"); if (ret != -EBUSY)
return;
store_for_later = true;
}
x2 = x + width - 1;
y2 = y + height - 1;
spin_lock_irqsave(&afbdev->dirty_lock, flags);
if (afbdev->y1 < y)
y = afbdev->y1;
if (afbdev->y2 > y2)
y2 = afbdev->y2;
if (afbdev->x1 < x)
x = afbdev->x1;
if (afbdev->x2 > x2)
x2 = afbdev->x2;
if (store_for_later) {
afbdev->x1 = x;
afbdev->x2 = x2;
afbdev->y1 = y;
afbdev->y2 = y2;
spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
return; return;
} }
afbdev->x1 = afbdev->y1 = INT_MAX;
afbdev->x2 = afbdev->y2 = 0;
spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
if (!bo->kmap.virtual) { if (!bo->kmap.virtual) {
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret) { if (ret) {
...@@ -72,10 +108,10 @@ static void ast_dirty_update(struct ast_fbdev *afbdev, ...@@ -72,10 +108,10 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
} }
unmap = true; unmap = true;
} }
for (i = y; i < y + height; i++) { for (i = y; i <= y2; i++) {
/* assume equal stride for now */ /* assume equal stride for now */
src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp); src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp);
memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp); memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, (x2 - x + 1) * bpp);
} }
if (unmap) if (unmap)
...@@ -292,6 +328,7 @@ int ast_fbdev_init(struct drm_device *dev) ...@@ -292,6 +328,7 @@ int ast_fbdev_init(struct drm_device *dev)
ast->fbdev = afbdev; ast->fbdev = afbdev;
afbdev->helper.funcs = &ast_fb_helper_funcs; afbdev->helper.funcs = &ast_fb_helper_funcs;
spin_lock_init(&afbdev->dirty_lock);
ret = drm_fb_helper_init(dev, &afbdev->helper, ret = drm_fb_helper_init(dev, &afbdev->helper,
1, 1); 1, 1);
if (ret) { if (ret) {
......
...@@ -316,7 +316,7 @@ int ast_bo_reserve(struct ast_bo *bo, bool no_wait) ...@@ -316,7 +316,7 @@ int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0); ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
if (ret) { if (ret) {
if (ret != -ERESTARTSYS) if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo); DRM_ERROR("reserve failed %p\n", bo);
return ret; return ret;
} }
......
...@@ -154,6 +154,8 @@ struct cirrus_fbdev { ...@@ -154,6 +154,8 @@ struct cirrus_fbdev {
struct list_head fbdev_list; struct list_head fbdev_list;
void *sysram; void *sysram;
int size; int size;
int x1, y1, x2, y2; /* dirty rect */
spinlock_t dirty_lock;
}; };
struct cirrus_bo { struct cirrus_bo {
......
...@@ -27,16 +27,51 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev, ...@@ -27,16 +27,51 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8; int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
int ret; int ret;
bool unmap = false; bool unmap = false;
bool store_for_later = false;
int x2, y2;
unsigned long flags;
obj = afbdev->gfb.obj; obj = afbdev->gfb.obj;
bo = gem_to_cirrus_bo(obj); bo = gem_to_cirrus_bo(obj);
/*
* try and reserve the BO, if we fail with busy
* then the BO is being moved and we should
* store up the damage until later.
*/
ret = cirrus_bo_reserve(bo, true); ret = cirrus_bo_reserve(bo, true);
if (ret) { if (ret) {
DRM_ERROR("failed to reserve fb bo\n"); if (ret != -EBUSY)
return; return;
store_for_later = true;
} }
x2 = x + width - 1;
y2 = y + height - 1;
spin_lock_irqsave(&afbdev->dirty_lock, flags);
if (afbdev->y1 < y)
y = afbdev->y1;
if (afbdev->y2 > y2)
y2 = afbdev->y2;
if (afbdev->x1 < x)
x = afbdev->x1;
if (afbdev->x2 > x2)
x2 = afbdev->x2;
if (store_for_later) {
afbdev->x1 = x;
afbdev->x2 = x2;
afbdev->y1 = y;
afbdev->y2 = y2;
spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
return;
}
afbdev->x1 = afbdev->y1 = INT_MAX;
afbdev->x2 = afbdev->y2 = 0;
spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
if (!bo->kmap.virtual) { if (!bo->kmap.virtual) {
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret) { if (ret) {
...@@ -268,6 +303,7 @@ int cirrus_fbdev_init(struct cirrus_device *cdev) ...@@ -268,6 +303,7 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
cdev->mode_info.gfbdev = gfbdev; cdev->mode_info.gfbdev = gfbdev;
gfbdev->helper.funcs = &cirrus_fb_helper_funcs; gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
spin_lock_init(&gfbdev->dirty_lock);
ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper, ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
cdev->num_crtc, CIRRUSFB_CONN_LIMIT); cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
......
...@@ -321,7 +321,7 @@ int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait) ...@@ -321,7 +321,7 @@ int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0); ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
if (ret) { if (ret) {
if (ret != -ERESTARTSYS) if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo); DRM_ERROR("reserve failed %p\n", bo);
return ret; return ret;
} }
......
...@@ -115,6 +115,8 @@ struct mga_fbdev { ...@@ -115,6 +115,8 @@ struct mga_fbdev {
void *sysram; void *sysram;
int size; int size;
struct ttm_bo_kmap_obj mapping; struct ttm_bo_kmap_obj mapping;
int x1, y1, x2, y2; /* dirty rect */
spinlock_t dirty_lock;
}; };
struct mga_crtc { struct mga_crtc {
......
...@@ -29,16 +29,52 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev, ...@@ -29,16 +29,52 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8; int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
int ret; int ret;
bool unmap = false; bool unmap = false;
bool store_for_later = false;
int x2, y2;
unsigned long flags;
obj = mfbdev->mfb.obj; obj = mfbdev->mfb.obj;
bo = gem_to_mga_bo(obj); bo = gem_to_mga_bo(obj);
/*
* try and reserve the BO, if we fail with busy
* then the BO is being moved and we should
* store up the damage until later.
*/
ret = mgag200_bo_reserve(bo, true); ret = mgag200_bo_reserve(bo, true);
if (ret) { if (ret) {
DRM_ERROR("failed to reserve fb bo\n"); if (ret != -EBUSY)
return;
store_for_later = true;
}
x2 = x + width - 1;
y2 = y + height - 1;
spin_lock_irqsave(&mfbdev->dirty_lock, flags);
if (mfbdev->y1 < y)
y = mfbdev->y1;
if (mfbdev->y2 > y2)
y2 = mfbdev->y2;
if (mfbdev->x1 < x)
x = mfbdev->x1;
if (mfbdev->x2 > x2)
x2 = mfbdev->x2;
if (store_for_later) {
mfbdev->x1 = x;
mfbdev->x2 = x2;
mfbdev->y1 = y;
mfbdev->y2 = y2;
spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
return; return;
} }
mfbdev->x1 = mfbdev->y1 = INT_MAX;
mfbdev->x2 = mfbdev->y2 = 0;
spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
if (!bo->kmap.virtual) { if (!bo->kmap.virtual) {
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret) { if (ret) {
...@@ -48,10 +84,10 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev, ...@@ -48,10 +84,10 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
} }
unmap = true; unmap = true;
} }
for (i = y; i < y + height; i++) { for (i = y; i <= y2; i++) {
/* assume equal stride for now */ /* assume equal stride for now */
src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp); src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, width * bpp); memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, (x2 - x + 1) * bpp);
} }
if (unmap) if (unmap)
...@@ -252,6 +288,7 @@ int mgag200_fbdev_init(struct mga_device *mdev) ...@@ -252,6 +288,7 @@ int mgag200_fbdev_init(struct mga_device *mdev)
mdev->mfbdev = mfbdev; mdev->mfbdev = mfbdev;
mfbdev->helper.funcs = &mga_fb_helper_funcs; mfbdev->helper.funcs = &mga_fb_helper_funcs;
spin_lock_init(&mfbdev->dirty_lock);
ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper, ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
mdev->num_crtc, MGAG200FB_CONN_LIMIT); mdev->num_crtc, MGAG200FB_CONN_LIMIT);
......
...@@ -315,8 +315,8 @@ int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait) ...@@ -315,8 +315,8 @@ int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0); ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
if (ret) { if (ret) {
if (ret != -ERESTARTSYS) if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo); DRM_ERROR("reserve failed %p %d\n", bo, ret);
return ret; return ret;
} }
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment