v3.1
  1/**************************************************************************
  2 *
  3 * Copyright © 2007 David Airlie
  4 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
  5 * All Rights Reserved.
  6 *
  7 * Permission is hereby granted, free of charge, to any person obtaining a
  8 * copy of this software and associated documentation files (the
  9 * "Software"), to deal in the Software without restriction, including
 10 * without limitation the rights to use, copy, modify, merge, publish,
 11 * distribute, sub license, and/or sell copies of the Software, and to
 12 * permit persons to whom the Software is furnished to do so, subject to
 13 * the following conditions:
 14 *
 15 * The above copyright notice and this permission notice (including the
 16 * next paragraph) shall be included in all copies or substantial portions
 17 * of the Software.
 18 *
 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 26 *
 27 **************************************************************************/
 28
  29#include "drmP.h"
  30#include "vmwgfx_drv.h"
 31
 32#include "ttm/ttm_placement.h"
 33
 34#define VMW_DIRTY_DELAY (HZ / 30)
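/* HZ jiffies make up one second, so HZ / 30 is roughly 33 ms: dirty
 * regions are coalesced and flushed at most about 30 times per second. */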
 35
 36struct vmw_fb_par {
 37	struct vmw_private *vmw_priv;
 38
 39	void *vmalloc;
  40
 41	struct vmw_dma_buffer *vmw_bo;
  42	struct ttm_bo_kmap_obj map;
 43
 44	u32 pseudo_palette[17];
 45
 46	unsigned depth;
 47	unsigned bpp;
 48
 49	unsigned max_width;
 50	unsigned max_height;
 51
 52	void *bo_ptr;
 53	unsigned bo_size;
 54	bool bo_iowrite;
 55
 56	struct {
 57		spinlock_t lock;
 58		bool active;
 59		unsigned x1;
 60		unsigned y1;
 61		unsigned x2;
 62		unsigned y2;
  63	} dirty;
 64};
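/* Two buffers back this fbdev: 'vmalloc' is a system-memory shadow that
 * fbcon and mmap'ing userspace draw into, while 'vmw_bo' is the VRAM
 * buffer object (kmapped at 'bo_ptr') that dirty parts of the shadow are
 * copied to. 'dirty' tracks the bounding box of pending updates under
 * its own spinlock. */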
 65
 66static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
 67			    unsigned blue, unsigned transp,
 68			    struct fb_info *info)
 69{
 70	struct vmw_fb_par *par = info->par;
 71	u32 *pal = par->pseudo_palette;
 72
 73	if (regno > 15) {
 74		DRM_ERROR("Bad regno %u.\n", regno);
 75		return 1;
 76	}
 77
 78	switch (par->depth) {
 79	case 24:
 80	case 32:
 81		pal[regno] = ((red & 0xff00) << 8) |
 82			      (green & 0xff00) |
 83			     ((blue  & 0xff00) >> 8);
 84		break;
 85	default:
  86		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
 87		return 1;
 88	}
 89
 90	return 0;
 91}
 92
 93static int vmw_fb_check_var(struct fb_var_screeninfo *var,
 94			    struct fb_info *info)
 95{
 96	int depth = var->bits_per_pixel;
 97	struct vmw_fb_par *par = info->par;
 98	struct vmw_private *vmw_priv = par->vmw_priv;
 99
100	switch (var->bits_per_pixel) {
101	case 32:
102		depth = (var->transp.length > 0) ? 32 : 24;
103		break;
104	default:
105		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
106		return -EINVAL;
107	}
108
109	switch (depth) {
110	case 24:
111		var->red.offset = 16;
112		var->green.offset = 8;
113		var->blue.offset = 0;
114		var->red.length = 8;
115		var->green.length = 8;
116		var->blue.length = 8;
117		var->transp.length = 0;
118		var->transp.offset = 0;
119		break;
120	case 32:
121		var->red.offset = 16;
122		var->green.offset = 8;
123		var->blue.offset = 0;
124		var->red.length = 8;
125		var->green.length = 8;
126		var->blue.length = 8;
127		var->transp.length = 8;
128		var->transp.offset = 24;
129		break;
130	default:
131		DRM_ERROR("Bad depth %u.\n", depth);
132		return -EINVAL;
133	}
134
135	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
136	    (var->xoffset != 0 || var->yoffset != 0)) {
137		DRM_ERROR("Can not handle panning without display topology\n");
138		return -EINVAL;
139	}
140
141	if ((var->xoffset + var->xres) > par->max_width ||
142	    (var->yoffset + var->yres) > par->max_height) {
143		DRM_ERROR("Requested geom can not fit in framebuffer\n");
144		return -EINVAL;
145	}
146
147	if (!vmw_kms_validate_mode_vram(vmw_priv,
148					info->fix.line_length,
149					var->yoffset + var->yres)) {
150		DRM_ERROR("Requested geom can not fit in framebuffer\n");
151		return -EINVAL;
152	}
153
154	return 0;
155}
156
157static int vmw_fb_set_par(struct fb_info *info)
158{
159	struct vmw_fb_par *par = info->par;
160	struct vmw_private *vmw_priv = par->vmw_priv;
161
162	vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
163			   info->fix.line_length,
164			   par->bpp, par->depth);
165	if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
 166		/* TODO check if pitch and offset change */
167		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
168		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
169		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
170		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
171		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
172		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
173		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
174		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
175	}
176
 177	/* This warning is really helpful: if the check fails, the user
 178	 * can probably not see anything on the screen.
 179	 */
180	WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);
181
182	return 0;
183}
184
185static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
186			      struct fb_info *info)
187{
188	return 0;
189}
190
191static int vmw_fb_blank(int blank, struct fb_info *info)
192{
193	return 0;
194}
195
196/*
 197 * Dirty code
198 */
199
200static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
 201{
202	struct vmw_private *vmw_priv = par->vmw_priv;
203	struct fb_info *info = vmw_priv->fb_info;
204	int stride = (info->fix.line_length / 4);
205	int *src = (int *)info->screen_base;
206	__le32 __iomem *vram_mem = par->bo_ptr;
207	unsigned long flags;
208	unsigned x, y, w, h;
209	int i, k;
210	struct {
211		uint32_t header;
212		SVGAFifoCmdUpdate body;
213	} *cmd;
214
215	if (vmw_priv->suspended)
216		return;
217
 218	spin_lock_irqsave(&par->dirty.lock, flags);
219	if (!par->dirty.active) {
220		spin_unlock_irqrestore(&par->dirty.lock, flags);
221		return;
222	}
223	x = par->dirty.x1;
224	y = par->dirty.y1;
225	w = min(par->dirty.x2, info->var.xres) - x;
 226	h = min(par->dirty.y2, info->var.yres) - y;
227	par->dirty.x1 = par->dirty.x2 = 0;
228	par->dirty.y1 = par->dirty.y2 = 0;
229	spin_unlock_irqrestore(&par->dirty.lock, flags);
230
231	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
232		for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
 233			iowrite32(src[k], vram_mem + k);
234	}
235
236#if 0
237	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
238#endif
239
240	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
241	if (unlikely(cmd == NULL)) {
242		DRM_ERROR("Fifo reserve failed.\n");
243		return;
244	}
245
246	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
247	cmd->body.x = cpu_to_le32(x);
248	cmd->body.y = cpu_to_le32(y);
249	cmd->body.width = cpu_to_le32(w);
250	cmd->body.height = cpu_to_le32(h);
251	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
252}
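/* The flush copies rows of the dirty region from the shadow buffer into
 * VRAM with iowrite32() and then queues an SVGA_CMD_UPDATE on the device
 * FIFO so the host recomposes that rectangle. Note that the copy loop
 * walks rows from row y to the end of the buffer rather than stopping at
 * y + h, while the FIFO command only advertises (x, y, w, h). */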
253
254static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
255			      unsigned x1, unsigned y1,
256			      unsigned width, unsigned height)
257{
258	struct fb_info *info = par->vmw_priv->fb_info;
259	unsigned long flags;
260	unsigned x2 = x1 + width;
261	unsigned y2 = y1 + height;
262
263	spin_lock_irqsave(&par->dirty.lock, flags);
264	if (par->dirty.x1 == par->dirty.x2) {
265		par->dirty.x1 = x1;
266		par->dirty.y1 = y1;
267		par->dirty.x2 = x2;
268		par->dirty.y2 = y2;
 269		/* If we are active, start the dirty work;
 270		 * we share the work with the defio system. */
 271		if (par->dirty.active)
 272			schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
273	} else {
274		if (x1 < par->dirty.x1)
275			par->dirty.x1 = x1;
276		if (y1 < par->dirty.y1)
277			par->dirty.y1 = y1;
278		if (x2 > par->dirty.x2)
279			par->dirty.x2 = x2;
280		if (y2 > par->dirty.y2)
281			par->dirty.y2 = y2;
282	}
283	spin_unlock_irqrestore(&par->dirty.lock, flags);
284}
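/* An empty dirty region is encoded as x1 == x2. The first mark after a
 * flush seeds the rectangle and schedules the delayed work; later marks
 * only grow the union, so a burst of drawing ends up as a single flush. */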
 285
286static void vmw_deferred_io(struct fb_info *info,
287			    struct list_head *pagelist)
288{
289	struct vmw_fb_par *par = info->par;
290	unsigned long start, end, min, max;
291	unsigned long flags;
292	struct page *page;
293	int y1, y2;
294
295	min = ULONG_MAX;
296	max = 0;
297	list_for_each_entry(page, pagelist, lru) {
298		start = page->index << PAGE_SHIFT;
299		end = start + PAGE_SIZE - 1;
300		min = min(min, start);
301		max = max(max, end);
302	}
303
304	if (min < max) {
305		y1 = min / info->fix.line_length;
306		y2 = (max / info->fix.line_length) + 1;
307
308		spin_lock_irqsave(&par->dirty.lock, flags);
309		par->dirty.x1 = 0;
310		par->dirty.y1 = y1;
311		par->dirty.x2 = info->var.xres;
312		par->dirty.y2 = y2;
313		spin_unlock_irqrestore(&par->dirty.lock, flags);
314	}
315
 316	vmw_fb_dirty_flush(par);
317};
318
319struct fb_deferred_io vmw_defio = {
320	.delay		= VMW_DIRTY_DELAY,
321	.deferred_io	= vmw_deferred_io,
322};
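/* fb_deferred_io write-protects the mmap'd framebuffer pages: a userspace
 * write faults, the touched page is queued, and after .delay jiffies the
 * .deferred_io callback runs with the list of touched pages, which
 * vmw_deferred_io above converts into a full-width band of dirty
 * scanlines before flushing. */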
323
324/*
325 * Draw code
326 */
327
328static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
329{
330	cfb_fillrect(info, rect);
331	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
332			  rect->width, rect->height);
333}
334
335static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
336{
337	cfb_copyarea(info, region);
338	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
339			  region->width, region->height);
340}
341
342static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
343{
344	cfb_imageblit(info, image);
345	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
346			  image->width, image->height);
347}
348
349/*
350 * Bring up code
351 */
352
353static struct fb_ops vmw_fb_ops = {
354	.owner = THIS_MODULE,
355	.fb_check_var = vmw_fb_check_var,
356	.fb_set_par = vmw_fb_set_par,
357	.fb_setcolreg = vmw_fb_setcolreg,
358	.fb_fillrect = vmw_fb_fillrect,
359	.fb_copyarea = vmw_fb_copyarea,
360	.fb_imageblit = vmw_fb_imageblit,
361	.fb_pan_display = vmw_fb_pan_display,
362	.fb_blank = vmw_fb_blank,
363};
364
365static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
366			    size_t size, struct vmw_dma_buffer **out)
367{
368	struct vmw_dma_buffer *vmw_bo;
369	struct ttm_placement ne_placement = vmw_vram_ne_placement;
370	int ret;
371
372	ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
373
 374	/* interruptible? */
375	ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
376	if (unlikely(ret != 0))
377		return ret;
378
379	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
 380	if (!vmw_bo)
 381		goto err_unlock;
 382
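/* Note: on kmalloc() failure above, ret still holds 0 from the successful
 * ttm_write_lock(), so the error path returns success without a buffer;
 * the v4.17 version below sets ret = -ENOMEM before the goto. */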
383	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
384			      &ne_placement,
385			      false,
386			      &vmw_dmabuf_bo_free);
387	if (unlikely(ret != 0))
388		goto err_unlock; /* init frees the buffer on failure */
389
390	*out = vmw_bo;
391
392	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
393
394	return 0;
395
396err_unlock:
 397	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
398	return ret;
399}
 400
401int vmw_fb_init(struct vmw_private *vmw_priv)
402{
403	struct device *device = &vmw_priv->dev->pdev->dev;
404	struct vmw_fb_par *par;
405	struct fb_info *info;
406	unsigned initial_width, initial_height;
407	unsigned fb_width, fb_height;
 408	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
409	int ret;
410
411	/* XXX These shouldn't be hardcoded. */
412	initial_width = 800;
413	initial_height = 600;
414
 415	fb_bpp = 32;
416	fb_depth = 24;
417
 418	/* XXX These shouldn't be hardcoded either. */
419	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
420	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
421
422	initial_width = min(fb_width, initial_width);
423	initial_height = min(fb_height, initial_height);
424
 425	fb_pitch = fb_width * fb_bpp / 8;
426	fb_size = fb_pitch * fb_height;
427	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
428
429	info = framebuffer_alloc(sizeof(*par), device);
430	if (!info)
431		return -ENOMEM;
432
433	/*
434	 * Par
435	 */
436	vmw_priv->fb_info = info;
 437	par = info->par;
438	par->vmw_priv = vmw_priv;
439	par->depth = fb_depth;
 440	par->bpp = fb_bpp;
441	par->vmalloc = NULL;
442	par->max_width = fb_width;
443	par->max_height = fb_height;
 444
445	/*
446	 * Create buffers and alloc memory
447	 */
448	par->vmalloc = vmalloc(fb_size);
449	if (unlikely(par->vmalloc == NULL)) {
450		ret = -ENOMEM;
451		goto err_free;
452	}
453
454	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
455	if (unlikely(ret != 0))
456		goto err_free;
457
458	ret = ttm_bo_kmap(&par->vmw_bo->base,
459			  0,
460			  par->vmw_bo->base.num_pages,
461			  &par->map);
462	if (unlikely(ret != 0))
463		goto err_unref;
464	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
465	par->bo_size = fb_size;
466
467	/*
468	 * Fixed and var
469	 */
470	strcpy(info->fix.id, "svgadrmfb");
471	info->fix.type = FB_TYPE_PACKED_PIXELS;
472	info->fix.visual = FB_VISUAL_TRUECOLOR;
473	info->fix.type_aux = 0;
474	info->fix.xpanstep = 1; /* doing it in hw */
475	info->fix.ypanstep = 1; /* doing it in hw */
476	info->fix.ywrapstep = 0;
477	info->fix.accel = FB_ACCEL_NONE;
478	info->fix.line_length = fb_pitch;
479
480	info->fix.smem_start = 0;
481	info->fix.smem_len = fb_size;
482
483	info->pseudo_palette = par->pseudo_palette;
484	info->screen_base = par->vmalloc;
485	info->screen_size = fb_size;
486
487	info->flags = FBINFO_DEFAULT;
488	info->fbops = &vmw_fb_ops;
489
490	/* 24 depth per default */
491	info->var.red.offset = 16;
492	info->var.green.offset = 8;
493	info->var.blue.offset = 0;
494	info->var.red.length = 8;
495	info->var.green.length = 8;
496	info->var.blue.length = 8;
497	info->var.transp.offset = 0;
498	info->var.transp.length = 0;
499
500	info->var.xres_virtual = fb_width;
501	info->var.yres_virtual = fb_height;
502	info->var.bits_per_pixel = par->bpp;
503	info->var.xoffset = 0;
504	info->var.yoffset = 0;
505	info->var.activate = FB_ACTIVATE_NOW;
506	info->var.height = -1;
507	info->var.width = -1;
508
509	info->var.xres = initial_width;
510	info->var.yres = initial_height;
511
512#if 0
513	info->pixmap.size = 64*1024;
514	info->pixmap.buf_align = 8;
515	info->pixmap.access_align = 32;
516	info->pixmap.flags = FB_PIXMAP_SYSTEM;
517	info->pixmap.scan_align = 1;
518#else
519	info->pixmap.size = 0;
520	info->pixmap.buf_align = 8;
521	info->pixmap.access_align = 32;
522	info->pixmap.flags = FB_PIXMAP_SYSTEM;
523	info->pixmap.scan_align = 1;
524#endif
525
526	info->apertures = alloc_apertures(1);
527	if (!info->apertures) {
528		ret = -ENOMEM;
529		goto err_aper;
530	}
531	info->apertures->ranges[0].base = vmw_priv->vram_start;
532	info->apertures->ranges[0].size = vmw_priv->vram_size;
533
534	/*
535	 * Dirty & Deferred IO
536	 */
537	par->dirty.x1 = par->dirty.x2 = 0;
538	par->dirty.y1 = par->dirty.y2 = 0;
539	par->dirty.active = true;
 540	spin_lock_init(&par->dirty.lock);
541	info->fbdefio = &vmw_defio;
542	fb_deferred_io_init(info);
543
544	ret = register_framebuffer(info);
545	if (unlikely(ret != 0))
546		goto err_defio;
 547
548	return 0;
549
550err_defio:
551	fb_deferred_io_cleanup(info);
552err_aper:
553	ttm_bo_kunmap(&par->map);
554err_unref:
555	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
556err_free:
 557	vfree(par->vmalloc);
558	framebuffer_release(info);
559	vmw_priv->fb_info = NULL;
560
561	return ret;
562}
563
564int vmw_fb_close(struct vmw_private *vmw_priv)
565{
566	struct fb_info *info;
567	struct vmw_fb_par *par;
568	struct ttm_buffer_object *bo;
569
570	if (!vmw_priv->fb_info)
571		return 0;
572
573	info = vmw_priv->fb_info;
574	par = info->par;
575	bo = &par->vmw_bo->base;
576	par->vmw_bo = NULL;
577
578	/* ??? order */
 579	fb_deferred_io_cleanup(info);
580	unregister_framebuffer(info);
581
582	ttm_bo_kunmap(&par->map);
 583	ttm_bo_unref(&bo);
584
585	vfree(par->vmalloc);
586	framebuffer_release(info);
587
588	return 0;
589}
590
591int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
592			 struct vmw_dma_buffer *vmw_bo)
593{
594	struct ttm_buffer_object *bo = &vmw_bo->base;
595	int ret = 0;
596
597	ret = ttm_bo_reserve(bo, false, false, false, 0);
598	if (unlikely(ret != 0))
599		return ret;
600
601	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
602	ttm_bo_unreserve(bo);
603
604	return ret;
605}
606
607int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
608				struct vmw_dma_buffer *vmw_bo)
609{
610	struct ttm_buffer_object *bo = &vmw_bo->base;
611	struct ttm_placement ne_placement = vmw_vram_ne_placement;
612	int ret = 0;
613
614	ne_placement.lpfn = bo->num_pages;
615
 616	/* interruptible? */
617	ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
618	if (unlikely(ret != 0))
619		return ret;
620
621	ret = ttm_bo_reserve(bo, false, false, false, 0);
622	if (unlikely(ret != 0))
623		goto err_unlock;
624
625	if (bo->mem.mem_type == TTM_PL_VRAM &&
626	    bo->mem.start < bo->num_pages &&
627	    bo->mem.start > 0)
628		(void) ttm_bo_validate(bo, &vmw_sys_placement, false,
629				       false, false);
630
631	ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
632
 633	/* Could probably BUG_ON() here instead. */
634	WARN_ON(bo->offset != 0);
635
636	ttm_bo_unreserve(bo);
637err_unlock:
638	ttm_write_unlock(&vmw_priv->active_master->lock);
639
640	return ret;
641}
642
643int vmw_fb_off(struct vmw_private *vmw_priv)
644{
645	struct fb_info *info;
646	struct vmw_fb_par *par;
647	unsigned long flags;
648
649	if (!vmw_priv->fb_info)
650		return -EINVAL;
651
652	info = vmw_priv->fb_info;
653	par = info->par;
654
655	spin_lock_irqsave(&par->dirty.lock, flags);
656	par->dirty.active = false;
657	spin_unlock_irqrestore(&par->dirty.lock, flags);
658
659	flush_delayed_work_sync(&info->deferred_work);
660
661	par->bo_ptr = NULL;
662	ttm_bo_kunmap(&par->map);
663
664	vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);
665
666	return 0;
667}
668
669int vmw_fb_on(struct vmw_private *vmw_priv)
670{
671	struct fb_info *info;
672	struct vmw_fb_par *par;
673	unsigned long flags;
674	bool dummy;
675	int ret;
676
677	if (!vmw_priv->fb_info)
678		return -EINVAL;
679
680	info = vmw_priv->fb_info;
681	par = info->par;
682
683	/* we are already active */
684	if (par->bo_ptr != NULL)
685		return 0;
686
 687	/* Make sure that all overlays are stopped when we take over */
688	vmw_overlay_stop_all(vmw_priv);
689
690	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
691	if (unlikely(ret != 0)) {
692		DRM_ERROR("could not move buffer to start of VRAM\n");
693		goto err_no_buffer;
694	}
695
696	ret = ttm_bo_kmap(&par->vmw_bo->base,
697			  0,
698			  par->vmw_bo->base.num_pages,
699			  &par->map);
700	BUG_ON(ret != 0);
701	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);
702
703	spin_lock_irqsave(&par->dirty.lock, flags);
704	par->dirty.active = true;
705	spin_unlock_irqrestore(&par->dirty.lock, flags);
706
707err_no_buffer:
708	vmw_fb_set_par(info);
709
710	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);
711
 712	/* If there already was stuff dirty we won't
 713	 * schedule new work, so let's do it now */
714	schedule_delayed_work(&info->deferred_work, 0);
715
716	return 0;
717}
v4.17
  1/**************************************************************************
  2 *
  3 * Copyright © 2007 David Airlie
  4 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  5 * All Rights Reserved.
  6 *
  7 * Permission is hereby granted, free of charge, to any person obtaining a
  8 * copy of this software and associated documentation files (the
  9 * "Software"), to deal in the Software without restriction, including
 10 * without limitation the rights to use, copy, modify, merge, publish,
 11 * distribute, sub license, and/or sell copies of the Software, and to
 12 * permit persons to whom the Software is furnished to do so, subject to
 13 * the following conditions:
 14 *
 15 * The above copyright notice and this permission notice (including the
 16 * next paragraph) shall be included in all copies or substantial portions
 17 * of the Software.
 18 *
 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 26 *
 27 **************************************************************************/
 28
 29#include <linux/export.h>
 30
 31#include <drm/drmP.h>
 32#include "vmwgfx_drv.h"
 33#include "vmwgfx_kms.h"
 34
 35#include <drm/ttm/ttm_placement.h>
 36
 37#define VMW_DIRTY_DELAY (HZ / 30)
 38
 39struct vmw_fb_par {
 40	struct vmw_private *vmw_priv;
 41
 42	void *vmalloc;
 43
 44	struct mutex bo_mutex;
 45	struct vmw_dma_buffer *vmw_bo;
 46	unsigned bo_size;
 47	struct drm_framebuffer *set_fb;
 48	struct drm_display_mode *set_mode;
 49	u32 fb_x;
 50	u32 fb_y;
 51	bool bo_iowrite;
 52
 53	u32 pseudo_palette[17];
  54
 55	unsigned max_width;
 56	unsigned max_height;
  57
 58	struct {
 59		spinlock_t lock;
 60		bool active;
 61		unsigned x1;
 62		unsigned y1;
 63		unsigned x2;
 64		unsigned y2;
 65	} dirty;
 66
 67	struct drm_crtc *crtc;
 68	struct drm_connector *con;
 69	struct delayed_work local_work;
 70};
 71
 72static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
 73			    unsigned blue, unsigned transp,
 74			    struct fb_info *info)
 75{
 76	struct vmw_fb_par *par = info->par;
 77	u32 *pal = par->pseudo_palette;
 78
 79	if (regno > 15) {
 80		DRM_ERROR("Bad regno %u.\n", regno);
 81		return 1;
 82	}
 83
 84	switch (par->set_fb->format->depth) {
 85	case 24:
 86	case 32:
 87		pal[regno] = ((red & 0xff00) << 8) |
 88			      (green & 0xff00) |
 89			     ((blue  & 0xff00) >> 8);
 90		break;
 91	default:
 92		DRM_ERROR("Bad depth %u, bpp %u.\n",
 93			  par->set_fb->format->depth,
 94			  par->set_fb->format->cpp[0] * 8);
 95		return 1;
 96	}
 97
 98	return 0;
 99}
100
101static int vmw_fb_check_var(struct fb_var_screeninfo *var,
102			    struct fb_info *info)
103{
104	int depth = var->bits_per_pixel;
105	struct vmw_fb_par *par = info->par;
106	struct vmw_private *vmw_priv = par->vmw_priv;
107
108	switch (var->bits_per_pixel) {
109	case 32:
110		depth = (var->transp.length > 0) ? 32 : 24;
111		break;
112	default:
113		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
114		return -EINVAL;
115	}
116
117	switch (depth) {
118	case 24:
119		var->red.offset = 16;
120		var->green.offset = 8;
121		var->blue.offset = 0;
122		var->red.length = 8;
123		var->green.length = 8;
124		var->blue.length = 8;
125		var->transp.length = 0;
126		var->transp.offset = 0;
127		break;
128	case 32:
129		var->red.offset = 16;
130		var->green.offset = 8;
131		var->blue.offset = 0;
132		var->red.length = 8;
133		var->green.length = 8;
134		var->blue.length = 8;
135		var->transp.length = 8;
136		var->transp.offset = 24;
137		break;
138	default:
139		DRM_ERROR("Bad depth %u.\n", depth);
140		return -EINVAL;
141	}
 142
143	if ((var->xoffset + var->xres) > par->max_width ||
144	    (var->yoffset + var->yres) > par->max_height) {
145		DRM_ERROR("Requested geom can not fit in framebuffer\n");
146		return -EINVAL;
147	}
148
149	if (!vmw_kms_validate_mode_vram(vmw_priv,
150					var->xres * var->bits_per_pixel/8,
151					var->yoffset + var->yres)) {
152		DRM_ERROR("Requested geom can not fit in framebuffer\n");
153		return -EINVAL;
154	}
155
156	return 0;
157}
 158
159static int vmw_fb_blank(int blank, struct fb_info *info)
160{
161	return 0;
162}
163
164/**
165 * vmw_fb_dirty_flush - flush dirty regions to the kms framebuffer
166 *
167 * @work: The struct work_struct associated with this task.
168 *
169 * This function flushes the dirty regions of the vmalloc framebuffer to the
 170 * kms framebuffer, and if the kms framebuffer is visible, also updates the
171 * corresponding displays. Note that this function runs even if the kms
172 * framebuffer is not bound to a crtc and thus not visible, but it's turned
173 * off during hibernation using the par->dirty.active bool.
174 */
 175static void vmw_fb_dirty_flush(struct work_struct *work)
176{
177	struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
178					      local_work.work);
179	struct vmw_private *vmw_priv = par->vmw_priv;
180	struct fb_info *info = vmw_priv->fb_info;
181	unsigned long irq_flags;
182	s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0;
183	u32 cpp, max_x, max_y;
184	struct drm_clip_rect clip;
185	struct drm_framebuffer *cur_fb;
186	u8 *src_ptr, *dst_ptr;
187	struct vmw_dma_buffer *vbo = par->vmw_bo;
 188	void *virtual;
189
190	if (!READ_ONCE(par->dirty.active))
191		return;
192
193	mutex_lock(&par->bo_mutex);
194	cur_fb = par->set_fb;
195	if (!cur_fb)
196		goto out_unlock;
197
198	(void) ttm_read_lock(&vmw_priv->reservation_sem, false);
199	(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
200	virtual = vmw_dma_buffer_map_and_cache(vbo);
201	if (!virtual)
202		goto out_unreserve;
203
204	spin_lock_irqsave(&par->dirty.lock, irq_flags);
205	if (!par->dirty.active) {
206		spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
207		goto out_unreserve;
208	}
209
210	/*
211	 * Handle panning when copying from vmalloc to framebuffer.
212	 * Clip dirty area to framebuffer.
213	 */
214	cpp = cur_fb->format->cpp[0];
215	max_x = par->fb_x + cur_fb->width;
216	max_y = par->fb_y + cur_fb->height;
217
218	dst_x1 = par->dirty.x1 - par->fb_x;
219	dst_y1 = par->dirty.y1 - par->fb_y;
220	dst_x1 = max_t(s32, dst_x1, 0);
221	dst_y1 = max_t(s32, dst_y1, 0);
222
223	dst_x2 = par->dirty.x2 - par->fb_x;
224	dst_y2 = par->dirty.y2 - par->fb_y;
225	dst_x2 = min_t(s32, dst_x2, max_x);
226	dst_y2 = min_t(s32, dst_y2, max_y);
227	w = dst_x2 - dst_x1;
228	h = dst_y2 - dst_y1;
229	w = max_t(s32, 0, w);
230	h = max_t(s32, 0, h);
231
232	par->dirty.x1 = par->dirty.x2 = 0;
233	par->dirty.y1 = par->dirty.y2 = 0;
234	spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
235
236	if (w && h) {
237		dst_ptr = (u8 *)virtual  +
238			(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
239		src_ptr = (u8 *)par->vmalloc +
240			((dst_y1 + par->fb_y) * info->fix.line_length +
241			 (dst_x1 + par->fb_x) * cpp);
242
243		while (h-- > 0) {
244			memcpy(dst_ptr, src_ptr, w*cpp);
245			dst_ptr += par->set_fb->pitches[0];
246			src_ptr += info->fix.line_length;
247		}
248
249		clip.x1 = dst_x1;
250		clip.x2 = dst_x2;
251		clip.y1 = dst_y1;
252		clip.y2 = dst_y2;
253	}
254
255out_unreserve:
256	ttm_bo_unreserve(&vbo->base);
257	ttm_read_unlock(&vmw_priv->reservation_sem);
258	if (w && h) {
259		WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
260						       &clip, 1));
261		vmw_fifo_flush(vmw_priv, false);
262	}
263out_unlock:
 264	mutex_unlock(&par->bo_mutex);
265}
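/* Unlike the v3.1 flush, this version runs from its own delayed work item
 * (par->local_work), copies the clipped region with memcpy() through a
 * cached map of the buffer object, and publishes the clip through the
 * framebuffer's ->dirty hook instead of writing SVGA_CMD_UPDATE commands
 * to the FIFO directly. */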
266
267static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
268			      unsigned x1, unsigned y1,
269			      unsigned width, unsigned height)
 270{
271	unsigned long flags;
272	unsigned x2 = x1 + width;
273	unsigned y2 = y1 + height;
274
275	spin_lock_irqsave(&par->dirty.lock, flags);
276	if (par->dirty.x1 == par->dirty.x2) {
277		par->dirty.x1 = x1;
278		par->dirty.y1 = y1;
279		par->dirty.x2 = x2;
280		par->dirty.y2 = y2;
 281		/* If we are active, start the dirty work;
 282		 * we share the work with the defio system. */
283		if (par->dirty.active)
284			schedule_delayed_work(&par->local_work,
285					      VMW_DIRTY_DELAY);
286	} else {
287		if (x1 < par->dirty.x1)
288			par->dirty.x1 = x1;
289		if (y1 < par->dirty.y1)
290			par->dirty.y1 = y1;
291		if (x2 > par->dirty.x2)
292			par->dirty.x2 = x2;
293		if (y2 > par->dirty.y2)
294			par->dirty.y2 = y2;
295	}
296	spin_unlock_irqrestore(&par->dirty.lock, flags);
297}
298
299static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
300			      struct fb_info *info)
301{
302	struct vmw_fb_par *par = info->par;
303
304	if ((var->xoffset + var->xres) > var->xres_virtual ||
305	    (var->yoffset + var->yres) > var->yres_virtual) {
306		DRM_ERROR("Requested panning can not fit in framebuffer\n");
307		return -EINVAL;
308	}
309
310	mutex_lock(&par->bo_mutex);
311	par->fb_x = var->xoffset;
312	par->fb_y = var->yoffset;
313	if (par->set_fb)
314		vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
315				  par->set_fb->height);
316	mutex_unlock(&par->bo_mutex);
317
318	return 0;
319}
320
321static void vmw_deferred_io(struct fb_info *info,
322			    struct list_head *pagelist)
323{
324	struct vmw_fb_par *par = info->par;
325	unsigned long start, end, min, max;
326	unsigned long flags;
327	struct page *page;
328	int y1, y2;
329
330	min = ULONG_MAX;
331	max = 0;
332	list_for_each_entry(page, pagelist, lru) {
333		start = page->index << PAGE_SHIFT;
334		end = start + PAGE_SIZE - 1;
335		min = min(min, start);
336		max = max(max, end);
337	}
338
339	if (min < max) {
340		y1 = min / info->fix.line_length;
341		y2 = (max / info->fix.line_length) + 1;
342
343		spin_lock_irqsave(&par->dirty.lock, flags);
344		par->dirty.x1 = 0;
345		par->dirty.y1 = y1;
346		par->dirty.x2 = info->var.xres;
347		par->dirty.y2 = y2;
 348		spin_unlock_irqrestore(&par->dirty.lock, flags);
349
350		/*
351		 * Since we've already waited on this work once, try to
352		 * execute asap.
353		 */
354		cancel_delayed_work(&par->local_work);
355		schedule_delayed_work(&par->local_work, 0);
356	}
357};
358
359static struct fb_deferred_io vmw_defio = {
360	.delay		= VMW_DIRTY_DELAY,
361	.deferred_io	= vmw_deferred_io,
362};
363
364/*
365 * Draw code
366 */
367
368static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
369{
370	cfb_fillrect(info, rect);
371	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
372			  rect->width, rect->height);
373}
374
375static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
376{
377	cfb_copyarea(info, region);
378	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
379			  region->width, region->height);
380}
381
382static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
383{
384	cfb_imageblit(info, image);
385	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
386			  image->width, image->height);
387}
388
389/*
390 * Bring up code
391 */
 392
393static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
394			    size_t size, struct vmw_dma_buffer **out)
395{
 396	struct vmw_dma_buffer *vmw_bo;
397	int ret;
398
 399	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);
400
401	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
402	if (!vmw_bo) {
403		ret = -ENOMEM;
404		goto err_unlock;
405	}
406
407	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
408			      &vmw_sys_placement,
409			      false,
410			      &vmw_dmabuf_bo_free);
411	if (unlikely(ret != 0))
412		goto err_unlock; /* init frees the buffer on failure */
413
414	*out = vmw_bo;
 415	ttm_write_unlock(&vmw_priv->reservation_sem);
416
417	return 0;
418
419err_unlock:
420	ttm_write_unlock(&vmw_priv->reservation_sem);
421	return ret;
422}
423
424static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
425				int *depth)
426{
427	switch (var->bits_per_pixel) {
428	case 32:
429		*depth = (var->transp.length > 0) ? 32 : 24;
430		break;
431	default:
432		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
433		return -EINVAL;
434	}
435
436	return 0;
437}
438
439static int vmwgfx_set_config_internal(struct drm_mode_set *set)
440{
441	struct drm_crtc *crtc = set->crtc;
442	struct drm_framebuffer *fb;
443	struct drm_crtc *tmp;
444	struct drm_device *dev = set->crtc->dev;
445	struct drm_modeset_acquire_ctx ctx;
446	int ret;
447
448	drm_modeset_acquire_init(&ctx, 0);
449
450restart:
451	/*
452	 * NOTE: ->set_config can also disable other crtcs (if we steal all
453	 * connectors from it), hence we need to refcount the fbs across all
454	 * crtcs. Atomic modeset will have saner semantics ...
455	 */
456	drm_for_each_crtc(tmp, dev)
457		tmp->primary->old_fb = tmp->primary->fb;
458
459	fb = set->fb;
460
461	ret = crtc->funcs->set_config(set, &ctx);
462	if (ret == 0) {
463		crtc->primary->crtc = crtc;
464		crtc->primary->fb = fb;
465	}
466
467	drm_for_each_crtc(tmp, dev) {
468		if (tmp->primary->fb)
469			drm_framebuffer_get(tmp->primary->fb);
470		if (tmp->primary->old_fb)
471			drm_framebuffer_put(tmp->primary->old_fb);
472		tmp->primary->old_fb = NULL;
473	}
474
475	if (ret == -EDEADLK) {
476		drm_modeset_backoff(&ctx);
477		goto restart;
478	}
479
480	drm_modeset_drop_locks(&ctx);
481	drm_modeset_acquire_fini(&ctx);
482
483	return ret;
484}
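/* This is the usual drm_modeset_acquire_ctx pattern: when set_config
 * returns -EDEADLK, drm_modeset_backoff() drops the contended locks and
 * the whole sequence restarts, redoing the framebuffer refcounting. */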
485
486static int vmw_fb_kms_detach(struct vmw_fb_par *par,
487			     bool detach_bo,
488			     bool unref_bo)
489{
490	struct drm_framebuffer *cur_fb = par->set_fb;
491	int ret;
492
493	/* Detach the KMS framebuffer from crtcs */
494	if (par->set_mode) {
495		struct drm_mode_set set;
496
497		set.crtc = par->crtc;
498		set.x = 0;
499		set.y = 0;
500		set.mode = NULL;
501		set.fb = NULL;
502		set.num_connectors = 0;
503		set.connectors = &par->con;
504		ret = vmwgfx_set_config_internal(&set);
505		if (ret) {
506			DRM_ERROR("Could not unset a mode.\n");
507			return ret;
508		}
509		drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
510		par->set_mode = NULL;
511	}
512
513	if (cur_fb) {
514		drm_framebuffer_put(cur_fb);
515		par->set_fb = NULL;
516	}
517
518	if (par->vmw_bo && detach_bo && unref_bo)
519		vmw_dmabuf_unreference(&par->vmw_bo);
520
521	return 0;
522}
523
524static int vmw_fb_kms_framebuffer(struct fb_info *info)
525{
526	struct drm_mode_fb_cmd2 mode_cmd;
527	struct vmw_fb_par *par = info->par;
528	struct fb_var_screeninfo *var = &info->var;
529	struct drm_framebuffer *cur_fb;
530	struct vmw_framebuffer *vfb;
531	int ret = 0, depth;
532	size_t new_bo_size;
533
534	ret = vmw_fb_compute_depth(var, &depth);
535	if (ret)
536		return ret;
537
538	mode_cmd.width = var->xres;
539	mode_cmd.height = var->yres;
540	mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
541	mode_cmd.pixel_format =
542		drm_mode_legacy_fb_format(var->bits_per_pixel, depth);
543
544	cur_fb = par->set_fb;
545	if (cur_fb && cur_fb->width == mode_cmd.width &&
546	    cur_fb->height == mode_cmd.height &&
547	    cur_fb->format->format == mode_cmd.pixel_format &&
548	    cur_fb->pitches[0] == mode_cmd.pitches[0])
549		return 0;
550
551	/* Need new buffer object ? */
552	new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
553	ret = vmw_fb_kms_detach(par,
554				par->bo_size < new_bo_size ||
555				par->bo_size > 2*new_bo_size,
556				true);
557	if (ret)
558		return ret;
559
560	if (!par->vmw_bo) {
561		ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
562				       &par->vmw_bo);
563		if (ret) {
564			DRM_ERROR("Failed creating a buffer object for "
565				  "fbdev.\n");
566			return ret;
567		}
568		par->bo_size = new_bo_size;
569	}
570
571	vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
572				      true, &mode_cmd);
573	if (IS_ERR(vfb))
574		return PTR_ERR(vfb);
575
576	par->set_fb = &vfb->base;
577
578	return 0;
579}
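/* The buffer object is reused whenever the new framebuffer needs between
 * half and all of the existing allocation (new_bo_size <= bo_size <=
 * 2 * new_bo_size); otherwise vmw_fb_kms_detach() drops it and
 * vmw_fb_create_bo() allocates a fresh one. */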
580
581static int vmw_fb_set_par(struct fb_info *info)
582{
583	struct vmw_fb_par *par = info->par;
584	struct vmw_private *vmw_priv = par->vmw_priv;
585	struct drm_mode_set set;
586	struct fb_var_screeninfo *var = &info->var;
587	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
588		DRM_MODE_TYPE_DRIVER,
589		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
590		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
591	};
592	struct drm_display_mode *old_mode;
593	struct drm_display_mode *mode;
594	int ret;
595
596	old_mode = par->set_mode;
597	mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
598	if (!mode) {
599		DRM_ERROR("Could not create new fb mode.\n");
600		return -ENOMEM;
601	}
602
603	mode->hdisplay = var->xres;
604	mode->vdisplay = var->yres;
605	vmw_guess_mode_timing(mode);
606
607	if (old_mode && drm_mode_equal(old_mode, mode)) {
608		drm_mode_destroy(vmw_priv->dev, mode);
609		mode = old_mode;
610		old_mode = NULL;
611	} else if (!vmw_kms_validate_mode_vram(vmw_priv,
612					mode->hdisplay *
613					DIV_ROUND_UP(var->bits_per_pixel, 8),
614					mode->vdisplay)) {
615		drm_mode_destroy(vmw_priv->dev, mode);
616		return -EINVAL;
617	}
618
619	mutex_lock(&par->bo_mutex);
620	ret = vmw_fb_kms_framebuffer(info);
621	if (ret)
622		goto out_unlock;
623
624	par->fb_x = var->xoffset;
625	par->fb_y = var->yoffset;
626
627	set.crtc = par->crtc;
628	set.x = 0;
629	set.y = 0;
630	set.mode = mode;
631	set.fb = par->set_fb;
632	set.num_connectors = 1;
633	set.connectors = &par->con;
634
635	ret = vmwgfx_set_config_internal(&set);
636	if (ret)
637		goto out_unlock;
638
639	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
640			  par->set_fb->width, par->set_fb->height);
641
 642	/* If there already was stuff dirty we won't
 643	 * schedule new work, so let's do it now */
644
645	schedule_delayed_work(&par->local_work, 0);
646
647out_unlock:
648	if (old_mode)
649		drm_mode_destroy(vmw_priv->dev, old_mode);
650	par->set_mode = mode;
651
652	mutex_unlock(&par->bo_mutex);
653
654	return ret;
655}
656
657
658static struct fb_ops vmw_fb_ops = {
659	.owner = THIS_MODULE,
660	.fb_check_var = vmw_fb_check_var,
661	.fb_set_par = vmw_fb_set_par,
662	.fb_setcolreg = vmw_fb_setcolreg,
663	.fb_fillrect = vmw_fb_fillrect,
664	.fb_copyarea = vmw_fb_copyarea,
665	.fb_imageblit = vmw_fb_imageblit,
666	.fb_pan_display = vmw_fb_pan_display,
667	.fb_blank = vmw_fb_blank,
668};
669
670int vmw_fb_init(struct vmw_private *vmw_priv)
671{
672	struct device *device = &vmw_priv->dev->pdev->dev;
673	struct vmw_fb_par *par;
 674	struct fb_info *info;
675	unsigned fb_width, fb_height;
676	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
677	struct drm_display_mode *init_mode;
678	int ret;
679
 680	fb_bpp = 32;
681	fb_depth = 24;
682
 683	/* XXX These shouldn't be hardcoded either. */
684	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
685	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
686
 687	fb_pitch = fb_width * fb_bpp / 8;
688	fb_size = fb_pitch * fb_height;
689	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
690
691	info = framebuffer_alloc(sizeof(*par), device);
692	if (!info)
693		return -ENOMEM;
694
695	/*
696	 * Par
697	 */
698	vmw_priv->fb_info = info;
699	par = info->par;
700	memset(par, 0, sizeof(*par));
701	INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
 702	par->vmw_priv = vmw_priv;
703	par->vmalloc = NULL;
704	par->max_width = fb_width;
705	par->max_height = fb_height;
706
707	ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
708				      par->max_height, &par->con,
709				      &par->crtc, &init_mode);
710	if (ret)
711		goto err_kms;
712
713	info->var.xres = init_mode->hdisplay;
714	info->var.yres = init_mode->vdisplay;
715
716	/*
717	 * Create buffers and alloc memory
718	 */
719	par->vmalloc = vzalloc(fb_size);
720	if (unlikely(par->vmalloc == NULL)) {
721		ret = -ENOMEM;
722		goto err_free;
723	}
 724
725	/*
726	 * Fixed and var
727	 */
728	strcpy(info->fix.id, "svgadrmfb");
729	info->fix.type = FB_TYPE_PACKED_PIXELS;
730	info->fix.visual = FB_VISUAL_TRUECOLOR;
731	info->fix.type_aux = 0;
732	info->fix.xpanstep = 1; /* doing it in hw */
733	info->fix.ypanstep = 1; /* doing it in hw */
734	info->fix.ywrapstep = 0;
735	info->fix.accel = FB_ACCEL_NONE;
736	info->fix.line_length = fb_pitch;
737
738	info->fix.smem_start = 0;
739	info->fix.smem_len = fb_size;
740
741	info->pseudo_palette = par->pseudo_palette;
742	info->screen_base = (char __iomem *)par->vmalloc;
743	info->screen_size = fb_size;
 744
745	info->fbops = &vmw_fb_ops;
746
747	/* 24 depth per default */
748	info->var.red.offset = 16;
749	info->var.green.offset = 8;
750	info->var.blue.offset = 0;
751	info->var.red.length = 8;
752	info->var.green.length = 8;
753	info->var.blue.length = 8;
754	info->var.transp.offset = 0;
755	info->var.transp.length = 0;
756
757	info->var.xres_virtual = fb_width;
758	info->var.yres_virtual = fb_height;
759	info->var.bits_per_pixel = fb_bpp;
760	info->var.xoffset = 0;
761	info->var.yoffset = 0;
762	info->var.activate = FB_ACTIVATE_NOW;
763	info->var.height = -1;
764	info->var.width = -1;
765
 766	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
767	info->apertures = alloc_apertures(1);
768	if (!info->apertures) {
769		ret = -ENOMEM;
770		goto err_aper;
771	}
772	info->apertures->ranges[0].base = vmw_priv->vram_start;
773	info->apertures->ranges[0].size = vmw_priv->vram_size;
774
775	/*
776	 * Dirty & Deferred IO
777	 */
778	par->dirty.x1 = par->dirty.x2 = 0;
779	par->dirty.y1 = par->dirty.y2 = 0;
780	par->dirty.active = true;
781	spin_lock_init(&par->dirty.lock);
782	mutex_init(&par->bo_mutex);
783	info->fbdefio = &vmw_defio;
784	fb_deferred_io_init(info);
785
786	ret = register_framebuffer(info);
787	if (unlikely(ret != 0))
788		goto err_defio;
789
790	vmw_fb_set_par(info);
791
792	return 0;
793
794err_defio:
795	fb_deferred_io_cleanup(info);
 796err_aper:
797err_free:
798	vfree(par->vmalloc);
799err_kms:
800	framebuffer_release(info);
801	vmw_priv->fb_info = NULL;
802
803	return ret;
804}
805
806int vmw_fb_close(struct vmw_private *vmw_priv)
807{
808	struct fb_info *info;
 809	struct vmw_fb_par *par;
810
811	if (!vmw_priv->fb_info)
812		return 0;
813
814	info = vmw_priv->fb_info;
 815	par = info->par;
816
817	/* ??? order */
818	fb_deferred_io_cleanup(info);
819	cancel_delayed_work_sync(&par->local_work);
820	unregister_framebuffer(info);
821
822	mutex_lock(&par->bo_mutex);
823	(void) vmw_fb_kms_detach(par, true, true);
824	mutex_unlock(&par->bo_mutex);
825
826	vfree(par->vmalloc);
827	framebuffer_release(info);
828
829	return 0;
830}
 831
832int vmw_fb_off(struct vmw_private *vmw_priv)
833{
834	struct fb_info *info;
835	struct vmw_fb_par *par;
836	unsigned long flags;
837
838	if (!vmw_priv->fb_info)
839		return -EINVAL;
840
841	info = vmw_priv->fb_info;
842	par = info->par;
843
844	spin_lock_irqsave(&par->dirty.lock, flags);
845	par->dirty.active = false;
846	spin_unlock_irqrestore(&par->dirty.lock, flags);
847
848	flush_delayed_work(&info->deferred_work);
 849	flush_delayed_work(&par->local_work);
850
851	return 0;
852}
853
854int vmw_fb_on(struct vmw_private *vmw_priv)
855{
856	struct fb_info *info;
857	struct vmw_fb_par *par;
 858	unsigned long flags;
859
860	if (!vmw_priv->fb_info)
861		return -EINVAL;
862
863	info = vmw_priv->fb_info;
864	par = info->par;
 865
866	spin_lock_irqsave(&par->dirty.lock, flags);
867	par->dirty.active = true;
868	spin_unlock_irqrestore(&par->dirty.lock, flags);
869
870	/*
871	 * Need to reschedule a dirty update, because otherwise that's
872	 * only done in dirty_mark() if the previous coalesced
873	 * dirty region was empty.
874	 */
 875	schedule_delayed_work(&par->local_work, 0);
876
877	return 0;
878}