v4.6
  1/**************************************************************************
  2 *
  3 * Copyright © 2007 David Airlie
  4 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  5 * All Rights Reserved.
  6 *
  7 * Permission is hereby granted, free of charge, to any person obtaining a
  8 * copy of this software and associated documentation files (the
  9 * "Software"), to deal in the Software without restriction, including
 10 * without limitation the rights to use, copy, modify, merge, publish,
 11 * distribute, sub license, and/or sell copies of the Software, and to
 12 * permit persons to whom the Software is furnished to do so, subject to
 13 * the following conditions:
 14 *
 15 * The above copyright notice and this permission notice (including the
 16 * next paragraph) shall be included in all copies or substantial portions
 17 * of the Software.
 18 *
 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 26 *
 27 **************************************************************************/
 28
 29#include <linux/export.h>
 30
 31#include <drm/drmP.h>
 32#include "vmwgfx_drv.h"
 33#include "vmwgfx_kms.h"
 34
 35#include <drm/ttm/ttm_placement.h>
 36
 37#define VMW_DIRTY_DELAY (HZ / 30)
 38
 39struct vmw_fb_par {
 40	struct vmw_private *vmw_priv;
 41
 42	void *vmalloc;
 43
 44	struct mutex bo_mutex;
 45	struct vmw_dma_buffer *vmw_bo;
 46	struct ttm_bo_kmap_obj map;
 47	void *bo_ptr;
 48	unsigned bo_size;
 49	struct drm_framebuffer *set_fb;
 50	struct drm_display_mode *set_mode;
 51	u32 fb_x;
 52	u32 fb_y;
 53	bool bo_iowrite;
 54
 55	u32 pseudo_palette[17];
 56
 57	unsigned max_width;
 58	unsigned max_height;
 59
 60	struct {
 61		spinlock_t lock;
 62		bool active;
 63		unsigned x1;
 64		unsigned y1;
 65		unsigned x2;
 66		unsigned y2;
 67	} dirty;
 68
 69	struct drm_crtc *crtc;
 70	struct drm_connector *con;
 71	struct delayed_work local_work;
 72};
 73
 74static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
 75			    unsigned blue, unsigned transp,
 76			    struct fb_info *info)
 77{
 78	struct vmw_fb_par *par = info->par;
 79	u32 *pal = par->pseudo_palette;
 80
 81	if (regno > 15) {
 82		DRM_ERROR("Bad regno %u.\n", regno);
 83		return 1;
 84	}
 85
 86	switch (par->set_fb->depth) {
 87	case 24:
 88	case 32:
 89		pal[regno] = ((red & 0xff00) << 8) |
 90			      (green & 0xff00) |
 91			     ((blue  & 0xff00) >> 8);
 92		break;
 93	default:
 94		DRM_ERROR("Bad depth %u, bpp %u.\n", par->set_fb->depth,
 95			  par->set_fb->bits_per_pixel);
 96		return 1;
 97	}
 98
 99	return 0;
100}
101
102static int vmw_fb_check_var(struct fb_var_screeninfo *var,
103			    struct fb_info *info)
104{
105	int depth = var->bits_per_pixel;
106	struct vmw_fb_par *par = info->par;
107	struct vmw_private *vmw_priv = par->vmw_priv;
108
109	switch (var->bits_per_pixel) {
110	case 32:
111		depth = (var->transp.length > 0) ? 32 : 24;
112		break;
113	default:
114		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
115		return -EINVAL;
116	}
117
118	switch (depth) {
119	case 24:
120		var->red.offset = 16;
121		var->green.offset = 8;
122		var->blue.offset = 0;
123		var->red.length = 8;
124		var->green.length = 8;
125		var->blue.length = 8;
126		var->transp.length = 0;
127		var->transp.offset = 0;
128		break;
129	case 32:
130		var->red.offset = 16;
131		var->green.offset = 8;
132		var->blue.offset = 0;
133		var->red.length = 8;
134		var->green.length = 8;
135		var->blue.length = 8;
136		var->transp.length = 8;
137		var->transp.offset = 24;
138		break;
139	default:
140		DRM_ERROR("Bad depth %u.\n", depth);
141		return -EINVAL;
142	}
143
144	if ((var->xoffset + var->xres) > par->max_width ||
145	    (var->yoffset + var->yres) > par->max_height) {
146		DRM_ERROR("Requested geom can not fit in framebuffer\n");
147		return -EINVAL;
148	}
149
150	if (!vmw_kms_validate_mode_vram(vmw_priv,
151					var->xres * var->bits_per_pixel/8,
152					var->yoffset + var->yres)) {
153		DRM_ERROR("Requested geom can not fit in framebuffer\n");
154		return -EINVAL;
155	}
156
157	return 0;
158}
159
160static int vmw_fb_blank(int blank, struct fb_info *info)
161{
162	return 0;
163}
164
165/*
166 * Dirty code
167 */
168
169static void vmw_fb_dirty_flush(struct work_struct *work)
170{
171	struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
172					      local_work.work);
173	struct vmw_private *vmw_priv = par->vmw_priv;
174	struct fb_info *info = vmw_priv->fb_info;
175	unsigned long irq_flags;
176	s32 dst_x1, dst_x2, dst_y1, dst_y2, w, h;
177	u32 cpp, max_x, max_y;
178	struct drm_clip_rect clip;
179	struct drm_framebuffer *cur_fb;
180	u8 *src_ptr, *dst_ptr;
181
182	if (vmw_priv->suspended)
183		return;
184
185	mutex_lock(&par->bo_mutex);
186	cur_fb = par->set_fb;
187	if (!cur_fb)
188		goto out_unlock;
189
190	spin_lock_irqsave(&par->dirty.lock, irq_flags);
191	if (!par->dirty.active) {
192		spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
193		goto out_unlock;
194	}
195
196	/*
197	 * Handle panning when copying from vmalloc to framebuffer.
198	 * Clip dirty area to framebuffer.
199	 */
200	cpp = (cur_fb->bits_per_pixel + 7) / 8;
201	max_x = par->fb_x + cur_fb->width;
202	max_y = par->fb_y + cur_fb->height;
203
204	dst_x1 = par->dirty.x1 - par->fb_x;
205	dst_y1 = par->dirty.y1 - par->fb_y;
206	dst_x1 = max_t(s32, dst_x1, 0);
207	dst_y1 = max_t(s32, dst_y1, 0);
208
209	dst_x2 = par->dirty.x2 - par->fb_x;
210	dst_y2 = par->dirty.y2 - par->fb_y;
211	dst_x2 = min_t(s32, dst_x2, max_x);
212	dst_y2 = min_t(s32, dst_y2, max_y);
213	w = dst_x2 - dst_x1;
214	h = dst_y2 - dst_y1;
215	w = max_t(s32, 0, w);
216	h = max_t(s32, 0, h);
217
218	par->dirty.x1 = par->dirty.x2 = 0;
219	par->dirty.y1 = par->dirty.y2 = 0;
220	spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
221
222	if (w && h) {
223		dst_ptr = (u8 *)par->bo_ptr  +
224			(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
225		src_ptr = (u8 *)par->vmalloc +
226			((dst_y1 + par->fb_y) * info->fix.line_length +
227			 (dst_x1 + par->fb_x) * cpp);
228
229		while (h-- > 0) {
230			memcpy(dst_ptr, src_ptr, w*cpp);
231			dst_ptr += par->set_fb->pitches[0];
232			src_ptr += info->fix.line_length;
233		}
234
235		clip.x1 = dst_x1;
236		clip.x2 = dst_x2;
237		clip.y1 = dst_y1;
238		clip.y2 = dst_y2;
239
240		WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
241						       &clip, 1));
242		vmw_fifo_flush(vmw_priv, false);
243	}
244out_unlock:
245	mutex_unlock(&par->bo_mutex);
246}
247
248static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
249			      unsigned x1, unsigned y1,
250			      unsigned width, unsigned height)
251{
252	unsigned long flags;
253	unsigned x2 = x1 + width;
254	unsigned y2 = y1 + height;
255
256	spin_lock_irqsave(&par->dirty.lock, flags);
257	if (par->dirty.x1 == par->dirty.x2) {
258		par->dirty.x1 = x1;
259		par->dirty.y1 = y1;
260		par->dirty.x2 = x2;
261		par->dirty.y2 = y2;
262		/* if we are active, start the dirty work;
263		 * we share the work with the defio system */
264		if (par->dirty.active)
265			schedule_delayed_work(&par->local_work,
266					      VMW_DIRTY_DELAY);
267	} else {
268		if (x1 < par->dirty.x1)
269			par->dirty.x1 = x1;
270		if (y1 < par->dirty.y1)
271			par->dirty.y1 = y1;
272		if (x2 > par->dirty.x2)
273			par->dirty.x2 = x2;
274		if (y2 > par->dirty.y2)
275			par->dirty.y2 = y2;
276	}
277	spin_unlock_irqrestore(&par->dirty.lock, flags);
278}
279
280static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
281			      struct fb_info *info)
282{
283	struct vmw_fb_par *par = info->par;
284
285	if ((var->xoffset + var->xres) > var->xres_virtual ||
286	    (var->yoffset + var->yres) > var->yres_virtual) {
287		DRM_ERROR("Requested panning can not fit in framebuffer\n");
288		return -EINVAL;
289	}
290
291	mutex_lock(&par->bo_mutex);
292	par->fb_x = var->xoffset;
293	par->fb_y = var->yoffset;
294	if (par->set_fb)
295		vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
296				  par->set_fb->height);
297	mutex_unlock(&par->bo_mutex);
298
299	return 0;
300}
301
302static void vmw_deferred_io(struct fb_info *info,
303			    struct list_head *pagelist)
304{
305	struct vmw_fb_par *par = info->par;
306	unsigned long start, end, min, max;
307	unsigned long flags;
308	struct page *page;
309	int y1, y2;
310
311	min = ULONG_MAX;
312	max = 0;
313	list_for_each_entry(page, pagelist, lru) {
314		start = page->index << PAGE_SHIFT;
315		end = start + PAGE_SIZE - 1;
316		min = min(min, start);
317		max = max(max, end);
318	}
319
320	if (min < max) {
321		y1 = min / info->fix.line_length;
322		y2 = (max / info->fix.line_length) + 1;
323
324		spin_lock_irqsave(&par->dirty.lock, flags);
325		par->dirty.x1 = 0;
326		par->dirty.y1 = y1;
327		par->dirty.x2 = info->var.xres;
328		par->dirty.y2 = y2;
329		spin_unlock_irqrestore(&par->dirty.lock, flags);
330
331		/*
332		 * Since we've already waited on this work once, try to
333		 * execute asap.
334		 */
335		cancel_delayed_work(&par->local_work);
336		schedule_delayed_work(&par->local_work, 0);
337	}
338};
339
340static struct fb_deferred_io vmw_defio = {
341	.delay		= VMW_DIRTY_DELAY,
342	.deferred_io	= vmw_deferred_io,
343};
344
345/*
346 * Draw code
347 */
348
349static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
350{
351	cfb_fillrect(info, rect);
352	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
353			  rect->width, rect->height);
354}
355
356static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
357{
358	cfb_copyarea(info, region);
359	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
360			  region->width, region->height);
361}
362
363static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
364{
365	cfb_imageblit(info, image);
366	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
367			  image->width, image->height);
368}
369
370/*
371 * Bring up code
372 */
373
374static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
375			    size_t size, struct vmw_dma_buffer **out)
376{
377	struct vmw_dma_buffer *vmw_bo;
378	int ret;
379
380	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);
381
382	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
383	if (!vmw_bo) {
384		ret = -ENOMEM;
385		goto err_unlock;
386	}
387
388	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
389			      &vmw_sys_placement,
390			      false,
391			      &vmw_dmabuf_bo_free);
392	if (unlikely(ret != 0))
393		goto err_unlock; /* init frees the buffer on failure */
394
395	*out = vmw_bo;
396	ttm_write_unlock(&vmw_priv->reservation_sem);
397
398	return 0;
399
400err_unlock:
401	ttm_write_unlock(&vmw_priv->reservation_sem);
402	return ret;
403}
404
405static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
406				int *depth)
407{
408	switch (var->bits_per_pixel) {
409	case 32:
410		*depth = (var->transp.length > 0) ? 32 : 24;
411		break;
412	default:
413		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
414		return -EINVAL;
415	}
416
417	return 0;
418}
419
420static int vmw_fb_kms_detach(struct vmw_fb_par *par,
421			     bool detach_bo,
422			     bool unref_bo)
423{
424	struct drm_framebuffer *cur_fb = par->set_fb;
425	int ret;
426
427	/* Detach the KMS framebuffer from crtcs */
428	if (par->set_mode) {
429		struct drm_mode_set set;
430
431		set.crtc = par->crtc;
432		set.x = 0;
433		set.y = 0;
434		set.mode = NULL;
435		set.fb = NULL;
436		set.num_connectors = 1;
437		set.connectors = &par->con;
438		ret = drm_mode_set_config_internal(&set);
439		if (ret) {
440			DRM_ERROR("Could not unset a mode.\n");
441			return ret;
442		}
443		drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
444		par->set_mode = NULL;
445	}
446
447	if (cur_fb) {
448		drm_framebuffer_unreference(cur_fb);
449		par->set_fb = NULL;
450	}
451
452	if (par->vmw_bo && detach_bo) {
453		if (par->bo_ptr) {
454			ttm_bo_kunmap(&par->map);
455			par->bo_ptr = NULL;
456		}
457		if (unref_bo)
458			vmw_dmabuf_unreference(&par->vmw_bo);
459		else
460			vmw_dmabuf_unpin(par->vmw_priv, par->vmw_bo, false);
461	}
462
463	return 0;
464}
465
466static int vmw_fb_kms_framebuffer(struct fb_info *info)
467{
468	struct drm_mode_fb_cmd mode_cmd;
469	struct vmw_fb_par *par = info->par;
470	struct fb_var_screeninfo *var = &info->var;
471	struct drm_framebuffer *cur_fb;
472	struct vmw_framebuffer *vfb;
473	int ret = 0;
474	size_t new_bo_size;
475
476	ret = vmw_fb_compute_depth(var, &mode_cmd.depth);
477	if (ret)
478		return ret;
479
480	mode_cmd.width = var->xres;
481	mode_cmd.height = var->yres;
482	mode_cmd.bpp = var->bits_per_pixel;
483	mode_cmd.pitch = ((mode_cmd.bpp + 7) / 8) * mode_cmd.width;
484
485	cur_fb = par->set_fb;
486	if (cur_fb && cur_fb->width == mode_cmd.width &&
487	    cur_fb->height == mode_cmd.height &&
488	    cur_fb->bits_per_pixel == mode_cmd.bpp &&
489	    cur_fb->depth == mode_cmd.depth &&
490	    cur_fb->pitches[0] == mode_cmd.pitch)
491		return 0;
492
493	/* Need new buffer object ? */
494	new_bo_size = (size_t) mode_cmd.pitch * (size_t) mode_cmd.height;
495	ret = vmw_fb_kms_detach(par,
496				par->bo_size < new_bo_size ||
497				par->bo_size > 2*new_bo_size,
498				true);
499	if (ret)
500		return ret;
501
502	if (!par->vmw_bo) {
503		ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
504				       &par->vmw_bo);
505		if (ret) {
506			DRM_ERROR("Failed creating a buffer object for "
507				  "fbdev.\n");
508			return ret;
509		}
510		par->bo_size = new_bo_size;
511	}
512
513	vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
514				      true, &mode_cmd);
515	if (IS_ERR(vfb))
516		return PTR_ERR(vfb);
517
518	par->set_fb = &vfb->base;
519
520	if (!par->bo_ptr) {
521		/*
522		 * Pin before mapping. Since we don't know in what placement
523		 * to pin, call into KMS to do it for us.
524		 */
525		ret = vfb->pin(vfb);
526		if (ret) {
527			DRM_ERROR("Could not pin the fbdev framebuffer.\n");
528			return ret;
529		}
530
531		ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
532				  par->vmw_bo->base.num_pages, &par->map);
533		if (ret) {
534			vfb->unpin(vfb);
535			DRM_ERROR("Could not map the fbdev framebuffer.\n");
536			return ret;
537		}
538
539		par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
540	}
541
542	return 0;
543}
544
545static int vmw_fb_set_par(struct fb_info *info)
546{
547	struct vmw_fb_par *par = info->par;
548	struct vmw_private *vmw_priv = par->vmw_priv;
549	struct drm_mode_set set;
550	struct fb_var_screeninfo *var = &info->var;
551	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
552		DRM_MODE_TYPE_DRIVER,
553		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
554		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
555	};
556	struct drm_display_mode *old_mode;
557	struct drm_display_mode *mode;
558	int ret;
559
560	old_mode = par->set_mode;
561	mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
562	if (!mode) {
563		DRM_ERROR("Could not create new fb mode.\n");
564		return -ENOMEM;
565	}
566
567	mode->hdisplay = var->xres;
568	mode->vdisplay = var->yres;
569	vmw_guess_mode_timing(mode);
570
571	if (old_mode && drm_mode_equal(old_mode, mode)) {
572		drm_mode_destroy(vmw_priv->dev, mode);
573		mode = old_mode;
574		old_mode = NULL;
575	} else if (!vmw_kms_validate_mode_vram(vmw_priv,
576					mode->hdisplay *
577					DIV_ROUND_UP(var->bits_per_pixel, 8),
578					mode->vdisplay)) {
579		drm_mode_destroy(vmw_priv->dev, mode);
580		return -EINVAL;
581	}
582
583	mutex_lock(&par->bo_mutex);
584	drm_modeset_lock_all(vmw_priv->dev);
585	ret = vmw_fb_kms_framebuffer(info);
586	if (ret)
587		goto out_unlock;
588
589	par->fb_x = var->xoffset;
590	par->fb_y = var->yoffset;
591
592	set.crtc = par->crtc;
593	set.x = 0;
594	set.y = 0;
595	set.mode = mode;
596	set.fb = par->set_fb;
597	set.num_connectors = 1;
598	set.connectors = &par->con;
599
600	ret = drm_mode_set_config_internal(&set);
601	if (ret)
602		goto out_unlock;
603
604	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
605			  par->set_fb->width, par->set_fb->height);
606
607	/* If there already was stuff dirty we won't
608	 * schedule a new work, so let's do it now */
609
610	schedule_delayed_work(&par->local_work, 0);
611
612out_unlock:
613	if (old_mode)
614		drm_mode_destroy(vmw_priv->dev, old_mode);
615	par->set_mode = mode;
616
617	drm_modeset_unlock_all(vmw_priv->dev);
618	mutex_unlock(&par->bo_mutex);
619
620	return ret;
621}
622
623
624static struct fb_ops vmw_fb_ops = {
625	.owner = THIS_MODULE,
626	.fb_check_var = vmw_fb_check_var,
627	.fb_set_par = vmw_fb_set_par,
628	.fb_setcolreg = vmw_fb_setcolreg,
629	.fb_fillrect = vmw_fb_fillrect,
630	.fb_copyarea = vmw_fb_copyarea,
631	.fb_imageblit = vmw_fb_imageblit,
632	.fb_pan_display = vmw_fb_pan_display,
633	.fb_blank = vmw_fb_blank,
634};
635
636int vmw_fb_init(struct vmw_private *vmw_priv)
637{
638	struct device *device = &vmw_priv->dev->pdev->dev;
639	struct vmw_fb_par *par;
640	struct fb_info *info;
641	unsigned fb_width, fb_height;
642	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
643	struct drm_display_mode *init_mode;
644	int ret;
645
646	fb_bpp = 32;
647	fb_depth = 24;
648
649	/* XXX As shouldn't these be as well. */
650	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
651	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
652
653	fb_pitch = fb_width * fb_bpp / 8;
654	fb_size = fb_pitch * fb_height;
655	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
656
657	info = framebuffer_alloc(sizeof(*par), device);
658	if (!info)
659		return -ENOMEM;
660
661	/*
662	 * Par
663	 */
664	vmw_priv->fb_info = info;
665	par = info->par;
666	memset(par, 0, sizeof(*par));
667	INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
668	par->vmw_priv = vmw_priv;
669	par->vmalloc = NULL;
670	par->max_width = fb_width;
671	par->max_height = fb_height;
672
673	drm_modeset_lock_all(vmw_priv->dev);
674	ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
675				      par->max_height, &par->con,
676				      &par->crtc, &init_mode);
677	if (ret) {
678		drm_modeset_unlock_all(vmw_priv->dev);
679		goto err_kms;
680	}
681
682	info->var.xres = init_mode->hdisplay;
683	info->var.yres = init_mode->vdisplay;
684	drm_modeset_unlock_all(vmw_priv->dev);
685
686	/*
687	 * Create buffers and alloc memory
688	 */
689	par->vmalloc = vzalloc(fb_size);
690	if (unlikely(par->vmalloc == NULL)) {
691		ret = -ENOMEM;
692		goto err_free;
693	}
694
695	/*
696	 * Fixed and var
697	 */
698	strcpy(info->fix.id, "svgadrmfb");
699	info->fix.type = FB_TYPE_PACKED_PIXELS;
700	info->fix.visual = FB_VISUAL_TRUECOLOR;
701	info->fix.type_aux = 0;
702	info->fix.xpanstep = 1; /* doing it in hw */
703	info->fix.ypanstep = 1; /* doing it in hw */
704	info->fix.ywrapstep = 0;
705	info->fix.accel = FB_ACCEL_NONE;
706	info->fix.line_length = fb_pitch;
707
708	info->fix.smem_start = 0;
709	info->fix.smem_len = fb_size;
710
711	info->pseudo_palette = par->pseudo_palette;
712	info->screen_base = (char __iomem *)par->vmalloc;
713	info->screen_size = fb_size;
714
715	info->flags = FBINFO_DEFAULT;
716	info->fbops = &vmw_fb_ops;
717
718	/* 24 depth per default */
719	info->var.red.offset = 16;
720	info->var.green.offset = 8;
721	info->var.blue.offset = 0;
722	info->var.red.length = 8;
723	info->var.green.length = 8;
724	info->var.blue.length = 8;
725	info->var.transp.offset = 0;
726	info->var.transp.length = 0;
727
728	info->var.xres_virtual = fb_width;
729	info->var.yres_virtual = fb_height;
730	info->var.bits_per_pixel = fb_bpp;
731	info->var.xoffset = 0;
732	info->var.yoffset = 0;
733	info->var.activate = FB_ACTIVATE_NOW;
734	info->var.height = -1;
735	info->var.width = -1;
736
737	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
738	info->apertures = alloc_apertures(1);
739	if (!info->apertures) {
740		ret = -ENOMEM;
741		goto err_aper;
742	}
743	info->apertures->ranges[0].base = vmw_priv->vram_start;
744	info->apertures->ranges[0].size = vmw_priv->vram_size;
745
746	/*
747	 * Dirty & Deferred IO
748	 */
749	par->dirty.x1 = par->dirty.x2 = 0;
750	par->dirty.y1 = par->dirty.y2 = 0;
751	par->dirty.active = true;
752	spin_lock_init(&par->dirty.lock);
753	mutex_init(&par->bo_mutex);
754	info->fbdefio = &vmw_defio;
755	fb_deferred_io_init(info);
756
757	ret = register_framebuffer(info);
758	if (unlikely(ret != 0))
759		goto err_defio;
760
761	vmw_fb_set_par(info);
762
763	return 0;
764
765err_defio:
766	fb_deferred_io_cleanup(info);
767err_aper:
768err_free:
769	vfree(par->vmalloc);
770err_kms:
771	framebuffer_release(info);
772	vmw_priv->fb_info = NULL;
773
774	return ret;
775}
776
777int vmw_fb_close(struct vmw_private *vmw_priv)
778{
779	struct fb_info *info;
780	struct vmw_fb_par *par;
781
782	if (!vmw_priv->fb_info)
783		return 0;
784
785	info = vmw_priv->fb_info;
786	par = info->par;
787
788	/* ??? order */
789	fb_deferred_io_cleanup(info);
790	cancel_delayed_work_sync(&par->local_work);
791	unregister_framebuffer(info);
792
793	(void) vmw_fb_kms_detach(par, true, true);
794
795	vfree(par->vmalloc);
796	framebuffer_release(info);
797
798	return 0;
799}
800
801int vmw_fb_off(struct vmw_private *vmw_priv)
802{
803	struct fb_info *info;
804	struct vmw_fb_par *par;
805	unsigned long flags;
806
807	if (!vmw_priv->fb_info)
808		return -EINVAL;
809
810	info = vmw_priv->fb_info;
811	par = info->par;
812
813	spin_lock_irqsave(&par->dirty.lock, flags);
814	par->dirty.active = false;
815	spin_unlock_irqrestore(&par->dirty.lock, flags);
816
817	flush_delayed_work(&info->deferred_work);
818	flush_delayed_work(&par->local_work);
819
820	mutex_lock(&par->bo_mutex);
821	(void) vmw_fb_kms_detach(par, true, false);
822	mutex_unlock(&par->bo_mutex);
823
824	return 0;
825}
826
827int vmw_fb_on(struct vmw_private *vmw_priv)
828{
829	struct fb_info *info;
830	struct vmw_fb_par *par;
831	unsigned long flags;
832
833	if (!vmw_priv->fb_info)
834		return -EINVAL;
835
836	info = vmw_priv->fb_info;
837	par = info->par;
838
839	vmw_fb_set_par(info);
840	spin_lock_irqsave(&par->dirty.lock, flags);
841	par->dirty.active = true;
842	spin_unlock_irqrestore(&par->dirty.lock, flags);
843 
844	return 0;
845}
v4.17
  1/**************************************************************************
  2 *
  3 * Copyright © 2007 David Airlie
  4 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  5 * All Rights Reserved.
  6 *
  7 * Permission is hereby granted, free of charge, to any person obtaining a
  8 * copy of this software and associated documentation files (the
  9 * "Software"), to deal in the Software without restriction, including
 10 * without limitation the rights to use, copy, modify, merge, publish,
 11 * distribute, sub license, and/or sell copies of the Software, and to
 12 * permit persons to whom the Software is furnished to do so, subject to
 13 * the following conditions:
 14 *
 15 * The above copyright notice and this permission notice (including the
 16 * next paragraph) shall be included in all copies or substantial portions
 17 * of the Software.
 18 *
 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 26 *
 27 **************************************************************************/
 28
 29#include <linux/export.h>
 30
 31#include <drm/drmP.h>
 32#include "vmwgfx_drv.h"
 33#include "vmwgfx_kms.h"
 34
 35#include <drm/ttm/ttm_placement.h>
 36
 37#define VMW_DIRTY_DELAY (HZ / 30)
 38
 39struct vmw_fb_par {
 40	struct vmw_private *vmw_priv;
 41
 42	void *vmalloc;
 43
 44	struct mutex bo_mutex;
 45	struct vmw_dma_buffer *vmw_bo;
 46	unsigned bo_size;
 47	struct drm_framebuffer *set_fb;
 48	struct drm_display_mode *set_mode;
 49	u32 fb_x;
 50	u32 fb_y;
 51	bool bo_iowrite;
 52
 53	u32 pseudo_palette[17];
 54
 55	unsigned max_width;
 56	unsigned max_height;
 57
 58	struct {
 59		spinlock_t lock;
 60		bool active;
 61		unsigned x1;
 62		unsigned y1;
 63		unsigned x2;
 64		unsigned y2;
 65	} dirty;
 66
 67	struct drm_crtc *crtc;
 68	struct drm_connector *con;
 69	struct delayed_work local_work;
 70};
 71
 72static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
 73			    unsigned blue, unsigned transp,
 74			    struct fb_info *info)
 75{
 76	struct vmw_fb_par *par = info->par;
 77	u32 *pal = par->pseudo_palette;
 78
 79	if (regno > 15) {
 80		DRM_ERROR("Bad regno %u.\n", regno);
 81		return 1;
 82	}
 83
 84	switch (par->set_fb->format->depth) {
 85	case 24:
 86	case 32:
 87		pal[regno] = ((red & 0xff00) << 8) |
 88			      (green & 0xff00) |
 89			     ((blue  & 0xff00) >> 8);
 90		break;
 91	default:
 92		DRM_ERROR("Bad depth %u, bpp %u.\n",
 93			  par->set_fb->format->depth,
 94			  par->set_fb->format->cpp[0] * 8);
 95		return 1;
 96	}
 97
 98	return 0;
 99}
100
101static int vmw_fb_check_var(struct fb_var_screeninfo *var,
102			    struct fb_info *info)
103{
104	int depth = var->bits_per_pixel;
105	struct vmw_fb_par *par = info->par;
106	struct vmw_private *vmw_priv = par->vmw_priv;
107
108	switch (var->bits_per_pixel) {
109	case 32:
110		depth = (var->transp.length > 0) ? 32 : 24;
111		break;
112	default:
113		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
114		return -EINVAL;
115	}
116
117	switch (depth) {
118	case 24:
119		var->red.offset = 16;
120		var->green.offset = 8;
121		var->blue.offset = 0;
122		var->red.length = 8;
123		var->green.length = 8;
124		var->blue.length = 8;
125		var->transp.length = 0;
126		var->transp.offset = 0;
127		break;
128	case 32:
129		var->red.offset = 16;
130		var->green.offset = 8;
131		var->blue.offset = 0;
132		var->red.length = 8;
133		var->green.length = 8;
134		var->blue.length = 8;
135		var->transp.length = 8;
136		var->transp.offset = 24;
137		break;
138	default:
139		DRM_ERROR("Bad depth %u.\n", depth);
140		return -EINVAL;
141	}
142
143	if ((var->xoffset + var->xres) > par->max_width ||
144	    (var->yoffset + var->yres) > par->max_height) {
145		DRM_ERROR("Requested geom can not fit in framebuffer\n");
146		return -EINVAL;
147	}
148
149	if (!vmw_kms_validate_mode_vram(vmw_priv,
150					var->xres * var->bits_per_pixel/8,
151					var->yoffset + var->yres)) {
152		DRM_ERROR("Requested geom can not fit in framebuffer\n");
153		return -EINVAL;
154	}
155
156	return 0;
157}
158
159static int vmw_fb_blank(int blank, struct fb_info *info)
160{
161	return 0;
162}
163
164/**
165 * vmw_fb_dirty_flush - flush dirty regions to the kms framebuffer
166 *
167 * @work: The struct work_struct associated with this task.
168 *
169 * This function flushes the dirty regions of the vmalloc framebuffer to the
170 * kms framebuffer, and if the kms framebuffer is visible, also updates the
171 * corresponding displays. Note that this function runs even if the kms
172 * framebuffer is not bound to a crtc and thus not visible, but it's turned
173 * off during hibernation using the par->dirty.active bool.
174 */
175static void vmw_fb_dirty_flush(struct work_struct *work)
176{
177	struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
178					      local_work.work);
179	struct vmw_private *vmw_priv = par->vmw_priv;
180	struct fb_info *info = vmw_priv->fb_info;
181	unsigned long irq_flags;
182	s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0;
183	u32 cpp, max_x, max_y;
184	struct drm_clip_rect clip;
185	struct drm_framebuffer *cur_fb;
186	u8 *src_ptr, *dst_ptr;
187	struct vmw_dma_buffer *vbo = par->vmw_bo;
188	void *virtual;
189
190	if (!READ_ONCE(par->dirty.active))
191		return;
192
193	mutex_lock(&par->bo_mutex);
194	cur_fb = par->set_fb;
195	if (!cur_fb)
196		goto out_unlock;
197
198	(void) ttm_read_lock(&vmw_priv->reservation_sem, false);
199	(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
200	virtual = vmw_dma_buffer_map_and_cache(vbo);
201	if (!virtual)
202		goto out_unreserve;
203
204	spin_lock_irqsave(&par->dirty.lock, irq_flags);
205	if (!par->dirty.active) {
206		spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
207		goto out_unreserve;
208	}
209
210	/*
211	 * Handle panning when copying from vmalloc to framebuffer.
212	 * Clip dirty area to framebuffer.
213	 */
214	cpp = cur_fb->format->cpp[0];
215	max_x = par->fb_x + cur_fb->width;
216	max_y = par->fb_y + cur_fb->height;
217
218	dst_x1 = par->dirty.x1 - par->fb_x;
219	dst_y1 = par->dirty.y1 - par->fb_y;
220	dst_x1 = max_t(s32, dst_x1, 0);
221	dst_y1 = max_t(s32, dst_y1, 0);
222
223	dst_x2 = par->dirty.x2 - par->fb_x;
224	dst_y2 = par->dirty.y2 - par->fb_y;
225	dst_x2 = min_t(s32, dst_x2, max_x);
226	dst_y2 = min_t(s32, dst_y2, max_y);
227	w = dst_x2 - dst_x1;
228	h = dst_y2 - dst_y1;
229	w = max_t(s32, 0, w);
230	h = max_t(s32, 0, h);
231
232	par->dirty.x1 = par->dirty.x2 = 0;
233	par->dirty.y1 = par->dirty.y2 = 0;
234	spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
235
236	if (w && h) {
237		dst_ptr = (u8 *)virtual  +
238			(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
239		src_ptr = (u8 *)par->vmalloc +
240			((dst_y1 + par->fb_y) * info->fix.line_length +
241			 (dst_x1 + par->fb_x) * cpp);
242
243		while (h-- > 0) {
244			memcpy(dst_ptr, src_ptr, w*cpp);
245			dst_ptr += par->set_fb->pitches[0];
246			src_ptr += info->fix.line_length;
247		}
248
249		clip.x1 = dst_x1;
250		clip.x2 = dst_x2;
251		clip.y1 = dst_y1;
252		clip.y2 = dst_y2;
253	}
254
255out_unreserve:
256	ttm_bo_unreserve(&vbo->base);
257	ttm_read_unlock(&vmw_priv->reservation_sem);
258	if (w && h) {
259		WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
260						       &clip, 1));
261		vmw_fifo_flush(vmw_priv, false);
262	}
263out_unlock:
264	mutex_unlock(&par->bo_mutex);
265}
266
267static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
268			      unsigned x1, unsigned y1,
269			      unsigned width, unsigned height)
270{
271	unsigned long flags;
272	unsigned x2 = x1 + width;
273	unsigned y2 = y1 + height;
274
275	spin_lock_irqsave(&par->dirty.lock, flags);
276	if (par->dirty.x1 == par->dirty.x2) {
277		par->dirty.x1 = x1;
278		par->dirty.y1 = y1;
279		par->dirty.x2 = x2;
280		par->dirty.y2 = y2;
281		/* if we are active, start the dirty work;
282		 * we share the work with the defio system */
283		if (par->dirty.active)
284			schedule_delayed_work(&par->local_work,
285					      VMW_DIRTY_DELAY);
286	} else {
287		if (x1 < par->dirty.x1)
288			par->dirty.x1 = x1;
289		if (y1 < par->dirty.y1)
290			par->dirty.y1 = y1;
291		if (x2 > par->dirty.x2)
292			par->dirty.x2 = x2;
293		if (y2 > par->dirty.y2)
294			par->dirty.y2 = y2;
295	}
296	spin_unlock_irqrestore(&par->dirty.lock, flags);
297}
298
299static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
300			      struct fb_info *info)
301{
302	struct vmw_fb_par *par = info->par;
303
304	if ((var->xoffset + var->xres) > var->xres_virtual ||
305	    (var->yoffset + var->yres) > var->yres_virtual) {
306		DRM_ERROR("Requested panning can not fit in framebuffer\n");
307		return -EINVAL;
308	}
309
310	mutex_lock(&par->bo_mutex);
311	par->fb_x = var->xoffset;
312	par->fb_y = var->yoffset;
313	if (par->set_fb)
314		vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
315				  par->set_fb->height);
316	mutex_unlock(&par->bo_mutex);
317
318	return 0;
319}
320
321static void vmw_deferred_io(struct fb_info *info,
322			    struct list_head *pagelist)
323{
324	struct vmw_fb_par *par = info->par;
325	unsigned long start, end, min, max;
326	unsigned long flags;
327	struct page *page;
328	int y1, y2;
329
330	min = ULONG_MAX;
331	max = 0;
332	list_for_each_entry(page, pagelist, lru) {
333		start = page->index << PAGE_SHIFT;
334		end = start + PAGE_SIZE - 1;
335		min = min(min, start);
336		max = max(max, end);
337	}
338
339	if (min < max) {
340		y1 = min / info->fix.line_length;
341		y2 = (max / info->fix.line_length) + 1;
342
343		spin_lock_irqsave(&par->dirty.lock, flags);
344		par->dirty.x1 = 0;
345		par->dirty.y1 = y1;
346		par->dirty.x2 = info->var.xres;
347		par->dirty.y2 = y2;
348		spin_unlock_irqrestore(&par->dirty.lock, flags);
349
350		/*
351		 * Since we've already waited on this work once, try to
352		 * execute asap.
353		 */
354		cancel_delayed_work(&par->local_work);
355		schedule_delayed_work(&par->local_work, 0);
356	}
357};
358
359static struct fb_deferred_io vmw_defio = {
360	.delay		= VMW_DIRTY_DELAY,
361	.deferred_io	= vmw_deferred_io,
362};
363
364/*
365 * Draw code
366 */
367
368static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
369{
370	cfb_fillrect(info, rect);
371	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
372			  rect->width, rect->height);
373}
374
375static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
376{
377	cfb_copyarea(info, region);
378	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
379			  region->width, region->height);
380}
381
382static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
383{
384	cfb_imageblit(info, image);
385	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
386			  image->width, image->height);
387}
388
389/*
390 * Bring up code
391 */
392
393static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
394			    size_t size, struct vmw_dma_buffer **out)
395{
396	struct vmw_dma_buffer *vmw_bo;
397	int ret;
398
399	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);
400
401	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
402	if (!vmw_bo) {
403		ret = -ENOMEM;
404		goto err_unlock;
405	}
406
407	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
408			      &vmw_sys_placement,
409			      false,
410			      &vmw_dmabuf_bo_free);
411	if (unlikely(ret != 0))
412		goto err_unlock; /* init frees the buffer on failure */
413
414	*out = vmw_bo;
415	ttm_write_unlock(&vmw_priv->reservation_sem);
416
417	return 0;
418
419err_unlock:
420	ttm_write_unlock(&vmw_priv->reservation_sem);
421	return ret;
422}
423
424static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
425				int *depth)
426{
427	switch (var->bits_per_pixel) {
428	case 32:
429		*depth = (var->transp.length > 0) ? 32 : 24;
430		break;
431	default:
432		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
433		return -EINVAL;
434	}
435
436	return 0;
437}
438
439static int vmwgfx_set_config_internal(struct drm_mode_set *set)
440{
441	struct drm_crtc *crtc = set->crtc;
442	struct drm_framebuffer *fb;
443	struct drm_crtc *tmp;
444	struct drm_device *dev = set->crtc->dev;
445	struct drm_modeset_acquire_ctx ctx;
446	int ret;
447
448	drm_modeset_acquire_init(&ctx, 0);
449
450restart:
451	/*
452	 * NOTE: ->set_config can also disable other crtcs (if we steal all
453	 * connectors from it), hence we need to refcount the fbs across all
454	 * crtcs. Atomic modeset will have saner semantics ...
455	 */
456	drm_for_each_crtc(tmp, dev)
457		tmp->primary->old_fb = tmp->primary->fb;
458
459	fb = set->fb;
460
461	ret = crtc->funcs->set_config(set, &ctx);
462	if (ret == 0) {
463		crtc->primary->crtc = crtc;
464		crtc->primary->fb = fb;
465	}
466
467	drm_for_each_crtc(tmp, dev) {
468		if (tmp->primary->fb)
469			drm_framebuffer_get(tmp->primary->fb);
470		if (tmp->primary->old_fb)
471			drm_framebuffer_put(tmp->primary->old_fb);
472		tmp->primary->old_fb = NULL;
473	}
474
475	if (ret == -EDEADLK) {
476		drm_modeset_backoff(&ctx);
477		goto restart;
478	}
479
480	drm_modeset_drop_locks(&ctx);
481	drm_modeset_acquire_fini(&ctx);
482
483	return ret;
484}
485
486static int vmw_fb_kms_detach(struct vmw_fb_par *par,
487			     bool detach_bo,
488			     bool unref_bo)
489{
490	struct drm_framebuffer *cur_fb = par->set_fb;
491	int ret;
492
493	/* Detach the KMS framebuffer from crtcs */
494	if (par->set_mode) {
495		struct drm_mode_set set;
496
497		set.crtc = par->crtc;
498		set.x = 0;
499		set.y = 0;
500		set.mode = NULL;
501		set.fb = NULL;
502		set.num_connectors = 0;
503		set.connectors = &par->con;
504		ret = vmwgfx_set_config_internal(&set);
505		if (ret) {
506			DRM_ERROR("Could not unset a mode.\n");
507			return ret;
508		}
509		drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
510		par->set_mode = NULL;
511	}
512
513	if (cur_fb) {
514		drm_framebuffer_put(cur_fb);
515		par->set_fb = NULL;
516	}
517
518	if (par->vmw_bo && detach_bo && unref_bo)
519		vmw_dmabuf_unreference(&par->vmw_bo);
520
521	return 0;
522}
523
524static int vmw_fb_kms_framebuffer(struct fb_info *info)
525{
526	struct drm_mode_fb_cmd2 mode_cmd;
527	struct vmw_fb_par *par = info->par;
528	struct fb_var_screeninfo *var = &info->var;
529	struct drm_framebuffer *cur_fb;
530	struct vmw_framebuffer *vfb;
531	int ret = 0, depth;
532	size_t new_bo_size;
533
534	ret = vmw_fb_compute_depth(var, &depth);
535	if (ret)
536		return ret;
537
538	mode_cmd.width = var->xres;
539	mode_cmd.height = var->yres;
540	mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
541	mode_cmd.pixel_format =
542		drm_mode_legacy_fb_format(var->bits_per_pixel, depth);
543
544	cur_fb = par->set_fb;
545	if (cur_fb && cur_fb->width == mode_cmd.width &&
546	    cur_fb->height == mode_cmd.height &&
547	    cur_fb->format->format == mode_cmd.pixel_format &&
548	    cur_fb->pitches[0] == mode_cmd.pitches[0])
549		return 0;
550
551	/* Need new buffer object ? */
552	new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
553	ret = vmw_fb_kms_detach(par,
554				par->bo_size < new_bo_size ||
555				par->bo_size > 2*new_bo_size,
556				true);
557	if (ret)
558		return ret;
559
560	if (!par->vmw_bo) {
561		ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
562				       &par->vmw_bo);
563		if (ret) {
564			DRM_ERROR("Failed creating a buffer object for "
565				  "fbdev.\n");
566			return ret;
567		}
568		par->bo_size = new_bo_size;
569	}
570
571	vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
572				      true, &mode_cmd);
573	if (IS_ERR(vfb))
574		return PTR_ERR(vfb);
575
576	par->set_fb = &vfb->base;
577
578	return 0;
579}
580
581static int vmw_fb_set_par(struct fb_info *info)
582{
583	struct vmw_fb_par *par = info->par;
584	struct vmw_private *vmw_priv = par->vmw_priv;
585	struct drm_mode_set set;
586	struct fb_var_screeninfo *var = &info->var;
587	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
588		DRM_MODE_TYPE_DRIVER,
589		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
590		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
591	};
592	struct drm_display_mode *old_mode;
593	struct drm_display_mode *mode;
594	int ret;
595
596	old_mode = par->set_mode;
597	mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
598	if (!mode) {
599		DRM_ERROR("Could not create new fb mode.\n");
600		return -ENOMEM;
601	}
602
603	mode->hdisplay = var->xres;
604	mode->vdisplay = var->yres;
605	vmw_guess_mode_timing(mode);
606
607	if (old_mode && drm_mode_equal(old_mode, mode)) {
608		drm_mode_destroy(vmw_priv->dev, mode);
609		mode = old_mode;
610		old_mode = NULL;
611	} else if (!vmw_kms_validate_mode_vram(vmw_priv,
612					mode->hdisplay *
613					DIV_ROUND_UP(var->bits_per_pixel, 8),
614					mode->vdisplay)) {
615		drm_mode_destroy(vmw_priv->dev, mode);
616		return -EINVAL;
617	}
618
619	mutex_lock(&par->bo_mutex);
620	ret = vmw_fb_kms_framebuffer(info);
621	if (ret)
622		goto out_unlock;
623
624	par->fb_x = var->xoffset;
625	par->fb_y = var->yoffset;
626
627	set.crtc = par->crtc;
628	set.x = 0;
629	set.y = 0;
630	set.mode = mode;
631	set.fb = par->set_fb;
632	set.num_connectors = 1;
633	set.connectors = &par->con;
634
635	ret = vmwgfx_set_config_internal(&set);
636	if (ret)
637		goto out_unlock;
638
639	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
640			  par->set_fb->width, par->set_fb->height);
641
642	/* If there already was stuff dirty we won't
643	 * schedule a new work, so let's do it now */
644
645	schedule_delayed_work(&par->local_work, 0);
646
647out_unlock:
648	if (old_mode)
649		drm_mode_destroy(vmw_priv->dev, old_mode);
650	par->set_mode = mode;
651
652	mutex_unlock(&par->bo_mutex);
653
654	return ret;
655}
656
657
658static struct fb_ops vmw_fb_ops = {
659	.owner = THIS_MODULE,
660	.fb_check_var = vmw_fb_check_var,
661	.fb_set_par = vmw_fb_set_par,
662	.fb_setcolreg = vmw_fb_setcolreg,
663	.fb_fillrect = vmw_fb_fillrect,
664	.fb_copyarea = vmw_fb_copyarea,
665	.fb_imageblit = vmw_fb_imageblit,
666	.fb_pan_display = vmw_fb_pan_display,
667	.fb_blank = vmw_fb_blank,
668};
669
670int vmw_fb_init(struct vmw_private *vmw_priv)
671{
672	struct device *device = &vmw_priv->dev->pdev->dev;
673	struct vmw_fb_par *par;
674	struct fb_info *info;
675	unsigned fb_width, fb_height;
676	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
677	struct drm_display_mode *init_mode;
678	int ret;
679
680	fb_bpp = 32;
681	fb_depth = 24;
682
683	/* XXX As shouldn't these be as well. */
684	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
685	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
686
687	fb_pitch = fb_width * fb_bpp / 8;
688	fb_size = fb_pitch * fb_height;
689	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
690
691	info = framebuffer_alloc(sizeof(*par), device);
692	if (!info)
693		return -ENOMEM;
694
695	/*
696	 * Par
697	 */
698	vmw_priv->fb_info = info;
699	par = info->par;
700	memset(par, 0, sizeof(*par));
701	INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
702	par->vmw_priv = vmw_priv;
703	par->vmalloc = NULL;
704	par->max_width = fb_width;
705	par->max_height = fb_height;
706
707	ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
708				      par->max_height, &par->con,
709				      &par->crtc, &init_mode);
710	if (ret)
711		goto err_kms;
712
713	info->var.xres = init_mode->hdisplay;
714	info->var.yres = init_mode->vdisplay;
715
716	/*
717	 * Create buffers and alloc memory
718	 */
719	par->vmalloc = vzalloc(fb_size);
720	if (unlikely(par->vmalloc == NULL)) {
721		ret = -ENOMEM;
722		goto err_free;
723	}
724
725	/*
726	 * Fixed and var
727	 */
728	strcpy(info->fix.id, "svgadrmfb");
729	info->fix.type = FB_TYPE_PACKED_PIXELS;
730	info->fix.visual = FB_VISUAL_TRUECOLOR;
731	info->fix.type_aux = 0;
732	info->fix.xpanstep = 1; /* doing it in hw */
733	info->fix.ypanstep = 1; /* doing it in hw */
734	info->fix.ywrapstep = 0;
735	info->fix.accel = FB_ACCEL_NONE;
736	info->fix.line_length = fb_pitch;
737
738	info->fix.smem_start = 0;
739	info->fix.smem_len = fb_size;
740
741	info->pseudo_palette = par->pseudo_palette;
742	info->screen_base = (char __iomem *)par->vmalloc;
743	info->screen_size = fb_size;
744
745	info->fbops = &vmw_fb_ops;
746
747	/* 24 depth per default */
748	info->var.red.offset = 16;
749	info->var.green.offset = 8;
750	info->var.blue.offset = 0;
751	info->var.red.length = 8;
752	info->var.green.length = 8;
753	info->var.blue.length = 8;
754	info->var.transp.offset = 0;
755	info->var.transp.length = 0;
756
757	info->var.xres_virtual = fb_width;
758	info->var.yres_virtual = fb_height;
759	info->var.bits_per_pixel = fb_bpp;
760	info->var.xoffset = 0;
761	info->var.yoffset = 0;
762	info->var.activate = FB_ACTIVATE_NOW;
763	info->var.height = -1;
764	info->var.width = -1;
765
766	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
767	info->apertures = alloc_apertures(1);
768	if (!info->apertures) {
769		ret = -ENOMEM;
770		goto err_aper;
771	}
772	info->apertures->ranges[0].base = vmw_priv->vram_start;
773	info->apertures->ranges[0].size = vmw_priv->vram_size;
774
775	/*
776	 * Dirty & Deferred IO
777	 */
778	par->dirty.x1 = par->dirty.x2 = 0;
779	par->dirty.y1 = par->dirty.y2 = 0;
780	par->dirty.active = true;
781	spin_lock_init(&par->dirty.lock);
782	mutex_init(&par->bo_mutex);
783	info->fbdefio = &vmw_defio;
784	fb_deferred_io_init(info);
785
786	ret = register_framebuffer(info);
787	if (unlikely(ret != 0))
788		goto err_defio;
789
790	vmw_fb_set_par(info);
791
792	return 0;
793
794err_defio:
795	fb_deferred_io_cleanup(info);
796err_aper:
797err_free:
798	vfree(par->vmalloc);
799err_kms:
800	framebuffer_release(info);
801	vmw_priv->fb_info = NULL;
802
803	return ret;
804}
805
806int vmw_fb_close(struct vmw_private *vmw_priv)
807{
808	struct fb_info *info;
809	struct vmw_fb_par *par;
810
811	if (!vmw_priv->fb_info)
812		return 0;
813
814	info = vmw_priv->fb_info;
815	par = info->par;
816
817	/* ??? order */
818	fb_deferred_io_cleanup(info);
819	cancel_delayed_work_sync(&par->local_work);
820	unregister_framebuffer(info);
821
822	mutex_lock(&par->bo_mutex);
823	(void) vmw_fb_kms_detach(par, true, true);
824	mutex_unlock(&par->bo_mutex);
825
826	vfree(par->vmalloc);
827	framebuffer_release(info);
828
829	return 0;
830}
831
832int vmw_fb_off(struct vmw_private *vmw_priv)
833{
834	struct fb_info *info;
835	struct vmw_fb_par *par;
836	unsigned long flags;
837
838	if (!vmw_priv->fb_info)
839		return -EINVAL;
840
841	info = vmw_priv->fb_info;
842	par = info->par;
843
844	spin_lock_irqsave(&par->dirty.lock, flags);
845	par->dirty.active = false;
846	spin_unlock_irqrestore(&par->dirty.lock, flags);
847
848	flush_delayed_work(&info->deferred_work);
849	flush_delayed_work(&par->local_work);
850
851	return 0;
852}
853
854int vmw_fb_on(struct vmw_private *vmw_priv)
855{
856	struct fb_info *info;
857	struct vmw_fb_par *par;
858	unsigned long flags;
859
860	if (!vmw_priv->fb_info)
861		return -EINVAL;
862
863	info = vmw_priv->fb_info;
864	par = info->par;
865
866	spin_lock_irqsave(&par->dirty.lock, flags);
867	par->dirty.active = true;
868	spin_unlock_irqrestore(&par->dirty.lock, flags);
869
870	/*
871	 * Need to reschedule a dirty update, because otherwise that's
872	 * only done in dirty_mark() if the previous coalesced
873	 * dirty region was empty.
874	 */
875	schedule_delayed_work(&par->local_work, 0);
876
877	return 0;
878}