/* ==== Linux kernel v4.6 version of this amdgpu fbdev source follows ==== */
  1/*
  2 * Copyright © 2007 David Airlie
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice (including the next
 12 * paragraph) shall be included in all copies or substantial portions of the
 13 * Software.
 14 *
 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 21 * DEALINGS IN THE SOFTWARE.
 22 *
 23 * Authors:
 24 *     David Airlie
 25 */
 26#include <linux/module.h>
 27#include <linux/slab.h>
 28#include <linux/fb.h>
 29
 30#include <drm/drmP.h>
 31#include <drm/drm_crtc.h>
 32#include <drm/drm_crtc_helper.h>
 33#include <drm/amdgpu_drm.h>
 34#include "amdgpu.h"
 35#include "cikd.h"
 36
 37#include <drm/drm_fb_helper.h>
 38
 39#include <linux/vga_switcheroo.h>
 40
 
 
 41/* object hierarchy -
 42   this contains a helper + a amdgpu fb
 43   the helper contains a pointer to amdgpu framebuffer baseclass.
 44*/
/*
 * Per-device fbdev emulation state.  'helper' must stay first:
 * amdgpufb_create() casts the drm_fb_helper pointer back to this struct.
 */
struct amdgpu_fbdev {
	struct drm_fb_helper helper;	/* generic DRM fbdev helper state */
	struct amdgpu_framebuffer rfb;	/* framebuffer backing the console */
	struct amdgpu_device *adev;	/* owning device */
};
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 50
/*
 * fbdev entry points for the emulated console.  Everything is delegated to
 * the generic DRM fb helpers; the cfb_* variants do CPU drawing through the
 * kernel mapping of the pinned buffer.
 */
static struct fb_ops amdgpufb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
	.fb_debug_enter = drm_fb_helper_debug_enter,
	.fb_debug_leave = drm_fb_helper_debug_leave,
};
 64
 65
 66int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled)
 67{
 68	int aligned = width;
 69	int pitch_mask = 0;
 70
 71	switch (bpp / 8) {
 72	case 1:
 73		pitch_mask = 255;
 74		break;
 75	case 2:
 76		pitch_mask = 127;
 77		break;
 78	case 3:
 79	case 4:
 80		pitch_mask = 63;
 81		break;
 82	}
 83
 84	aligned += pitch_mask;
 85	aligned &= ~pitch_mask;
 86	return aligned;
 87}
 88
 89static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
 90{
 91	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(gobj);
 92	int ret;
 93
 94	ret = amdgpu_bo_reserve(rbo, false);
 95	if (likely(ret == 0)) {
 96		amdgpu_bo_kunmap(rbo);
 97		amdgpu_bo_unpin(rbo);
 98		amdgpu_bo_unreserve(rbo);
 99	}
100	drm_gem_object_unreference_unlocked(gobj);
101}
102
/*
 * Allocate, pin and CPU-map a VRAM buffer object big enough for the fbcon
 * framebuffer described by @mode_cmd.  On success the pitch in @mode_cmd
 * has been aligned to the hardware requirement and *@gobj_p holds a
 * reference to the new GEM object (the caller owns that reference).
 * Returns 0 or a negative error code.
 */
static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
					 struct drm_mode_fb_cmd2 *mode_cmd,
					 struct drm_gem_object **gobj_p)
{
	struct amdgpu_device *adev = rfbdev->adev;
	struct drm_gem_object *gobj = NULL;
	struct amdgpu_bo *rbo = NULL;
	bool fb_tiled = false; /* useful for testing */
	u32 tiling_flags = 0;
	int ret;
	int aligned_size, size;
	int height = mode_cmd->height;
	u32 bpp, depth;

	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);

	/* need to align pitch with crtc limits */
	mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, bpp,
						  fb_tiled) * ((bpp + 1) / 8);

	/* height rounded to a multiple of 8 — presumably an alignment/tiling
	 * requirement (TODO confirm); size then padded to whole pages */
	height = ALIGN(mode_cmd->height, 8);
	size = mode_cmd->pitches[0] * height;
	aligned_size = ALIGN(size, PAGE_SIZE);
	/* CPU access required: fbcon draws into this buffer directly */
	ret = amdgpu_gem_object_create(adev, aligned_size, 0,
				       AMDGPU_GEM_DOMAIN_VRAM,
				       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				       true, &gobj);
	if (ret) {
		printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
		       aligned_size);
		/* NOTE(review): underlying error code is folded to -ENOMEM */
		return -ENOMEM;
	}
	rbo = gem_to_amdgpu_bo(gobj);

	if (fb_tiled)
		tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1);

	ret = amdgpu_bo_reserve(rbo, false);
	if (unlikely(ret != 0))
		goto out_unref;

	if (tiling_flags) {
		/* tiling failure is non-fatal: only logged, no bail-out */
		ret = amdgpu_bo_set_tiling_flags(rbo,
						 tiling_flags);
		if (ret)
			dev_err(adev->dev, "FB failed to set tiling flags\n");
	}


	/* pin into VRAM so the buffer keeps a fixed address, then CPU-map */
	ret = amdgpu_bo_pin_restricted(rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL);
	if (ret) {
		amdgpu_bo_unreserve(rbo);
		goto out_unref;
	}
	ret = amdgpu_bo_kmap(rbo, NULL);
	amdgpu_bo_unreserve(rbo);
	if (ret) {
		goto out_unref;
	}

	*gobj_p = gobj;
	return 0;
out_unref:
	/* unwinds any pin/map state and drops the GEM reference */
	amdgpufb_destroy_pinned_object(gobj);
	*gobj_p = NULL;
	return ret;
}
170
171static int amdgpufb_create(struct drm_fb_helper *helper,
172			   struct drm_fb_helper_surface_size *sizes)
173{
174	struct amdgpu_fbdev *rfbdev = (struct amdgpu_fbdev *)helper;
175	struct amdgpu_device *adev = rfbdev->adev;
176	struct fb_info *info;
177	struct drm_framebuffer *fb = NULL;
178	struct drm_mode_fb_cmd2 mode_cmd;
179	struct drm_gem_object *gobj = NULL;
180	struct amdgpu_bo *rbo = NULL;
181	int ret;
182	unsigned long tmp;
183
184	mode_cmd.width = sizes->surface_width;
185	mode_cmd.height = sizes->surface_height;
186
187	if (sizes->surface_bpp == 24)
188		sizes->surface_bpp = 32;
189
190	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
191							  sizes->surface_depth);
192
193	ret = amdgpufb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
194	if (ret) {
195		DRM_ERROR("failed to create fbcon object %d\n", ret);
196		return ret;
197	}
198
199	rbo = gem_to_amdgpu_bo(gobj);
200
201	/* okay we have an object now allocate the framebuffer */
202	info = drm_fb_helper_alloc_fbi(helper);
203	if (IS_ERR(info)) {
204		ret = PTR_ERR(info);
205		goto out_unref;
206	}
207
208	info->par = rfbdev;
209	info->skip_vt_switch = true;
210
211	ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
 
212	if (ret) {
213		DRM_ERROR("failed to initialize framebuffer %d\n", ret);
214		goto out_destroy_fbi;
215	}
216
217	fb = &rfbdev->rfb.base;
218
219	/* setup helper */
220	rfbdev->helper.fb = fb;
221
222	memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo));
223
224	strcpy(info->fix.id, "amdgpudrmfb");
225
226	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
227
228	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
229	info->fbops = &amdgpufb_ops;
230
231	tmp = amdgpu_bo_gpu_offset(rbo) - adev->mc.vram_start;
232	info->fix.smem_start = adev->mc.aper_base + tmp;
233	info->fix.smem_len = amdgpu_bo_size(rbo);
234	info->screen_base = rbo->kptr;
235	info->screen_size = amdgpu_bo_size(rbo);
236
237	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
238
239	/* setup aperture base/size for vesafb takeover */
240	info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
241	info->apertures->ranges[0].size = adev->mc.aper_size;
242
243	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
244
245	if (info->screen_base == NULL) {
246		ret = -ENOSPC;
247		goto out_destroy_fbi;
248	}
249
250	DRM_INFO("fb mappable at 0x%lX\n",  info->fix.smem_start);
251	DRM_INFO("vram apper at 0x%lX\n",  (unsigned long)adev->mc.aper_base);
252	DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(rbo));
253	DRM_INFO("fb depth is %d\n", fb->depth);
254	DRM_INFO("   pitch is %d\n", fb->pitches[0]);
255
256	vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
257	return 0;
258
259out_destroy_fbi:
260	drm_fb_helper_release_fbi(helper);
261out_unref:
262	if (rbo) {
263
264	}
265	if (fb && ret) {
266		drm_gem_object_unreference_unlocked(gobj);
267		drm_framebuffer_unregister_private(fb);
268		drm_framebuffer_cleanup(fb);
269		kfree(fb);
270	}
271	return ret;
272}
273
274void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev)
275{
276	if (adev->mode_info.rfbdev)
277		drm_fb_helper_hotplug_event(&adev->mode_info.rfbdev->helper);
278}
279
/*
 * Tear down the fbdev emulation: unregister and release the fb_info,
 * release the pinned scanout object and clean up the DRM framebuffer.
 * Always returns 0.
 */
static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
{
	struct amdgpu_framebuffer *rfb = &rfbdev->rfb;

	drm_fb_helper_unregister_fbi(&rfbdev->helper);
	drm_fb_helper_release_fbi(&rfbdev->helper);

	if (rfb->obj) {
		/* unpins, unmaps and drops the GEM reference */
		amdgpufb_destroy_pinned_object(rfb->obj);
		rfb->obj = NULL;
	}
	drm_fb_helper_fini(&rfbdev->helper);
	drm_framebuffer_unregister_private(&rfb->base);
	drm_framebuffer_cleanup(&rfb->base);

	return 0;
}
297
298/** Sets the color ramps on behalf of fbcon */
299static void amdgpu_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
300				      u16 blue, int regno)
301{
302	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
303
304	amdgpu_crtc->lut_r[regno] = red >> 6;
305	amdgpu_crtc->lut_g[regno] = green >> 6;
306	amdgpu_crtc->lut_b[regno] = blue >> 6;
307}
308
309/** Gets the color ramps on behalf of fbcon */
310static void amdgpu_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
311				      u16 *blue, int regno)
312{
313	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
314
315	*red = amdgpu_crtc->lut_r[regno] << 6;
316	*green = amdgpu_crtc->lut_g[regno] << 6;
317	*blue = amdgpu_crtc->lut_b[regno] << 6;
318}
319
/* fb helper callbacks: gamma accessors for fbcon plus the surface probe */
static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = {
	.gamma_set = amdgpu_crtc_fb_gamma_set,
	.gamma_get = amdgpu_crtc_fb_gamma_get,
	.fb_probe = amdgpufb_create,
};
325
/*
 * Create and register the fbdev emulation for @adev.  Skipped (returning 0)
 * when there is no display hardware or no connectors.  Returns 0 on success
 * or a negative error code from allocation / drm_fb_helper_init().
 */
int amdgpu_fbdev_init(struct amdgpu_device *adev)
{
	struct amdgpu_fbdev *rfbdev;
	int bpp_sel = 32;
	int ret;

	/* don't init fbdev on hw without DCE */
	if (!adev->mode_info.mode_config_initialized)
		return 0;

	/* don't init fbdev if there are no connectors */
	if (list_empty(&adev->ddev->mode_config.connector_list))
		return 0;

	/* select 8 bpp console on low vram cards */
	if (adev->mc.real_vram_size <= (32*1024*1024))
		bpp_sel = 8;

	rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL);
	if (!rfbdev)
		return -ENOMEM;

	rfbdev->adev = adev;
	adev->mode_info.rfbdev = rfbdev;

	drm_fb_helper_prepare(adev->ddev, &rfbdev->helper,
			&amdgpu_fb_helper_funcs);

	ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper,
				 adev->mode_info.num_crtc,
				 AMDGPUFB_CONN_LIMIT);
	if (ret) {
		/* NOTE(review): adev->mode_info.rfbdev still points at the
		 * freed struct after this — confirm callers treat init
		 * failure as fatal */
		kfree(rfbdev);
		return ret;
	}

	drm_fb_helper_single_add_all_connectors(&rfbdev->helper);

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(adev->ddev);

	drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
	return 0;
}
370
371void amdgpu_fbdev_fini(struct amdgpu_device *adev)
372{
373	if (!adev->mode_info.rfbdev)
374		return;
375
376	amdgpu_fbdev_destroy(adev->ddev, adev->mode_info.rfbdev);
377	kfree(adev->mode_info.rfbdev);
378	adev->mode_info.rfbdev = NULL;
379}
380
381void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
382{
383	if (adev->mode_info.rfbdev)
384		drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper,
385			state);
386}
387
388int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
389{
390	struct amdgpu_bo *robj;
391	int size = 0;
392
393	if (!adev->mode_info.rfbdev)
394		return 0;
395
396	robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj);
397	size += amdgpu_bo_size(robj);
398	return size;
399}
400
401bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
402{
403	if (!adev->mode_info.rfbdev)
404		return false;
405	if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj))
406		return true;
407	return false;
408}
409
410void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
411{
412	struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev;
413	struct drm_fb_helper *fb_helper;
414	int ret;
415
416	if (!afbdev)
417		return;
418
419	fb_helper = &afbdev->helper;
420
421	ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
422	if (ret)
423		DRM_DEBUG("failed to restore crtc mode\n");
424}
/* ==== Linux kernel v4.17 version of the same amdgpu fbdev source follows ==== */
  1/*
  2 * Copyright © 2007 David Airlie
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice (including the next
 12 * paragraph) shall be included in all copies or substantial portions of the
 13 * Software.
 14 *
 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 21 * DEALINGS IN THE SOFTWARE.
 22 *
 23 * Authors:
 24 *     David Airlie
 25 */
 26#include <linux/module.h>
 27#include <linux/slab.h>
 28#include <linux/pm_runtime.h>
 29
 30#include <drm/drmP.h>
 31#include <drm/drm_crtc.h>
 32#include <drm/drm_crtc_helper.h>
 33#include <drm/amdgpu_drm.h>
 34#include "amdgpu.h"
 35#include "cikd.h"
 36
 37#include <drm/drm_fb_helper.h>
 38
 39#include <linux/vga_switcheroo.h>
 40
 41#include "amdgpu_display.h"
 42
 43/* object hierarchy -
 44   this contains a helper + a amdgpu fb
 45   the helper contains a pointer to amdgpu framebuffer baseclass.
 46*/
 47
 48static int
 49amdgpufb_open(struct fb_info *info, int user)
 50{
 51	struct amdgpu_fbdev *rfbdev = info->par;
 52	struct amdgpu_device *adev = rfbdev->adev;
 53	int ret = pm_runtime_get_sync(adev->ddev->dev);
 54	if (ret < 0 && ret != -EACCES) {
 55		pm_runtime_mark_last_busy(adev->ddev->dev);
 56		pm_runtime_put_autosuspend(adev->ddev->dev);
 57		return ret;
 58	}
 59	return 0;
 60}
 61
 62static int
 63amdgpufb_release(struct fb_info *info, int user)
 64{
 65	struct amdgpu_fbdev *rfbdev = info->par;
 66	struct amdgpu_device *adev = rfbdev->adev;
 67
 68	pm_runtime_mark_last_busy(adev->ddev->dev);
 69	pm_runtime_put_autosuspend(adev->ddev->dev);
 70	return 0;
 71}
 72
/*
 * fbdev entry points: the DRM default helper ops plus open/release hooks
 * that hold a runtime-PM reference while the console device is open, and
 * CPU-drawing (cfb_*) acceleration callbacks.
 */
static struct fb_ops amdgpufb_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_open = amdgpufb_open,
	.fb_release = amdgpufb_release,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
};
 82
 83
 84int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int cpp, bool tiled)
 85{
 86	int aligned = width;
 87	int pitch_mask = 0;
 88
 89	switch (cpp) {
 90	case 1:
 91		pitch_mask = 255;
 92		break;
 93	case 2:
 94		pitch_mask = 127;
 95		break;
 96	case 3:
 97	case 4:
 98		pitch_mask = 63;
 99		break;
100	}
101
102	aligned += pitch_mask;
103	aligned &= ~pitch_mask;
104	return aligned * cpp;
105}
106
107static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
108{
109	struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
110	int ret;
111
112	ret = amdgpu_bo_reserve(abo, true);
113	if (likely(ret == 0)) {
114		amdgpu_bo_kunmap(abo);
115		amdgpu_bo_unpin(abo);
116		amdgpu_bo_unreserve(abo);
117	}
118	drm_gem_object_put_unlocked(gobj);
119}
120
/*
 * Allocate, pin and CPU-map a buffer object sized for the fbcon framebuffer
 * described by @mode_cmd.  The pitch in @mode_cmd is first aligned to the
 * hardware requirement.  On success *@gobj_p holds a reference to the new
 * GEM object (the caller owns it).  Returns 0 or a negative error code.
 */
static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
					 struct drm_mode_fb_cmd2 *mode_cmd,
					 struct drm_gem_object **gobj_p)
{
	struct amdgpu_device *adev = rfbdev->adev;
	struct drm_gem_object *gobj = NULL;
	struct amdgpu_bo *abo = NULL;
	bool fb_tiled = false; /* useful for testing */
	u32 tiling_flags = 0, domain;
	int ret;
	int aligned_size, size;
	int height = mode_cmd->height;
	u32 cpp;

	/* bytes per pixel of plane 0 for the requested format */
	cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);

	/* need to align pitch with crtc limits */
	mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
						  fb_tiled);
	/* memory domain(s) the display hardware can scan out from */
	domain = amdgpu_display_framebuffer_domains(adev);

	/* height rounded to a multiple of 8 — presumably an alignment/tiling
	 * requirement (TODO confirm); size then padded to whole pages */
	height = ALIGN(mode_cmd->height, 8);
	size = mode_cmd->pitches[0] * height;
	aligned_size = ALIGN(size, PAGE_SIZE);
	/* CPU-accessible, contiguous, pre-cleared allocation (see flags) */
	ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain,
				       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				       AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
				       AMDGPU_GEM_CREATE_VRAM_CLEARED,
				       true, NULL, &gobj);
	if (ret) {
		pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
		/* NOTE(review): underlying error code is folded to -ENOMEM */
		return -ENOMEM;
	}
	abo = gem_to_amdgpu_bo(gobj);

	if (fb_tiled)
		tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1);

	ret = amdgpu_bo_reserve(abo, false);
	if (unlikely(ret != 0))
		goto out_unref;

	if (tiling_flags) {
		/* tiling failure is non-fatal: only logged, no bail-out */
		ret = amdgpu_bo_set_tiling_flags(abo,
						 tiling_flags);
		if (ret)
			dev_err(adev->dev, "FB failed to set tiling flags\n");
	}


	/* pin so the buffer keeps a fixed address, then CPU-map it */
	ret = amdgpu_bo_pin(abo, domain, NULL);
	if (ret) {
		amdgpu_bo_unreserve(abo);
		goto out_unref;
	}
	ret = amdgpu_bo_kmap(abo, NULL);
	amdgpu_bo_unreserve(abo);
	if (ret) {
		goto out_unref;
	}

	*gobj_p = gobj;
	return 0;
out_unref:
	/* unwinds any pin/map state and drops the GEM reference */
	amdgpufb_destroy_pinned_object(gobj);
	*gobj_p = NULL;
	return ret;
}
189
190static int amdgpufb_create(struct drm_fb_helper *helper,
191			   struct drm_fb_helper_surface_size *sizes)
192{
193	struct amdgpu_fbdev *rfbdev = (struct amdgpu_fbdev *)helper;
194	struct amdgpu_device *adev = rfbdev->adev;
195	struct fb_info *info;
196	struct drm_framebuffer *fb = NULL;
197	struct drm_mode_fb_cmd2 mode_cmd;
198	struct drm_gem_object *gobj = NULL;
199	struct amdgpu_bo *abo = NULL;
200	int ret;
201	unsigned long tmp;
202
203	mode_cmd.width = sizes->surface_width;
204	mode_cmd.height = sizes->surface_height;
205
206	if (sizes->surface_bpp == 24)
207		sizes->surface_bpp = 32;
208
209	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
210							  sizes->surface_depth);
211
212	ret = amdgpufb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
213	if (ret) {
214		DRM_ERROR("failed to create fbcon object %d\n", ret);
215		return ret;
216	}
217
218	abo = gem_to_amdgpu_bo(gobj);
219
220	/* okay we have an object now allocate the framebuffer */
221	info = drm_fb_helper_alloc_fbi(helper);
222	if (IS_ERR(info)) {
223		ret = PTR_ERR(info);
224		goto out;
225	}
226
227	info->par = rfbdev;
228	info->skip_vt_switch = true;
229
230	ret = amdgpu_display_framebuffer_init(adev->ddev, &rfbdev->rfb,
231					      &mode_cmd, gobj);
232	if (ret) {
233		DRM_ERROR("failed to initialize framebuffer %d\n", ret);
234		goto out;
235	}
236
237	fb = &rfbdev->rfb.base;
238
239	/* setup helper */
240	rfbdev->helper.fb = fb;
241
 
 
242	strcpy(info->fix.id, "amdgpudrmfb");
243
244	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
245
 
246	info->fbops = &amdgpufb_ops;
247
248	tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start;
249	info->fix.smem_start = adev->gmc.aper_base + tmp;
250	info->fix.smem_len = amdgpu_bo_size(abo);
251	info->screen_base = amdgpu_bo_kptr(abo);
252	info->screen_size = amdgpu_bo_size(abo);
253
254	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
255
256	/* setup aperture base/size for vesafb takeover */
257	info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
258	info->apertures->ranges[0].size = adev->gmc.aper_size;
259
260	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
261
262	if (info->screen_base == NULL) {
263		ret = -ENOSPC;
264		goto out;
265	}
266
267	DRM_INFO("fb mappable at 0x%lX\n",  info->fix.smem_start);
268	DRM_INFO("vram apper at 0x%lX\n",  (unsigned long)adev->gmc.aper_base);
269	DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
270	DRM_INFO("fb depth is %d\n", fb->format->depth);
271	DRM_INFO("   pitch is %d\n", fb->pitches[0]);
272
273	vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
274	return 0;
275
276out:
277	if (abo) {
 
 
278
279	}
280	if (fb && ret) {
281		drm_gem_object_put_unlocked(gobj);
282		drm_framebuffer_unregister_private(fb);
283		drm_framebuffer_cleanup(fb);
284		kfree(fb);
285	}
286	return ret;
287}
288
 
 
 
 
 
 
/*
 * Tear down the fbdev emulation: unregister the fb_info, release the pinned
 * scanout object and clean up the DRM framebuffer.  Always returns 0.
 */
static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
{
	struct amdgpu_framebuffer *rfb = &rfbdev->rfb;

	drm_fb_helper_unregister_fbi(&rfbdev->helper);

	if (rfb->obj) {
		/* unpins, unmaps and drops the GEM reference */
		amdgpufb_destroy_pinned_object(rfb->obj);
		rfb->obj = NULL;
		drm_framebuffer_unregister_private(&rfb->base);
		drm_framebuffer_cleanup(&rfb->base);
	}
	drm_fb_helper_fini(&rfbdev->helper);

	return 0;
}
305
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* fb helper callbacks: only the surface probe is needed in this version */
static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = {
	.fb_probe = amdgpufb_create,
};
309
/*
 * Create and register the fbdev emulation for @adev.  Skipped (returning 0)
 * when there is no display hardware or no connectors.  Returns 0 on success
 * or a negative error code from allocation / drm_fb_helper_init().
 */
int amdgpu_fbdev_init(struct amdgpu_device *adev)
{
	struct amdgpu_fbdev *rfbdev;
	int bpp_sel = 32;
	int ret;

	/* don't init fbdev on hw without DCE */
	if (!adev->mode_info.mode_config_initialized)
		return 0;

	/* don't init fbdev if there are no connectors */
	if (list_empty(&adev->ddev->mode_config.connector_list))
		return 0;

	/* select 8 bpp console on low vram cards */
	if (adev->gmc.real_vram_size <= (32*1024*1024))
		bpp_sel = 8;

	rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL);
	if (!rfbdev)
		return -ENOMEM;

	rfbdev->adev = adev;
	adev->mode_info.rfbdev = rfbdev;

	drm_fb_helper_prepare(adev->ddev, &rfbdev->helper,
			&amdgpu_fb_helper_funcs);

	ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper,
				 AMDGPUFB_CONN_LIMIT);
	if (ret) {
		/* NOTE(review): adev->mode_info.rfbdev still points at the
		 * freed struct after this — confirm callers treat init
		 * failure as fatal */
		kfree(rfbdev);
		return ret;
	}

	drm_fb_helper_single_add_all_connectors(&rfbdev->helper);

	/* disable all the possible outputs/crtcs before entering KMS mode */
	if (!amdgpu_device_has_dc_support(adev))
		drm_helper_disable_unused_functions(adev->ddev);

	drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
	return 0;
}
354
355void amdgpu_fbdev_fini(struct amdgpu_device *adev)
356{
357	if (!adev->mode_info.rfbdev)
358		return;
359
360	amdgpu_fbdev_destroy(adev->ddev, adev->mode_info.rfbdev);
361	kfree(adev->mode_info.rfbdev);
362	adev->mode_info.rfbdev = NULL;
363}
364
365void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
366{
367	if (adev->mode_info.rfbdev)
368		drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper,
369			state);
370}
371
372int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
373{
374	struct amdgpu_bo *robj;
375	int size = 0;
376
377	if (!adev->mode_info.rfbdev)
378		return 0;
379
380	robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj);
381	size += amdgpu_bo_size(robj);
382	return size;
383}
384
385bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
386{
387	if (!adev->mode_info.rfbdev)
388		return false;
389	if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj))
390		return true;
391	return false;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
392}