/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_DEVICE_H_
#define _XE_DEVICE_H_

#include <drm/drm_util.h>

#include "xe_device_types.h"
#include "xe_gt_types.h"
#include "xe_sriov.h"

static inline struct xe_device *to_xe_device(const struct drm_device *dev)
{
	return container_of(dev, struct xe_device, drm);
}

static inline struct xe_device *kdev_to_xe_device(struct device *kdev)
{
	struct drm_device *drm = dev_get_drvdata(kdev);

	return drm ? to_xe_device(drm) : NULL;
}

static inline struct xe_device *pdev_to_xe_device(struct pci_dev *pdev)
{
	struct drm_device *drm = pci_get_drvdata(pdev);

	return drm ? to_xe_device(drm) : NULL;
}

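/*
 * Illustrative example (not part of the driver): the drvdata-based helpers
 * above let a callback that only receives a generic handle recover the xe
 * device. A hypothetical runtime-PM hook might look like:
 *
 *	static int example_runtime_suspend(struct device *kdev)
 *	{
 *		struct xe_device *xe = kdev_to_xe_device(kdev);
 *
 *		return xe ? 0 : -ENODEV;
 *	}
 */
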
static inline struct xe_device *xe_device_const_cast(const struct xe_device *xe)
{
	return (struct xe_device *)xe;
}

static inline struct xe_device *ttm_to_xe_device(struct ttm_device *ttm)
{
	return container_of(ttm, struct xe_device, ttm);
}

struct xe_device *xe_device_create(struct pci_dev *pdev,
				   const struct pci_device_id *ent);
int xe_device_probe_early(struct xe_device *xe);
int xe_device_probe(struct xe_device *xe);
void xe_device_remove(struct xe_device *xe);
void xe_device_shutdown(struct xe_device *xe);

void xe_device_wmb(struct xe_device *xe);

static inline struct xe_file *to_xe_file(const struct drm_file *file)
{
	return file->driver_priv;
}

static inline struct xe_tile *xe_device_get_root_tile(struct xe_device *xe)
{
	return &xe->tiles[0];
}

#define XE_MAX_GT_PER_TILE 2

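/*
 * Return the tile's primary GT for gt_id 0 and its media GT for gt_id 1;
 * out-of-range ids are clamped to the primary GT with a warning. Note that
 * the media GT pointer may be NULL on tiles without standalone media.
 */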
static inline struct xe_gt *xe_tile_get_gt(struct xe_tile *tile, u8 gt_id)
{
	if (drm_WARN_ON(&tile_to_xe(tile)->drm, gt_id >= XE_MAX_GT_PER_TILE))
		gt_id = 0;

	return gt_id ? tile->media_gt : tile->primary_gt;
}

static inline struct xe_gt *xe_device_get_gt(struct xe_device *xe, u8 gt_id)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(xe);
	struct xe_gt *gt;

	/*
	 * FIXME: This only works for now because multi-tile and standalone
	 * media are mutually exclusive on the platforms we have today.
	 *
	 * id => GT mapping may change once we settle on how we want to handle
	 * our UAPI.
	 */
	if (MEDIA_VER(xe) >= 13) {
		gt = xe_tile_get_gt(root_tile, gt_id);
	} else {
		if (drm_WARN_ON(&xe->drm, gt_id >= XE_MAX_TILES_PER_DEVICE))
			gt_id = 0;

		gt = xe->tiles[gt_id].primary_gt;
	}

	if (!gt)
		return NULL;

	drm_WARN_ON(&xe->drm, gt->info.id != gt_id);
	drm_WARN_ON(&xe->drm, gt->info.type == XE_GT_TYPE_UNINITIALIZED);

	return gt;
}

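/*
 * Concretely, the mapping implemented above (on today's mutually exclusive
 * multi-tile / standalone-media platforms): with standalone media
 * (MEDIA_VER >= 13), gt_id 0 is the root tile's primary GT and gt_id 1 its
 * media GT; otherwise gt_id N is tile N's primary GT.
 */
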
/*
 * Provide a GT structure suitable for performing non-GT MMIO operations
 * against the primary tile. Primarily intended for early tile
 * initialization, display handling, top-most interrupt enable/disable,
 * etc. Since anything using the MMIO handle returned by this function
 * doesn't need GSI offset translation, we'll return the primary GT from
 * the root tile.
 *
 * FIXME: Fix the driver design so that 'gt' isn't the target of all MMIO
 * operations.
 *
 * Returns the primary gt of the root tile.
 */
static inline struct xe_gt *xe_root_mmio_gt(struct xe_device *xe)
{
	return xe_device_get_root_tile(xe)->primary_gt;
}

static inline bool xe_device_uc_enabled(struct xe_device *xe)
{
	return !xe->info.force_execlist;
}

#define for_each_tile(tile__, xe__, id__) \
	for ((id__) = 0; (id__) < (xe__)->info.tile_count; (id__)++) \
		for_each_if((tile__) = &(xe__)->tiles[(id__)])

#define for_each_remote_tile(tile__, xe__, id__) \
	for ((id__) = 1; (id__) < (xe__)->info.tile_count; (id__)++) \
		for_each_if((tile__) = &(xe__)->tiles[(id__)])

/*
 * FIXME: This only works for now since multi-tile and standalone media
 * happen to be mutually exclusive. Future platforms may change this...
 */
#define for_each_gt(gt__, xe__, id__) \
	for ((id__) = 0; (id__) < (xe__)->info.gt_count; (id__)++) \
		for_each_if((gt__) = xe_device_get_gt((xe__), (id__)))

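/*
 * Illustrative use of the iterators above (hypothetical caller; the
 * do_something_with_*() helpers are placeholders):
 *
 *	struct xe_tile *tile;
 *	struct xe_gt *gt;
 *	u8 id;
 *
 *	for_each_tile(tile, xe, id)
 *		do_something_with_tile(tile);
 *
 *	for_each_gt(gt, xe, id)
 *		do_something_with_gt(gt);
 */
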
static inline struct xe_force_wake *gt_to_fw(struct xe_gt *gt)
{
	return &gt->pm.fw;
}

void xe_device_assert_mem_access(struct xe_device *xe);

static inline bool xe_device_has_flat_ccs(struct xe_device *xe)
{
	return xe->info.has_flat_ccs;
}

static inline bool xe_device_has_sriov(struct xe_device *xe)
{
	return xe->info.has_sriov;
}

static inline bool xe_device_has_msix(struct xe_device *xe)
{
	return xe->irq.msix.nvec > 0;
}

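/*
 * has_memirq reports hardware support for memory-based interrupts
 * (graphics IP version 12.50 and later), while uses_memirq reports whether
 * the driver actually relies on them: when running as an SR-IOV VF or when
 * MSI-X vectors were allocated.
 */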
static inline bool xe_device_has_memirq(struct xe_device *xe)
{
	return GRAPHICS_VERx100(xe) >= 1250;
}

static inline bool xe_device_uses_memirq(struct xe_device *xe)
{
	return xe_device_has_memirq(xe) && (IS_SRIOV_VF(xe) || xe_device_has_msix(xe));
}

u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size);

void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p);

u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address);
u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address);

void xe_device_td_flush(struct xe_device *xe);
void xe_device_l2_flush(struct xe_device *xe);

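/*
 * A wedged device has hit an unrecoverable error and is taken out of
 * service; the flag is set via xe_device_declare_wedged() below.
 */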
static inline bool xe_device_wedged(struct xe_device *xe)
{
	return atomic_read(&xe->wedged.flag);
}

void xe_device_declare_wedged(struct xe_device *xe);

struct xe_file *xe_file_get(struct xe_file *xef);
void xe_file_put(struct xe_file *xef);

/*
 * Occasionally the G2H worker is seen to start running more than a second
 * after being queued and activated by the Linux workqueue subsystem, which
 * leads to a G2H timeout error. The root cause is the scheduling latency
 * of the Lunar Lake hybrid CPU: the issue disappears when the Lunar Lake
 * Atom cores are disabled in the BIOS, which is beyond the xe KMD's
 * control.
 *
 * TODO: Drop this workaround once the workqueue scheduling delay is fixed
 * on the LNL hybrid CPU.
 */
#define LNL_FLUSH_WORKQUEUE(wq__) \
	flush_workqueue(wq__)
#define LNL_FLUSH_WORK(wrk__) \
	flush_work(wrk__)
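
/*
 * Illustrative use (hypothetical caller; ct->g2h_worker and
 * g2h_response_received() are placeholder names): flush the pending work
 * before declaring a G2H timeout, so a late-running worker is not
 * misdiagnosed as a lost response:
 *
 *	LNL_FLUSH_WORK(&ct->g2h_worker);
 *	if (g2h_response_received(ct))
 *		return 0;
 */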

#endif