/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef MLX5_VFIO_CMD_H
#define MLX5_VFIO_CMD_H

#include <linux/kernel.h>
#include <linux/vfio_pci_core.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>

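/* True if the device was registered with VFIO_MIGRATION_PRE_COPY set */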
#define MLX5VF_PRE_COPY_SUPP(mvdev) \
	((mvdev)->core_device.vdev.migration_flags & VFIO_MIGRATION_PRE_COPY)

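/*
 * Progress of the saving side of a migration file (editor's summary,
 * inferred from the enum names): the _ERROR values mark a failed transfer,
 * PRE_COPY means pre-copy data is being produced, SAVE_STOP_COPY_CHUNK
 * means a stop-copy chunk is being saved, and COMPLETE means the final
 * image has been fully written.
 */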
enum mlx5_vf_migf_state {
	MLX5_MIGF_STATE_ERROR = 1,
	MLX5_MIGF_STATE_PRE_COPY_ERROR,
	MLX5_MIGF_STATE_PRE_COPY,
	MLX5_MIGF_STATE_SAVE_STOP_COPY_CHUNK,
	MLX5_MIGF_STATE_COMPLETE,
};

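/*
 * Stages of the resuming (load) side state machine (editor's summary,
 * inferred from the enum names): the loader either consumes a raw image
 * with no header, or alternates between reading a record header, its
 * optional data, and the device image itself before issuing the load.
 */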
enum mlx5_vf_load_state {
	MLX5_VF_LOAD_STATE_READ_IMAGE_NO_HEADER,
	MLX5_VF_LOAD_STATE_READ_HEADER,
	MLX5_VF_LOAD_STATE_PREP_HEADER_DATA,
	MLX5_VF_LOAD_STATE_READ_HEADER_DATA,
	MLX5_VF_LOAD_STATE_PREP_IMAGE,
	MLX5_VF_LOAD_STATE_READ_IMAGE,
	MLX5_VF_LOAD_STATE_LOAD_IMAGE,
};

struct mlx5_vf_migration_tag_stop_copy_data {
	__le64 stop_copy_size;
};

enum mlx5_vf_migf_header_flags {
	MLX5_MIGF_HEADER_FLAGS_TAG_MANDATORY = 0,
	MLX5_MIGF_HEADER_FLAGS_TAG_OPTIONAL = 1 << 0,
};

enum mlx5_vf_migf_header_tag {
	MLX5_MIGF_HEADER_TAG_FW_DATA = 0,
	MLX5_MIGF_HEADER_TAG_STOP_COPY_SIZE = 1 << 0,
};

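/*
 * Each record in the migration stream is a mlx5_vf_migration_header
 * followed by record_size bytes of payload. A minimal consumer-side sketch
 * of walking such a stream (illustrative only; known_tag() and consume()
 * are hypothetical helpers, and the endian accessors stand in for whatever
 * conversion the consumer uses):
 *
 *	const u8 *p = stream;
 *	while (p < stream + len) {
 *		const struct mlx5_vf_migration_header *h = (const void *)p;
 *		u64 size = le64_to_cpu(h->record_size);
 *
 *		if (known_tag(le32_to_cpu(h->tag)))
 *			consume(h->data, size);
 *		else if (!(le32_to_cpu(h->flags) &
 *			   MLX5_MIGF_HEADER_FLAGS_TAG_OPTIONAL))
 *			return -EINVAL;	// unknown mandatory tag
 *		p = h->data + size;	// advance to the next record
 *	}
 */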
struct mlx5_vf_migration_header {
	__le64 record_size;
	/* Reserved for future use, in case the kernel protocol has to change */
	__le32 flags; /* Use enum mlx5_vf_migf_header_flags */
	__le32 tag; /* Use enum mlx5_vf_migf_header_tag */
	__u8 data[]; /* Its size is given by record_size */
};

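/*
 * A migration data buffer (editor's note): pages are collected in an
 * appendable SG table and, once DMA-mapped (dmaed), exposed to the device
 * through mkey; buf_elm links the buffer on the owning migf's
 * buf_list/avail_list.
 */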
struct mlx5_vhca_data_buffer {
	struct sg_append_table table;
	loff_t start_pos;
	u64 length;
	u64 allocated_length;
	u32 mkey;
	enum dma_data_direction dma_dir;
	u8 dmaed:1;
	u8 stop_copy_chunk_num;
	struct list_head buf_elm;
	struct mlx5_vf_migration_file *migf;
	/* Optimize mlx5vf_get_migration_page() for sequential access */
	struct scatterlist *last_offset_sg;
	unsigned int sg_last_entry;
	unsigned long last_offset;
};

struct mlx5vf_async_data {
	struct mlx5_async_work cb_work;
	struct work_struct work;
	struct mlx5_vhca_data_buffer *buf;
	struct mlx5_vhca_data_buffer *header_buf;
	int status;
	u8 stop_copy_chunk:1;
	void *out;
};

struct mlx5vf_save_work_data {
	struct mlx5_vf_migration_file *migf;
	size_t next_required_umem_size;
	struct work_struct work;
	u8 chunk_num;
};

#define MAX_NUM_CHUNKS 2

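/*
 * Editor's note: judging by the field layout, 'lock' protects the general
 * migf state while 'list_lock' covers buf_list/avail_list, and readers
 * sleep on poll_wait until data becomes available.
 */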
struct mlx5_vf_migration_file {
	struct file *filp;
	struct mutex lock;
	enum mlx5_vf_migf_state state;

	enum mlx5_vf_load_state load_state;
	u32 pdn;
	loff_t max_pos;
	u64 record_size;
	u32 record_tag;
	u64 stop_copy_prep_size;
	u64 pre_copy_initial_bytes;
	size_t next_required_umem_size;
	u8 num_ready_chunks;
	/* In chunk mode, a second set of buffers is kept for the stop_copy phase */
	struct mlx5_vhca_data_buffer *buf[MAX_NUM_CHUNKS];
	struct mlx5_vhca_data_buffer *buf_header[MAX_NUM_CHUNKS];
	struct mlx5vf_save_work_data save_data[MAX_NUM_CHUNKS];
	spinlock_t list_lock;
	struct list_head buf_list;
	struct list_head avail_list;
	struct mlx5vf_pci_core_device *mvdev;
	wait_queue_head_t poll_wait;
	struct completion save_comp;
	struct mlx5_async_ctx async_ctx;
	struct mlx5vf_async_data async_data;
};

struct mlx5_vhca_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf frag_buf;
	int cqe_size;
	int nent;
};

struct mlx5_vhca_cq {
	struct mlx5_vhca_cq_buf buf;
	struct mlx5_db db;
	struct mlx5_core_cq mcq;
	size_t ncqe;
};

struct mlx5_vhca_recv_buf {
	u32 npages;
	struct page **page_list;
	dma_addr_t *dma_addrs;
	u32 next_rq_offset;
	u32 mkey;
};

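/*
 * A software-managed QP used by the page tracker; the embedded rq struct
 * keeps producer/consumer counters (pc/cc) and a doorbell pointer for a
 * receive queue the driver posts to directly (editor's summary).
 */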
struct mlx5_vhca_qp {
	struct mlx5_frag_buf buf;
	struct mlx5_db db;
	struct mlx5_vhca_recv_buf recv_buf;
	u32 tracked_page_size;
	u32 max_msg_size;
	u32 qpn;
	struct {
		unsigned int pc;
		unsigned int cc;
		unsigned int wqe_cnt;
		__be32 *db;
		struct mlx5_frag_buf_ctrl fbc;
	} rq;
};

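/*
 * Dirty-page tracking context (editor's summary, inferred from the field
 * types): reports arrive via the host_qp/fw_qp pair and are completed
 * through cq, with nb hooking device events.
 */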
struct mlx5_vhca_page_tracker {
	u32 id;
	u32 pdn;
	u8 is_err:1;
	struct mlx5_uars_page *uar;
	struct mlx5_vhca_cq cq;
	struct mlx5_vhca_qp *host_qp;
	struct mlx5_vhca_qp *fw_qp;
	struct mlx5_nb nb;
	int status;
};

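/*
 * Per-VF device state. Editor's note: the reset_lock/deferred_reset pair
 * lets a PCI reset that races with a held state_mutex be recorded and
 * applied later, when mlx5vf_state_mutex_unlock() drops the mutex.
 */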
struct mlx5vf_pci_core_device {
	struct vfio_pci_core_device core_device;
	int vf_id;
	u16 vhca_id;
	u8 migrate_cap:1;
	u8 deferred_reset:1;
	u8 mdev_detach:1;
	u8 log_active:1;
	u8 chunk_mode:1;
	struct completion tracker_comp;
	/* protect migration state */
	struct mutex state_mutex;
	enum vfio_device_mig_state mig_state;
	/* protect the reset_done flow */
	spinlock_t reset_lock;
	struct mlx5_vf_migration_file *resuming_migf;
	struct mlx5_vf_migration_file *saving_migf;
	struct mlx5_vhca_page_tracker tracker;
	struct workqueue_struct *cb_wq;
	struct notifier_block nb;
	struct mlx5_core_dev *mdev;
};

enum {
	MLX5VF_QUERY_INC = (1UL << 0),
	MLX5VF_QUERY_FINAL = (1UL << 1),
};

int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
					  size_t *state_size, u64 *total_size,
					  u8 query_flags);
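
/*
 * A minimal sketch of querying the remaining state size (illustrative
 * only; it assumes the caller already holds state_mutex, as the command
 * helpers here are driven from the migration state machine):
 *
 *	size_t state_size;
 *	u64 total;
 *	int err;
 *
 *	err = mlx5vf_cmd_query_vhca_migration_state(mvdev, &state_size,
 *						    &total, MLX5VF_QUERY_INC);
 *	// MLX5VF_QUERY_FINAL can be OR'ed in for the last stop-copy query
 */
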
void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
			       const struct vfio_migration_ops *mig_ops,
			       const struct vfio_log_ops *log_ops);
void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev);
int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
			       struct mlx5_vf_migration_file *migf,
			       struct mlx5_vhca_data_buffer *buf, bool inc,
			       bool track);
int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
			       struct mlx5_vf_migration_file *migf,
			       struct mlx5_vhca_data_buffer *buf);
int mlx5vf_cmd_alloc_pd(struct mlx5_vf_migration_file *migf);
void mlx5vf_cmd_dealloc_pd(struct mlx5_vf_migration_file *migf);
void mlx5fv_cmd_clean_migf_resources(struct mlx5_vf_migration_file *migf);
struct mlx5_vhca_data_buffer *
mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf,
			 size_t length, enum dma_data_direction dma_dir);
void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf);
struct mlx5_vhca_data_buffer *
mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf,
		       size_t length, enum dma_data_direction dma_dir);
void mlx5vf_put_data_buffer(struct mlx5_vhca_data_buffer *buf);
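
/*
 * The buffer API comes in two flavors: alloc/free always create and
 * destroy, while get/put prefer recycling through the migf's avail_list
 * (editor's reading of the names). A hedged usage sketch, assuming the
 * usual ERR_PTR convention for the return value:
 *
 *	struct mlx5_vhca_data_buffer *buf;
 *
 *	buf = mlx5vf_get_data_buffer(migf, length, DMA_FROM_DEVICE);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	// ... hand buf to a save/load command ...
 *	mlx5vf_put_data_buffer(buf);
 */
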
int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
			       unsigned int npages);
struct page *mlx5vf_get_migration_page(struct mlx5_vhca_data_buffer *buf,
				       unsigned long offset);
void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work);
void mlx5vf_mig_file_set_save_work(struct mlx5_vf_migration_file *migf,
				   u8 chunk_num, size_t next_required_umem_size);
int mlx5vf_start_page_tracker(struct vfio_device *vdev,
		struct rb_root_cached *ranges, u32 nnodes, u64 *page_size);
int mlx5vf_stop_page_tracker(struct vfio_device *vdev);
int mlx5vf_tracker_read_and_clear(struct vfio_device *vdev, unsigned long iova,
			unsigned long length, struct iova_bitmap *dirty);
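
/*
 * Lifecycle of the dirty page tracker as suggested by the three prototypes
 * above (illustrative sketch): start it once with the IOVA ranges of
 * interest, repeatedly harvest dirty bits, then tear it down:
 *
 *	err = mlx5vf_start_page_tracker(vdev, ranges, nnodes, &page_size);
 *	...
 *	err = mlx5vf_tracker_read_and_clear(vdev, iova, length, dirty);
 *	...
 *	err = mlx5vf_stop_page_tracker(vdev);
 */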
#endif /* MLX5_VFIO_CMD_H */