/*
 * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "cmd.h"

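/* Issue QUERY_SPECIAL_CONTEXTS and return the device's null mkey through
 * *null_mkey on success.
 */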
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
{
	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
	int err;

	MLX5_SET(query_special_contexts_in, in, opcode,
		 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*null_mkey = MLX5_GET(query_special_contexts_out, out,
				      null_mkey);
	return err;
}

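/* Read the congestion control parameters for the given congestion point
 * (passed as cong_protocol) into the caller-supplied output buffer.
 */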
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
			       void *out, int out_size)
{
	u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = { };

	MLX5_SET(query_cong_params_in, in, opcode,
		 MLX5_CMD_OP_QUERY_CONG_PARAMS);
	MLX5_SET(query_cong_params_in, in, cong_protocol, cong_point);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}

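/* Execute a MODIFY_CONG_PARAMS command whose input mailbox, including the
 * opcode, has already been built by the caller.
 */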
int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *dev,
				void *in, int in_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_cong_params_out)] = { };

	return mlx5_cmd_exec(dev, in, in_size, out, sizeof(out));
}

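/* Allocate 'length' bytes of device memory (MEMIC) with the requested
 * alignment. Allocated pages are tracked in the driver's memic_alloc_pages
 * bitmap; on success *addr holds the address of the allocation within BAR 0.
 */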
int mlx5_cmd_alloc_memic(struct mlx5_memic *memic, phys_addr_t *addr,
			 u64 length, u32 alignment)
{
	struct mlx5_core_dev *dev = memic->dev;
	u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
					>> PAGE_SHIFT;
	u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
	u32 max_alignment = MLX5_CAP_DEV_MEM(dev, log_max_memic_addr_alignment);
	u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
	u32 mlx5_alignment;
	u64 page_idx = 0;
	int ret = 0;

	if (!length || (length & MLX5_MEMIC_ALLOC_SIZE_MASK))
		return -EINVAL;

	/* mlx5 device sets alignment as 64*2^driver_value
	 * so normalizing is needed.
	 */
	mlx5_alignment = (alignment < MLX5_MEMIC_BASE_ALIGN) ? 0 :
			 alignment - MLX5_MEMIC_BASE_ALIGN;
	if (mlx5_alignment > max_alignment)
		return -EINVAL;

	MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
	MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
	MLX5_SET(alloc_memic_in, in, memic_size, length);
	MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment,
		 mlx5_alignment);

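	/* Scan the MEMIC page bitmap for a free range large enough for the
	 * request. A candidate range is reserved under memic_lock before
	 * firmware is asked to allocate it, and released again if the
	 * command fails.
	 */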
	while (page_idx < num_memic_hw_pages) {
		spin_lock(&memic->memic_lock);
		page_idx = bitmap_find_next_zero_area(memic->memic_alloc_pages,
						      num_memic_hw_pages,
						      page_idx,
						      num_pages, 0);

		if (page_idx < num_memic_hw_pages)
			bitmap_set(memic->memic_alloc_pages,
				   page_idx, num_pages);

		spin_unlock(&memic->memic_lock);

		if (page_idx >= num_memic_hw_pages)
			break;

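		/* Ask firmware to allocate from the candidate range. -EAGAIN
		 * means this range is unavailable, so retry from the next
		 * page; any other error aborts the allocation.
		 */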
		MLX5_SET64(alloc_memic_in, in, range_start_addr,
			   hw_start_addr + (page_idx * PAGE_SIZE));

		ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
		if (ret) {
			spin_lock(&memic->memic_lock);
			bitmap_clear(memic->memic_alloc_pages,
				     page_idx, num_pages);
			spin_unlock(&memic->memic_lock);

			if (ret == -EAGAIN) {
				page_idx++;
				continue;
			}

			return ret;
		}

		*addr = pci_resource_start(dev->pdev, 0) +
			MLX5_GET64(alloc_memic_out, out, memic_start_addr);

		return 0;
	}

	return -ENOMEM;
}

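/* Free device memory previously obtained from mlx5_cmd_alloc_memic():
 * translate the BAR-relative address back into a device address, issue
 * DEALLOC_MEMIC, and clear the corresponding pages in the bitmap.
 */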
int mlx5_cmd_dealloc_memic(struct mlx5_memic *memic, u64 addr, u64 length)
{
	struct mlx5_core_dev *dev = memic->dev;
	u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
	u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	u32 out[MLX5_ST_SZ_DW(dealloc_memic_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {0};
	u64 start_page_idx;
	int err;

	addr -= pci_resource_start(dev->pdev, 0);
	start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;

	MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
	MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
	MLX5_SET(dealloc_memic_in, in, memic_size, length);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));

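	/* Release the pages in the driver's MEMIC bitmap only if firmware
	 * accepted the de-allocation.
	 */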
	if (!err) {
		spin_lock(&memic->memic_lock);
		bitmap_clear(memic->memic_alloc_pages,
			     start_page_idx, num_pages);
		spin_unlock(&memic->memic_lock);
	}

	return err;
}