/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) uv-based functions.
 *
 * Architecture specific implementation of common functions.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
struct uv_IO_APIC_route_entry {
	__u64	vector		:  8,
		delivery_mode	:  3,
		dest_mode	:  1,
		delivery_status	:  1,
		polarity	:  1,
		__reserved_1	:  1,
		trigger		:  1,
		mask		:  1,
		__reserved_2	: 15,
		dest		: 32;
};
#endif

static struct xpc_heartbeat_uv *xpc_heartbeat_uv;

#define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME		"xpc_activate"

#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME		"xpc_notify"
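
/*
 * Both message queues provide four message slots per possible partition;
 * an activate message occupies one GRU cache line, a notify message two.
 */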

static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;

static int
xpc_setup_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
		spin_lock_init(&part_uv->flags_lock);
		part_uv->remote_act_state = XPC_P_AS_INACTIVE;
	}
	return 0;
}

static void
xpc_teardown_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;
	unsigned long irq_flags;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		if (part_uv->cached_activate_gru_mq_desc != NULL) {
			mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			kfree(part_uv->cached_activate_gru_mq_desc);
			part_uv->cached_activate_gru_mq_desc = NULL;
			mutex_unlock(&part_uv->
				     cached_activate_gru_mq_desc_mutex);
		}
	}
}

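/*
 * Bind the message queue's interrupt. On x86_64, uv_setup_irq() allocates
 * an irq targeted at the given cpu and the resulting MMR value is read
 * back; on ia64, the SGI-reserved vectors SGI_XPC_ACTIVATE/SGI_XPC_NOTIFY
 * are used and the cpu/vector pair is written into the MMR directly.
 */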
static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
			       UV_AFFINITY_CPU);
	if (mq->irq < 0) {
		dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
			-mq->irq);
		return mq->irq;
	}

	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_ACTIVATE;
	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_NOTIFY;
	else
		return -EINVAL;

	mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
#else
	#error not a supported configuration
#endif

	return 0;
}

static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
	uv_teardown_irq(mq->irq);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode;
	unsigned long mmr_value;

	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
	mmr_value = 1UL << 16;

	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
	#error not a supported configuration
#endif
}

static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

	ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
				    mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
			ret);
		return -EBUSY;
	}
#elif defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
					 mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
			"ret=%d\n", ret);
		return ret;
	}
#else
	#error not a supported configuration
#endif

	mq->watchlist_num = ret;
	return 0;
}

static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != SALRET_OK);
#else
	#error not a supported configuration
#endif
}

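/*
 * Create a GRU message queue: allocate node-local pages for the queue
 * itself, put it on the GRU watchlist so a write to it raises an
 * interrupt, bind and register that irq, build the GRU descriptor, and
 * finally open the memory protections so other partitions may write to
 * it. Unwound in reverse order by xpc_destroy_gru_mq_uv().
 */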
static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int nasid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = alloc_pages_exact_node(nid,
				      GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				      pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable generation of irq when GRU mq operation occurs to this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));

	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				       nasid, mmr_value->vector,
				       mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}

static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
	unsigned int mq_size;
	int pg_order;
	int ret;

	/* disallow other partitions to access GRU mq */
	mq_size = 1UL << mq->order;
	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
	BUG_ON(ret != xpSuccess);

	/* unregister irq handler and release mq irq/vector mapping */
	free_irq(mq->irq, NULL);
	xpc_release_gru_mq_irq_uv(mq);

	/* disable generation of irq when GRU mq op occurs to this mq */
	xpc_gru_mq_watchlist_free_uv(mq);

	pg_order = mq->order - PAGE_SHIFT;
	free_pages((unsigned long)mq->address, pg_order);

	kfree(mq);
}

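/*
 * Push one message into a GRU message queue, retrying for as long as the
 * GRU reports a transient condition (queue full or local-block
 * congestion); any other error is fatal for this send.
 */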
static enum xp_retval
xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
		 size_t msg_size)
{
	enum xp_retval xp_ret;
	int ret;

	while (1) {
		ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
		if (ret == MQE_OK) {
			xp_ret = xpSuccess;
			break;
		}

		if (ret == MQE_QUEUE_FULL) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_QUEUE_FULL\n");
			/* !!! handle QLimit reached; delay & try again */
			/* ??? Do we add a limit to the number of retries? */
			(void)msleep_interruptible(10);
		} else if (ret == MQE_CONGESTION) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_CONGESTION\n");
			/* !!! handle LB Overflow; simply try again */
			/* ??? Do we add a limit to the number of retries? */
		} else {
			/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
			dev_err(xpc_chan, "gru_send_message_gpa() returned "
				"error=%d\n", ret);
			xp_ret = xpGruSendMqError;
			break;
		}
	}
	return xp_ret;
}

static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}

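/*
 * Handle one message from a remote partition's activate message queue.
 * Called from the activate irq handler, so it only records state change
 * requests and channel control flags; the real work is deferred to the
 * heartbeat checker or the channel manager.
 */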
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
			      int part_setup,
			      int *wakeup_hb_checker)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_openclose_args *args;

	part_uv->remote_act_state = msg_hdr->act_state;

	switch (msg_hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
		/* syncing of remote_act_state was just done above */
		break;

	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg;

		/*
		 * ??? Do we deal here with ts_jiffies being different
		 * ??? if act_state != XPC_P_AS_INACTIVE instead of
		 * ??? below?
		 */
		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_activate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
		part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
		part_uv->heartbeat_gpa = msg->heartbeat_gpa;

		if (msg->activate_gru_mq_desc_gpa !=
		    part_uv->activate_gru_mq_desc_gpa) {
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			part_uv->activate_gru_mq_desc_gpa =
			    msg->activate_gru_mq_desc_gpa;
		}
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_deactivate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_deactivate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = msg->reason;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closerequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->reason = msg->reason;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closereply_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openrequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->entry_size = msg->entry_size;
		args->local_nentries = msg->local_nentries;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->remote_nentries = msg->remote_nentries;
		args->local_nentries = msg->local_nentries;
		args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
		struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_opencomplete_uv,
				   hdr);
		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
	}
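	/*
	 * No break: falls through to mark the partition engaged, which a
	 * completed open presumably implies.
	 */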
	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	default:
		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadMsgType;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}

	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
	    part->remote_rp_ts_jiffies != 0) {
		/*
		 * ??? Does what we do here need to be sensitive to
		 * ??? act_state or remote_act_state?
		 */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
	}
}

static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	short partid;
	struct xpc_partition *part;
	int wakeup_hb_checker = 0;
	int part_referenced;

	while (1) {
		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
		if (msg_hdr == NULL)
			break;

		partid = msg_hdr->partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
				"received invalid partid=0x%x in message\n",
				partid);
		} else {
			part = &xpc_partitions[partid];

			part_referenced = xpc_part_ref(part);
			xpc_handle_activate_mq_msg_uv(part, msg_hdr,
						      part_referenced,
						      &wakeup_hb_checker);
			if (part_referenced)
				xpc_part_deref(part);
		}

		gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	return IRQ_HANDLED;
}

static enum xp_retval
xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
				unsigned long gru_mq_desc_gpa)
{
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
			       sizeof(struct gru_message_queue_desc));
	if (ret == xpSuccess)
		gru_mq_desc->mq = NULL;

	return ret;
}

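/*
 * Send a message to a remote partition's activate message queue. The
 * remote queue descriptor is fetched and cached on first use, and is
 * refetched once if a send fails because the cached copy went stale.
 */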
static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
			 int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct gru_message_queue_desc *gru_mq_desc;
	unsigned long irq_flags;
	enum xp_retval ret;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = xp_partition_id;
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
	if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
		gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
		if (gru_mq_desc == NULL) {
			gru_mq_desc = kmalloc(sizeof(struct
					      gru_message_queue_desc),
					      GFP_KERNEL);
			if (gru_mq_desc == NULL) {
				ret = xpNoMemory;
				goto done;
			}
			part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
		}

		ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
						      part_uv->
						      activate_gru_mq_desc_gpa);
		if (ret != xpSuccess)
			goto done;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
	}

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
			       msg_size);
	if (ret != xpSuccess) {
		smp_rmb();	/* ensure a fresh copy of part_uv->flags */
		if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
			goto again;
	}
done:
	mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
	return ret;
}

static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
			      size_t msg_size, int msg_type)
{
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess))
		XPC_DEACTIVATE_PARTITION(part, ret);
}

static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
			    void *msg, size_t msg_size, int msg_type)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess)) {
		if (irq_flags != NULL)
			spin_unlock_irqrestore(&ch->lock, *irq_flags);

		XPC_DEACTIVATE_PARTITION(part, ret);

		if (irq_flags != NULL)
			spin_lock_irqsave(&ch->lock, *irq_flags);
	}
}

static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;

	/*
	 * !!! Make our side think that the remote partition sent an activate
	 * !!! mq message our way by doing what the activate IRQ handler would
	 * !!! do had one really been sent.
	 */

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	if (part_uv->act_state_req == 0)
		xpc_activate_IRQ_rcvd++;
	part_uv->act_state_req = act_state_req;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}

static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
				  size_t *len)
{
	s64 status;
	enum xp_retval ret;

#if defined CONFIG_X86_64
	status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
					  (u64 *)len);
	if (status == BIOS_STATUS_SUCCESS)
		ret = xpSuccess;
	else if (status == BIOS_STATUS_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpBiosError;

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

#else
	#error not a supported configuration
#endif

	return ret;
}

static int
xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
{
	xpc_heartbeat_uv =
	    &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
	rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
	rp->sn.uv.activate_gru_mq_desc_gpa =
	    uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
	return 0;
}

static void
xpc_allow_hb_uv(short partid)
{
}

static void
xpc_disallow_hb_uv(short partid)
{
}

static void
xpc_disallow_all_hbs_uv(void)
{
}

static void
xpc_increment_heartbeat_uv(void)
{
	xpc_heartbeat_uv->value++;
}

static void
xpc_offline_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 1;
}

static void
xpc_online_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_init_uv(void)
{
	xpc_heartbeat_uv->value = 1;
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_exit_uv(void)
{
	xpc_offline_heartbeat_uv();
}

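/*
 * Pull a copy of the remote partition's heartbeat and compare it to the
 * value seen last time; an unchanged value while the heartbeat is not
 * marked offline means the remote side has stopped beating.
 */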
static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
			       part_uv->heartbeat_gpa,
			       sizeof(struct xpc_heartbeat_uv));
	if (ret != xpSuccess)
		return ret;

	if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
	    !part_uv->cached_heartbeat.offline) {

		ret = xpNoHeartbeat;
	} else {
		part->last_heartbeat = part_uv->cached_heartbeat.value;
	}
	return ret;
}

static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
				    unsigned long remote_rp_gpa, int nasid)
{
	short partid = remote_rp->SAL_partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_activate_mq_msg_activate_req_uv msg;

	part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
	part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
	part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
	part->sn.uv.activate_gru_mq_desc_gpa =
	    remote_rp->sn.uv.activate_gru_mq_desc_gpa;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
		msg.rp_gpa = uv_gpa(xpc_rsvd_page);
		msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
		msg.activate_gru_mq_desc_gpa =
		    xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					      XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
	}

	if (part->act_state == XPC_P_AS_INACTIVE)
		xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_deactivate_req_uv msg;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
	    part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {

		msg.reason = part->reason;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					      XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
	}
}

static void
xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

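/*
 * Trivial singly linked FIFO used for the per-channel free-slot and
 * received-message lists; an irq-safe spinlock guards both ends.
 */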
static void
xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
{
	head->first = NULL;
	head->last = NULL;
	spin_lock_init(&head->lock);
	head->n_entries = 0;
}

static void *
xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
{
	unsigned long irq_flags;
	struct xpc_fifo_entry_uv *first;

	spin_lock_irqsave(&head->lock, irq_flags);
	first = head->first;
	if (head->first != NULL) {
		head->first = first->next;
		if (head->first == NULL)
			head->last = NULL;

		head->n_entries--;
		BUG_ON(head->n_entries < 0);

		first->next = NULL;
	}
	spin_unlock_irqrestore(&head->lock, irq_flags);
	return first;
}

static void
xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
		      struct xpc_fifo_entry_uv *last)
{
	unsigned long irq_flags;

	last->next = NULL;
	spin_lock_irqsave(&head->lock, irq_flags);
	if (head->last != NULL)
		head->last->next = last;
	else
		head->first = last;
	head->last = last;
	head->n_entries++;
	spin_unlock_irqrestore(&head->lock, irq_flags);
}

static int
xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
{
	return head->n_entries;
}

/*
 * Setup the channel structures that are uv specific.
 */
static enum xp_retval
xpc_setup_ch_structures_uv(struct xpc_partition *part)
{
	struct xpc_channel_uv *ch_uv;
	int ch_number;

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_uv = &part->channels[ch_number].sn.uv;

		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
	}

	return xpSuccess;
}

/*
 * Teardown the channel structures that are uv specific.
 */
static void
xpc_teardown_ch_structures_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	/*
	 * We send a sync msg to get the remote partition's remote_act_state
	 * updated to our current act_state which at this point should
	 * be XPC_P_AS_ACTIVATING.
	 */
	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);

	while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
		 (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}

static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
	return chctl.all_flags;
}

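/*
 * Allocate the send-side message slots for a channel. If a full-size
 * allocation fails, progressively smaller slot counts are tried and
 * ch->local_nentries is shrunk to whatever finally fits.
 */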
static enum xp_retval
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_send_msg_slot_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->send_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = &ch_uv->send_msg_slots[entry];

			msg_slot->msg_slot_number = entry;
			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
					      &msg_slot->next);
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries)
			ch->local_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

static enum xp_retval
xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
		nbytes = nentries * ch->entry_size;
		ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->recv_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = ch_uv->recv_msg_slots +
			    entry * ch->entry_size;

			msg_slot->hdr.msg_slot_number = entry;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries)
			ch->remote_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

/*
 * Allocate msg_slots associated with the channel.
 */
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
	enum xp_retval ret;
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
						   gru_message_queue_desc),
						   GFP_KERNEL);
	if (ch_uv->cached_notify_gru_mq_desc == NULL)
		return xpNoMemory;

	ret = xpc_allocate_send_msg_slot_uv(ch);
	if (ret == xpSuccess) {

		ret = xpc_allocate_recv_msg_slot_uv(ch);
		if (ret != xpSuccess) {
			kfree(ch_uv->send_msg_slots);
			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		}
	}
	return ret;
}

/*
 * Free up msg_slots and clear other stuff that were setup for the specified
 * channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(!spin_is_locked(&ch->lock));

	kfree(ch_uv->cached_notify_gru_mq_desc);
	ch_uv->cached_notify_gru_mq_desc = NULL;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}

static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closerequest_uv msg;

	msg.ch_number = ch->number;
	msg.reason = ch->reason;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}

static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closereply_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}

static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;

	msg.ch_number = ch->number;
	msg.entry_size = ch->entry_size;
	msg.local_nentries = ch->local_nentries;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}

static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}

static void
xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
}

static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	xpc_wakeup_channel_mgr(part);
}

static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
			       unsigned long gru_mq_desc_gpa)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
	return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
					       gru_mq_desc_gpa);
}

static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}

static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}

static void
xpc_assume_partition_disengaged_uv(short partid)
{
	struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
	unsigned long irq_flags;

	spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
	part_uv->flags &= ~XPC_P_ENGAGED_UV;
	spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}

static int
xpc_partition_engaged_uv(short partid)
{
	return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}

static int
xpc_any_partition_engaged_uv(void)
{
	struct xpc_partition_uv *part_uv;
	short partid;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;
		if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
			return 1;
	}
	return 0;
}

static enum xp_retval
xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
			 struct xpc_send_msg_slot_uv **address_of_msg_slot)
{
	enum xp_retval ret;
	struct xpc_send_msg_slot_uv *msg_slot;
	struct xpc_fifo_entry_uv *entry;

	while (1) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
		if (entry != NULL)
			break;

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
	*address_of_msg_slot = msg_slot;
	return xpSuccess;
}

static void
xpc_free_msg_slot_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot)
{
	xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);

	/* wakeup anyone waiting for a free msg slot */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);
}

static void
xpc_notify_sender_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot,
		     enum xp_retval reason)
{
	xpc_notify_func func = msg_slot->func;

	if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {

		atomic_dec(&ch->n_to_notify);

		dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);

		func(reason, ch->partid, ch->number, msg_slot->key);

		dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);
	}
}

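/*
 * Handle an ACK for a message we sent earlier: advance the slot's
 * sequence number by local_nentries so the next send reusing this slot
 * gets a distinct number, notify the sender, and free the slot.
 */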
static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry = msg->hdr.msg_slot_number % ch->local_nentries;

	msg_slot = &ch->sn.uv.send_msg_slots[entry];

	BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
	msg_slot->msg_slot_number += ch->local_nentries;

	if (msg_slot->func != NULL)
		xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);

	xpc_free_msg_slot_uv(ch, msg_slot);
}

static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_channel *ch;
	struct xpc_channel_uv *ch_uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int ch_number = msg->hdr.ch_number;

	if (unlikely(ch_number >= part->nchannels)) {
		dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
			"channel number=0x%x in message from partid=%d\n",
			ch_number, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadChannelNumber;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		wake_up_interruptible(&xpc_activate_IRQ_wq);
		return;
	}

	ch = &part->channels[ch_number];
	xpc_msgqueue_ref(ch);

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/* see if we're really dealing with an ACK for a previously sent msg */
	if (msg->hdr.size == 0) {
		xpc_handle_notify_mq_ack_uv(ch, msg);
		xpc_msgqueue_deref(ch);
		return;
	}

	/* we're dealing with a normal message sent via the notify_mq */
	ch_uv = &ch->sn.uv;

	msg_slot = ch_uv->recv_msg_slots +
	    (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;

	BUG_ON(msg_slot->hdr.size != 0);

	memcpy(msg_slot, msg, msg->hdr.size);

	xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);

	if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
		/*
		 * If there is an existing idle kthread get it to deliver
		 * the payload, otherwise we'll have to get the channel mgr
		 * for this partition to create a kthread to do the delivery.
		 */
		if (atomic_read(&ch->kthreads_idle) > 0)
			wake_up_nr(&ch->idle_wq, 1);
		else
			xpc_send_chctl_local_msgrequest_uv(part, ch->number);
	}
	xpc_msgqueue_deref(ch);
}

static irqreturn_t
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_notify_mq_msg_uv *msg;
	short partid;
	struct xpc_partition *part;

	while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
	       NULL) {

		partid = msg->hdr.partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
				"invalid partid=0x%x in message\n", partid);
		} else {
			part = &xpc_partitions[partid];

			if (xpc_part_ref(part)) {
				xpc_handle_notify_mq_msg_uv(part, msg);
				xpc_part_deref(part);
			}
		}

		gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
	}

	return IRQ_HANDLED;
}

static int
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
{
	return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
}

static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	int ndeliverable_payloads;

	xpc_msgqueue_ref(ch);

	ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);

	if (ndeliverable_payloads > 0 &&
	    (ch->flags & XPC_C_CONNECTED) &&
	    (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {

		xpc_activate_kthreads(ch, ndeliverable_payloads);
	}

	xpc_msgqueue_deref(ch);
}

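/*
 * Send a payload on a channel: grab a send slot, publish the notify
 * func/key pair (the key must be visible before the non-NULL func, hence
 * the smp_wmb()), copy the payload behind a header, and push it into the
 * remote partition's notify message queue.
 */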
static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
		    u16 payload_size, u8 notify_type, xpc_notify_func func,
		    void *key)
{
	enum xp_retval ret = xpSuccess;
	struct xpc_send_msg_slot_uv *msg_slot = NULL;
	struct xpc_notify_mq_msg_uv *msg;
	u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
	size_t msg_size;

	DBUG_ON(notify_type != XPC_N_CALL);

	msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
	if (msg_size > ch->entry_size)
		return xpPayloadTooBig;

	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		goto out_1;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		ret = xpNotConnected;
		goto out_1;
	}

	ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
	if (ret != xpSuccess)
		goto out_1;

	if (func != NULL) {
		atomic_inc(&ch->n_to_notify);

		msg_slot->key = key;
		smp_wmb(); /* a non-NULL func must hit memory after the key */
		msg_slot->func = func;

		if (ch->flags & XPC_C_DISCONNECTING) {
			ret = ch->reason;
			goto out_2;
		}
	}

	msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
	msg->hdr.partid = xp_partition_id;
	msg->hdr.ch_number = ch->number;
	msg->hdr.size = msg_size;
	msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
	memcpy(&msg->payload, payload, payload_size);

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       msg_size);
	if (ret == xpSuccess)
		goto out_1;

	XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
out_2:
	if (func != NULL) {
		/*
		 * Try to NULL the msg_slot's func field. If we fail, then
		 * xpc_notify_senders_of_disconnect_uv() beat us to it, in
		 * which case we need to pretend we succeeded in sending the
		 * message since the user will get a callout for the
		 * disconnect error by xpc_notify_senders_of_disconnect_uv(),
		 * and also getting an error returned here would confuse
		 * them. Additionally, since in this case the channel is
		 * being disconnected we don't need to put the msg_slot back
		 * on the free list.
		 */
		if (cmpxchg(&msg_slot->func, func, NULL) != func) {
			ret = xpSuccess;
			goto out_1;
		}

		msg_slot->key = NULL;
		atomic_dec(&ch->n_to_notify);
	}
	xpc_free_msg_slot_uv(ch, msg_slot);
out_1:
	xpc_msgqueue_deref(ch);
	return ret;
}

/*
 * Tell the callers of xpc_send_notify() that the status of their payloads
 * is unknown because the channel is now disconnecting.
 *
 * We don't worry about putting these msg_slots on the free list since the
 * msg_slots themselves are about to be kfree'd.
 */
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry;

	DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));

	for (entry = 0; entry < ch->local_nentries; entry++) {

		if (atomic_read(&ch->n_to_notify) == 0)
			break;

		msg_slot = &ch->sn.uv.send_msg_slots[entry];
		if (msg_slot->func != NULL)
			xpc_notify_sender_uv(ch, msg_slot, ch->reason);
	}
}

/*
 * Get the next deliverable message's payload.
 */
static void *
xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
{
	struct xpc_fifo_entry_uv *entry;
	struct xpc_notify_mq_msg_uv *msg;
	void *payload = NULL;

	if (!(ch->flags & XPC_C_DISCONNECTING)) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
		if (entry != NULL) {
			msg = container_of(entry, struct xpc_notify_mq_msg_uv,
					   hdr.u.next);
			payload = &msg->payload;
		}
	}
	return payload;
}

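/*
 * Mark a received payload as consumed by ACKing it back to the sender;
 * a header-only message (size == 0) is what the other side recognizes
 * as an ACK, and zeroing the size also marks the local recv slot free
 * for reuse.
 */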
static void
xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
{
	struct xpc_notify_mq_msg_uv *msg;
	enum xp_retval ret;

	msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);

	/* return an ACK to the sender of this message */

	msg->hdr.partid = xp_partition_id;
	msg->hdr.size = 0;	/* size of zero indicates this is an ACK */

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       sizeof(struct xpc_notify_mq_msghdr_uv));
	if (ret != xpSuccess)
		XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
}

static struct xpc_arch_operations xpc_arch_ops_uv = {
	.setup_partitions = xpc_setup_partitions_uv,
	.teardown_partitions = xpc_teardown_partitions_uv,
	.process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
	.get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
	.setup_rsvd_page = xpc_setup_rsvd_page_uv,

	.allow_hb = xpc_allow_hb_uv,
	.disallow_hb = xpc_disallow_hb_uv,
	.disallow_all_hbs = xpc_disallow_all_hbs_uv,
	.increment_heartbeat = xpc_increment_heartbeat_uv,
	.offline_heartbeat = xpc_offline_heartbeat_uv,
	.online_heartbeat = xpc_online_heartbeat_uv,
	.heartbeat_init = xpc_heartbeat_init_uv,
	.heartbeat_exit = xpc_heartbeat_exit_uv,
	.get_remote_heartbeat = xpc_get_remote_heartbeat_uv,

	.request_partition_activation =
		xpc_request_partition_activation_uv,
	.request_partition_reactivation =
		xpc_request_partition_reactivation_uv,
	.request_partition_deactivation =
		xpc_request_partition_deactivation_uv,
	.cancel_partition_deactivation_request =
		xpc_cancel_partition_deactivation_request_uv,

	.setup_ch_structures = xpc_setup_ch_structures_uv,
	.teardown_ch_structures = xpc_teardown_ch_structures_uv,

	.make_first_contact = xpc_make_first_contact_uv,

	.get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
	.send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
	.send_chctl_closereply = xpc_send_chctl_closereply_uv,
	.send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
	.send_chctl_openreply = xpc_send_chctl_openreply_uv,
	.send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
	.process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,

	.save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,

	.setup_msg_structures = xpc_setup_msg_structures_uv,
	.teardown_msg_structures = xpc_teardown_msg_structures_uv,

	.indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
	.indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
	.assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
	.partition_engaged = xpc_partition_engaged_uv,
	.any_partition_engaged = xpc_any_partition_engaged_uv,

	.n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
	.send_payload = xpc_send_payload_uv,
	.get_deliverable_payload = xpc_get_deliverable_payload_uv,
	.received_payload = xpc_received_payload_uv,
	.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
};

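/*
 * Install the uv ops vector and create both GRU message queues, with
 * their irq handlers targeted at cpu 0. Torn down in reverse order by
 * xpc_exit_uv().
 */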
int
xpc_init_uv(void)
{
	xpc_arch_ops = xpc_arch_ops_uv;

	if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
		dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
			XPC_MSG_HDR_MAX_SIZE);
		return -E2BIG;
	}

	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
						  XPC_ACTIVATE_IRQ_NAME,
						  xpc_handle_activate_IRQ_uv);
	if (IS_ERR(xpc_activate_mq_uv))
		return PTR_ERR(xpc_activate_mq_uv);

	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
						XPC_NOTIFY_IRQ_NAME,
						xpc_handle_notify_IRQ_uv);
	if (IS_ERR(xpc_notify_mq_uv)) {
		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
		return PTR_ERR(xpc_notify_mq_uv);
	}

	return 0;
}

void
xpc_exit_uv(void)
{
	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9/*
10 * Cross Partition Communication (XPC) uv-based functions.
11 *
12 * Architecture specific implementation of common functions.
13 *
14 */
15
16#include <linux/kernel.h>
17#include <linux/mm.h>
18#include <linux/interrupt.h>
19#include <linux/delay.h>
20#include <linux/device.h>
21#include <linux/cpu.h>
22#include <linux/module.h>
23#include <linux/err.h>
24#include <linux/slab.h>
25#include <linux/numa.h>
26#include <asm/uv/uv_hub.h>
27#if defined CONFIG_X86_64
28#include <asm/uv/bios.h>
29#include <asm/uv/uv_irq.h>
30#elif defined CONFIG_IA64_SGI_UV
31#include <asm/sn/intr.h>
32#include <asm/sn/sn_sal.h>
33#endif
34#include "../sgi-gru/gru.h"
35#include "../sgi-gru/grukservices.h"
36#include "xpc.h"
37
38#if defined CONFIG_IA64_SGI_UV
39struct uv_IO_APIC_route_entry {
40 __u64 vector : 8,
41 delivery_mode : 3,
42 dest_mode : 1,
43 delivery_status : 1,
44 polarity : 1,
45 __reserved_1 : 1,
46 trigger : 1,
47 mask : 1,
48 __reserved_2 : 15,
49 dest : 32;
50};
51
52#define sn_partition_id 0
53#endif
54
55static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
56
57#define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES)
58#define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
59 XPC_ACTIVATE_MSG_SIZE_UV)
60#define XPC_ACTIVATE_IRQ_NAME "xpc_activate"
61
62#define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES)
63#define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
64 XPC_NOTIFY_MSG_SIZE_UV)
65#define XPC_NOTIFY_IRQ_NAME "xpc_notify"
66
67static int xpc_mq_node = NUMA_NO_NODE;
68
69static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
70static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
71
72static int
73xpc_setup_partitions_uv(void)
74{
75 short partid;
76 struct xpc_partition_uv *part_uv;
77
78 for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
79 part_uv = &xpc_partitions[partid].sn.uv;
80
81 mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
82 spin_lock_init(&part_uv->flags_lock);
83 part_uv->remote_act_state = XPC_P_AS_INACTIVE;
84 }
85 return 0;
86}
87
88static void
89xpc_teardown_partitions_uv(void)
90{
91 short partid;
92 struct xpc_partition_uv *part_uv;
93 unsigned long irq_flags;
94
95 for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
96 part_uv = &xpc_partitions[partid].sn.uv;
97
98 if (part_uv->cached_activate_gru_mq_desc != NULL) {
99 mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
100 spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
101 part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
102 spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
103 kfree(part_uv->cached_activate_gru_mq_desc);
104 part_uv->cached_activate_gru_mq_desc = NULL;
105 mutex_unlock(&part_uv->
106 cached_activate_gru_mq_desc_mutex);
107 }
108 }
109}
110
111static int
112xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
113{
114 int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
115
116#if defined CONFIG_X86_64
117 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
118 UV_AFFINITY_CPU);
119 if (mq->irq < 0)
120 return mq->irq;
121
122 mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
123
124#elif defined CONFIG_IA64_SGI_UV
125 if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
126 mq->irq = SGI_XPC_ACTIVATE;
127 else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
128 mq->irq = SGI_XPC_NOTIFY;
129 else
130 return -EINVAL;
131
132 mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
133 uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
134#else
135 #error not a supported configuration
136#endif
137
138 return 0;
139}
140
141static void
142xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
143{
144#if defined CONFIG_X86_64
145 uv_teardown_irq(mq->irq);
146
147#elif defined CONFIG_IA64_SGI_UV
148 int mmr_pnode;
149 unsigned long mmr_value;
150
151 mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
152 mmr_value = 1UL << 16;
153
154 uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
155#else
156 #error not a supported configuration
157#endif
158}
159
static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_IA64_SGI_UV
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

	ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
				    mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
			ret);
		return -EBUSY;
	}
#elif defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
					 mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
			"ret=%d\n", ret);
		return ret;
	}
#else
	#error not a supported configuration
#endif

	mq->watchlist_num = ret;
	return 0;
}

static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != SALRET_OK);
#else
	#error not a supported configuration
#endif
}

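/*
 * Create a GRU message queue on the blade of the specified cpu: allocate
 * node-local pages for the queue itself, enable interrupt generation via
 * the watchlist, hook up the IRQ and its handler, initialize the GRU
 * descriptor, and finally open the memory up to the other partitions.
 */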
static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int nasid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"an xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kzalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = __alloc_pages_node(nid,
				  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				  pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable generation of an irq when a GRU mq operation occurs on this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));

	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				       nasid, mmr_value->vector,
				       mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}

static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
	unsigned int mq_size;
	int pg_order;
	int ret;

	/* disallow other partitions from accessing this GRU mq */
	mq_size = 1UL << mq->order;
	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
	BUG_ON(ret != xpSuccess);

	/* unregister the irq handler and release the mq irq/vector mapping */
	free_irq(mq->irq, NULL);
	xpc_release_gru_mq_irq_uv(mq);

	/* disable generation of an irq when a GRU mq operation occurs on this mq */
	xpc_gru_mq_watchlist_free_uv(mq);

	pg_order = mq->order - PAGE_SHIFT;
	free_pages((unsigned long)mq->address, pg_order);

	kfree(mq);
}

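/*
 * Send a message across a GRU message queue, retrying while the destination
 * queue is full (after a short sleep) or while NUMAlink congestion is
 * reported (immediately); any other GRU error aborts the send.
 */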
static enum xp_retval
xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
		 size_t msg_size)
{
	enum xp_retval xp_ret;
	int ret;

	while (1) {
		ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
		if (ret == MQE_OK) {
			xp_ret = xpSuccess;
			break;
		}

		if (ret == MQE_QUEUE_FULL) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_QUEUE_FULL\n");
			/* !!! handle QLimit reached; delay & try again */
			/* ??? Do we add a limit to the number of retries? */
			(void)msleep_interruptible(10);
		} else if (ret == MQE_CONGESTION) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_CONGESTION\n");
			/* !!! handle LB Overflow; simply try again */
			/* ??? Do we add a limit to the number of retries? */
		} else {
			/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
			dev_err(xpc_chan, "gru_send_message_gpa() returned "
				"error=%d\n", ret);
			xp_ret = xpGruSendMqError;
			break;
		}
	}
	return xp_ret;
}

static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}

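/*
 * Dispatch one message received on the activate message queue.  Activate
 * and deactivate requests are recorded in act_state_req and handed off to
 * the heartbeat checker (via *wakeup_hb_checker); channel-control messages
 * set the corresponding chctl flag and wake the channel manager.
 */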
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
			      int part_setup,
			      int *wakeup_hb_checker)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_openclose_args *args;

	part_uv->remote_act_state = msg_hdr->act_state;

	switch (msg_hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
		/* syncing of remote_act_state was just done above */
		break;

	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg;

		/*
		 * ??? Do we deal here with ts_jiffies being different
		 * ??? if act_state != XPC_P_AS_INACTIVE instead of
		 * ??? below?
		 */
		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_activate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
		part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
		part_uv->heartbeat_gpa = msg->heartbeat_gpa;

		if (msg->activate_gru_mq_desc_gpa !=
		    part_uv->activate_gru_mq_desc_gpa) {
			spin_lock(&part_uv->flags_lock);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock(&part_uv->flags_lock);
			part_uv->activate_gru_mq_desc_gpa =
			    msg->activate_gru_mq_desc_gpa;
		}
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_deactivate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_deactivate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = msg->reason;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closerequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->reason = msg->reason;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closereply_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openrequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->entry_size = msg->entry_size;
		args->local_nentries = msg->local_nentries;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->remote_nentries = msg->remote_nentries;
		args->local_nentries = msg->local_nentries;
		args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
		struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
	}
		fallthrough;
	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	default:
		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadMsgType;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}

	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
	    part->remote_rp_ts_jiffies != 0) {
		/*
		 * ??? Does what we do here need to be sensitive to
		 * ??? act_state or remote_act_state?
		 */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
	}
}

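/*
 * Interrupt handler for the activate message queue.  Drain all pending
 * messages, dispatching each to xpc_handle_activate_mq_msg_uv(), and wake
 * the heartbeat checker if any of them requested it.
 */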
static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	short partid;
	struct xpc_partition *part;
	int wakeup_hb_checker = 0;
	int part_referenced;

	while (1) {
		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
		if (msg_hdr == NULL)
			break;

		partid = msg_hdr->partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
				"received invalid partid=0x%x in message\n",
				partid);
		} else {
			part = &xpc_partitions[partid];

			part_referenced = xpc_part_ref(part);
			xpc_handle_activate_mq_msg_uv(part, msg_hdr,
						      part_referenced,
						      &wakeup_hb_checker);
			if (part_referenced)
				xpc_part_deref(part);
		}

		gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	return IRQ_HANDLED;
}

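/*
 * Pull a copy of a remote partition's GRU message queue descriptor across
 * NUMAlink.  The cached copy's mq pointer is NULLed since it came from the
 * remote partition's address space and is not valid on this side.
 */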
static enum xp_retval
xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
				unsigned long gru_mq_desc_gpa)
{
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
			       sizeof(struct gru_message_queue_desc));
	if (ret == xpSuccess)
		gru_mq_desc->mq = NULL;

	return ret;
}

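/*
 * Send a message to another partition's activate message queue, caching a
 * copy of that queue's GRU descriptor on first use (or again whenever the
 * cached copy has been invalidated by a new ACTIVATE_REQ).
 */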
static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
			 int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct gru_message_queue_desc *gru_mq_desc;
	unsigned long irq_flags;
	enum xp_retval ret;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = xp_partition_id;
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
	if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
		gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
		if (gru_mq_desc == NULL) {
			gru_mq_desc = kmalloc(sizeof(struct
					      gru_message_queue_desc),
					      GFP_ATOMIC);
			if (gru_mq_desc == NULL) {
				ret = xpNoMemory;
				goto done;
			}
			part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
		}

		ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
						      part_uv->
						      activate_gru_mq_desc_gpa);
		if (ret != xpSuccess)
			goto done;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
	}

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
			       msg_size);
	if (ret != xpSuccess) {
		smp_rmb();	/* ensure a fresh copy of part_uv->flags */
		if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
			goto again;
	}
done:
	mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
	return ret;
}

static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
			      size_t msg_size, int msg_type)
{
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess))
		XPC_DEACTIVATE_PARTITION(part, ret);
}

static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
			    void *msg, size_t msg_size, int msg_type)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess)) {
		if (irq_flags != NULL)
			spin_unlock_irqrestore(&ch->lock, *irq_flags);

		XPC_DEACTIVATE_PARTITION(part, ret);

		if (irq_flags != NULL)
			spin_lock_irqsave(&ch->lock, *irq_flags);
	}
}

static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;

	/*
	 * !!! Make our side think that the remote partition sent an activate
	 * !!! mq message our way by doing what the activate IRQ handler would
	 * !!! do had one really been sent.
	 */

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	if (part_uv->act_state_req == 0)
		xpc_activate_IRQ_rcvd++;
	part_uv->act_state_req = act_state_req;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}

static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
				  size_t *len)
{
	s64 status;
	enum xp_retval ret;

#if defined CONFIG_X86_64
	status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
					  (u64 *)len);
	if (status == BIOS_STATUS_SUCCESS)
		ret = xpSuccess;
	else if (status == BIOS_STATUS_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpBiosError;

#elif defined CONFIG_IA64_SGI_UV
	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

#else
	#error not a supported configuration
#endif

	return ret;
}

static int
xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
{
	xpc_heartbeat_uv =
	    &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
	rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
	rp->sn.uv.activate_gru_mq_desc_gpa =
	    uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
	return 0;
}

static void
xpc_allow_hb_uv(short partid)
{
}

static void
xpc_disallow_hb_uv(short partid)
{
}

static void
xpc_disallow_all_hbs_uv(void)
{
}

static void
xpc_increment_heartbeat_uv(void)
{
	xpc_heartbeat_uv->value++;
}

static void
xpc_offline_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 1;
}

static void
xpc_online_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_init_uv(void)
{
	xpc_heartbeat_uv->value = 1;
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_exit_uv(void)
{
	xpc_offline_heartbeat_uv();
}

static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
			       part_uv->heartbeat_gpa,
			       sizeof(struct xpc_heartbeat_uv));
	if (ret != xpSuccess)
		return ret;

	if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
	    !part_uv->cached_heartbeat.offline) {

		ret = xpNoHeartbeat;
	} else {
		part->last_heartbeat = part_uv->cached_heartbeat.value;
	}
	return ret;
}

static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
				    unsigned long remote_rp_gpa, int nasid)
{
	short partid = remote_rp->SAL_partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_activate_mq_msg_activate_req_uv msg;

	part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
	part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
	part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
	part->sn.uv.activate_gru_mq_desc_gpa =
	    remote_rp->sn.uv.activate_gru_mq_desc_gpa;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
		msg.rp_gpa = uv_gpa(xpc_rsvd_page);
		msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
		msg.activate_gru_mq_desc_gpa =
		    xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					      XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
	}

	if (part->act_state == XPC_P_AS_INACTIVE)
		xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_deactivate_req_uv msg;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
	    part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {

		msg.reason = part->reason;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					      XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
	}
}

static void
xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

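/*
 * The two xpc_fifo_head_uv lists per channel (free send msg_slots and
 * received messages awaiting delivery) are simple irq-safe singly-linked
 * FIFOs.  Entries are embedded in larger structures and recovered with
 * container_of(), e.g.:
 *
 *	struct xpc_fifo_entry_uv *entry;
 *	struct xpc_send_msg_slot_uv *slot;
 *
 *	entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
 *	if (entry != NULL)
 *		slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
 */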
static void
xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
{
	head->first = NULL;
	head->last = NULL;
	spin_lock_init(&head->lock);
	head->n_entries = 0;
}

static void *
xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
{
	unsigned long irq_flags;
	struct xpc_fifo_entry_uv *first;

	spin_lock_irqsave(&head->lock, irq_flags);
	first = head->first;
	if (head->first != NULL) {
		head->first = first->next;
		if (head->first == NULL)
			head->last = NULL;

		head->n_entries--;
		BUG_ON(head->n_entries < 0);

		first->next = NULL;
	}
	spin_unlock_irqrestore(&head->lock, irq_flags);
	return first;
}

static void
xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
		      struct xpc_fifo_entry_uv *last)
{
	unsigned long irq_flags;

	last->next = NULL;
	spin_lock_irqsave(&head->lock, irq_flags);
	if (head->last != NULL)
		head->last->next = last;
	else
		head->first = last;
	head->last = last;
	head->n_entries++;
	spin_unlock_irqrestore(&head->lock, irq_flags);
}

static int
xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
{
	return head->n_entries;
}

/*
 * Set up the channel structures that are uv specific.
 */
static enum xp_retval
xpc_setup_ch_structures_uv(struct xpc_partition *part)
{
	struct xpc_channel_uv *ch_uv;
	int ch_number;

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_uv = &part->channels[ch_number].sn.uv;

		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
	}

	return xpSuccess;
}

/*
 * Tear down the channel structures that are uv specific.
 */
static void
xpc_teardown_ch_structures_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	/*
	 * We send a sync msg to get the remote partition's remote_act_state
	 * updated to our current act_state which at this point should
	 * be XPC_P_AS_ACTIVATING.
	 */
	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);

	while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
		 (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}

static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
	return chctl.all_flags;
}

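/*
 * Allocate the send (and below, the receive) message slots for a channel.
 * If a single allocation large enough for the requested number of entries
 * fails, retry with progressively fewer entries and shrink the channel's
 * nentries accordingly.
 */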
static enum xp_retval
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_send_msg_slot_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->send_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = &ch_uv->send_msg_slots[entry];

			msg_slot->msg_slot_number = entry;
			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
					      &msg_slot->next);
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries)
			ch->local_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

static enum xp_retval
xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
		nbytes = nentries * ch->entry_size;
		ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->recv_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = ch_uv->recv_msg_slots +
			    entry * ch->entry_size;

			msg_slot->hdr.msg_slot_number = entry;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries)
			ch->remote_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

/*
 * Allocate msg_slots associated with the channel.
 */
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
	enum xp_retval ret;
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
						   gru_message_queue_desc),
						   GFP_KERNEL);
	if (ch_uv->cached_notify_gru_mq_desc == NULL)
		return xpNoMemory;

	ret = xpc_allocate_send_msg_slot_uv(ch);
	if (ret == xpSuccess) {

		ret = xpc_allocate_recv_msg_slot_uv(ch);
		if (ret != xpSuccess) {
			kfree(ch_uv->send_msg_slots);
			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		}
	}
	return ret;
}

/*
 * Free up the msg_slots and clear other stuff that was set up for the
 * specified channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	lockdep_assert_held(&ch->lock);

	kfree(ch_uv->cached_notify_gru_mq_desc);
	ch_uv->cached_notify_gru_mq_desc = NULL;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}

static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closerequest_uv msg;

	msg.ch_number = ch->number;
	msg.reason = ch->reason;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}

static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closereply_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}

static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;

	msg.ch_number = ch->number;
	msg.entry_size = ch->entry_size;
	msg.local_nentries = ch->local_nentries;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}

static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}

static void
xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
}

static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	xpc_wakeup_channel_mgr(part);
}

static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
			       unsigned long gru_mq_desc_gpa)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
	return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
					       gru_mq_desc_gpa);
}

static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}

static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}

static void
xpc_assume_partition_disengaged_uv(short partid)
{
	struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
	unsigned long irq_flags;

	spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
	part_uv->flags &= ~XPC_P_ENGAGED_UV;
	spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}

static int
xpc_partition_engaged_uv(short partid)
{
	return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}

static int
xpc_any_partition_engaged_uv(void)
{
	struct xpc_partition_uv *part_uv;
	short partid;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;
		if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
			return 1;
	}
	return 0;
}

static enum xp_retval
xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
			 struct xpc_send_msg_slot_uv **address_of_msg_slot)
{
	enum xp_retval ret;
	struct xpc_send_msg_slot_uv *msg_slot;
	struct xpc_fifo_entry_uv *entry;

	while (1) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
		if (entry != NULL)
			break;

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
	*address_of_msg_slot = msg_slot;
	return xpSuccess;
}

static void
xpc_free_msg_slot_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot)
{
	xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);

	/* wake up anyone waiting for a free msg slot */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);
}

static void
xpc_notify_sender_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot,
		     enum xp_retval reason)
{
	xpc_notify_func func = msg_slot->func;

	if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {

		atomic_dec(&ch->n_to_notify);

		dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);

		func(reason, ch->partid, ch->number, msg_slot->key);

		dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);
	}
}

static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry = msg->hdr.msg_slot_number % ch->local_nentries;

	msg_slot = &ch->sn.uv.send_msg_slots[entry];

	BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
	msg_slot->msg_slot_number += ch->local_nentries;

	if (msg_slot->func != NULL)
		xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);

	xpc_free_msg_slot_uv(ch, msg_slot);
}

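/*
 * Handle a message arriving on the notify message queue.  A header size of
 * zero marks an ACK for a message we sent earlier; anything else is a new
 * payload, which is copied into the channel's recv_msg_slots (indexed by
 * msg_slot_number modulo remote_nentries) and queued for delivery.
 */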
static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_channel *ch;
	struct xpc_channel_uv *ch_uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int ch_number = msg->hdr.ch_number;

	if (unlikely(ch_number >= part->nchannels)) {
		dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
			"channel number=0x%x in message from partid=%d\n",
			ch_number, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadChannelNumber;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		wake_up_interruptible(&xpc_activate_IRQ_wq);
		return;
	}

	ch = &part->channels[ch_number];
	xpc_msgqueue_ref(ch);

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/* see if we're really dealing with an ACK for a previously sent msg */
	if (msg->hdr.size == 0) {
		xpc_handle_notify_mq_ack_uv(ch, msg);
		xpc_msgqueue_deref(ch);
		return;
	}

	/* we're dealing with a normal message sent via the notify_mq */
	ch_uv = &ch->sn.uv;

	msg_slot = ch_uv->recv_msg_slots +
	    (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;

	BUG_ON(msg_slot->hdr.size != 0);

	memcpy(msg_slot, msg, msg->hdr.size);

	xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);

	if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
		/*
		 * If there is an existing idle kthread get it to deliver
		 * the payload, otherwise we'll have to get the channel mgr
		 * for this partition to create a kthread to do the delivery.
		 */
		if (atomic_read(&ch->kthreads_idle) > 0)
			wake_up_nr(&ch->idle_wq, 1);
		else
			xpc_send_chctl_local_msgrequest_uv(part, ch->number);
	}
	xpc_msgqueue_deref(ch);
}

static irqreturn_t
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_notify_mq_msg_uv *msg;
	short partid;
	struct xpc_partition *part;

	while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
	       NULL) {

		partid = msg->hdr.partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
				"invalid partid=0x%x in message\n", partid);
		} else {
			part = &xpc_partitions[partid];

			if (xpc_part_ref(part)) {
				xpc_handle_notify_mq_msg_uv(part, msg);
				xpc_part_deref(part);
			}
		}

		gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
	}

	return IRQ_HANDLED;
}

static int
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
{
	return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
}

static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	int ndeliverable_payloads;

	xpc_msgqueue_ref(ch);

	ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);

	if (ndeliverable_payloads > 0 &&
	    (ch->flags & XPC_C_CONNECTED) &&
	    (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {

		xpc_activate_kthreads(ch, ndeliverable_payloads);
	}

	xpc_msgqueue_deref(ch);
}

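/*
 * Send a payload to the remote partition over the notify message queue.
 * The message (header plus payload) is assembled in an on-stack buffer
 * before being handed to xpc_send_gru_msg(); if a notify func was supplied
 * it is armed before the send and disarmed again on failure (via cmpxchg,
 * racing with xpc_notify_senders_of_disconnect_uv()).
 */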
static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
		    u16 payload_size, u8 notify_type, xpc_notify_func func,
		    void *key)
{
	enum xp_retval ret = xpSuccess;
	struct xpc_send_msg_slot_uv *msg_slot = NULL;
	struct xpc_notify_mq_msg_uv *msg;
	u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
	size_t msg_size;

	DBUG_ON(notify_type != XPC_N_CALL);

	msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
	if (msg_size > ch->entry_size)
		return xpPayloadTooBig;

	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		goto out_1;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		ret = xpNotConnected;
		goto out_1;
	}

	ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
	if (ret != xpSuccess)
		goto out_1;

	if (func != NULL) {
		atomic_inc(&ch->n_to_notify);

		msg_slot->key = key;
		smp_wmb(); /* a non-NULL func must hit memory after the key */
		msg_slot->func = func;

		if (ch->flags & XPC_C_DISCONNECTING) {
			ret = ch->reason;
			goto out_2;
		}
	}

	msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
	msg->hdr.partid = xp_partition_id;
	msg->hdr.ch_number = ch->number;
	msg->hdr.size = msg_size;
	msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
	memcpy(&msg->payload, payload, payload_size);

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       msg_size);
	if (ret == xpSuccess)
		goto out_1;

	XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
out_2:
	if (func != NULL) {
		/*
		 * Try to NULL the msg_slot's func field. If we fail, then
		 * xpc_notify_senders_of_disconnect_uv() beat us to it, in which
		 * case we need to pretend we succeeded to send the message
		 * since the user will get a callout for the disconnect error
		 * by xpc_notify_senders_of_disconnect_uv(), and getting an
		 * error returned here as well would confuse them. Additionally,
		 * since in this case the channel is being disconnected we don't
		 * need to put the msg_slot back on the free list.
		 */
		if (cmpxchg(&msg_slot->func, func, NULL) != func) {
			ret = xpSuccess;
			goto out_1;
		}

		msg_slot->key = NULL;
		atomic_dec(&ch->n_to_notify);
	}
	xpc_free_msg_slot_uv(ch, msg_slot);
out_1:
	xpc_msgqueue_deref(ch);
	return ret;
}

/*
 * Tell the callers of xpc_send_notify() that the status of their payloads
 * is unknown because the channel is now disconnecting.
 *
 * We don't worry about putting these msg_slots on the free list since the
 * msg_slots themselves are about to be kfree'd.
 */
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry;

	DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));

	for (entry = 0; entry < ch->local_nentries; entry++) {

		if (atomic_read(&ch->n_to_notify) == 0)
			break;

		msg_slot = &ch->sn.uv.send_msg_slots[entry];
		if (msg_slot->func != NULL)
			xpc_notify_sender_uv(ch, msg_slot, ch->reason);
	}
}

/*
 * Get the next deliverable message's payload.
 */
static void *
xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
{
	struct xpc_fifo_entry_uv *entry;
	struct xpc_notify_mq_msg_uv *msg;
	void *payload = NULL;

	if (!(ch->flags & XPC_C_DISCONNECTING)) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
		if (entry != NULL) {
			msg = container_of(entry, struct xpc_notify_mq_msg_uv,
					   hdr.u.next);
			payload = &msg->payload;
		}
	}
	return payload;
}

static void
xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
{
	struct xpc_notify_mq_msg_uv *msg;
	enum xp_retval ret;

	msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);

	/* return an ACK to the sender of this message */

	msg->hdr.partid = xp_partition_id;
	msg->hdr.size = 0; /* size of zero indicates this is an ACK */

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       sizeof(struct xpc_notify_mq_msghdr_uv));
	if (ret != xpSuccess)
		XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
}

static const struct xpc_arch_operations xpc_arch_ops_uv = {
	.setup_partitions = xpc_setup_partitions_uv,
	.teardown_partitions = xpc_teardown_partitions_uv,
	.process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
	.get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
	.setup_rsvd_page = xpc_setup_rsvd_page_uv,

	.allow_hb = xpc_allow_hb_uv,
	.disallow_hb = xpc_disallow_hb_uv,
	.disallow_all_hbs = xpc_disallow_all_hbs_uv,
	.increment_heartbeat = xpc_increment_heartbeat_uv,
	.offline_heartbeat = xpc_offline_heartbeat_uv,
	.online_heartbeat = xpc_online_heartbeat_uv,
	.heartbeat_init = xpc_heartbeat_init_uv,
	.heartbeat_exit = xpc_heartbeat_exit_uv,
	.get_remote_heartbeat = xpc_get_remote_heartbeat_uv,

	.request_partition_activation =
		xpc_request_partition_activation_uv,
	.request_partition_reactivation =
		xpc_request_partition_reactivation_uv,
	.request_partition_deactivation =
		xpc_request_partition_deactivation_uv,
	.cancel_partition_deactivation_request =
		xpc_cancel_partition_deactivation_request_uv,

	.setup_ch_structures = xpc_setup_ch_structures_uv,
	.teardown_ch_structures = xpc_teardown_ch_structures_uv,

	.make_first_contact = xpc_make_first_contact_uv,

	.get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
	.send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
	.send_chctl_closereply = xpc_send_chctl_closereply_uv,
	.send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
	.send_chctl_openreply = xpc_send_chctl_openreply_uv,
	.send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
	.process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,

	.save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,

	.setup_msg_structures = xpc_setup_msg_structures_uv,
	.teardown_msg_structures = xpc_teardown_msg_structures_uv,

	.indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
	.indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
	.assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
	.partition_engaged = xpc_partition_engaged_uv,
	.any_partition_engaged = xpc_any_partition_engaged_uv,

	.n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
	.send_payload = xpc_send_payload_uv,
	.get_deliverable_payload = xpc_get_deliverable_payload_uv,
	.received_payload = xpc_received_payload_uv,
	.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
};

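/*
 * Create the activate and notify message queues on node nid, trying each
 * of the node's cpus in turn until creation succeeds on one of them.
 */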
static int
xpc_init_mq_node(int nid)
{
	int cpu;

	cpus_read_lock();

	for_each_cpu(cpu, cpumask_of_node(nid)) {
		xpc_activate_mq_uv =
			xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, cpu,
					     XPC_ACTIVATE_IRQ_NAME,
					     xpc_handle_activate_IRQ_uv);
		if (!IS_ERR(xpc_activate_mq_uv))
			break;
	}
	if (IS_ERR(xpc_activate_mq_uv)) {
		cpus_read_unlock();
		return PTR_ERR(xpc_activate_mq_uv);
	}

	for_each_cpu(cpu, cpumask_of_node(nid)) {
		xpc_notify_mq_uv =
			xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, cpu,
					     XPC_NOTIFY_IRQ_NAME,
					     xpc_handle_notify_IRQ_uv);
		if (!IS_ERR(xpc_notify_mq_uv))
			break;
	}
	if (IS_ERR(xpc_notify_mq_uv)) {
		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
		cpus_read_unlock();
		return PTR_ERR(xpc_notify_mq_uv);
	}

	cpus_read_unlock();
	return 0;
}

int
xpc_init_uv(void)
{
	int nid;
	int ret = 0;

	xpc_arch_ops = xpc_arch_ops_uv;

	if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
		dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
			XPC_MSG_HDR_MAX_SIZE);
		return -E2BIG;
	}

	if (xpc_mq_node < 0)
		for_each_online_node(nid) {
			ret = xpc_init_mq_node(nid);

			if (!ret)
				break;
		}
	else
		ret = xpc_init_mq_node(xpc_mq_node);

	if (ret < 0)
		dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n",
			-ret);

	return ret;
}

void
xpc_exit_uv(void)
{
	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}

module_param(xpc_mq_node, int, 0);
MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues.");