#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/numa.h>
#include <asm/pci_x86.h>

struct pci_root_info {
        struct acpi_device *bridge;
        char name[16];
        unsigned int res_num;
        struct resource *res;
        int busnum;
        struct pci_sysdata sd;
};

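/*
 * pci_use_crs selects whether host bridge windows are taken from the ACPI
 * _CRS method.  The default below can be overridden by the DMI quirk table
 * and by the "pci=use_crs" / "pci=nocrs" command line options.
 */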
static bool pci_use_crs = true;

static int __init set_use_crs(const struct dmi_system_id *id)
{
        pci_use_crs = true;
        return 0;
}

static int __init set_nouse_crs(const struct dmi_system_id *id)
{
        pci_use_crs = false;
        return 0;
}

static const struct dmi_system_id pci_use_crs_table[] __initconst = {
        /* http://bugzilla.kernel.org/show_bug.cgi?id=14183 */
        {
                .callback = set_use_crs,
                .ident = "IBM System x3800",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
                },
        },
        /* https://bugzilla.kernel.org/show_bug.cgi?id=16007 */
        /* 2006 AMD HT/VIA system with two host bridges */
        {
                .callback = set_use_crs,
                .ident = "ASRock ALiveSATA2-GLAN",
                .matches = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
                },
        },
        /* https://bugzilla.kernel.org/show_bug.cgi?id=30552 */
        /* 2006 AMD HT/VIA system with two host bridges */
        {
                .callback = set_use_crs,
                .ident = "ASUS M2V-MX SE",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
                        DMI_MATCH(DMI_BOARD_NAME, "M2V-MX SE"),
                        DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
                },
        },
        /* https://bugzilla.kernel.org/show_bug.cgi?id=42619 */
        {
                .callback = set_use_crs,
                .ident = "MSI MS-7253",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
                        DMI_MATCH(DMI_BOARD_NAME, "MS-7253"),
                        DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
                },
        },

        /* Now for the blacklist.. */

        /* https://bugzilla.redhat.com/show_bug.cgi?id=769657 */
        {
                .callback = set_nouse_crs,
                .ident = "Dell Studio 1557",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "Dell Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"),
                        DMI_MATCH(DMI_BIOS_VERSION, "A09"),
                },
        },
        /* https://bugzilla.redhat.com/show_bug.cgi?id=769657 */
        {
                .callback = set_nouse_crs,
                .ident = "Thinkpad SL510",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
                        DMI_MATCH(DMI_BOARD_NAME, "2847DFG"),
                        DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
                },
        },
        {}
};

void __init pci_acpi_crs_quirks(void)
{
        int year;

        if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
                pci_use_crs = false;

        dmi_check_system(pci_use_crs_table);

        /*
         * If the user specifies "pci=use_crs" or "pci=nocrs" explicitly, that
         * takes precedence over anything we figured out above.
         */
        if (pci_probe & PCI_ROOT_NO_CRS)
                pci_use_crs = false;
        else if (pci_probe & PCI_USE__CRS)
                pci_use_crs = true;

        printk(KERN_INFO "PCI: %s host bridge windows from ACPI; "
               "if necessary, use \"pci=%s\" and report a bug\n",
               pci_use_crs ? "Using" : "Ignoring",
               pci_use_crs ? "nocrs" : "use_crs");
}

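/*
 * Convert an ACPI resource descriptor (24/32-bit or fixed memory, or a
 * 16/32/64-bit address space descriptor) into a normalized
 * acpi_resource_address64.  Descriptors that do not describe a memory or
 * I/O range are rejected with AE_ERROR so that callers can skip them.
 */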
static acpi_status
resource_to_addr(struct acpi_resource *resource,
                 struct acpi_resource_address64 *addr)
{
        acpi_status status;
        struct acpi_resource_memory24 *memory24;
        struct acpi_resource_memory32 *memory32;
        struct acpi_resource_fixed_memory32 *fixed_memory32;

        memset(addr, 0, sizeof(*addr));
        switch (resource->type) {
        case ACPI_RESOURCE_TYPE_MEMORY24:
                memory24 = &resource->data.memory24;
                addr->resource_type = ACPI_MEMORY_RANGE;
                addr->minimum = memory24->minimum;
                addr->address_length = memory24->address_length;
                addr->maximum = addr->minimum + addr->address_length - 1;
                return AE_OK;
        case ACPI_RESOURCE_TYPE_MEMORY32:
                memory32 = &resource->data.memory32;
                addr->resource_type = ACPI_MEMORY_RANGE;
                addr->minimum = memory32->minimum;
                addr->address_length = memory32->address_length;
                addr->maximum = addr->minimum + addr->address_length - 1;
                return AE_OK;
        case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
                fixed_memory32 = &resource->data.fixed_memory32;
                addr->resource_type = ACPI_MEMORY_RANGE;
                addr->minimum = fixed_memory32->address;
                addr->address_length = fixed_memory32->address_length;
                addr->maximum = addr->minimum + addr->address_length - 1;
                return AE_OK;
        case ACPI_RESOURCE_TYPE_ADDRESS16:
        case ACPI_RESOURCE_TYPE_ADDRESS32:
        case ACPI_RESOURCE_TYPE_ADDRESS64:
                status = acpi_resource_to_address64(resource, addr);
                if (ACPI_SUCCESS(status) &&
                    (addr->resource_type == ACPI_MEMORY_RANGE ||
                     addr->resource_type == ACPI_IO_RANGE) &&
                    addr->address_length > 0) {
                        return AE_OK;
                }
                break;
        }
        return AE_ERROR;
}

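/*
 * acpi_walk_resources() callback: count the _CRS entries that describe a
 * usable memory or I/O window, so we know how many struct resources to
 * allocate before the second walk fills them in.
 */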
static acpi_status
count_resource(struct acpi_resource *acpi_res, void *data)
{
        struct pci_root_info *info = data;
        struct acpi_resource_address64 addr;
        acpi_status status;

        status = resource_to_addr(acpi_res, &addr);
        if (ACPI_SUCCESS(status))
                info->res_num++;
        return AE_OK;
}

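/*
 * acpi_walk_resources() callback: translate one _CRS window into the next
 * free slot of info->res, applying the host bridge translation offset and
 * clipping the range to what the CPU can actually address.  The window is
 * only accounted for (info->res_num++) when _CRS is being honored.
 */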
static acpi_status
setup_resource(struct acpi_resource *acpi_res, void *data)
{
        struct pci_root_info *info = data;
        struct resource *res;
        struct acpi_resource_address64 addr;
        acpi_status status;
        unsigned long flags;
        u64 start, orig_end, end;

        status = resource_to_addr(acpi_res, &addr);
        if (!ACPI_SUCCESS(status))
                return AE_OK;

        if (addr.resource_type == ACPI_MEMORY_RANGE) {
                flags = IORESOURCE_MEM;
                if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
                        flags |= IORESOURCE_PREFETCH;
        } else if (addr.resource_type == ACPI_IO_RANGE) {
                flags = IORESOURCE_IO;
        } else
                return AE_OK;

        start = addr.minimum + addr.translation_offset;
        orig_end = end = addr.maximum + addr.translation_offset;

        /* Exclude non-addressable range or non-addressable portion of range */
        end = min(end, (u64)iomem_resource.end);
        if (end <= start) {
                dev_info(&info->bridge->dev,
                         "host bridge window [%#llx-%#llx] "
                         "(ignored, not CPU addressable)\n", start, orig_end);
                return AE_OK;
        } else if (orig_end != end) {
                dev_info(&info->bridge->dev,
                         "host bridge window [%#llx-%#llx] "
                         "([%#llx-%#llx] ignored, not CPU addressable)\n",
                         start, orig_end, end + 1, orig_end);
        }

        res = &info->res[info->res_num];
        res->name = info->name;
        res->flags = flags;
        res->start = start;
        res->end = end;
        res->child = NULL;

        if (!pci_use_crs) {
                dev_printk(KERN_DEBUG, &info->bridge->dev,
                           "host bridge window %pR (ignored)\n", res);
                return AE_OK;
        }

        info->res_num++;
        if (addr.translation_offset)
                dev_info(&info->bridge->dev, "host bridge window %pR "
                         "(PCI address [%#llx-%#llx])\n",
                         res, res->start - addr.translation_offset,
                         res->end - addr.translation_offset);
        else
                dev_info(&info->bridge->dev, "host bridge window %pR\n", res);

        return AE_OK;
}

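/*
 * Merge overlapping windows of the given type (memory or I/O): the first
 * window is expanded to cover both ranges and the second is dropped by
 * clearing its flags, because the kernel resource tree does not allow
 * sibling resources to overlap.
 */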
static void coalesce_windows(struct pci_root_info *info, unsigned long type)
{
        int i, j;
        struct resource *res1, *res2;

        for (i = 0; i < info->res_num; i++) {
                res1 = &info->res[i];
                if (!(res1->flags & type))
                        continue;

                for (j = i + 1; j < info->res_num; j++) {
                        res2 = &info->res[j];
                        if (!(res2->flags & type))
                                continue;

                        /*
                         * I don't like throwing away windows because then
                         * our resources no longer match the ACPI _CRS, but
                         * the kernel resource tree doesn't allow overlaps.
                         */
                        if (resource_overlaps(res1, res2)) {
                                res1->start = min(res1->start, res2->start);
                                res1->end = max(res1->end, res2->end);
                                dev_info(&info->bridge->dev,
                                         "host bridge window expanded to %pR; %pR ignored\n",
                                         res1, res2);
                                res2->flags = 0;
                        }
                }
        }
}

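/*
 * Insert the coalesced windows into the global iomem/ioport resource trees
 * and add the ones that went in cleanly to the host bridge resource list;
 * windows that conflict with already-claimed resources are ignored.
 */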
static void add_resources(struct pci_root_info *info,
                          struct list_head *resources)
{
        int i;
        struct resource *res, *root, *conflict;

        coalesce_windows(info, IORESOURCE_MEM);
        coalesce_windows(info, IORESOURCE_IO);

        for (i = 0; i < info->res_num; i++) {
                res = &info->res[i];

                if (res->flags & IORESOURCE_MEM)
                        root = &iomem_resource;
                else if (res->flags & IORESOURCE_IO)
                        root = &ioport_resource;
                else
                        continue;

                conflict = insert_resource_conflict(root, res);
                if (conflict)
                        dev_info(&info->bridge->dev,
                                 "ignoring host bridge window %pR (conflicts with %s %pR)\n",
                                 res, conflict->name, conflict);
                else
                        pci_add_resource(resources, res);
        }
}

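/*
 * Teardown helpers: release the windows that were inserted into the
 * resource trees, free the window array, and finally free the
 * pci_root_info itself.  release_pci_root_info() is the host bridge
 * release callback installed in pci_acpi_scan_root().
 */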
static void free_pci_root_info_res(struct pci_root_info *info)
{
        kfree(info->res);
        info->res = NULL;
        info->res_num = 0;
}

static void __release_pci_root_info(struct pci_root_info *info)
{
        int i;
        struct resource *res;

        for (i = 0; i < info->res_num; i++) {
                res = &info->res[i];

                if (!res->parent)
                        continue;

                if (!(res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
                        continue;

                release_resource(res);
        }

        free_pci_root_info_res(info);

        kfree(info);
}

static void release_pci_root_info(struct pci_host_bridge *bridge)
{
        struct pci_root_info *info = bridge->release_data;

        __release_pci_root_info(info);
}

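/*
 * Walk the host bridge's _CRS twice: once to count the windows, then,
 * after allocating the resource array, again to fill it in through
 * setup_resource().  If the allocation fails, info->res stays NULL and
 * res_num stays zero, so no windows are added.
 */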
static void
probe_pci_root_info(struct pci_root_info *info, struct acpi_device *device,
                    int busnum, int domain)
{
        size_t size;

        info->bridge = device;
        info->res_num = 0;
        acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
                            info);
        if (!info->res_num)
                return;

        size = sizeof(*info->res) * info->res_num;
        info->res_num = 0;
        info->res = kmalloc(size, GFP_KERNEL);
        if (!info->res)
                return;

        sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum);

        acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
                            info);
}

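/*
 * Create and scan a PCI root bus described by an ACPI host bridge device.
 * Resolves the NUMA node from the _PXM proximity domain when available,
 * builds the bridge's window list from _CRS (or from the architecture
 * defaults when _CRS is ignored), and configures PCIe MPS settings for
 * the child buses once the hierarchy has been scanned.  If the bus was
 * already scanned, only its sysdata is refreshed.
 */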
struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
{
        struct acpi_device *device = root->device;
        struct pci_root_info *info = NULL;
        int domain = root->segment;
        int busnum = root->secondary.start;
        LIST_HEAD(resources);
        struct pci_bus *bus;
        struct pci_sysdata *sd;
        int node;
#ifdef CONFIG_ACPI_NUMA
        int pxm;
#endif

        if (domain && !pci_domains_supported) {
                printk(KERN_WARNING "pci_bus %04x:%02x: "
                       "ignored (multiple domains not supported)\n",
                       domain, busnum);
                return NULL;
        }

        node = -1;
#ifdef CONFIG_ACPI_NUMA
        pxm = acpi_get_pxm(device->handle);
        if (pxm >= 0)
                node = pxm_to_node(pxm);
        if (node != -1)
                set_mp_bus_to_node(busnum, node);
        else
#endif
                node = get_mp_bus_to_node(busnum);

        if (node != -1 && !node_online(node))
                node = -1;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info) {
                printk(KERN_WARNING "pci_bus %04x:%02x: "
                       "ignored (out of memory)\n", domain, busnum);
                return NULL;
        }

        sd = &info->sd;
        sd->domain = domain;
        sd->node = node;
        /*
         * The desired pci bus may already have been scanned; in that case
         * there is no need to scan it again for this domain and busnum.
         */
        bus = pci_find_bus(domain, busnum);
        if (bus) {
                /*
                 * If the desired bus exists, the content of bus->sysdata
                 * will be replaced by sd.
                 */
                memcpy(bus->sysdata, sd, sizeof(*sd));
                kfree(info);
        } else {
                probe_pci_root_info(info, device, busnum, domain);

                /*
                 * _CRS with no apertures is normal, so only fall back to
                 * defaults or native bridge info if we're ignoring _CRS.
                 */
                if (pci_use_crs)
                        add_resources(info, &resources);
                else {
                        free_pci_root_info_res(info);
                        x86_pci_root_bus_resources(busnum, &resources);
                }

                bus = pci_create_root_bus(NULL, busnum, &pci_root_ops, sd,
                                          &resources);
                if (bus) {
                        bus->subordinate = pci_scan_child_bus(bus);
                        pci_set_host_bridge_release(
                                to_pci_host_bridge(bus->bridge),
                                release_pci_root_info, info);
                } else {
                        pci_free_resource_list(&resources);
                        __release_pci_root_info(info);
                }
        }

        /* After the PCI-E bus has been walked and all devices discovered,
         * configure any settings of the fabric that might be necessary.
         */
        if (bus) {
                struct pci_bus *child;
                list_for_each_entry(child, &bus->children, node) {
                        struct pci_dev *self = child->self;
                        if (!self)
                                continue;

                        pcie_bus_configure_settings(child, self->pcie_mpss);
                }
        }

        if (bus && node != -1) {
#ifdef CONFIG_ACPI_NUMA
                if (pxm >= 0)
                        dev_printk(KERN_DEBUG, &bus->dev,
                                   "on NUMA node %d (pxm %d)\n", node, pxm);
#else
                dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
#endif
        }

        return bus;
}

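/*
 * Switch x86 PCI interrupt routing over to ACPI once the interpreter is
 * available; with "pci=routeirq" the IRQs of all existing devices are
 * routed immediately instead of waiting for pci_enable_device().
 */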
int __init pci_acpi_init(void)
{
        struct pci_dev *dev = NULL;

        if (acpi_noirq)
                return -ENODEV;

        printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
        acpi_irq_penalty_init();
        pcibios_enable_irq = acpi_pci_irq_enable;
        pcibios_disable_irq = acpi_pci_irq_disable;
        x86_init.pci.init_irq = x86_init_noop;

        if (pci_routeirq) {
                /*
                 * PCI IRQ routing is set up by pci_enable_device(), but we
                 * also do it here in case there are still broken drivers that
                 * don't use pci_enable_device().
                 */
                printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n");
                for_each_pci_dev(dev)
                        acpi_pci_irq_enable(dev);
        }

        return 0;
}