v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/mmzone.c
 *
 * management code for pgdats, zones and page flags
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

struct pglist_data *first_online_pgdat(void)
{
	return NODE_DATA(first_online_node);
}

struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
	int nid = next_online_node(pgdat->node_id);

	if (nid == MAX_NUMNODES)
		return NULL;
	return NODE_DATA(nid);
}
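
These two helpers exist to back the for_each_online_pgdat() iterator. A sketch of that macro from include/linux/mmzone.h, reproduced from memory for context (not part of mmzone.c; details may differ between versions):

/* Sketch: walk every online node's pgdat using the helpers above. */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))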

/*
 * next_zone - helper magic for for_each_zone()
 */
struct zone *next_zone(struct zone *zone)
{
	pg_data_t *pgdat = zone->zone_pgdat;

	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
		zone++;
	else {
		pgdat = next_online_pgdat(pgdat);
		if (pgdat)
			zone = pgdat->node_zones;
		else
			zone = NULL;
	}
	return zone;
}
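
next_zone() in turn backs for_each_zone(), which chains the pgdat helpers above to visit every zone of every online node. A sketch of that macro from include/linux/mmzone.h, again from memory and not part of this file:

/* Sketch: start at the first online node's zones, then chain next_zone(). */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones;	\
	     zone;					\
	     zone = next_zone(zone))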

static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
{
#ifdef CONFIG_NUMA
	return node_isset(zonelist_node_idx(zref), *nodes);
#else
	return 1;
#endif /* CONFIG_NUMA */
}

/* Returns the next zone at or below highest_zoneidx in a zonelist */
struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	/*
	 * Find the next suitable zone to use for the allocation.
	 * Only filter based on nodemask if it's set
	 */
	if (unlikely(nodes == NULL))
		while (zonelist_zone_idx(z) > highest_zoneidx)
			z++;
	else
		while (zonelist_zone_idx(z) > highest_zoneidx ||
				(zonelist_zone(z) && !zref_in_nodemask(z, nodes)))
			z++;

	return z;
}
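
The nodes == NULL case is unlikely() here because this is the out-of-line slow path: callers reach it through an inline wrapper that already returns early in the common no-nodemask case. A sketch of that wrapper from include/linux/mmzone.h (reproduced from memory; the exact form may differ in v6.13):

/* Inline fast path; falls back to the function above when filtering or
 * skipping is actually required. */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}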
void lruvec_init(struct lruvec *lruvec)
{
	enum lru_list lru;

	memset(lruvec, 0, sizeof(struct lruvec));
	spin_lock_init(&lruvec->lru_lock);
	zswap_lruvec_state_init(lruvec);

	for_each_lru(lru)
		INIT_LIST_HEAD(&lruvec->lists[lru]);
	/*
	 * The "Unevictable LRU" is imaginary: though its size is maintained,
	 * it is never scanned, and unevictable pages are not threaded on it
	 * (so that their lru fields can be reused to hold mlock_count).
	 * Poison its list head, so that any operations on it would crash.
	 */
	list_del(&lruvec->lists[LRU_UNEVICTABLE]);

	lru_gen_init_lruvec(lruvec);
}
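
for_each_lru() above is a plain index walk over all NR_LRU_LISTS list heads, which is why the unevictable head can be poisoned immediately afterwards without upsetting the loop. A sketch of the macro from include/linux/mmzone.h (from memory):

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)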

#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
	unsigned long old_flags, flags;
	int last_cpupid;

	old_flags = READ_ONCE(folio->flags);
	do {
		flags = old_flags;
		last_cpupid = (flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;

		flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
		flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
	} while (unlikely(!try_cmpxchg(&folio->flags, &old_flags, flags)));

	return last_cpupid;
}
#endif
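
The loop above is the usual lock-free read-modify-write pattern: on failure, try_cmpxchg() refreshes old_flags with the value it actually found, so each retry recomputes the new flags from fresh state. A minimal userspace C11 analogue of the same pattern, with an invented 8-bit field standing in for the cpupid bits (names and layout are hypothetical):

#include <stdatomic.h>

/* Atomically replace the low byte of *word and return the previous low
 * byte.  Mirrors the kernel's try_cmpxchg() retry loop above. */
static int xchg_low_byte(_Atomic unsigned long *word, int val)
{
	unsigned long old = atomic_load_explicit(word, memory_order_relaxed);
	unsigned long new;
	int last;

	do {
		last = old & 0xff;
		new = (old & ~0xffUL) | ((unsigned long)val & 0xff);
		/* On failure, 'old' has been refreshed with the current value. */
	} while (!atomic_compare_exchange_weak(word, &old, new));

	return last;
}

The v3.15 listing below expresses the same exchange with the older idiom of comparing cmpxchg()'s return value against the expected word.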
v3.15
 
/*
 * linux/mm/mmzone.c
 *
 * management code for pgdats, zones and page flags
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

struct pglist_data *first_online_pgdat(void)
{
	return NODE_DATA(first_online_node);
}

struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
	int nid = next_online_node(pgdat->node_id);

	if (nid == MAX_NUMNODES)
		return NULL;
	return NODE_DATA(nid);
}

/*
 * next_zone - helper magic for for_each_zone()
 */
struct zone *next_zone(struct zone *zone)
{
	pg_data_t *pgdat = zone->zone_pgdat;

	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
		zone++;
	else {
		pgdat = next_online_pgdat(pgdat);
		if (pgdat)
			zone = pgdat->node_zones;
		else
			zone = NULL;
	}
	return zone;
}

static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
{
#ifdef CONFIG_NUMA
	return node_isset(zonelist_node_idx(zref), *nodes);
#else
	return 1;
#endif /* CONFIG_NUMA */
}

/* Returns the next zone at or below highest_zoneidx in a zonelist */
struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes,
					struct zone **zone)
{
	/*
	 * Find the next suitable zone to use for the allocation.
	 * Only filter based on nodemask if it's set
	 */
	if (likely(nodes == NULL))
		while (zonelist_zone_idx(z) > highest_zoneidx)
			z++;
	else
		while (zonelist_zone_idx(z) > highest_zoneidx ||
				(z->zone && !zref_in_nodemask(z, nodes)))
			z++;

	*zone = zonelist_zone(z);
	return z;
}
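
In this version the iterator also hands the zone back through an output parameter, which the zonelist walk macros thread through each step. A sketch of the v3.15-era macro from include/linux/mmzone.h (reproduced from memory; details may vary):

#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask, &zone))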

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
int memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	if (page_to_pfn(page) != pfn)
		return 0;

	if (page_zone(page) != zone)
		return 0;

	return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
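
memmap_valid_within() lets pfn walkers cope with architectures whose memmap contains holes, where pfn_to_page() can yield a struct page that does not actually describe the pfn. A hypothetical caller sketch, modeled on how mm/vmstat.c used this helper (loop bounds and variables are invented):

	/* Skip memmap entries not backed by a real page in this zone. */
	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		struct page *page = pfn_to_page(pfn);

		if (!memmap_valid_within(pfn, page, zone))
			continue;
		/* ... 'page' is now safe to inspect ... */
	}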

void lruvec_init(struct lruvec *lruvec)
{
	enum lru_list lru;

	memset(lruvec, 0, sizeof(struct lruvec));

	for_each_lru(lru)
		INIT_LIST_HEAD(&lruvec->lists[lru]);
}

#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	unsigned long old_flags, flags;
	int last_cpupid;

	do {
		old_flags = flags = page->flags;
		last_cpupid = page_cpupid_last(page);

		flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
		flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
	} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));

	return last_cpupid;
}
#endif