// file: include/linux/mm_types.h
...
struct page {
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	/*
	 * Five words (20/40 bytes) are available in this union.
	 * WARNING: bit 0 of the first word is used for PageTail(). That
	 * means the other users of this union MUST NOT use the bit to
	 * avoid collision and false-positive PageTail().
	 */
	union {
		struct {	/* Page cache and anonymous pages */
			/**
			 * @lru: Pageout list, eg. active_list protected by
			 * pgdat->lru_lock. Sometimes used as a generic list
			 * by the page owner.
			 */
			struct list_head lru;
			/* See page-flags.h for PAGE_MAPPING_FLAGS */
			struct address_space *mapping;
			pgoff_t index;		/* Our offset within mapping. */
			/**
			 * @private: Mapping-private opaque data.
			 * Usually used for buffer_heads if PagePrivate.
			 * Used for swp_entry_t if PageSwapCache.
			 * Indicates order in the buddy system if PageBuddy.
			 */
			unsigned long private;
		};
		...
	};
	...
} _struct_page_alignment;
Space occupied by struct page
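flags is one unsigned long, the union above contributes another five words, and with the remaining fields sizeof(struct page) comes to 64 bytes on a typical 64-bit configuration (the exact size depends on the kernel config). One such descriptor exists for every page frame, so with 4 KiB pages the memmap costs a fixed 64/4096 ≈ 1.56% of RAM. A back-of-envelope sketch of that arithmetic, with the 64-byte and 4 KiB figures as assumptions rather than values read from any kernel:

// sketch: back-of-envelope memmap footprint (not kernel code)
#include <stdio.h>

#define PAGE_SIZE        4096UL	/* assumption: 4 KiB page frames */
#define STRUCT_PAGE_SIZE 64UL	/* assumption: sizeof(struct page) on a
				 * typical 64-bit config */

int main(void)
{
	unsigned long ram_bytes = 16UL << 30;		/* example: 16 GiB */
	unsigned long nr_pages  = ram_bytes / PAGE_SIZE;
	unsigned long memmap_mb = nr_pages * STRUCT_PAGE_SIZE >> 20;

	printf("%lu page frames -> %lu MiB of memmap (%.2f%% of RAM)\n",
	       nr_pages, memmap_mb, 100.0 * STRUCT_PAGE_SIZE / PAGE_SIZE);
	return 0;
}

For 16 GiB of RAM that is already 256 MiB of struct page, which is why the boot code below carefully accounts for the memmap per zone.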
// file: mm/page_alloc.c
static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
					     unsigned long present_pages)
{
	unsigned long pages = spanned_pages;

	/*
	 * Provide a more accurate estimation if there are holes within
	 * the zone and SPARSEMEM is in use. If there are holes within the
	 * zone, each populated memory region may cost us one or two extra
	 * memmap pages due to alignment because memmap pages for each
	 * populated regions may not be naturally aligned on page boundary.
	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
	 */
	if (spanned_pages > present_pages + (present_pages >> 4) &&
	    IS_ENABLED(CONFIG_SPARSEMEM))
		pages = present_pages;

	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
}
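Note the units and the heuristic here. The return value is in pages: pages * sizeof(struct page) bytes of memmap, rounded up to a page boundary, then shifted down by PAGE_SHIFT. And spanned_pages is trusted only while the holes stay within present_pages >> 4, i.e. about 6.25% of the populated size; past that, with SPARSEMEM the memmap for unpopulated sections is not allocated anyway, so present_pages is the better estimate. A userspace re-run of the same arithmetic, with PAGE_SHIFT, PAGE_ALIGN and a 64-byte struct page as stand-ins for the kernel's definitions (assumed values, common on x86-64):

// sketch: calc_memmap_size() arithmetic redone in userspace
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define STRUCT_PAGE_SIZE 64UL	/* assumption: typical 64-bit config */

static unsigned long calc_memmap_size(unsigned long spanned_pages,
				      unsigned long present_pages)
{
	unsigned long pages = spanned_pages;

	/* holes exceed ~6.25% of the populated size: use present_pages */
	if (spanned_pages > present_pages + (present_pages >> 4))
		pages = present_pages;	/* as if CONFIG_SPARSEMEM=y */

	return PAGE_ALIGN(pages * STRUCT_PAGE_SIZE) >> PAGE_SHIFT;
}

int main(void)
{
	/* hypothetical zone: 1M spanned pages, 12.5% of them holes */
	unsigned long spanned = 1UL << 20;
	unsigned long present = spanned - (spanned >> 3);

	printf("memmap: %lu pages for %lu present pages\n",
	       calc_memmap_size(spanned, present), present);
	return 0;
}

With 64-byte descriptors and 4 KiB pages, one memmap page covers 64 frames, so the 917504 present pages cost 917504 / 64 = 14336 memmap pages, which is what this prints.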
// file: mm/page_alloc.c
static void __init free_area_init_core(struct pglist_data *pgdat)
{
	enum zone_type j;
	int nid = pgdat->node_id;

	pgdat_init_internals(pgdat);
	pgdat->per_cpu_nodestats = &boot_nodestats;

	for (j = 0; j < MAX_NR_ZONES; j++) {
		struct zone *zone = pgdat->node_zones + j;
		unsigned long size, freesize, memmap_pages;
		unsigned long zone_start_pfn = zone->zone_start_pfn;

		size = zone->spanned_pages;
		freesize = zone->present_pages;

		/*
		 * Adjust freesize so that it accounts for how much memory
		 * is used by this zone for memmap. This affects the watermark
		 * and per-cpu initialisations
		 */
		memmap_pages = calc_memmap_size(size, freesize);
		if (!is_highmem_idx(j)) {
			if (freesize >= memmap_pages) {
				freesize -= memmap_pages;
				if (memmap_pages)
					printk(KERN_DEBUG
					       " %s zone: %lu pages used for memmap\n",
					       zone_names[j], memmap_pages);
			} else
				pr_warn(" %s zone: %lu pages exceeds freesize %lu\n",
					zone_names[j], memmap_pages, freesize);
		}
		...
	}
}
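The freesize adjustment only changes the accounting that, per the comment, feeds the watermark and per-cpu initialisation; the memmap pages themselves were already set aside by the early memblock allocator and are never handed to the buddy allocator. On a live system the effect shows up in /proc/zoneinfo as the gap between each zone's present and managed counters (that gap also contains other boot-time reservations, not just the memmap). A small reader, as a sketch:

// sketch: observe per-zone present vs. managed pages (not kernel code)
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/zoneinfo", "r");
	char line[256], zone[32] = "?";
	long present = -1, managed;

	if (!f) {
		perror("/proc/zoneinfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* "Node 0, zone   DMA32" starts a new zone section */
		if (sscanf(line, "Node %*d, zone %31s", zone) == 1)
			present = -1;
		else if (sscanf(line, " present %ld", &present) == 1)
			;	/* remember it until "managed" shows up */
		else if (sscanf(line, " managed %ld", &managed) == 1 &&
			 present >= 0)
			printf("%-8s present %8ld  managed %8ld  held back %6ld\n",
			       zone, present, managed, present - managed);
	}
	fclose(f);
	return 0;
}

The matching per-zone memmap numbers are printed at boot by the KERN_DEBUG printk above, provided the console log level lets debug messages through.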