Separate all the MM types that are embedded directly in 'struct task_struct'
into the new <linux/mm_types_task.h> header.

The goal is to include this header in <linux/sched.h>, not the full
<linux/mm_types.h> header, to reduce the size, complexity and coupling
of <linux/sched.h>.

(This patch does not change <linux/sched.h> yet.)
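For reference, a rough sketch of the consumer side: 'struct task_struct'
embeds these types along the following lines (illustrative only - the exact
member names and layout vary by kernel version and config):

	struct task_struct {
		...
		/* Per-thread VMA lookup cache: */
		struct vmacache			vmacache;
	#ifdef SPLIT_RSS_COUNTING
		/* Per-thread cached RSS counters: */
		struct task_rss_stat		rss_stat;
	#endif
		/* Cached page fragment, used e.g. by the networking code: */
		struct page_frag		task_frag;
		...
	};

These members are what force <linux/sched.h> to pull in MM type definitions
today; the new header carries just those definitions.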

Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Mike Galbraith <efa...@gmx.de>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 include/linux/mm_types.h      | 55 +++--------------------------------------------------
 include/linux/mm_types_task.h | 65 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 68 insertions(+), 52 deletions(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index a2696980a7bf..6083670bbbec 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1,9 +1,9 @@
 #ifndef _LINUX_MM_TYPES_H
 #define _LINUX_MM_TYPES_H
 
+#include <linux/mm_types_task.h>
+
 #include <linux/auxvec.h>
-#include <linux/types.h>
-#include <linux/threads.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/rbtree.h>
@@ -13,7 +13,7 @@
 #include <linux/uprobes.h>
 #include <linux/page-flags-layout.h>
 #include <linux/workqueue.h>
-#include <asm/page.h>
+
 #include <asm/mmu.h>
 
 #ifndef AT_VECTOR_SIZE_ARCH
@@ -24,11 +24,6 @@
 struct address_space;
 struct mem_cgroup;
 
-#define USE_SPLIT_PTE_PTLOCKS  (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
-#define USE_SPLIT_PMD_PTLOCKS  (USE_SPLIT_PTE_PTLOCKS && \
-               IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
-#define ALLOC_SPLIT_PTLOCKS    (SPINLOCK_SIZE > BITS_PER_LONG/8)
-
 /*
  * Each physical page in the system has a struct page associated with
  * it to keep track of whatever it is we are using the page for at the
@@ -231,17 +226,6 @@ struct page {
 #endif
 ;
 
-struct page_frag {
-       struct page *page;
-#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
-       __u32 offset;
-       __u32 size;
-#else
-       __u16 offset;
-       __u16 size;
-#endif
-};
-
 #define PAGE_FRAG_CACHE_MAX_SIZE       __ALIGN_MASK(32768, ~PAGE_MASK)
 #define PAGE_FRAG_CACHE_MAX_ORDER      get_order(PAGE_FRAG_CACHE_MAX_SIZE)
 
@@ -360,18 +344,6 @@ struct vm_area_struct {
        struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
 };
 
-/*
- * The per task VMA cache array:
- */
-#define VMACACHE_BITS 2
-#define VMACACHE_SIZE (1U << VMACACHE_BITS)
-#define VMACACHE_MASK (VMACACHE_SIZE - 1)
-
-struct vmacache {
-       u32 seqnum;
-       struct vm_area_struct *vmas[VMACACHE_SIZE];
-};
-
 struct core_thread {
        struct task_struct *task;
        struct core_thread *next;
@@ -383,27 +355,6 @@ struct core_state {
        struct completion startup;
 };
 
-enum {
-       MM_FILEPAGES,   /* Resident file mapping pages */
-       MM_ANONPAGES,   /* Resident anonymous pages */
-       MM_SWAPENTS,    /* Anonymous swap entries */
-       MM_SHMEMPAGES,  /* Resident shared memory pages */
-       NR_MM_COUNTERS
-};
-
-#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
-#define SPLIT_RSS_COUNTING
-/* per-thread cached information, */
-struct task_rss_stat {
-       int events;     /* for synchronization threshold */
-       int count[NR_MM_COUNTERS];
-};
-#endif /* USE_SPLIT_PTE_PTLOCKS */
-
-struct mm_rss_stat {
-       atomic_long_t count[NR_MM_COUNTERS];
-};
-
 struct kioctx_table;
 struct mm_struct {
        struct vm_area_struct *mmap;            /* list of VMAs */
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
new file mode 100644
index 000000000000..9526d8b9fe0e
--- /dev/null
+++ b/include/linux/mm_types_task.h
@@ -0,0 +1,65 @@
+#ifndef _LINUX_MM_TYPES_TASK_H
+#define _LINUX_MM_TYPES_TASK_H
+
+/*
+ * Here are the definitions of the MM data types that are embedded in 'struct task_struct'.
+ *
+ * (These are defined separately to decouple sched.h from mm_types.h as much as possible.)
+ */
+
+#include <linux/types.h>
+#include <linux/threads.h>
+#include <linux/atomic.h>
+
+#include <asm/page.h>
+
+#define USE_SPLIT_PTE_PTLOCKS  (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
+#define USE_SPLIT_PMD_PTLOCKS  (USE_SPLIT_PTE_PTLOCKS && \
+               IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
+#define ALLOC_SPLIT_PTLOCKS    (SPINLOCK_SIZE > BITS_PER_LONG/8)
+
+/*
+ * The per task VMA cache array:
+ */
+#define VMACACHE_BITS 2
+#define VMACACHE_SIZE (1U << VMACACHE_BITS)
+#define VMACACHE_MASK (VMACACHE_SIZE - 1)
+
+struct vmacache {
+       u32 seqnum;
+       struct vm_area_struct *vmas[VMACACHE_SIZE];
+};
+
+enum {
+       MM_FILEPAGES,   /* Resident file mapping pages */
+       MM_ANONPAGES,   /* Resident anonymous pages */
+       MM_SWAPENTS,    /* Anonymous swap entries */
+       MM_SHMEMPAGES,  /* Resident shared memory pages */
+       NR_MM_COUNTERS
+};
+
+#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
+#define SPLIT_RSS_COUNTING
+/* per-thread cached information, */
+struct task_rss_stat {
+       int events;     /* for synchronization threshold */
+       int count[NR_MM_COUNTERS];
+};
+#endif /* USE_SPLIT_PTE_PTLOCKS */
+
+struct mm_rss_stat {
+       atomic_long_t count[NR_MM_COUNTERS];
+};
+
+struct page_frag {
+       struct page *page;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+       __u32 offset;
+       __u32 size;
+#else
+       __u16 offset;
+       __u16 size;
+#endif
+};
+
+#endif /* _LINUX_MM_TYPES_TASK_H */
-- 
2.7.4