/*********************************************************
 * Copyright (C) 2002 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *********************************************************/
#ifndef __COMPAT_MM_H__
#   define __COMPAT_MM_H__

/* The get_page() API appeared in 2.3.7 --hpreg */
/* Sometime during development it became function instead of macro --petr */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) && !defined(get_page)
#   define get_page(_page) atomic_inc(&(_page)->count)
/* The __free_page() API is exported in 2.1.67 --hpreg */
#   if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 67)
#      define put_page __free_page
#   else
/* Pre-2.1.67: no __free_page(); release via free_page() on the physical
 * address derived from the struct page. */
#      include "compat_page.h"

#      define page_to_phys(_page) (page_to_pfn(_page) << PAGE_SHIFT)
#      define put_page(_page) free_page(page_to_phys(_page))
#   endif
#endif
/* page_count() is 2.4.0 invention. Unfortunately unavailable in some RedHat
 * kernels (for example 2.4.21-4-RHEL3). */
/* It is function since 2.6.0, and hopefully RedHat will not play silly games
 * with mm_inline.h again... */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && !defined(page_count)
#   define page_count(page) atomic_read(&(page)->count)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
/* Before 2.4.0 a VMA records a byte offset (vm_offset), not a page offset. */
#   define compat_vm_pgoff(vma) ((vma)->vm_offset >> PAGE_SHIFT)

/*
 * Pre-2.4.0 do_mmap() takes a byte offset rather than a page offset;
 * emulate the newer pgoff-based interface on top of it.
 *
 * Returns the mapped address on success, or -EINVAL when the page offset
 * does not fit in the 32-bit byte offset that do_mmap() accepts.
 */
static inline unsigned long compat_do_mmap_pgoff(struct file *file, unsigned long addr,
                                                 unsigned long len, unsigned long prot,
                                                 unsigned long flag, unsigned long pgoff)
{
   unsigned long ret = -EINVAL;

   /* Reject offsets whose byte value would overflow 32 bits. */
   if (pgoff < 1 << (32 - PAGE_SHIFT)) {
      ret = do_mmap(file, addr, len, prot, flag, pgoff << PAGE_SHIFT);
   }
   return ret;
}
#else
#   define compat_vm_pgoff(vma) (vma)->vm_pgoff
#   ifdef VMW_SKAS_MMAP
/* SKAS-patched kernels take an explicit mm argument. */
#      define compat_do_mmap_pgoff(f, a, l, p, g, o) \
          do_mmap_pgoff(current->mm, f, a, l, p, g, o)
#   else
#      define compat_do_mmap_pgoff(f, a, l, p, g, o) \
          do_mmap_pgoff(f, a, l, p, g, o)
#   endif
#endif
/* 2.2.x uses 0 instead of some define */
80
#define NOPAGE_SIGBUS (0)
84
/* 2.2.x does not have HIGHMEM support */
86
#define GFP_HIGHUSER (GFP_USER)
90
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)

#   include "compat_page.h"

/*
 * Pre-2.4.0 kernels lack alloc_pages(); emulate it on top of
 * __get_free_pages().
 *
 * Returns the struct page for the first page of the allocation, or NULL
 * when __get_free_pages() fails.
 */
static inline struct page * alloc_pages(unsigned int gfp_mask, unsigned int order)
{
   unsigned long addr;

   addr = __get_free_pages(gfp_mask, order);
   if (!addr) {
      return NULL;
   }
   return virt_to_page(addr);
}
#   define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)

#endif
/*
 * In 2.4.14, the logic behind the UnlockPage macro was moved to the
 * unlock_page() function. Later (in 2.5.12), the UnlockPage macro was removed
 * altogether, and nowadays everyone uses unlock_page().
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 14)
#define compat_unlock_page(page) UnlockPage(page)
#else
#define compat_unlock_page(page) unlock_page(page)
#endif
* In 2.4.10, vmtruncate was changed from returning void to returning int.
122
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 10)
123
#define compat_vmtruncate(inode, size) \
126
vmtruncate(inode, size); \
130
#define compat_vmtruncate(inode, size) vmtruncate(inode, size)
134
#endif /* __COMPAT_MM_H__ */