~ubuntu-branches/ubuntu/edgy/rpm/edgy

« back to all changes in this revision

Viewing changes to db/mp/mp_alloc.c

  • Committer: Bazaar Package Importer
  • Author(s): Joey Hess
  • Date: 2002-01-22 20:56:57 UTC
  • Revision ID: james.westby@ubuntu.com-20020122205657-l74j50mr9z8ofcl5
Tags: upstream-4.0.3
Import upstream version 4.0.3

Show diffs side-by-side

added added

removed removed

Lines of Context:
 
1
/*-
 
2
 * See the file LICENSE for redistribution information.
 
3
 *
 
4
 * Copyright (c) 1996-2001
 
5
 *      Sleepycat Software.  All rights reserved.
 
6
 */
 
7
#include "db_config.h"
 
8
 
 
9
#ifndef lint
 
10
static const char revid[] = "$Id: mp_alloc.c,v 11.14 2001/05/10 21:25:22 bostic Exp $";
 
11
#endif /* not lint */
 
12
 
 
13
#ifndef NO_SYSTEM_INCLUDES
 
14
#include <sys/types.h>
 
15
#endif
 
16
 
 
17
#include "db_int.h"
 
18
#include "db_shash.h"
 
19
#include "mp.h"
 
20
 
 
21
/*
 
22
 * __memp_alloc --
 
23
 *      Allocate some space from a cache region.
 
24
 *
 
25
 * PUBLIC: int __memp_alloc __P((DB_MPOOL *,
 
26
 * PUBLIC:     REGINFO *, MPOOLFILE *, size_t, roff_t *, void *));
 
27
 */
 
28
int
__memp_alloc(dbmp, memreg, mfp, len, offsetp, retp)
	DB_MPOOL *dbmp;		/* Mpool handle; its dbenv/reginfo are used for region locking. */
	REGINFO *memreg;	/* Cache region to allocate from. */
	MPOOLFILE *mfp;		/* If non-NULL, we're allocating a buffer header for this file. */
	size_t len;		/* Bytes wanted; recomputed from mfp's page size when mfp != NULL. */
	roff_t *offsetp;	/* If non-NULL, filled in with the region offset of the result. */
	void *retp;		/* Really a (void **); filled in with the allocated address. */
{
	BH *bhp, *nbhp;
	MPOOL *c_mp;
	MPOOLFILE *bh_mfp;
	int nomore, restart, ret, wrote;
	u_int32_t failed_writes, pages_reviewed;
	size_t total;
	void *p;

	/*
	 * NOTE(review): the R_UNLOCK/R_LOCK pair around memp_sync below
	 * implies the caller enters with the mpool region lock held --
	 * confirm against the callers of __memp_alloc.
	 */
	c_mp = memreg->primary;

	failed_writes = 0;

	/*
	 * If we're allocating a buffer, and the one we're discarding is the
	 * same size, we don't want to waste the time to re-integrate it into
	 * the shared memory free list.  If the DB_MPOOLFILE argument isn't
	 * NULL, we'll compare the underlying page sizes of the two buffers
	 * before free-ing and re-allocating buffers.
	 */
	if (mfp != NULL)
		len = (sizeof(BH) - sizeof(u_int8_t)) + mfp->stat.st_pagesize;

	/*
	 * nomore escalates each time the LRU walk below completes without
	 * freeing enough space: 0 = first try, 1 = try an mpool sync,
	 * 2 = give up and report the allocation failure.
	 */
	nomore = 0;
alloc:	if ((ret = __db_shalloc(memreg->addr, len, MUTEX_ALIGN, &p)) == 0) {
		/* Success: hand back the address (and region offset, if asked). */
		if (offsetp != NULL)
			*offsetp = R_OFFSET(memreg, p);
		*(void **)retp = p;
		return (0);
	}
	if (nomore == 1) {
		/*
		 * Things are really bad, let's try to sync the mpool.
		 * This will force any queue extent pages out.
		 * While it could be that we just don't have enough
		 * space for what we want, and this is rather expensive,
		 * we are about to fail, so, why not.
		 */
		R_UNLOCK(dbmp->dbenv, dbmp->reginfo);
		ret = memp_sync(dbmp->dbenv, NULL);
		R_LOCK(dbmp->dbenv, dbmp->reginfo);
		/* A partial sync or I/O error still may have freed pages. */
		if (ret == DB_INCOMPLETE || ret == EIO)
			ret = 0;
		else if (ret != 0)
			return (ret);
	} else if (nomore == 2) {
		/* ret is the error from the most recent __db_shalloc failure. */
		__db_err(dbmp->dbenv,
	    "Unable to allocate %lu bytes from mpool shared region: %s",
		    (u_long)len, db_strerror(ret));
		return (ret);
	}

retry:	/* Find a buffer we can flush; pure LRU. */
	total = 0;
	restart = 0;
	pages_reviewed = 0;
	for (bhp =
	    SH_TAILQ_FIRST(&c_mp->bhq, __bh); bhp != NULL; bhp = nbhp) {
		/* Save the next pointer now; bhp may be freed below. */
		nbhp = SH_TAILQ_NEXT(bhp, q, __bh);

		++pages_reviewed;

		/* Ignore pinned or locked (I/O in progress) buffers. */
		if (bhp->ref != 0 || F_ISSET(bhp, BH_LOCKED))
			continue;

		/* Find the associated MPOOLFILE. */
		bh_mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);

		/* Write the page if it's dirty. */
		if (F_ISSET(bhp, BH_DIRTY)) {
			/*
			 * Pin the buffer across the write so it can't be
			 * discarded.  NOTE(review): the restart flag suggests
			 * __memp_bhwrite may drop and reacquire the region
			 * lock -- confirm in its implementation.
			 */
			++bhp->ref;
			ret = __memp_bhwrite(dbmp,
			    bh_mfp, bhp, 0, &restart, &wrote);
			--bhp->ref;

			if (ret != 0) {
				/*
				 * Count the number of writes that have
				 * failed.  If the number of writes that
				 * have failed, total, plus the number
				 * of pages we've reviewed on this pass
				 * equals the number of buffers there
				 * currently are, we've most likely
				 * run out of buffers that are going to
				 * succeed, and it's time to fail.
				 * (We chuck failing buffers to the
				 * end of the list.) [#0637]
				 */
				failed_writes++;
				if (failed_writes + pages_reviewed >=
				    c_mp->stat.st_page_dirty +
				    c_mp->stat.st_page_clean)
					return (ret);

				/*
				 * Otherwise, relocate this buffer
				 * to the end of the LRU queue
				 * so we're less likely to encounter
				 * it again, and try again.
				 */
				SH_TAILQ_REMOVE(&c_mp->bhq, bhp, q, __bh);
				SH_TAILQ_INSERT_TAIL(&c_mp->bhq, bhp, q);
				goto retry;
			}

			/*
			 * Another process may have acquired this buffer and
			 * incremented the ref count after we wrote it.
			 */
			if (bhp->ref != 0)
				goto retry;

			/*
			 * If we wrote the page, continue and free the buffer.
			 * We don't have to rewalk the list to acquire the
			 * buffer because it was never available for any other
			 * process to modify it.
			 *
			 * If we didn't write the page, but we discarded and
			 * reacquired the region lock, restart the list walk.
			 *
			 * If we neither wrote the buffer nor discarded the
			 * region lock, continue down the buffer list.
			 */
			if (wrote)
				++c_mp->stat.st_rw_evict;
			else {
				if (restart)
					goto retry;
				continue;
			}
		} else
			++c_mp->stat.st_ro_evict;

		/*
		 * Check to see if the buffer is the size we're looking for.
		 * If it is, simply reuse it.
		 */
		if (mfp != NULL &&
		    mfp->stat.st_pagesize == bh_mfp->stat.st_pagesize) {
			/* Free without returning memory to the region free list. */
			__memp_bhfree(dbmp, bhp, 0);

			if (offsetp != NULL)
				*offsetp = R_OFFSET(memreg, bhp);
			*(void **)retp = bhp;
			return (0);
		}

		/* Note how much space we've freed, and free the buffer. */
		total += __db_shsizeof(bhp);
		__memp_bhfree(dbmp, bhp, 1);

		/*
		 * Retry as soon as we've freed up sufficient space.  If we
		 * have to coalesce memory to satisfy the request, don't
		 * try until it's likely (possible?) that we'll succeed.
		 * The factor of 3 is a heuristic slack allowance for
		 * free-list fragmentation.
		 */
		if (total >= 3 * len)
			goto alloc;

		/* Restart the walk if we discarded the region lock. */
		if (restart)
			goto retry;
	}
	/* Walked the whole LRU without enough space: escalate and retry. */
	nomore++;
	goto alloc;
}