Bug Summary

File: afs/FBSD/osi_vm.c
Location: line 157, column 40
Description: Access to field 'flags' results in a dereference of a null pointer (loaded from variable 'obj')

Annotated Source Code

1/*
2 * Copyright 2000, International Business Machines Corporation and others.
3 * All Rights Reserved.
4 *
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
8 */
9
10
11/* osi_vm.c implements:
12 *
13 * osi_VM_FlushVCache(avc, slept)
14 * osi_ubc_flush_dirty_and_wait(vp, flags)
15 * osi_VM_StoreAllSegments(avc)
16 * osi_VM_TryToSmush(avc, acred, sync)
17 * osi_VM_FlushPages(avc, credp)
18 * osi_VM_Truncate(avc, alen, acred)
19 */
20
21#include <afsconfig.h>
22#include "afs/param.h"
23#include <sys/param.h>
24#include <sys/vnode.h>
25
26
27#include "afs/sysincludes.h" /* Standard vendor system headers */
28#include "afsincludes.h" /* Afs-based standard headers */
29#include "afs/afs_stats.h" /* statistics */
30#include <vm/vm_object.h>
31#include <vm/vm_map.h>
32#include <limits.h>
33
34/*
35 * FreeBSD implementation notes:
36 * Most of these operations require us to frob vm_objects. Most
37 * functions require that the object be locked (with VM_OBJECT_LOCK)
38 * on entry and leave it locked on exit. In order to get the
39 * vm_object itself we call VOP_GETVOBJECT on the vnode; the
40 * locking protocol requires that we do so with the heavy vnode lock
41 * held and the vnode interlock unlocked, and it returns the same
42 * way.
43 *
44 * The locking protocol for vnodes is defined in
45 * kern/vnode_if.src and sys/vnode.h; unfortunately, it is not *quite*
46 * constant from version to version so to be properly correct we must
47 * check the VCS history of those files.
48 */
49
#ifdef AFS_FBSD60_ENV
/*
 * On FreeBSD 6.0+ the vm_object hangs off the buf object.  Every caller
 * in this file tests "VOP_GETVOBJECT(vp, &obj) == 0" for success, so this
 * macro must evaluate to 0 only when a (non-NULL) object was obtained.
 * The previous definition evaluated to the pointer itself, which made the
 * "success" test true exactly when the object was NULL and led to a NULL
 * dereference of obj->flags in osi_VM_StoreAllSegments().
 */
#define VOP_GETVOBJECT(vp, objp) \
	((*(objp) = (vp)->v_bufobj.bo_object) == NULL)
#endif

#if defined(AFS_FBSD80_ENV)
/* 8.0 dropped the struct thread argument from the vnode locking entry points. */
#define lock_vnode(v, f)	vn_lock((v), (f))
#define ilock_vnode(v)	vn_lock((v), LK_INTERLOCK|LK_EXCLUSIVE|LK_RETRY)
#define unlock_vnode(v)	VOP_UNLOCK((v), 0)
#define islocked_vnode(v)	VOP_ISLOCKED((v))
#else
#define lock_vnode(v, f)	vn_lock((v), (f), curthread)
#define ilock_vnode(v)	vn_lock((v), LK_INTERLOCK|LK_EXCLUSIVE|LK_RETRY, curthread)
#define unlock_vnode(v)	VOP_UNLOCK((v), 0, curthread)
#define islocked_vnode(v)	VOP_ISLOCKED((v), curthread)
#endif
65
66/* Try to discard pages, in order to recycle a vcache entry.
67 *
68 * We also make some sanity checks: ref count, open count, held locks.
69 *
70 * We also do some non-VM-related chores, such as releasing the cred pointer
71 * (for AIX and Solaris) and releasing the gnode (for AIX).
72 *
73 * Locking: afs_xvcache lock is held. If it is dropped and re-acquired,
74 * *slept should be set to warn the caller.
75 *
76 * Formerly, afs_xvcache was dropped and re-acquired for Solaris, but now it
77 * is not dropped and re-acquired for any platform. It may be that *slept is
78 * therefore obsolescent.
79 *
80 */
81int
82osi_VM_FlushVCache(struct vcache *avc, int *slept)
83{
84 struct vnode *vp = AFSTOV(avc)((avc)->v);
85
86 if (!VI_TRYLOCK(vp)_mtx_trylock(((&(vp)->v_interlock)), (0), "/home/wollman/openafs/src/afs/FBSD/osi_vm.c"
, 86)
) /* need interlock to check usecount */
87 return EBUSY16;
88
89 if (vp->v_usecount > 0) {
90 VI_UNLOCK(vp)_mtx_unlock_flags(((&(vp)->v_interlock)), (0), "/home/wollman/openafs/src/afs/FBSD/osi_vm.c"
, 90)
;
91 return EBUSY16;
92 }
93
94 /* XXX
95 * The value of avc->opens here came to be, at some point,
96 * typically -1. This was caused by incorrectly performing afs_close
97 * processing on vnodes being recycled */
98 if (avc->opens) {
99 VI_UNLOCK(vp)_mtx_unlock_flags(((&(vp)->v_interlock)), (0), "/home/wollman/openafs/src/afs/FBSD/osi_vm.c"
, 99)
;
100 return EBUSY16;
101 }
102
103 /* if a lock is held, give up */
104 if (CheckLock(&avc->lock)((&avc->lock)->excl_locked? (int) -1 : (int) (&
avc->lock)->readers_reading)
) {
105 VI_UNLOCK(vp)_mtx_unlock_flags(((&(vp)->v_interlock)), (0), "/home/wollman/openafs/src/afs/FBSD/osi_vm.c"
, 105)
;
106 return EBUSY16;
107 }
108
109 if ((vp->v_iflag & VI_DOOMED0x0080) != 0) {
110 VI_UNLOCK(vp)_mtx_unlock_flags(((&(vp)->v_interlock)), (0), "/home/wollman/openafs/src/afs/FBSD/osi_vm.c"
, 110)
;
111 return (0);
112 }
113
114 /* must hold the vnode before calling vgone()
115 * This code largely copied from vfs_subr.c:vlrureclaim() */
116 vholdl(vp);
117 AFS_GUNLOCK()do { (void)0; _mtx_unlock_flags(((&afs_global_mtx)), (0),
"/home/wollman/openafs/src/afs/FBSD/osi_vm.c", 117); } while
(0)
;
118 *slept = 1;
119 /* use the interlock while locking, so no one else can DOOM this */
120 ilock_vnode(vp)_vn_lock((vp), 0x000100|0x080000|0x000400, "/home/wollman/openafs/src/afs/FBSD/osi_vm.c"
, 120);
;
121 vgone(vp);
122 unlock_vnode(vp)VOP_UNLOCK((vp), 0);
123 vdrop(vp);
124
125 AFS_GLOCK()do { (void)0; _mtx_lock_flags(((&afs_global_mtx)), (0), "/home/wollman/openafs/src/afs/FBSD/osi_vm.c"
, 125); (void)0; } while (0)
;
126 return 0;
127}
128
129/* Try to store pages to cache, in order to store a file back to the server.
130 *
131 * Locking: the vcache entry's lock is held. It will usually be dropped and
132 * re-obtained.
133 */
134void
135osi_VM_StoreAllSegments(struct vcache *avc)
136{
137 struct vnode *vp;
138 struct vm_object *obj;
139 int anyio, tries;
140
141 ReleaseWriteLock(&avc->lock)do { ; (&avc->lock)->excl_locked &= ~2; if ((&
avc->lock)->wait_states) Afs_Lock_ReleaseR(&avc->
lock); (&avc->lock)->pid_writer=0; } while (0)
;
142 AFS_GUNLOCK()do { (void)0; _mtx_unlock_flags(((&afs_global_mtx)), (0),
"/home/wollman/openafs/src/afs/FBSD/osi_vm.c", 142); } while
(0)
;
143 tries = 5;
144 vp = AFSTOV(avc)((avc)->v);
145
146 /*
147 * I don't understand this. Why not just call vm_object_page_clean()
148 * and be done with it? I particularly don't understand why we're calling
149 * vget() here. Is there some reason to believe that the vnode might
150 * be being recycled at this point? I don't think there's any need for
151 * this loop, either -- if we keep the vnode locked all the time,
152 * that and the object lock will prevent any new pages from appearing.
153 * The loop is what causes the race condition. -GAW
154 */
155 do {
156 anyio = 0;
157 if (VOP_GETVOBJECT(vp, &obj)(*(&obj) = (vp)->v_bufobj.bo_object) == 0 && (obj->flags & OBJ_MIGHTBEDIRTY0x0100)) {
Access to field 'flags' results in a dereference of a null pointer (loaded from variable 'obj')
158 if (!vget(vp, LK_EXCLUSIVE0x080000 | LK_RETRY0x000400, curthread(__curthread()))) {
159 if (VOP_GETVOBJECT(vp, &obj)(*(&obj) = (vp)->v_bufobj.bo_object) == 0) {
160 VM_OBJECT_LOCK(obj)_mtx_lock_flags(((&(obj)->mtx)), (0), "/home/wollman/openafs/src/afs/FBSD/osi_vm.c"
, 160)
;
161 vm_object_page_clean(obj, 0, 0, OBJPC_SYNC0x1);
162 VM_OBJECT_UNLOCK(obj)_mtx_unlock_flags(((&(obj)->mtx)), (0), "/home/wollman/openafs/src/afs/FBSD/osi_vm.c"
, 162)
;
163 anyio = 1;
164 }
165 vput(vp);
166 }
167 }
168 } while (anyio && (--tries > 0));
169 AFS_GLOCK()do { (void)0; _mtx_lock_flags(((&afs_global_mtx)), (0), "/home/wollman/openafs/src/afs/FBSD/osi_vm.c"
, 169); (void)0; } while (0)
;
170 ObtainWriteLock(&avc->lock, 94)do { ; if (!(&avc->lock)->excl_locked && !(
&avc->lock)->readers_reading) (&avc->lock) ->
excl_locked = 2; else Afs_Lock_Obtain(&avc->lock, 2);
(&avc->lock)->pid_writer = (((__curthread())->td_proc
)->p_pid ); (&avc->lock)->src_indicator = 94; } while
(0)
;
171}
172
173/* Try to invalidate pages, for "fs flush" or "fs flushv"; or
174 * try to free pages, when deleting a file.
175 *
176 * Locking: the vcache entry's lock is held. It may be dropped and
177 * re-obtained.
178 *
179 * Since we drop and re-obtain the lock, we can't guarantee that there won't
180 * be some pages around when we return, newly created by concurrent activity.
181 */
182void
183osi_VM_TryToSmush(struct vcache *avc, afs_ucred_t *acred, int sync)
184{
185 struct vnode *vp;
186 int tries, code;
187 int islocked;
188
189 vp = AFSTOV(avc)((avc)->v);
190
191 VI_LOCK(vp)_mtx_lock_flags(((&(vp)->v_interlock)), (0), "/home/wollman/openafs/src/afs/FBSD/osi_vm.c"
, 191)
;
192 if (vp->v_iflag & VI_DOOMED0x0080) {
193 VI_UNLOCK(vp)_mtx_unlock_flags(((&(vp)->v_interlock)), (0), "/home/wollman/openafs/src/afs/FBSD/osi_vm.c"
, 193)
;
194 return;
195 }
196 VI_UNLOCK(vp)_mtx_unlock_flags(((&(vp)->v_interlock)), (0), "/home/wollman/openafs/src/afs/FBSD/osi_vm.c"
, 196)
;
197
198 islocked = islocked_vnode(vp)VOP_ISLOCKED((vp));
199 if (islocked == LK_EXCLOTHER0x040000)
200 panic("Trying to Smush over someone else's lock");
201 else if (islocked == LK_SHARED0x200000) {
202 afs_warn("Trying to Smush with a shared lock");
203 lock_vnode(vp, LK_UPGRADE)_vn_lock((vp), (0x400000), "/home/wollman/openafs/src/afs/FBSD/osi_vm.c"
, 203)
;
204 } else if (!islocked)
205 lock_vnode(vp, LK_EXCLUSIVE)_vn_lock((vp), (0x080000), "/home/wollman/openafs/src/afs/FBSD/osi_vm.c"
, 205)
;
206
207 if (vp->v_bufobj.bo_object != NULL((void *)0)) {
208 VM_OBJECT_LOCK(vp->v_bufobj.bo_object)_mtx_lock_flags(((&(vp->v_bufobj.bo_object)->mtx)),
(0), "/home/wollman/openafs/src/afs/FBSD/osi_vm.c", 208)
;
209 /*
210 * Do we really want OBJPC_SYNC? OBJPC_INVAL would be
211 * faster, if invalidation is really what we are being
212 * asked to do. (It would make more sense, too, since
213 * otherwise this function is practically identical to
214 * osi_VM_StoreAllSegments().) -GAW
215 */
216
217 /*
218 * Dunno. We no longer resemble osi_VM_StoreAllSegments,
219 * though maybe that's wrong, now. And OBJPC_SYNC is the
220 * common thing in 70 file systems, it seems. Matt.
221 */
222
223 vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC0x1);
224 VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object)_mtx_unlock_flags(((&(vp->v_bufobj.bo_object)->mtx)
), (0), "/home/wollman/openafs/src/afs/FBSD/osi_vm.c", 224)
;
225 }
226
227 tries = 5;
228 code = osi_vinvalbuf(vp, V_SAVE, PCATCH, 0)vinvalbuf((vp), (0x0001), (0x100), (0));
229 while (code && (tries > 0)) {
230 afs_warn("TryToSmush retrying vinvalbuf");
231 code = osi_vinvalbuf(vp, V_SAVE, PCATCH, 0)vinvalbuf((vp), (0x0001), (0x100), (0));
232 --tries;
233 }
234 if (islocked == LK_SHARED0x200000)
235 lock_vnode(vp, LK_DOWNGRADE)_vn_lock((vp), (0x010000), "/home/wollman/openafs/src/afs/FBSD/osi_vm.c"
, 235)
;
236 else if (!islocked)
237 unlock_vnode(vp)VOP_UNLOCK((vp), 0);
238}
239
240/* Purge VM for a file when its callback is revoked.
241 *
242 * Locking: No lock is held, not even the global lock.
243 */
244void
245osi_VM_FlushPages(struct vcache *avc, afs_ucred_t *credp)
246{
247 struct vnode *vp;
248 struct vm_object *obj;
249
250 vp = AFSTOV(avc)((avc)->v);
251 ASSERT_VOP_LOCKED(vp, __func__)((void)0);
252 if (VOP_GETVOBJECT(vp, &obj)(*(&obj) = (vp)->v_bufobj.bo_object) == 0) {
253 VM_OBJECT_LOCK(obj)_mtx_lock_flags(((&(obj)->mtx)), (0), "/home/wollman/openafs/src/afs/FBSD/osi_vm.c"
, 253)
;
254 vm_object_page_remove(obj, 0, 0, FALSE0);
255 VM_OBJECT_UNLOCK(obj)_mtx_unlock_flags(((&(obj)->mtx)), (0), "/home/wollman/openafs/src/afs/FBSD/osi_vm.c"
, 255)
;
256 }
257 osi_vinvalbuf(vp, 0, 0, 0)vinvalbuf((vp), (0), (0), (0));
258}
259
260/* Purge pages beyond end-of-file, when truncating a file.
261 *
262 * Locking: no lock is held, not even the global lock.
263 * activeV is raised. This is supposed to block pageins, but at present
264 * it only works on Solaris.
265 */
266void
267osi_VM_Truncate(struct vcache *avc, int alen, afs_ucred_t *acred)
268{
269 vnode_pager_setsize(AFSTOV(avc)((avc)->v), alen);
270}