Bug Summary

File: afs/afs_axscache.c
Location: line 120, column 3
Description: Access to field 'next' results in a dereference of a null pointer (loaded from variable 'i')

Annotated Source Code

1/*
2 * Copyright 2000, International Business Machines Corporation and others.
3 * All Rights Reserved.
4 *
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
8 */
9
10#include <afsconfig.h>
11#include "afs/param.h"
12
13
14#include "afs/sysincludes.h" /* Standard vendor system headers */
15#include "afsincludes.h" /* Afs-based standard headers */
16#include "afs/afs_stats.h" /* statistics */
17#include "afs/stds.h"
18static struct axscache *afs_axsfreelist = NULL((void *)0);
19
20#define NAXSs(1000 / sizeof(struct axscache)) (1000 / sizeof(struct axscache))
21static struct xfreelist {
22 struct xfreelist *next;
23 struct axscache data[NAXSs(1000 / sizeof(struct axscache))];
24} *xfreemallocs = 0;
25static int afs_xaxscnt = 0;
26afs_rwlock_t afs_xaxs;
27
28/* takes an address of an access cache & uid, returns ptr */
29/* PRECONDITION: first field has been checked and doesn't match!
30 * INVARIANT: isparent(i,j) ^ isparent(j,i) (ie, they switch around)
31 */
32struct axscache *
33afs_SlowFindAxs(struct axscache **cachep, afs_int32 id)
34{
35 struct axscache *i, *j;
36
37 j = (*cachep);
38 i = j->next;
39 while (i) {
40 if (i->uid == id) {
41 axs_Front(cachep, j, i){(j)->next = (i)->next; (i)->next= *(cachep);*(cachep
)=(i);}
; /* maintain LRU queue */
42 return (i);
43 }
44
45 if ((j = i->next)) { /* ASSIGNMENT HERE! */
46 if (j->uid == id) {
47 axs_Front(cachep, i, j){(i)->next = (j)->next; (j)->next= *(cachep);*(cachep
)=(j);}
;
48 return (j);
49 }
50 } else
51 return ((struct axscache *)NULL((void *)0));
52 i = j->next;
53 }
54 return ((struct axscache *)NULL((void *)0));
55}
56
57
58struct axscache *
59axs_Alloc(void)
60{
61 struct axscache *i, *j;
62 struct xfreelist *h, *xsp;
63 int k;
64
65 ObtainWriteLock(&afs_xaxs, 174)do { ; if (!(&afs_xaxs)->excl_locked && !(&
afs_xaxs)->readers_reading) (&afs_xaxs) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xaxs, 2); (&afs_xaxs)
->pid_writer = (((__curthread())->td_proc)->p_pid );
(&afs_xaxs)->src_indicator = 174; } while (0)
;
66 if ((i = afs_axsfreelist)) {
67 afs_axsfreelist = i->next;
68 ReleaseWriteLock(&afs_xaxs)do { ; (&afs_xaxs)->excl_locked &= ~2; if ((&afs_xaxs
)->wait_states) Afs_Lock_ReleaseR(&afs_xaxs); (&afs_xaxs
)->pid_writer=0; } while (0)
;
69 return i;
70 } else {
71 h = afs_osi_Alloc(sizeof(struct xfreelist));
72 osi_Assert(h != NULL)(void)((h != ((void *)0)) || (osi_AssertFailK( "h != NULL" , "/home/wollman/openafs/src/afs/afs_axscache.c"
, 72), 0))
;
73 afs_xaxscnt++;
74 xsp = xfreemallocs;
75 xfreemallocs = h;
76 xfreemallocs->next = xsp;
77 i = j = h->data;
78 for (k = 0; k < NAXSs(1000 / sizeof(struct axscache)) - 1; k++, i++) {
79 i->uid = -2;
80 i->axess = 0;
81 i->next = ++j; /* need j because order of evaluation not defined */
82 }
83 i->uid = -2;
84 i->axess = 0;
85 i->next = NULL((void *)0);
86 afs_axsfreelist = (h->data)->next;
87 }
88 ReleaseWriteLock(&afs_xaxs)do { ; (&afs_xaxs)->excl_locked &= ~2; if ((&afs_xaxs
)->wait_states) Afs_Lock_ReleaseR(&afs_xaxs); (&afs_xaxs
)->pid_writer=0; } while (0)
;
89 return (h->data);
90}
91
92
/*
 * axs_Free(axsp): push one axscache entry back onto the shared
 * freelist.  Takes afs_xaxs for writing around the list update.
 *
 * Hardened macro hygiene: the argument is parenthesized at each use
 * (CERT PRE01-C) and the body is wrapped in do { } while (0) so the
 * macro behaves as a single statement even in an unbraced if/else
 * (CERT PRE10-C).  All existing call sites — braced blocks followed
 * by ';' — are unaffected.
 */
#define axs_Free(axsp) \
    do { \
	ObtainWriteLock(&afs_xaxs, 175); \
	(axsp)->next = afs_axsfreelist; \
	afs_axsfreelist = (axsp); \
	ReleaseWriteLock(&afs_xaxs); \
    } while (0)
99
100
101/* I optimize for speed on lookup, and don't give a RIP about delete.
102 */
103void
104afs_RemoveAxs(struct axscache **headp, struct axscache *axsp)
105{
106 struct axscache *i, *j;
107
108 if (*headp && axsp) { /* is bullet-proofing really neccessary? */
1
Taking true branch
109 if (*headp == axsp) { /* most common case, I think */
2
Taking false branch
110 *headp = axsp->next;
111 axs_Free(axsp){ do { ; if (!(&afs_xaxs)->excl_locked && !(&
afs_xaxs)->readers_reading) (&afs_xaxs) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xaxs, 2); (&afs_xaxs)
->pid_writer = (((__curthread())->td_proc)->p_pid );
(&afs_xaxs)->src_indicator = 175; } while (0); axsp->
next = afs_axsfreelist; afs_axsfreelist = axsp; do { ; (&
afs_xaxs)->excl_locked &= ~2; if ((&afs_xaxs)->
wait_states) Afs_Lock_ReleaseR(&afs_xaxs); (&afs_xaxs
)->pid_writer=0; } while (0); }
;
112 return;
113 }
114
115 i = *headp;
116 j = i->next;
117
118 while (j) {
3
Loop condition is true. Entering loop body
6
Loop condition is true. Entering loop body
119 if (j == axsp) {
4
Taking false branch
7
Taking true branch
120 i->next = j->next;
8
Access to field 'next' results in a dereference of a null pointer (loaded from variable 'i')
121 axs_Free(axsp){ do { ; if (!(&afs_xaxs)->excl_locked && !(&
afs_xaxs)->readers_reading) (&afs_xaxs) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xaxs, 2); (&afs_xaxs)
->pid_writer = (((__curthread())->td_proc)->p_pid );
(&afs_xaxs)->src_indicator = 175; } while (0); axsp->
next = afs_axsfreelist; afs_axsfreelist = axsp; do { ; (&
afs_xaxs)->excl_locked &= ~2; if ((&afs_xaxs)->
wait_states) Afs_Lock_ReleaseR(&afs_xaxs); (&afs_xaxs
)->pid_writer=0; } while (0); }
;
122 return;
123 }
124 if ((i = j->next)) { /* ASSIGNMENT HERE! */
5
Taking false branch
125 j->next = i->next;
126 axs_Free(axsp){ do { ; if (!(&afs_xaxs)->excl_locked && !(&
afs_xaxs)->readers_reading) (&afs_xaxs) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xaxs, 2); (&afs_xaxs)
->pid_writer = (((__curthread())->td_proc)->p_pid );
(&afs_xaxs)->src_indicator = 175; } while (0); axsp->
next = afs_axsfreelist; afs_axsfreelist = axsp; do { ; (&
afs_xaxs)->excl_locked &= ~2; if ((&afs_xaxs)->
wait_states) Afs_Lock_ReleaseR(&afs_xaxs); (&afs_xaxs
)->pid_writer=0; } while (0); }
;
127 return;
128 }
129 }
130 }
131 /* end of "if neither pointer is NULL" */
132 return; /* !#@ FAILED to find it! */
133}
134
135
136/*
137 * Takes an entire list of access cache structs and prepends them, lock, stock,
138 * and barrel, to the front of the freelist.
139 */
140void
141afs_FreeAllAxs(struct axscache **headp)
142{
143 struct axscache *i, *j;
144
145 i = *headp;
146 j = NULL((void *)0);
147
148 while (i) { /* chase down the list 'til we reach the end */
149 j = i->next;
150 if (!j) {
151 ObtainWriteLock(&afs_xaxs, 176)do { ; if (!(&afs_xaxs)->excl_locked && !(&
afs_xaxs)->readers_reading) (&afs_xaxs) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xaxs, 2); (&afs_xaxs)
->pid_writer = (((__curthread())->td_proc)->p_pid );
(&afs_xaxs)->src_indicator = 176; } while (0)
;
152 i->next = afs_axsfreelist; /* tack on the freelist to the end */
153 afs_axsfreelist = *headp;
154 ReleaseWriteLock(&afs_xaxs)do { ; (&afs_xaxs)->excl_locked &= ~2; if ((&afs_xaxs
)->wait_states) Afs_Lock_ReleaseR(&afs_xaxs); (&afs_xaxs
)->pid_writer=0; } while (0)
;
155 *headp = NULL((void *)0);
156 return;
157 }
158 i = j->next;
159 }
160
161 if (j) { /* we ran off the end of the list... */
162 ObtainWriteLock(&afs_xaxs, 177)do { ; if (!(&afs_xaxs)->excl_locked && !(&
afs_xaxs)->readers_reading) (&afs_xaxs) -> excl_locked
= 2; else Afs_Lock_Obtain(&afs_xaxs, 2); (&afs_xaxs)
->pid_writer = (((__curthread())->td_proc)->p_pid );
(&afs_xaxs)->src_indicator = 177; } while (0)
;
163 j->next = afs_axsfreelist; /* tack on the freelist to the end */
164 afs_axsfreelist = *headp;
165 ReleaseWriteLock(&afs_xaxs)do { ; (&afs_xaxs)->excl_locked &= ~2; if ((&afs_xaxs
)->wait_states) Afs_Lock_ReleaseR(&afs_xaxs); (&afs_xaxs
)->pid_writer=0; } while (0)
;
166 }
167 *headp = NULL((void *)0);
168 return;
169}
170
171
172void
173shutdown_xscache(void)
174{
175 struct xfreelist *xp, *nxp;
176
177 AFS_RWLOCK_INIT(&afs_xaxs, "afs_xaxs")Lock_Init(&afs_xaxs);
178 xp = xfreemallocs;
179 while (xp) {
180 nxp = xp->next;
181 afs_osi_Free(xp, sizeof(struct xfreelist));
182 xp = nxp;
183 }
184 afs_axsfreelist = NULL((void *)0);
185 xfreemallocs = NULL((void *)0);
186}