Error Trace


Bug # 150

Error trace
-entry_point
{
873 LDV_IN_INTERRUPT = 1;
882 ldv_initialize() { /* Function call is skipped because the function is undefined */}
888 goto ldv_66141;
888 tmp___0 = nondet_int() { /* Function call is skipped because the function is undefined */}
888 assume(tmp___0 != 0);
890 goto ldv_66140;
889 ldv_66140:;
891 tmp = nondet_int() { /* Function call is skipped because the function is undefined */}
891 switch (tmp);
892 assume(!(tmp == 0));
915 assume(!(tmp == 1));
937 assume(!(tmp == 2));
959 assume(!(tmp == 3));
981 assume(!(tmp == 4));
1003 assume(!(tmp == 5));
1024 assume(!(tmp == 6));
1045 assume(tmp == 7);
1057 ldv_handler_precall() { /* Function call is skipped because the function is undefined */}
1058 -loopback(var_group3)
{
469 -rxe_rcv(skb)
{
358 pkt = (struct rxe_pkt_info *)(&(skb->cb));
359 rxe = pkt->rxe;
363 pkt->offset = 0U;
365 int __CPAchecker_TMP_0 = (int)(pkt->offset);
365 +__builtin_expect((skb->len) < ((unsigned int)(__CPAchecker_TMP_0 + 12)), 0L)
365 assume(!(tmp != 0L));
368 +rxe_match_dgid(rxe, skb)
368 +__builtin_expect(tmp___1 < 0, 0L)
368 assume(!(tmp___2 != 0L));
373 +bth_opcode(pkt)
374 +bth_psn(pkt)
375 pkt->qp = (struct rxe_qp *)0;
376 int __CPAchecker_TMP_1 = (int)(pkt->opcode);
376 pkt->mask = (pkt->mask) | ((u32 )((rxe_opcode[__CPAchecker_TMP_1]).mask));
378 +header_size(pkt)
378 size_t __CPAchecker_TMP_2 = (size_t )(skb->len);
378 +__builtin_expect(__CPAchecker_TMP_2 < tmp___3, 0L)
378 assume(!(tmp___4 != 0L));
381 +hdr_check(pkt)
382 +__builtin_expect(err != 0, 0L)
382 assume(!(tmp___5 != 0L));
386 unsigned long __CPAchecker_TMP_3 = (unsigned long)(pkt->paylen);
386 icrcp = (__be32 *)((pkt->hdr) + (__CPAchecker_TMP_3 + 18446744073709551612UL));
387 +__fswab32(*icrcp)
387 pack_icrc = tmp___6;
389 +rxe_icrc_hdr(pkt, skb)
390 +payload_size(pkt)
390 +payload_addr(pkt)
390 calc_icrc = crc32_le(calc_icrc, (const unsigned char *)tmp___8, tmp___7) { /* Function call is skipped because the function is undefined */}
391 +__fswab32(~calc_icrc)
391 calc_icrc = tmp___9;
392 +__builtin_expect(calc_icrc != pack_icrc, 0L)
392 assume(!(tmp___13 != 0L));
406 +bth_qpn(pkt)
406 +__builtin_expect(tmp___14 == 16777215U, 0L)
406 assume(tmp___15 != 0L);
407 -rxe_rcv_mcast_pkt(rxe, skb)
{
277 pkt = (struct rxe_pkt_info *)(&(skb->cb));
285 unsigned int __CPAchecker_TMP_0 = (unsigned int)(skb->protocol);
285 assume(!(__CPAchecker_TMP_0 == 8U));
288 unsigned int __CPAchecker_TMP_1 = (unsigned int)(skb->protocol);
288 assume(!(__CPAchecker_TMP_1 == 56710U));
292 -rxe_pool_get_key(&(rxe->mc_grp_pool), (void *)(&dgid))
{
470 node = (struct rb_node *)0;
471 elem = (struct rxe_pool_entry *)0;
475 -ldv_spin_lock()
{
52 ldv_spin = 1;
53 return ;;
}
477 unsigned int __CPAchecker_TMP_0 = (unsigned int)(pool->state);
477 assume(!(__CPAchecker_TMP_0 != 1U));
480 node = pool->tree.rb_node;
482 goto ldv_63933;
482 assume(((unsigned long)node) != ((unsigned long)((struct rb_node *)0)));
484 goto ldv_63932;
483 ldv_63932:;
483 __mptr = (const struct rb_node *)node;
483 elem = ((struct rxe_pool_entry *)__mptr) + 18446744073709551584UL;
485 cmp = memcmp(((const void *)elem) + (pool->key_offset), (const void *)key, pool->key_size) { /* Function call is skipped because the function is undefined */}
488 assume(!(cmp > 0));
490 assume(!(cmp < 0));
493 goto ldv_63931;
496 assume(((unsigned long)node) != ((unsigned long)((struct rb_node *)0)));
497 +kref_get(&(elem->ref_cnt))
501 out:;
500 -spin_unlock_irqrestore(&(pool->pool_lock), flags)
{
109 -ldv_spin_unlock()
{
59 ldv_spin = 0;
60 return ;;
}
111 -ldv_spin_unlock_irqrestore_52(lock, flags)
{
378 _raw_spin_unlock_irqrestore(&(lock->__annonCompField20.rlock), flags) { /* Function call is skipped because the function is undefined */}
379 return ;;
}
112 return ;;
}
501 assume(((unsigned long)node) != ((unsigned long)((struct rb_node *)0)));
501 __CPAchecker_TMP_1 = (void *)elem;
501 return __CPAchecker_TMP_1;;
}
292 mcg = (struct rxe_mc_grp *)tmp___1;
293 assume(!(((unsigned long)mcg) == ((unsigned long)((struct rxe_mc_grp *)0))));
296 -spin_lock_bh(&(mcg->mcg_lock))
{
58 -ldv_spin_lock()
{
52 ldv_spin = 1;
53 return ;;
}
60 -ldv_spin_lock_bh_126(lock)
{
311 _raw_spin_lock_bh(&(lock->__annonCompField20.rlock)) { /* Function call is skipped because the function is undefined */}
312 return ;;
}
61 return ;;
}
298 __mptr = (const struct list_head *)(mcg->qp_list.next);
298 mce = ((struct rxe_mc_elem *)__mptr) + 18446744073709551552UL;
298 goto ldv_63902;
298 assume(((unsigned long)(&(mce->qp_list))) != ((unsigned long)(&(mcg->qp_list))));
300 goto ldv_63901;
299 ldv_63901:;
299 qp = mce->qp;
300 pkt = (struct rxe_pkt_info *)(&(skb->cb));
303 +check_type_state(rxe, pkt, qp)
304 assume(!(err != 0));
307 +bth_qpn(pkt)
307 +check_keys(rxe, pkt, tmp___2, qp)
308 assume(!(err != 0));
314 assume(((unsigned long)(mce->qp_list.next)) != ((unsigned long)(&(mcg->qp_list))));
314 -ldv_skb_clone_203(skb, 37748928U)
{
321 -ldv_check_alloc_flags(flags)
{
27 assume(ldv_spin != 0);
27 assume(flags != 34078752U);
27 assume(flags != 33554432U);
27 +ldv_error()
}
}
}
}
}
}
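
In summary, the trace reaches the error as follows. The entry point selects the driver's loopback() handler, which calls rxe_rcv(); because the packet's QPN equals the multicast value 16777215U (0xFFFFFF), rxe_rcv_mcast_pkt() is entered, and spin_lock_bh(&(mcg->mcg_lock)) sets the model variable ldv_spin to 1. While the lock is still held, ldv_skb_clone_203(skb, 37748928U) is called; 37748928U is 0x24000C0, i.e. GFP_KERNEL in linux-4.8-rc1, so ldv_check_alloc_flags() reaches ldv_error(). The following C sketch reconstructs the 43_1a rule model from the assumes in the trace; it is illustrative, not the exact LDV model source (the decoded constants are GFP_ATOMIC = 34078752U = 0x2080020 and GFP_NOWAIT = 33554432U = 0x2000000):

    typedef unsigned int gfp_t;

    extern void ldv_error(void);   /* model sink: reaching it is the violation */

    int ldv_spin = 0;              /* 1 while some spinlock is held */

    void ldv_spin_lock(void)   { ldv_spin = 1; }
    void ldv_spin_unlock(void) { ldv_spin = 0; }

    void ldv_check_alloc_flags(gfp_t flags)
    {
        /* An allocation that may sleep is forbidden under a spinlock:
         * only GFP_ATOMIC (34078752U) and GFP_NOWAIT (33554432U) pass. */
        if (ldv_spin != 0 && flags != 34078752U && flags != 33554432U)
            ldv_error();           /* this trace: skb_clone() with
                                      GFP_KERNEL under mcg->mcg_lock */
    }

So the path flags a sleeping allocation made in atomic context; the Fixed status below suggests it was a real bug rather than a false positive.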
Source code
[Currently opened file: arch/x86/include/asm/atomic.h from linux-4.8-rc1, lines 1-271]

Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.

Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to determine whether there is an error in your driver.

The Error trace column contains the path on which the given rule is violated. You can expand or collapse entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu, and you can expand or collapse each particular entity by clicking its +/- sign. Hovering over some entities shows tips. The error trace is also bound to the related source code: line numbers may be shown as links on the left, and clicking one opens the corresponding line in the source code.

The Source code column contains the contents of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs show the currently opened file and the other available files; hovering over a tab shows the full file name, and clicking a tab shows that file's content.

Kernel:      linux-4.8-rc1.tar.xz
Module:      drivers/infiniband/sw/rxe/rdma_rxe.ko
Rule:        43_1a
Verifier:    CPAchecker
Verdict:     Bug
Status:      Fixed
Timestamp:   2016-09-02 23:47:50
Bug report:  L0244
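
Judging from the trace, the offending statement is the skb_clone() at rxe_recv.c line 314, executed while mcg->mcg_lock is held. A hypothetical sketch of the kind of change that resolves this class of violation (the destination variable name is illustrative and the actual upstream patch may differ):

    /* drivers/infiniband/sw/rxe/rxe_recv.c, rxe_rcv_mcast_pkt(): the
     * qp_list walk runs under spin_lock_bh(&(mcg->mcg_lock)), so the
     * clone must use a non-sleeping allocation. */
    -    cskb = skb_clone(skb, GFP_KERNEL);   /* may sleep: forbidden under the lock */
    +    cskb = skb_clone(skb, GFP_ATOMIC);   /* safe in atomic context */

Any allocation reachable with a spinlock held must pass GFP_ATOMIC (or GFP_NOWAIT), which is exactly the condition the rule model checks.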

Comment

Reported: 2 Sep 2016
