bitops.h: diff of commit 4ba24fef3eb3b142197135223b90ced2f319cd53 (before) vs de60c1a1849c57e864f02f0d921993982b1648f8 (after)
 /*
  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */

--- 18 unchanged lines hidden ---

 #if defined(CONFIG_ARC_HAS_LLSC)

 static inline void set_bit(unsigned long nr, volatile unsigned long *m)
 {
 	unsigned int temp;

 	m += nr >> 5;

+	/*
+	 * ARC ISA micro-optimization:
+	 *
+	 * Instructions dealing with bitpos only consider lower 5 bits (0-31)
+	 * e.g (x << 33) is handled like (x << 1) by ASL instruction
+	 *  (mem pointer still needs adjustment to point to next word)
+	 *
+	 * Hence the masking to clamp @nr arg can be elided in general.
+	 *
+	 * However if @nr is a constant (above assumed it in a register),
+	 * and greater than 31, gcc can optimize away (x << 33) to 0,
+	 * as overflow, given the 32-bit ISA. Thus masking needs to be done
+	 * for constant @nr, but no code is generated due to const prop.
+	 */
 	if (__builtin_constant_p(nr))
 		nr &= 0x1f;

 	__asm__ __volatile__(
 	"1:	llock   %0, [%1]	\n"
 	"	bset    %0, %0, %2	\n"
 	"	scond   %0, [%1]	\n"
 	"	bnz     1b	\n"

--- 326 unchanged lines hidden ---
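The behavior the new comment describes can be checked with a small user-space sketch (illustrative names, not from this header): the shifter only uses the low 5 bits of the position, so a runtime nr needs no clamp, while a constant oversized shift overflows the 32-bit type and gcc folds it to 0, hence the __builtin_constant_p() masking.

	#include <stdio.h>

	/*
	 * Mimics what ARC's ASL/BSET do in hardware: only the low 5 bits
	 * of the position are used, so a runtime count of 33 acts like 1.
	 * A constant (1 << 33), by contrast, is folded by gcc to 0, which
	 * is why set_bit() masks nr only in the constant case.
	 */
	static unsigned int asl_like(unsigned int x, unsigned int count)
	{
		return x << (count & 0x1f);	/* the clamp the ISA applies for free */
	}

	int main(void)
	{
		printf("%#x\n", asl_like(1, 33));	/* prints 0x2, same as 1 << 1 */
		return 0;
	}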

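For context on the retry loop itself, llock/scond are ARC's load-locked/store-conditional pair: scond only succeeds if nothing else wrote the word since the llock, and bnz loops back on failure. A rough analogue in portable C11 atomics (a sketch, not the kernel's implementation):

	#include <stdatomic.h>

	/* Retry loop equivalent to "llock; bset; scond; bnz 1b" above. */
	static void set_bit_sketch(unsigned long nr, atomic_uint *m)
	{
		unsigned int old;

		m += nr >> 5;			/* word holding the bit */
		old = atomic_load(m);
		while (!atomic_compare_exchange_weak(m, &old,
						     old | (1u << (nr & 0x1f))))
			;			/* old is reloaded on failure; retry */
	}

	int main(void)
	{
		static atomic_uint bitmap[2];

		set_bit_sketch(37, bitmap);	/* sets bit 5 of word 1 */
		return bitmap[1] == (1u << 5) ? 0 : 1;
	}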

 	return (old & (1 << nr)) != 0;
 }

 /*
  * This routine doesn't need to be atomic.
  */
 static inline int
-__constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
-{
-	return ((1UL << (nr & 31)) &
-		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
-}
-
-static inline int
-__test_bit(unsigned int nr, const volatile unsigned long *addr)
+test_bit(unsigned int nr, const volatile unsigned long *addr)
 {
 	unsigned long mask;

 	addr += nr >> 5;

-	/* ARC700 only considers 5 bits in bit-fiddling insn */
+	if (__builtin_constant_p(nr))
+		nr &= 0x1f;
+
 	mask = 1 << nr;

 	return ((mask & *addr) != 0);
 }

-#define test_bit(nr, addr)	(__builtin_constant_p(nr) ? \
-					__constant_test_bit((nr), (addr)) : \
-					__test_bit((nr), (addr)))
-
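The consolidation above merges __constant_test_bit() and __test_bit() into a single test_bit() that clamps @nr itself in the constant case. The word/bit arithmetic it relies on can be checked standalone (a sketch with illustrative names, assuming 32-bit words as on ARC):

	#include <stdio.h>

	/* Word index is nr >> 5, bit position within the word is nr & 0x1f. */
	static int test_bit_sketch(unsigned int nr, const unsigned int *addr)
	{
		addr += nr >> 5;		/* advance to the right 32-bit word */
		return (*addr >> (nr & 0x1f)) & 1;
	}

	int main(void)
	{
		unsigned int map[2] = { 0x1, 0x80000000u };

		/* bit 0 of word 0; bit 63 == bit 31 of word 1; bit 5 unset */
		printf("%d %d %d\n", test_bit_sketch(0, map),
		       test_bit_sketch(63, map), test_bit_sketch(5, map));	/* 1 1 0 */
		return 0;
	}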
 /*
  * Count the number of zeros, starting from MSB
  * Helper for fls( ) friends
  * This is a pure count, so (1-32) or (0-31) doesn't apply
  * It could be 0 to 32, based on num of 0's in there
  * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF) = 0, clz(0) = 32, clz(1) = 31
  */
 static inline __attribute__ ((const)) int clz(unsigned int x)

--- 102 unchanged lines hidden ---
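The clz() semantics spelled out in the comment (a pure count of zeros from the MSB, 0 to 32 inclusive) can be pinned down with a portable reference model; the ARC implementation itself is among the hidden lines above, so this is only a behavioral sketch:

	/* Reference model for clz() as described above: count zeros from MSB. */
	static int clz_ref(unsigned int x)
	{
		int n = 0;

		if (x == 0)
			return 32;		/* all 32 bits are zero */
		while (!(x & 0x80000000u)) {	/* shift until the MSB is set */
			x <<= 1;
			n++;
		}
		return n;			/* clz_ref(1) == 31, clz_ref(~0u) == 0 */
	}

	int main(void)
	{
		return clz_ref(0) == 32 && clz_ref(1) == 31 &&
		       clz_ref(0x80000000u) == 0 ? 0 : 1;
	}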