LCOV - code coverage report
Current view: top level - tty - tty_ldsem.c (source / functions)
Test:      TTY Combined Coverage
Test Date: 2025-08-26 15:45:50
Coverage:  Lines: 26.4 % (51 of 193)    Functions: 47.1 % (8 of 17)

            Line data    Source code
       1              : // SPDX-License-Identifier: GPL-2.0
       2              : /*
       3              :  * Ldisc rw semaphore
       4              :  *
       5              :  * The ldisc semaphore is semantically a rw_semaphore but enforces
       6              :  * an alternate policy, namely:
       7              :  *   1) Supports lock wait timeouts
       8              :  *   2) Write waiter has priority
       9              :  *   3) Downgrading is not supported
      10              :  *
      11              :  * Implementation notes:
      12              :  *   1) Upper half of semaphore count is a wait count (differs from rwsem
      13              :  *      in that rwsem normalizes the upper half to the wait bias)
      14              :  *   2) Lacks overflow checking
      15              :  *
      16              :  * The generic counting was copied and modified from include/asm-generic/rwsem.h
      17              :  * by Paul Mackerras <[email protected]>.
      18              :  *
      19              :  * The scheduling policy was copied and modified from lib/rwsem.c
      20              :  * Written by David Howells ([email protected]).
      21              :  *
      22              :  * This implementation incorporates the write lock stealing work of
      23              :  * Michel Lespinasse <[email protected]>.
      24              :  *
      25              :  * Copyright (C) 2013 Peter Hurley <[email protected]>
      26              :  */
      27              : 
      28              : #include <linux/list.h>
      29              : #include <linux/spinlock.h>
      30              : #include <linux/atomic.h>
      31              : #include <linux/tty.h>
      32              : #include <linux/sched.h>
      33              : #include <linux/sched/debug.h>
      34              : #include <linux/sched/task.h>
      35              : 
      36              : 
      37              : #if BITS_PER_LONG == 64
      38              : # define LDSEM_ACTIVE_MASK      0xffffffffL
      39              : #else
      40              : # define LDSEM_ACTIVE_MASK      0x0000ffffL
      41              : #endif
      42              : 
      43              : #define LDSEM_UNLOCKED          0L
      44              : #define LDSEM_ACTIVE_BIAS       1L
      45              : #define LDSEM_WAIT_BIAS         (-LDSEM_ACTIVE_MASK-1)
      46              : #define LDSEM_READ_BIAS         LDSEM_ACTIVE_BIAS
      47              : #define LDSEM_WRITE_BIAS        (LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS)
      48              : 
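
The bias values above pack two counters into a single long: the low half counts active lock holders, the high half accumulates (negative) wait units. A minimal standalone sketch of the arithmetic, hypothetical and assuming a 64-bit long, not part of this file:

        /* ldsem_count_demo.c - worked example of the ldsem count encoding */
        #include <stdio.h>

        #define LDSEM_ACTIVE_MASK  0xffffffffL
        #define LDSEM_ACTIVE_BIAS  1L
        #define LDSEM_WAIT_BIAS    (-LDSEM_ACTIVE_MASK-1)   /* -0x100000000 */
        #define LDSEM_READ_BIAS    LDSEM_ACTIVE_BIAS
        #define LDSEM_WRITE_BIAS   (LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS)

        int main(void)
        {
                long count = 0;                 /* LDSEM_UNLOCKED */

                count += LDSEM_READ_BIAS;       /* reader fast path: count == 1 */
                printf("reader : count=%ld active=%ld\n",
                       count, count & LDSEM_ACTIVE_MASK);

                count += LDSEM_WRITE_BIAS;      /* writer arrives while reader holds */
                printf("+writer: count=%ld active=%ld\n",
                       count, count & LDSEM_ACTIVE_MASK);
                /*
                 * The active part is now 2, not LDSEM_ACTIVE_BIAS, so the
                 * writer's fast path fails and it queues in down_write_failed().
                 */
                return 0;
        }
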
      49              : struct ldsem_waiter {
      50              :         struct list_head list;
      51              :         struct task_struct *task;
      52              : };
      53              : 
      54              : /*
      55              :  * Initialize an ldsem:
      56              :  */
      57            6 : void __init_ldsem(struct ld_semaphore *sem, const char *name,
      58              :                   struct lock_class_key *key)
      59              : {
      60              : #ifdef CONFIG_DEBUG_LOCK_ALLOC
      61              :         /*
      62              :          * Make sure we are not reinitializing a held semaphore:
      63              :          */
      64            6 :         debug_check_no_locks_freed((void *)sem, sizeof(*sem));
      65            6 :         lockdep_init_map(&sem->dep_map, name, key, 0);
      66              : #endif
      67            6 :         atomic_long_set(&sem->count, LDSEM_UNLOCKED);
      68            6 :         sem->wait_readers = 0;
      69            6 :         raw_spin_lock_init(&sem->wait_lock);
      70            6 :         INIT_LIST_HEAD(&sem->read_wait);
      71            6 :         INIT_LIST_HEAD(&sem->write_wait);
      72            6 : }
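
Callers are not expected to invoke __init_ldsem() directly; the usual pattern (as with init_rwsem()) is a wrapper macro that gives each semaphore its own lockdep class key. A sketch of that shape, with the macro name treated as illustrative rather than quoted from the tty headers:

        #define init_ldsem(sem)                                 \
        do {                                                    \
                static struct lock_class_key __key;             \
                                                                \
                __init_ldsem((sem), #sem, &__key);              \
        } while (0)
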
      73              : 
      74            0 : static void __ldsem_wake_readers(struct ld_semaphore *sem)
      75              : {
      76            0 :         struct ldsem_waiter *waiter, *next;
      77            0 :         struct task_struct *tsk;
      78            0 :         long adjust, count;
      79              : 
      80              :         /*
      81              :          * Try to grant read locks to all readers on the read wait list.
      82              :          * Note the 'active part' of the count is incremented by
      83              :          * the number of readers before waking any processes up.
      84              :          */
      85            0 :         adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
      86            0 :         count = atomic_long_add_return(adjust, &sem->count);
      87            0 :         do {
      88            0 :                 if (count > 0)
      89            0 :                         break;
      90            0 :                 if (atomic_long_try_cmpxchg(&sem->count, &count, count - adjust))
      91            0 :                         return;
      92            0 :         } while (1);
      93              : 
      94            0 :         list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
      95            0 :                 tsk = waiter->task;
      96            0 :                 smp_store_release(&waiter->task, NULL);
      97            0 :                 wake_up_process(tsk);
      98            0 :                 put_task_struct(tsk);
      99            0 :         }
     100            0 :         INIT_LIST_HEAD(&sem->read_wait);
     101            0 :         sem->wait_readers = 0;
     102            0 : }
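
A worked check of the adjust arithmetic above, assuming a 64-bit build and three sleeping readers:

        /*
         * Each waiting reader contributed LDSEM_WAIT_BIAS when it queued
         * (see down_read_failed), so granting all three at once is
         *
         *   adjust = 3 * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS)
         *          = 3 * (1 + 0x100000000)
         *
         * i.e. one atomic add that clears three wait units from the upper
         * half and adds three active units to the lower half. If the result
         * is still <= 0, a writer slipped in first and the add is backed
         * out by the cmpxchg loop instead of waking anyone.
         */
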
     103              : 
     104            0 : static inline int writer_trylock(struct ld_semaphore *sem)
     105              : {
     106              :         /*
     107              :          * Only wake this writer if the active part of the count can be
     108              :          * transitioned from 0 -> 1
     109              :          */
     110            0 :         long count = atomic_long_add_return(LDSEM_ACTIVE_BIAS, &sem->count);
     111            0 :         do {
     112            0 :                 if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS)
     113            0 :                         return 1;
     114            0 :                 if (atomic_long_try_cmpxchg(&sem->count, &count, count - LDSEM_ACTIVE_BIAS))
     115            0 :                         return 0;
     116            0 :         } while (1);
     117            0 : }
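
A hypothetical trace of the 0 -> 1 transition writer_trylock() looks for, using 64-bit values with one queued writer and no active holders:

        /*
         *   count  = LDSEM_WAIT_BIAS           = -0x100000000   (active == 0)
         *   count += LDSEM_ACTIVE_BIAS         = -0x0ffffffff   (active == 1)
         *
         * active == LDSEM_ACTIVE_BIAS, so this writer now owns the lock.
         * Had another task raced in first, active would be 2; the bias is
         * then backed out via the cmpxchg loop and 0 is returned.
         */
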
     118              : 
     119            0 : static void __ldsem_wake_writer(struct ld_semaphore *sem)
     120              : {
     121            0 :         struct ldsem_waiter *waiter;
     122              : 
     123            0 :         waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list);
     124            0 :         wake_up_process(waiter->task);
     125            0 : }
     126              : 
     127              : /*
      128              :  * handle the lock release when there are processes blocked on it that can now run
     129              :  * - if we come here from up_xxxx(), then:
     130              :  *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
     131              :  *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
     132              :  * - the spinlock must be held by the caller
     133              :  * - woken process blocks are discarded from the list after having task zeroed
     134              :  */
     135            0 : static void __ldsem_wake(struct ld_semaphore *sem)
     136              : {
     137            0 :         if (!list_empty(&sem->write_wait))
     138            0 :                 __ldsem_wake_writer(sem);
     139            0 :         else if (!list_empty(&sem->read_wait))
     140            0 :                 __ldsem_wake_readers(sem);
     141            0 : }
     142              : 
     143            0 : static void ldsem_wake(struct ld_semaphore *sem)
     144              : {
     145            0 :         unsigned long flags;
     146              : 
     147            0 :         raw_spin_lock_irqsave(&sem->wait_lock, flags);
     148            0 :         __ldsem_wake(sem);
     149            0 :         raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
     150            0 : }
     151              : 
     152              : /*
     153              :  * wait for the read lock to be granted
     154              :  */
     155              : static struct ld_semaphore __sched *
     156            0 : down_read_failed(struct ld_semaphore *sem, long count, long timeout)
     157              : {
     158            0 :         struct ldsem_waiter waiter;
     159            0 :         long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;
     160              : 
     161              :         /* set up my own style of waitqueue */
     162            0 :         raw_spin_lock_irq(&sem->wait_lock);
     163              : 
     164              :         /*
      165              :          * Try to reverse the lock attempt, but if the count has changed
      166              :          * so that reversing fails, check whether the lock has since been
      167              :          * granted (count > 0), and early-out with the lock held if so.
     168              :          */
     169            0 :         do {
     170            0 :                 if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust)) {
     171            0 :                         count += adjust;
     172            0 :                         break;
     173              :                 }
     174            0 :                 if (count > 0) {
     175            0 :                         raw_spin_unlock_irq(&sem->wait_lock);
     176            0 :                         return sem;
     177              :                 }
     178            0 :         } while (1);
     179              : 
     180            0 :         list_add_tail(&waiter.list, &sem->read_wait);
     181            0 :         sem->wait_readers++;
     182              : 
     183            0 :         waiter.task = current;
     184            0 :         get_task_struct(current);
     185              : 
     186              :         /* if there are no active locks, wake the new lock owner(s) */
     187            0 :         if ((count & LDSEM_ACTIVE_MASK) == 0)
     188            0 :                 __ldsem_wake(sem);
     189              : 
     190            0 :         raw_spin_unlock_irq(&sem->wait_lock);
     191              : 
     192              :         /* wait to be given the lock */
     193            0 :         for (;;) {
     194            0 :                 set_current_state(TASK_UNINTERRUPTIBLE);
     195              : 
     196            0 :                 if (!smp_load_acquire(&waiter.task))
     197            0 :                         break;
     198            0 :                 if (!timeout)
     199            0 :                         break;
     200            0 :                 timeout = schedule_timeout(timeout);
     201              :         }
     202              : 
     203            0 :         __set_current_state(TASK_RUNNING);
     204              : 
     205            0 :         if (!timeout) {
     206              :                 /*
     207              :                  * Lock timed out but check if this task was just
     208              :                  * granted lock ownership - if so, pretend there
     209              :                  * was no timeout; otherwise, cleanup lock wait.
     210              :                  */
     211            0 :                 raw_spin_lock_irq(&sem->wait_lock);
     212            0 :                 if (waiter.task) {
     213            0 :                         atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
     214            0 :                         sem->wait_readers--;
     215            0 :                         list_del(&waiter.list);
     216            0 :                         raw_spin_unlock_irq(&sem->wait_lock);
     217            0 :                         put_task_struct(waiter.task);
     218            0 :                         return NULL;
     219              :                 }
     220            0 :                 raw_spin_unlock_irq(&sem->wait_lock);
     221            0 :         }
     222              : 
     223            0 :         return sem;
     224            0 : }
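
A hypothetical accounting trace for the waiter conversion above, assuming a 64-bit build with one writer already holding the lock when the reader arrives:

        /*
         *   count  = LDSEM_WRITE_BIAS                   = -0x0ffffffff
         *   count += LDSEM_READ_BIAS    (fast path)     = -0x0fffffffe   <= 0
         *   count += -LDSEM_ACTIVE_BIAS
         *            + LDSEM_WAIT_BIAS  (adjust)        = -0x1ffffffff
         *
         * The reader's speculative active unit is traded for a wait unit:
         * the active part stays 1 (the writer's) while the upper half now
         * carries two wait units, the writer's own plus this reader's.
         */
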
     225              : 
     226              : /*
     227              :  * wait for the write lock to be granted
     228              :  */
     229              : static struct ld_semaphore __sched *
     230            0 : down_write_failed(struct ld_semaphore *sem, long count, long timeout)
     231              : {
     232            0 :         struct ldsem_waiter waiter;
     233            0 :         long adjust = -LDSEM_ACTIVE_BIAS;
     234            0 :         int locked = 0;
     235              : 
     236              :         /* set up my own style of waitqueue */
     237            0 :         raw_spin_lock_irq(&sem->wait_lock);
     238              : 
     239              :         /*
     240              :          * Try to reverse the lock attempt but if the count has changed
     241              :          * so that reversing fails, check if the lock is now owned,
     242              :          * and early-out if so.
     243              :          */
     244            0 :         do {
     245            0 :                 if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust))
     246            0 :                         break;
     247            0 :                 if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) {
     248            0 :                         raw_spin_unlock_irq(&sem->wait_lock);
     249            0 :                         return sem;
     250              :                 }
     251            0 :         } while (1);
     252              : 
     253            0 :         list_add_tail(&waiter.list, &sem->write_wait);
     254              : 
     255            0 :         waiter.task = current;
     256              : 
     257            0 :         set_current_state(TASK_UNINTERRUPTIBLE);
     258            0 :         for (;;) {
     259            0 :                 if (!timeout)
     260            0 :                         break;
     261            0 :                 raw_spin_unlock_irq(&sem->wait_lock);
     262            0 :                 timeout = schedule_timeout(timeout);
     263            0 :                 raw_spin_lock_irq(&sem->wait_lock);
     264            0 :                 set_current_state(TASK_UNINTERRUPTIBLE);
     265            0 :                 locked = writer_trylock(sem);
     266            0 :                 if (locked)
     267            0 :                         break;
     268              :         }
     269              : 
     270            0 :         if (!locked)
     271            0 :                 atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
     272            0 :         list_del(&waiter.list);
     273              : 
     274              :         /*
      275              :          * In case of timeout, wake up every reader that gave the writer
      276              :          * the right of way. This prevents splitting the readers into two
      277              :          * groups: one that holds the semaphore and another that sleeps
      278              :          * (now that no writer is contending).
     279              :          */
     280            0 :         if (!locked && list_empty(&sem->write_wait))
     281            0 :                 __ldsem_wake_readers(sem);
     282              : 
     283            0 :         raw_spin_unlock_irq(&sem->wait_lock);
     284              : 
     285            0 :         __set_current_state(TASK_RUNNING);
     286              : 
     287              :         /* lock wait may have timed out */
     288            0 :         if (!locked)
     289            0 :                 return NULL;
     290            0 :         return sem;
     291            0 : }
     292              : 
     293              : 
     294              : 
     295          159 : static int __ldsem_down_read_nested(struct ld_semaphore *sem,
     296              :                                            int subclass, long timeout)
     297              : {
     298          159 :         long count;
     299              : 
     300          159 :         rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
     301              : 
     302          159 :         count = atomic_long_add_return(LDSEM_READ_BIAS, &sem->count);
     303          159 :         if (count <= 0) {
     304            0 :                 lock_contended(&sem->dep_map, _RET_IP_);
     305            0 :                 if (!down_read_failed(sem, count, timeout)) {
     306            0 :                         rwsem_release(&sem->dep_map, _RET_IP_);
     307            0 :                         return 0;
     308              :                 }
     309            0 :         }
     310          159 :         lock_acquired(&sem->dep_map, _RET_IP_);
     311          159 :         return 1;
     312          159 : }
     313              : 
     314           12 : static int __ldsem_down_write_nested(struct ld_semaphore *sem,
     315              :                                             int subclass, long timeout)
     316              : {
     317           12 :         long count;
     318              : 
     319           12 :         rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
     320              : 
     321           12 :         count = atomic_long_add_return(LDSEM_WRITE_BIAS, &sem->count);
     322           12 :         if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
     323            0 :                 lock_contended(&sem->dep_map, _RET_IP_);
     324            0 :                 if (!down_write_failed(sem, count, timeout)) {
     325            0 :                         rwsem_release(&sem->dep_map, _RET_IP_);
     326            0 :                         return 0;
     327              :                 }
     328            0 :         }
     329           12 :         lock_acquired(&sem->dep_map, _RET_IP_);
     330           12 :         return 1;
     331           12 : }
     332              : 
     333              : 
     334              : /*
     335              :  * lock for reading -- returns 1 if successful, 0 if timed out
     336              :  */
     337          159 : int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout)
     338              : {
     339          159 :         might_sleep();
     340          159 :         return __ldsem_down_read_nested(sem, 0, timeout);
     341              : }
     342              : 
     343              : /*
     344              :  * trylock for reading -- returns 1 if successful, 0 if contention
     345              :  */
     346           35 : int ldsem_down_read_trylock(struct ld_semaphore *sem)
     347              : {
     348           35 :         long count = atomic_long_read(&sem->count);
     349              : 
     350           35 :         while (count >= 0) {
     351           35 :                 if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_READ_BIAS)) {
     352           35 :                         rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
     353           35 :                         lock_acquired(&sem->dep_map, _RET_IP_);
     354           35 :                         return 1;
     355              :                 }
     356              :         }
     357            0 :         return 0;
     358           35 : }
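
A hypothetical caller sketch of the trylock/unlock pairing (kernel context assumed; this mirrors how the tty core takes a short-lived line-discipline reference, but the function name here is illustrative):

        #include <linux/tty.h>

        static int peek_at_ldisc(struct tty_struct *tty)
        {
                if (!ldsem_down_read_trylock(&tty->ldisc_sem))
                        return -EBUSY;  /* a writer is active or waiting */
                /* tty->ldisc cannot be torn down while the read lock is held */
                ldsem_up_read(&tty->ldisc_sem);
                return 0;
        }
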
     359              : 
     360              : /*
     361              :  * lock for writing -- returns 1 if successful, 0 if timed out
     362              :  */
     363           12 : int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
     364              : {
     365           12 :         might_sleep();
     366           12 :         return __ldsem_down_write_nested(sem, 0, timeout);
     367              : }
     368              : 
     369              : /*
     370              :  * release a read lock
     371              :  */
     372          195 : void ldsem_up_read(struct ld_semaphore *sem)
     373              : {
     374          195 :         long count;
     375              : 
     376          195 :         rwsem_release(&sem->dep_map, _RET_IP_);
     377              : 
     378          195 :         count = atomic_long_add_return(-LDSEM_READ_BIAS, &sem->count);
     379          195 :         if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
     380            0 :                 ldsem_wake(sem);
     381          195 : }
     382              : 
     383              : /*
     384              :  * release a write lock
     385              :  */
     386           12 : void ldsem_up_write(struct ld_semaphore *sem)
     387              : {
     388           12 :         long count;
     389              : 
     390           12 :         rwsem_release(&sem->dep_map, _RET_IP_);
     391              : 
     392           12 :         count = atomic_long_add_return(-LDSEM_WRITE_BIAS, &sem->count);
     393           12 :         if (count < 0)
     394            0 :                 ldsem_wake(sem);
     395           12 : }
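
A hypothetical write-side sketch with a timeout (kernel context assumed; the 5-second value and function name are illustrative, in the style of the tty core's ldisc locking):

        #include <linux/tty.h>

        static int claim_ldisc(struct tty_struct *tty)
        {
                /* ldsem_down_write() returns 1 on success, 0 on timeout */
                if (!ldsem_down_write(&tty->ldisc_sem, 5 * HZ))
                        return -EBUSY;
                /* ... exclusive access to the line discipline ... */
                ldsem_up_write(&tty->ldisc_sem);
                return 0;
        }
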
     396              : 
     397              : 
     398              : #ifdef CONFIG_DEBUG_LOCK_ALLOC
     399              : 
     400            0 : int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout)
     401              : {
     402            0 :         might_sleep();
     403            0 :         return __ldsem_down_read_nested(sem, subclass, timeout);
     404              : }
     405              : 
     406            0 : int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
     407              :                             long timeout)
     408              : {
     409            0 :         might_sleep();
     410            0 :         return __ldsem_down_write_nested(sem, subclass, timeout);
     411              : }
     412              : 
     413              : #endif

Generated by: LCOV version 2.0-1