From: Chris Wright <chrisw@osdl.org>

Add a user_struct pointer to the sigqueue structure.  Charge sigqueue
allocation and destruction to the user_struct rather than a global pool.  This
per-user rlimit accounting obsoletes the global queued_signals accounting.

As it stands, the patch charges the sigqueue struct allocation to the queue
that it's pending on (the receiver of the signal).  So the owner of the queue
is charged for whoever writes to it (much like quota for a 777 file).

The patch started out charging the task which allocated the sigqueue struct. 
In most cases the sender and receiver are the same user (permission is
required to send a signal), so those cases are moot.  In the cases where it
isn't the same user, it's a privileged user sending a signal to another user.

It seems wrong to charge the allocation to the privileged user, when the other
user could block receipt for as long as it likes.  The flip side is that
someone else can fill your queue (the expectation being that this someone else
is privileged).  I think it's right the way it is.  The change to revert is
very small.
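
Below is a minimal userspace sketch (illustrative only, not part of the patch)
of how the new accounting is visible to applications: with the limit lowered
via setrlimit(), queueing realtime signals past RLIMIT_SIGPENDING makes
sigqueue() fail with EAGAIN.  It assumes a kernel with this patch applied and
a libc that exposes RLIMIT_SIGPENDING; the limit value and signal chosen here
are arbitrary, and any other signals already pending for the same user count
against the limit too.

	#include <errno.h>
	#include <signal.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/resource.h>
	#include <unistd.h>

	int main(void)
	{
		struct rlimit rl = { 4, 4 };	/* soft/hard pending-signal limit */
		union sigval val;
		sigset_t set;
		int i;

		val.sival_int = 0;

		/* Block SIGRTMIN so each queued instance stays pending. */
		sigemptyset(&set);
		sigaddset(&set, SIGRTMIN);
		sigprocmask(SIG_BLOCK, &set, NULL);

		if (setrlimit(RLIMIT_SIGPENDING, &rl) < 0) {
			perror("setrlimit");
			return 1;
		}

		/*
		 * Queue realtime signals to ourselves.  Once this user's
		 * pending count reaches the soft limit, the sigqueue
		 * allocation in the kernel fails and sigqueue() returns
		 * EAGAIN.
		 */
		for (i = 0; i < 8; i++) {
			if (sigqueue(getpid(), SIGRTMIN, val) < 0) {
				printf("sigqueue #%d failed: %s\n",
				       i, strerror(errno));
				return 0;
			}
			printf("sigqueue #%d queued\n", i);
		}
		return 0;
	}

With the limit set to 4, the first four calls queue and the fifth fails with
EAGAIN.  Since sender and receiver are the same task here, the charge lands on
the caller's own user_struct either way.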


---

 25-akpm/include/linux/signal.h |    1 +
 25-akpm/kernel/signal.c        |   17 +++++++++++------
 2 files changed, 12 insertions(+), 6 deletions(-)

diff -puN include/linux/signal.h~rlim-enforce-rlimits-on-queued-signals include/linux/signal.h
--- 25/include/linux/signal.h~rlim-enforce-rlimits-on-queued-signals	2004-05-23 01:42:13.946555680 -0700
+++ 25-akpm/include/linux/signal.h	2004-05-23 01:42:13.950555072 -0700
@@ -19,6 +19,7 @@ struct sigqueue {
 	spinlock_t *lock;
 	int flags;
 	siginfo_t info;
+	struct user_struct *user;
 };
 
 /* flags values. */
diff -puN kernel/signal.c~rlim-enforce-rlimits-on-queued-signals kernel/signal.c
--- 25/kernel/signal.c~rlim-enforce-rlimits-on-queued-signals	2004-05-23 01:42:13.947555528 -0700
+++ 25-akpm/kernel/signal.c	2004-05-23 01:42:13.960553552 -0700
@@ -265,17 +265,19 @@ next_signal(struct sigpending *pending, 
 	return sig;
 }
 
-struct sigqueue *__sigqueue_alloc(void)
+static struct sigqueue *__sigqueue_alloc(void)
 {
 	struct sigqueue *q = 0;
 
-	if (atomic_read(&nr_queued_signals) < max_queued_signals)
+	if (atomic_read(&current->user->sigpending) <
+			current->rlim[RLIMIT_SIGPENDING].rlim_cur)
 		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
 	if (q) {
-		atomic_inc(&nr_queued_signals);
 		INIT_LIST_HEAD(&q->list);
 		q->flags = 0;
 		q->lock = 0;
+		q->user = get_uid(current->user);
+		atomic_inc(&q->user->sigpending);
 	}
 	return(q);
 }
@@ -284,8 +286,9 @@ static inline void __sigqueue_free(struc
 {
 	if (q->flags & SIGQUEUE_PREALLOC)
 		return;
+	atomic_dec(&q->user->sigpending);
+	free_uid(q->user);
 	kmem_cache_free(sigqueue_cachep, q);
-	atomic_dec(&nr_queued_signals);
 }
 
 static void flush_sigqueue(struct sigpending *queue)
@@ -720,12 +723,14 @@ static int send_signal(int sig, struct s
 	   make sure at least one signal gets delivered and don't
 	   pass on the info struct.  */
 
-	if (atomic_read(&nr_queued_signals) < max_queued_signals)
+	if (atomic_read(&t->user->sigpending) <
+			t->rlim[RLIMIT_SIGPENDING].rlim_cur)
 		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
 
 	if (q) {
-		atomic_inc(&nr_queued_signals);
 		q->flags = 0;
+		q->user = get_uid(t->user);
+		atomic_inc(&q->user->sigpending);
 		list_add_tail(&q->list, &signals->list);
 		switch ((unsigned long) info) {
 		case 0:

_