Index: sys/compat/linuxkpi/common/src/linux_rcu.c
===================================================================
diff -u -N -rdb611ed6433b1559f0f519859a9127c56d83d4e6 -rf911860f0590d21c9481a78c3d55221aed770b44
--- sys/compat/linuxkpi/common/src/linux_rcu.c	(.../linux_rcu.c)	(revision db611ed6433b1559f0f519859a9127c56d83d4e6)
+++ sys/compat/linuxkpi/common/src/linux_rcu.c	(.../linux_rcu.c)	(revision f911860f0590d21c9481a78c3d55221aed770b44)
@@ -1,5 +1,6 @@
 /*-
  * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
+ * Copyright (c) 2017-2020 Hans Petter Selasky (hselasky@freebsd.org)
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -89,9 +90,9 @@
  */
 CTASSERT(offsetof(struct linux_epoch_record, epoch_record) == 0);
 
-static ck_epoch_t linux_epoch;
-static struct linux_epoch_head linux_epoch_head;
-static DPCPU_DEFINE(struct linux_epoch_record, linux_epoch_record);
+static ck_epoch_t linux_epoch[RCU_TYPE_MAX];
+static struct linux_epoch_head linux_epoch_head[RCU_TYPE_MAX];
+static DPCPU_DEFINE(struct linux_epoch_record, linux_epoch_record[RCU_TYPE_MAX]);
 
 static void linux_rcu_cleaner_func(void *, int);
 
@@ -100,23 +101,27 @@
 {
 	struct linux_epoch_head *head;
 	int i;
+	int j;
 
-	ck_epoch_init(&linux_epoch);
+	for (j = 0; j != RCU_TYPE_MAX; j++) {
+		ck_epoch_init(&linux_epoch[j]);
 
-	head = &linux_epoch_head;
+		head = &linux_epoch_head[j];
 
-	mtx_init(&head->lock, "LRCU-HEAD", NULL, MTX_DEF);
-	TASK_INIT(&head->task, 0, linux_rcu_cleaner_func, NULL);
-	STAILQ_INIT(&head->cb_head);
+		mtx_init(&head->lock, "LRCU-HEAD", NULL, MTX_DEF);
+		TASK_INIT(&head->task, 0, linux_rcu_cleaner_func, head);
+		STAILQ_INIT(&head->cb_head);
 
-	CPU_FOREACH(i) {
-		struct linux_epoch_record *record;
+		CPU_FOREACH(i) {
+			struct linux_epoch_record *record;
 
-		record = &DPCPU_ID_GET(i, linux_epoch_record);
+			record = &DPCPU_ID_GET(i, linux_epoch_record[j]);
 
-		record->cpuid = i;
-		ck_epoch_register(&linux_epoch, &record->epoch_record, NULL);
-		TAILQ_INIT(&record->ts_head);
+			record->cpuid = i;
+			ck_epoch_register(&linux_epoch[j],
+			    &record->epoch_record, NULL);
+			TAILQ_INIT(&record->ts_head);
+		}
 	}
 }
 SYSINIT(linux_rcu_runtime, SI_SUB_CPU, SI_ORDER_ANY, linux_rcu_runtime_init, NULL);
@@ -125,24 +130,27 @@
 linux_rcu_runtime_uninit(void *arg __unused)
 {
 	struct linux_epoch_head *head;
+	int j;
 
-	head = &linux_epoch_head;
+	for (j = 0; j != RCU_TYPE_MAX; j++) {
+		head = &linux_epoch_head[j];
 
-	/* destroy head lock */
-	mtx_destroy(&head->lock);
+		mtx_destroy(&head->lock);
+	}
 }
 SYSUNINIT(linux_rcu_runtime, SI_SUB_LOCK, SI_ORDER_SECOND, linux_rcu_runtime_uninit, NULL);
 
 static void
-linux_rcu_cleaner_func(void *context __unused, int pending __unused)
+linux_rcu_cleaner_func(void *context, int pending __unused)
 {
 	struct linux_epoch_head *head;
 	struct callback_head *rcu;
 	STAILQ_HEAD(, callback_head) tmp_head;
+	uintptr_t offset;
 
 	linux_set_current(curthread);
 
-	head = &linux_epoch_head;
+	head = context;
 
 	/* move current callbacks into own queue */
 	mtx_lock(&head->lock);
@@ -151,11 +159,10 @@
 	mtx_unlock(&head->lock);
 
 	/* synchronize */
-	linux_synchronize_rcu();
+	linux_synchronize_rcu(head - linux_epoch_head);
 
 	/* dispatch all callbacks, if any */
 	while ((rcu = STAILQ_FIRST(&tmp_head)) != NULL) {
-		uintptr_t offset;
 
 		STAILQ_REMOVE_HEAD(&tmp_head, entry);
 
@@ -169,11 +176,13 @@
 }
 
 void
-linux_rcu_read_lock(void)
+linux_rcu_read_lock(unsigned type)
 {
 	struct linux_epoch_record *record;
 	struct task_struct *ts;
 
+	MPASS(type < RCU_TYPE_MAX);
+
 	if (RCU_SKIP())
 		return;
 
@@ -183,7 +192,7 @@
 	 */
 	sched_pin();
 
-	record = &DPCPU_GET(linux_epoch_record);
+	record = &DPCPU_GET(linux_epoch_record[type]);
 	ts = current;
 
 	/*
@@ -199,15 +208,17 @@
 }
 
 void
-linux_rcu_read_unlock(void)
+linux_rcu_read_unlock(unsigned type)
 {
 	struct linux_epoch_record *record;
 	struct task_struct *ts;
 
+	MPASS(type < RCU_TYPE_MAX);
+
 	if (RCU_SKIP())
 		return;
 
-	record = &DPCPU_GET(linux_epoch_record);
+	record = &DPCPU_GET(linux_epoch_record[type]);
 	ts = current;
 
 	/*
@@ -281,14 +292,16 @@
 }
 
 void
-linux_synchronize_rcu(void)
+linux_synchronize_rcu(unsigned type)
 {
 	struct thread *td;
 	int was_bound;
 	int old_cpu;
 	int old_pinned;
 	u_char old_prio;
 
+	MPASS(type < RCU_TYPE_MAX);
+
 	if (RCU_SKIP())
 		return;
 
@@ -312,7 +325,7 @@
 	td->td_pinned = 0;
 	sched_bind(td, old_cpu);
 
-	ck_epoch_synchronize_wait(&linux_epoch,
+	ck_epoch_synchronize_wait(&linux_epoch[type],
 	    &linux_synchronize_rcu_cb, NULL);
 
 	/* restore CPU binding, if any */
@@ -335,24 +348,31 @@
 }
 
 void
-linux_rcu_barrier(void)
+linux_rcu_barrier(unsigned type)
 {
 	struct linux_epoch_head *head;
 
-	linux_synchronize_rcu();
+	MPASS(type < RCU_TYPE_MAX);
 
-	head = &linux_epoch_head;
+	linux_synchronize_rcu(type);
 
+	head = &linux_epoch_head[type];
+
 	/* wait for callbacks to complete */
 	taskqueue_drain(taskqueue_fast, &head->task);
 }
 
 void
-linux_call_rcu(struct rcu_head *context, rcu_callback_t func)
+linux_call_rcu(unsigned type, struct rcu_head *context, rcu_callback_t func)
 {
-	struct callback_head *rcu = (struct callback_head *)context;
-	struct linux_epoch_head *head = &linux_epoch_head;
+	struct callback_head *rcu;
+	struct linux_epoch_head *head;
 
+	MPASS(type < RCU_TYPE_MAX);
+
+	rcu = (struct callback_head *)context;
+	head = &linux_epoch_head[type];
+
 	mtx_lock(&head->lock);
 	rcu->func = func;
 	STAILQ_INSERT_TAIL(&head->cb_head, rcu, entry);
@@ -374,24 +394,24 @@
 int
 srcu_read_lock(struct srcu_struct *srcu)
 {
-	linux_rcu_read_lock();
+	linux_rcu_read_lock(RCU_TYPE_SLEEPABLE);
 	return (0);
 }
 
 void
 srcu_read_unlock(struct srcu_struct *srcu, int key __unused)
 {
-	linux_rcu_read_unlock();
+	linux_rcu_read_unlock(RCU_TYPE_SLEEPABLE);
 }
 
 void
 synchronize_srcu(struct srcu_struct *srcu)
 {
-	linux_synchronize_rcu();
+	linux_synchronize_rcu(RCU_TYPE_SLEEPABLE);
 }
 
 void
 srcu_barrier(struct srcu_struct *srcu)
 {
-	linux_rcu_barrier();
+	linux_rcu_barrier(RCU_TYPE_SLEEPABLE);
}